Usage of the org.apache.spark.sql.Row.getDouble() method, with code examples

This article collects Java code examples for the org.apache.spark.sql.Row.getDouble method and shows how it is used in practice. The snippets were extracted from selected open-source projects published on platforms such as GitHub, Stack Overflow, and Maven, and should serve as a practical reference. Details of Row.getDouble:

Package: org.apache.spark.sql
Class: Row
Method: getDouble

About Row.getDouble

Row.getDouble(int i) returns the value at position i of the row as a primitive double. It throws a ClassCastException when the value at that position is not a double, and a NullPointerException when the value is null, which is why callers typically check Row.isNullAt(i) first.
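
As a quick orientation before the collected snippets, here is a minimal, self-contained sketch of calling Row.getDouble on a query result. The local SparkSession configuration and the literal SELECT query are illustrative assumptions, not taken from the projects below.

import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class RowGetDoubleSketch {
  public static void main(String[] args) {
    // Local session for illustration only (an assumption, not from the snippets below).
    SparkSession spark = SparkSession.builder()
        .appName("RowGetDoubleSketch")
        .master("local[*]")
        .getOrCreate();

    // A one-row, one-column DataFrame holding a double.
    Row row = spark.sql("SELECT CAST(1.5 AS DOUBLE) AS price").first();

    // Guard with isNullAt: calling getDouble on a null cell throws NullPointerException.
    if (!row.isNullAt(0)) {
      double price = row.getDouble(0);   // 1.5
      System.out.println("price = " + price);
    }

    spark.stop();
  }
}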

Code examples

Example source: org.apache.spark/spark-sql_2.11 (the same snippet also appears in spark-sql)

@Override public Object evaluate(Row buffer) {
  if (buffer.isNullAt(0)) {
    // If the buffer value is still null, we return null.
    return null;
  } else {
    // Otherwise, the intermediate sum is the final result.
    return buffer.getDouble(0);
  }
}

Example source: apache/phoenix

@Override
public double getDouble(int columnIndex) throws SQLException {
  wasNull = getCurrentRow().isNullAt(columnIndex-1);
  return wasNull ? 0 : getCurrentRow().getDouble(columnIndex-1);
}
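
Note that this Phoenix example adapts a JDBC-style ResultSet, whose column indexes are 1-based, to Spark's 0-based Row (hence the columnIndex-1), and it follows the JDBC convention of returning 0 and setting wasNull when the column is SQL NULL.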

Example source: org.apache.spark/spark-sql_2.11 (the same snippet also appears in spark-sql)

@Override public Object evaluate(Row buffer) {
  if (buffer.isNullAt(0)) {
    // If the bufferSum is still null, we return null because this function
    // has not received any input rows.
    return null;
  } else {
    // Otherwise, we calculate the special average value.
    return buffer.getDouble(0) / buffer.getLong(1) + 100.0;
  }
}

Example source: org.apache.spark/spark-sql_2.10 (the same snippet also appears in spark-sql_2.11 and spark-sql)

@Test
public void pivot() {
 Dataset<Row> df = spark.table("courseSales");
 List<Row> actual = df.groupBy("year")
  .pivot("course", Arrays.asList("dotNET", "Java"))
  .agg(sum("earnings")).orderBy("year").collectAsList();
 Assert.assertEquals(2012, actual.get(0).getInt(0));
 Assert.assertEquals(15000.0, actual.get(0).getDouble(1), 0.01);
 Assert.assertEquals(20000.0, actual.get(0).getDouble(2), 0.01);
 Assert.assertEquals(2013, actual.get(1).getInt(0));
 Assert.assertEquals(48000.0, actual.get(1).getDouble(1), 0.01);
 Assert.assertEquals(30000.0, actual.get(1).getDouble(2), 0.01);
}
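
Here getDouble reads the pivoted sum("earnings") columns; the final 0.01 argument to assertEquals is the tolerance for the floating-point comparison.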

Example source: org.apache.spark/spark-sql (the same snippet also appears in spark-sql_2.11)

@Override public void update(MutableAggregationBuffer buffer, Row input) {
 // This input Row only has a single column storing the input value in Double.
 // We only update the buffer when the input value is not null.
 if (!input.isNullAt(0)) {
  if (buffer.isNullAt(0)) {
   // If the buffer value (the intermediate result of the sum) is still null,
   // we set the input value to the buffer.
   buffer.update(0, input.getDouble(0));
  } else {
   // Otherwise, we add the input value to the buffer value.
   Double newValue = input.getDouble(0) + buffer.getDouble(0);
   buffer.update(0, newValue);
  }
 }
}

Example source: org.apache.spark/spark-sql (the same snippet also appears in spark-sql_2.11)

@Override public void merge(MutableAggregationBuffer buffer1, Row buffer2) {
  // buffer1 and buffer2 have the same structure.
  // We only update buffer1 when the input buffer2's value is not null.
  if (!buffer2.isNullAt(0)) {
    if (buffer1.isNullAt(0)) {
      // If the buffer value (the intermediate result of the sum) is still null,
      // we set it to the input buffer's value.
      buffer1.update(0, buffer2.getDouble(0));
    } else {
      // Otherwise, we add the input buffer's value (buffer2) to the mutable
      // buffer's value (buffer1).
      Double newValue = buffer2.getDouble(0) + buffer1.getDouble(0);
      buffer1.update(0, newValue);
    }
  }
}
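
The update and merge snippets directly above, together with the evaluate snippet at the top of this page, belong to one aggregate function. For context, here is a minimal sketch of how they fit together as a UserDefinedAggregateFunction; the class name MyDoubleSum and the column names in the schemas are assumptions inferred from the snippets, not taken verbatim from the source project.

import org.apache.spark.sql.Row;
import org.apache.spark.sql.expressions.MutableAggregationBuffer;
import org.apache.spark.sql.expressions.UserDefinedAggregateFunction;
import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;

// A minimal sum UDAF built around Row.getDouble (a sketch; names are assumed).
public class MyDoubleSum extends UserDefinedAggregateFunction {

  // The UDAF takes a single double input column.
  @Override public StructType inputSchema() {
    return new StructType().add("inputDouble", DataTypes.DoubleType);
  }

  // The aggregation buffer holds one nullable double: the running sum.
  @Override public StructType bufferSchema() {
    return new StructType().add("bufferDouble", DataTypes.DoubleType);
  }

  @Override public DataType dataType() {
    return DataTypes.DoubleType;
  }

  @Override public boolean deterministic() {
    return true;
  }

  // Start with a null sum so that an all-null input yields null.
  @Override public void initialize(MutableAggregationBuffer buffer) {
    buffer.update(0, null);
  }

  @Override public void update(MutableAggregationBuffer buffer, Row input) {
    if (!input.isNullAt(0)) {
      if (buffer.isNullAt(0)) {
        buffer.update(0, input.getDouble(0));
      } else {
        buffer.update(0, input.getDouble(0) + buffer.getDouble(0));
      }
    }
  }

  @Override public void merge(MutableAggregationBuffer buffer1, Row buffer2) {
    if (!buffer2.isNullAt(0)) {
      if (buffer1.isNullAt(0)) {
        buffer1.update(0, buffer2.getDouble(0));
      } else {
        buffer1.update(0, buffer2.getDouble(0) + buffer1.getDouble(0));
      }
    }
  }

  @Override public Object evaluate(Row buffer) {
    return buffer.isNullAt(0) ? null : buffer.getDouble(0);
  }
}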

Example source: org.apache.spark/spark-sql_2.11 (the same snippet also appears in spark-sql)

@Override public void update(MutableAggregationBuffer buffer, Row input) {
 // This input Row only has a single column storing the input value in Double.
 // We only update the buffer when the input value is not null.
 if (!input.isNullAt(0)) {
  // If the buffer value (the intermediate result of the sum) is still null,
  // we set the input value to the buffer and set the bufferCount to 1.
  if (buffer.isNullAt(0)) {
   buffer.update(0, input.getDouble(0));
   buffer.update(1, 1L);
  } else {
   // Otherwise, update the bufferSum and increment bufferCount.
   Double newValue = input.getDouble(0) + buffer.getDouble(0);
   buffer.update(0, newValue);
   buffer.update(1, buffer.getLong(1) + 1L);
  }
 }
}

Example source: org.apache.spark/spark-sql_2.11 (the same snippet also appears in spark-sql)

@Override public void merge(MutableAggregationBuffer buffer1, Row buffer2) {
  // buffer1 and buffer2 have the same structure.
  // We only update buffer1 when the input buffer2's sum value is not null.
  if (!buffer2.isNullAt(0)) {
    if (buffer1.isNullAt(0)) {
      // If the buffer value (the intermediate result of the sum) is still null,
      // we set it to the input buffer's value.
      buffer1.update(0, buffer2.getDouble(0));
      buffer1.update(1, buffer2.getLong(1));
    } else {
      // Otherwise, we update the bufferSum and bufferCount.
      Double newValue = buffer2.getDouble(0) + buffer1.getDouble(0);
      buffer1.update(0, newValue);
      buffer1.update(1, buffer1.getLong(1) + buffer2.getLong(1));
    }
  }
}
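
These two snippets maintain a two-column buffer, a double sum at position 0 and a long count at position 1, which the evaluate method shown earlier turns into the final result via buffer.getDouble(0) / buffer.getLong(1) + 100.0.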

Example source: org.apache.spark/spark-sql_2.11 (the same snippet also appears in spark-sql)

@SuppressWarnings("unchecked")
@Test
public void udf1Test() {
 spark.range(1, 10).toDF("value").createOrReplaceTempView("df");
 spark.udf().registerJavaUDAF("myDoubleAvg", MyDoubleAvg.class.getName());
 Row result = spark.sql("SELECT myDoubleAvg(value) as my_avg from df").head();
 Assert.assertEquals(105.0, result.getDouble(0), 1.0e-6);
}
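
The expected value follows from the function's definition: spark.range(1, 10) produces the values 1 through 9, whose average is 5.0, and evaluate adds the 100.0 offset, giving 105.0.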

Example source: org.apache.spark/spark-sql_2.10 (the same snippet also appears in spark-sql)

// (fragment of a larger bean/schema test)
Row first = df.select("a", "b", "c", "d", "e").first();
Assert.assertEquals(bean.getA(), first.getDouble(0), 0.0);
