Usage of org.apache.spark.sql.Row.getLong() with Code Examples

This article collects Java code examples of the org.apache.spark.sql.Row.getLong method and shows how it is used in practice. The examples come mainly from GitHub, Stack Overflow, Maven, and similar platforms, extracted from selected projects, and should serve as useful references. Details of the Row.getLong method:
Package path: org.apache.spark.sql.Row
Class name: Row
Method name: getLong

About Row.getLong

Returns the value at position i as a primitive long. It throws a ClassCastException when the value at that position is not a long, and a NullPointerException when the value is null.
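
A minimal sketch of typical usage (the row contents here are illustrative, not taken from the examples below):

import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;

// Field 0 is a String, field 1 a long.
Row row = RowFactory.create("alice", 42L);
long value = row.getLong(1);   // 42
// row.getLong(0) would throw ClassCastException (field 0 is not a long);
// calling getLong on a null field throws NullPointerException.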

Code Examples

Example source: apache/phoenix

@Override
public long getLong(int columnIndex) throws SQLException {
  wasNull = getCurrentRow().isNullAt(columnIndex - 1);
  return wasNull ? 0 : getCurrentRow().getLong(columnIndex - 1);
}
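
Note that this JDBC adapter converts the 1-based column index of java.sql.ResultSet to Row's 0-based index (columnIndex - 1), and checks isNullAt first: Row.getLong throws a NullPointerException on a null cell, while the JDBC getLong contract is to return 0 for SQL NULL.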

Example source: org.apache.spark/spark-sql (an identical example appears in the spark-sql_2.11 artifact)

@Override public Object evaluate(Row buffer) {
  if (buffer.isNullAt(0)) {
    // If the buffered sum is still null, return null: this function
    // has not received any input rows yet.
    return null;
  } else {
    // Otherwise, compute the "special" average: sum / count + 100.0.
    return buffer.getDouble(0) / buffer.getLong(1) + 100.0;
  }
}
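
This evaluate override, like the merge override shown further below, matches Spark's UserDefinedAggregateFunction API: in this custom-average example, slot 0 of the aggregation buffer holds the running sum (read with getDouble) and slot 1 the running count (read with getLong).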

Example source: org.apache.spark/spark-sql (identical copies appear in the spark-sql_2.10 and spark-sql_2.11 artifacts)

@Test
public void testSampleBy() {
  Dataset<Row> df = spark.range(0, 100, 1, 2).select(col("id").mod(3).as("key"));
  Dataset<Row> sampled = df.stat().sampleBy("key", ImmutableMap.of(0, 0.1, 1, 0.2), 0L);
  List<Row> actual = sampled.groupBy("key").count().orderBy("key").collectAsList();
  Assert.assertEquals(0, actual.get(0).getLong(0));
  Assert.assertTrue(0 <= actual.get(0).getLong(1) && actual.get(0).getLong(1) <= 8);
  Assert.assertEquals(1, actual.get(1).getLong(0));
  Assert.assertTrue(2 <= actual.get(1).getLong(1) && actual.get(1).getLong(1) <= 13);
}
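
sampleBy performs stratified sampling: keys 0 and 1 are kept with fractions 0.1 and 0.2 respectively, while key 2 (absent from the fractions map) is dropped entirely. Because sampling is probabilistic, the test only bounds the counts read back with getLong rather than pinning exact values.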

Example source: org.apache.spark/spark-sql_2.11 (identical copies appear in the spark-sql and spark-sql_2.10 artifacts)

@Test
public void testJsonRDDToDataFrame() {
  // This is a test for the deprecated API in SPARK-15615.
  JavaRDD<String> rdd = jsc.parallelize(Arrays.asList("{\"a\": 2}"));
  Dataset<Row> df = spark.read().json(rdd);
  Assert.assertEquals(1L, df.count());
  Assert.assertEquals(2L, df.collectAsList().get(0).getLong(0));
}
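
As the comment notes, reading JSON from a JavaRDD<String> is deprecated (SPARK-15615). A hedged sketch of the non-deprecated equivalent, reusing the spark session from the test above:

import java.util.Arrays;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;

// Wrap the JSON strings in a Dataset<String> instead of a JavaRDD<String>.
Dataset<String> jsonData =
    spark.createDataset(Arrays.asList("{\"a\": 2}"), Encoders.STRING());
Dataset<Row> df = spark.read().json(jsonData);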

Example source: org.apache.spark/spark-sql_2.11 (identical copies appear in the spark-sql and spark-sql_2.10 artifacts)

@Test
public void testCrosstab() {
  Dataset<Row> df = spark.table("testData2");
  Dataset<Row> crosstab = df.stat().crosstab("a", "b");
  String[] columnNames = crosstab.schema().fieldNames();
  Assert.assertEquals("a_b", columnNames[0]);
  Assert.assertEquals("1", columnNames[1]);
  Assert.assertEquals("2", columnNames[2]);
  List<Row> rows = crosstab.collectAsList();
  rows.sort(crosstabRowComparator);
  Integer count = 1;
  for (Row row : rows) {
    Assert.assertEquals(row.get(0).toString(), count.toString());
    Assert.assertEquals(1L, row.getLong(1));
    Assert.assertEquals(1L, row.getLong(2));
    count++;
  }
}
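
crosstab("a", "b") builds a contingency table: the first column ("a_b") carries the distinct values of a, the remaining columns are the distinct values of b, and every cell is a long count, which is why the cells are read back with getLong.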

Example source: apache/metron

// The receiver of .agg() is elided in this excerpt; `measurements` is an
// assumed name for the upstream Dataset, and `long count =` is implied by
// the LOG.debug call below.
long count = measurements
    .agg(sum("value"))
    .head()
    .getLong(0);
LOG.debug("{} profile measurement(s) written to HBase", count);
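
agg(...).head().getLong(0) is a common idiom for collapsing a Dataset to a single aggregate value on the driver: agg produces a one-row Dataset, head() collects that row, and getLong(0) extracts the value.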

Example source: org.apache.spark/spark-sql_2.11 (an identical example appears in the spark-sql artifact)

@Override public void merge(MutableAggregationBuffer buffer1, Row buffer2) {
  // buffer1 and buffer2 have the same structure.
  // We only update buffer1 when the input buffer2's sum value is not null.
  if (!buffer2.isNullAt(0)) {
    if (buffer1.isNullAt(0)) {
      // If the buffer value (the intermediate sum) is still null,
      // set it to the input buffer's values.
      buffer1.update(0, buffer2.getDouble(0));
      buffer1.update(1, buffer2.getLong(1));
    } else {
      // Otherwise, update the running sum and count.
      double newValue = buffer2.getDouble(0) + buffer1.getDouble(0);
      buffer1.update(0, newValue);
      buffer1.update(1, buffer1.getLong(1) + buffer2.getLong(1));
    }
  }
}
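
A hedged sketch of how the surrounding UserDefinedAggregateFunction might be registered and called; the class name MyDoubleAvg and the table/column names here are assumptions, not taken from the excerpts:

// Assumes MyDoubleAvg extends UserDefinedAggregateFunction and contains the
// evaluate/merge methods shown above (sum in slot 0, count in slot 1).
spark.udf().register("myDoubleAvg", new MyDoubleAvg());
Row result = spark.sql("SELECT myDoubleAvg(value) FROM records").head();
double specialAvg = result.getDouble(0);  // average + 100.0, per evaluate()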

Example source: org.apache.spark/spark-sql (identical copies appear in the spark-sql_2.10 and spark-sql_2.11 artifacts)

@SuppressWarnings("unchecked")
@Test
public void udf4Test() {
  spark.udf().register("inc", (Long i) -> i + 1, DataTypes.LongType);
  spark.range(10).toDF("x").createOrReplaceTempView("tmp");
  // This tests that Java UDFs in GROUP BY and SELECT are treated as
  // semantically equal (see SPARK-9435).
  List<Row> results = spark.sql("SELECT inc(x) FROM tmp GROUP BY inc(x)").collectAsList();
  Assert.assertEquals(10, results.size());
  long sum = 0;
  for (Row result : results) {
    sum += result.getLong(0);
  }
  Assert.assertEquals(55, sum);
}
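
Here inc maps each x in 0..9 to x + 1, so the grouped results are the ten distinct values 1 through 10, whose sum is 55. Because inc was registered with DataTypes.LongType, each result cell is read with getLong.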

Example source: org.apache.spark/spark-sql_2.11 (an identical excerpt appears in the spark-sql_2.10 artifact)

Assert.assertEquals(intValue, simpleRow.getInt(5));
Assert.assertEquals(intValue, simpleRow.get(5));
Assert.assertEquals(longValue, simpleRow.getLong(6));
Assert.assertEquals(longValue, simpleRow.get(6));
Assert.assertEquals(longValue, simpleRow.getLong(7));
Assert.assertEquals(longValue, simpleRow.get(7));
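
This excerpt pairs each typed accessor with the generic one: get(i) returns the value as a boxed Object, while getLong(i) returns a primitive long, so both assertions compare against the same longValue.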
