Usage of the org.apache.spark.sql.Row.getLong() method, with code examples

Reposted by x33g5p2x on 2022-01-28, category: Other

This article collects Java code examples for the org.apache.spark.sql.Row.getLong method and shows how Row.getLong is used in practice. The examples are drawn from selected projects on GitHub, Stack Overflow, Maven, and similar platforms, so they should make solid references. Details of the Row.getLong method:

Package path: org.apache.spark.sql.Row
Class name: Row
Method name: getLong

About Row.getLong

Row.getLong(i) returns the value at ordinal i as a primitive long. Per the Spark Javadoc, it throws a ClassCastException when the column is not of long type and a NullPointerException when the value is null, so callers typically check Row.isNullAt(i) first, as several of the examples below do.
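
Before the real-world examples, here is a minimal usage sketch (this snippet is ours, not from the projects below; the class name GetLongDemo is made up):

    import org.apache.spark.sql.Row;
    import org.apache.spark.sql.RowFactory;

    public class GetLongDemo {
        public static void main(String[] args) {
            // A two-column row: (word, count). RowFactory.create builds a generic Row.
            Row row = RowFactory.create("spark", 42L);

            // Ordinals are 0-based; getLong returns the value as a primitive long.
            long count = row.getLong(1);              // 42

            // Guard against nulls: getLong on a null cell throws NullPointerException.
            Row nullable = RowFactory.create("spark", null);
            long safe = nullable.isNullAt(1) ? 0L : nullable.getLong(1);

            System.out.println(count + " " + safe);   // prints: 42 0
        }
    }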

Code examples

Code example source: apache/phoenix

    @Override
    public long getLong(int columnIndex) throws SQLException {
        // JDBC column indexes are 1-based while Spark Row ordinals are 0-based,
        // hence the columnIndex - 1 offset. Null is checked first because
        // Row.getLong on a null cell would throw a NullPointerException.
        wasNull = getCurrentRow().isNullAt(columnIndex - 1);
        return wasNull ? 0 : getCurrentRow().getLong(columnIndex - 1);
    }

Code example source: org.apache.spark/spark-sql (identical in spark-sql_2.11)

    @Override
    public Object evaluate(Row buffer) {
        if (buffer.isNullAt(0)) {
            // If the bufferSum is still null, this function has not received
            // any input rows, so the result is null.
            return null;
        } else {
            // Otherwise compute the "special average": sum / count + 100.0.
            return buffer.getDouble(0) / buffer.getLong(1) + 100.0;
        }
    }

Code example source: org.apache.spark/spark-sql (identical in spark-sql_2.10 and spark-sql_2.11)

    @Test
    public void testSampleBy() {
        Dataset<Row> df = spark.range(0, 100, 1, 2).select(col("id").mod(3).as("key"));
        Dataset<Row> sampled = df.stat().sampleBy("key", ImmutableMap.of(0, 0.1, 1, 0.2), 0L);
        List<Row> actual = sampled.groupBy("key").count().orderBy("key").collectAsList();
        Assert.assertEquals(0, actual.get(0).getLong(0));
        Assert.assertTrue(0 <= actual.get(0).getLong(1) && actual.get(0).getLong(1) <= 8);
        Assert.assertEquals(1, actual.get(1).getLong(0));
        Assert.assertTrue(2 <= actual.get(1).getLong(1) && actual.get(1).getLong(1) <= 13);
    }
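
Among ids 0..99, key 0 occurs 34 times and key 1 occurs 33 times, so with sampling fractions 0.1 and 0.2 the expected sampled counts are roughly 3.4 and 6.6; the assertions allow the wider ranges 0..8 and 2..13 because seeded sampling is only approximately proportional. Note that count() produces a LongType column, which is why the test reads it with getLong(1).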

Code example source: org.apache.spark/spark-sql_2.11 (identical in spark-sql and spark-sql_2.10)

    @Test
    public void testJsonRDDToDataFrame() {
        // This is a test for the deprecated API in SPARK-15615.
        JavaRDD<String> rdd = jsc.parallelize(Arrays.asList("{\"a\": 2}"));
        Dataset<Row> df = spark.read().json(rdd);
        Assert.assertEquals(1L, df.count());
        Assert.assertEquals(2L, df.collectAsList().get(0).getLong(0));
    }
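
The single JSON record {"a": 2} yields one row, and Spark's JSON schema inference maps integral numbers to LongType by default, which is why the value is read back with getLong(0) rather than getInt(0).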

Code example source: org.apache.spark/spark-sql_2.11 (identical in spark-sql and spark-sql_2.10)

    @Test
    public void testCrosstab() {
        Dataset<Row> df = spark.table("testData2");
        Dataset<Row> crosstab = df.stat().crosstab("a", "b");
        String[] columnNames = crosstab.schema().fieldNames();
        Assert.assertEquals("a_b", columnNames[0]);
        Assert.assertEquals("1", columnNames[1]);
        Assert.assertEquals("2", columnNames[2]);
        List<Row> rows = crosstab.collectAsList();
        rows.sort(crosstabRowComparator);
        Integer count = 1;
        for (Row row : rows) {
            Assert.assertEquals(row.get(0).toString(), count.toString());
            Assert.assertEquals(1L, row.getLong(1));
            Assert.assertEquals(1L, row.getLong(2));
            count++;
        }
    }
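
crosstab("a", "b") returns one row per distinct value of a (labeled by the a_b column) and one count column per distinct value of b. In Spark's testData2 fixture each (a, b) combination occurs exactly once, so every cell count read via getLong is 1.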

Code example source: apache/metron

    // Excerpt: the tail of a chained call that sums a column and reads the
    // result back from the first (and only) Row of the aggregate.
        .agg(sum("value"))
        .head()
        .getLong(0);
    LOG.debug("{} profile measurement(s) written to HBase", count);
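
The excerpt begins mid-chain, so some context is elided. A hedged reconstruction of the full statement (the dataset variable measurements and the long count assignment are assumptions; only the chained calls and the log line come from the excerpt):

    // Hypothetical context around the excerpt above; `measurements` is assumed.
    long count = measurements
        .agg(sum("value"))   // org.apache.spark.sql.functions.sum
        .head()              // the single Row holding the aggregate result
        .getLong(0);         // read the summed value as a primitive long
    LOG.debug("{} profile measurement(s) written to HBase", count);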

Code example source: org.apache.spark/spark-sql_2.11 (identical in spark-sql)

    @Override
    public void merge(MutableAggregationBuffer buffer1, Row buffer2) {
        // buffer1 and buffer2 have the same structure.
        // Only update buffer1 when the incoming buffer2's sum value is not null.
        if (!buffer2.isNullAt(0)) {
            if (buffer1.isNullAt(0)) {
                // If the buffered sum (the intermediate result) is still null,
                // take the incoming buffer's values as-is.
                buffer1.update(0, buffer2.getDouble(0));
                buffer1.update(1, buffer2.getLong(1));
            } else {
                // Otherwise accumulate both the bufferSum and the bufferCount.
                buffer1.update(0, buffer1.getDouble(0) + buffer2.getDouble(0));
                buffer1.update(1, buffer1.getLong(1) + buffer2.getLong(1));
            }
        }
    }
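
For context, merge above and evaluate shown earlier fit Spark's UserDefinedAggregateFunction API (org.apache.spark.sql.expressions; deprecated since Spark 3.0 in favor of Aggregator). Below is a minimal sketch of a plausible enclosing class; the class name MyDoubleAvg, the schemas, initialize, and update are our assumptions, modeled on the snippets:

    import org.apache.spark.sql.Row;
    import org.apache.spark.sql.expressions.MutableAggregationBuffer;
    import org.apache.spark.sql.expressions.UserDefinedAggregateFunction;
    import org.apache.spark.sql.types.DataType;
    import org.apache.spark.sql.types.DataTypes;
    import org.apache.spark.sql.types.StructType;

    // Hypothetical enclosing class, assembled around the evaluate/merge snippets.
    public class MyDoubleAvg extends UserDefinedAggregateFunction {

        // One double-typed input column.
        @Override public StructType inputSchema() {
            return new StructType().add("value", DataTypes.DoubleType);
        }

        // Buffer slots: 0 = running sum (nullable), 1 = running count.
        // These are the slots read with getDouble(0) / getLong(1) above.
        @Override public StructType bufferSchema() {
            return new StructType()
                .add("sum", DataTypes.DoubleType)
                .add("count", DataTypes.LongType);
        }

        @Override public DataType dataType() { return DataTypes.DoubleType; }
        @Override public boolean deterministic() { return true; }

        @Override public void initialize(MutableAggregationBuffer buffer) {
            buffer.update(0, null); // a null sum means "no input seen yet"
            buffer.update(1, 0L);
        }

        @Override public void update(MutableAggregationBuffer buffer, Row input) {
            if (!input.isNullAt(0)) {
                double v = input.getDouble(0);
                buffer.update(0, buffer.isNullAt(0) ? v : buffer.getDouble(0) + v);
                buffer.update(1, buffer.getLong(1) + 1L);
            }
        }

        @Override public void merge(MutableAggregationBuffer buffer1, Row buffer2) {
            // As in the snippet above.
            if (!buffer2.isNullAt(0)) {
                if (buffer1.isNullAt(0)) {
                    buffer1.update(0, buffer2.getDouble(0));
                    buffer1.update(1, buffer2.getLong(1));
                } else {
                    buffer1.update(0, buffer1.getDouble(0) + buffer2.getDouble(0));
                    buffer1.update(1, buffer1.getLong(1) + buffer2.getLong(1));
                }
            }
        }

        @Override public Object evaluate(Row buffer) {
            // As in the snippet shown earlier: the average plus 100.0.
            return buffer.isNullAt(0) ? null : buffer.getDouble(0) / buffer.getLong(1) + 100.0;
        }
    }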

Code example source: org.apache.spark/spark-sql (identical in spark-sql_2.10 and spark-sql_2.11)

    @SuppressWarnings("unchecked")
    @Test
    public void udf4Test() {
        spark.udf().register("inc", (Long i) -> i + 1, DataTypes.LongType);
        spark.range(10).toDF("x").createOrReplaceTempView("tmp");
        // Tests that identical Java UDF calls are recognized as semantically equal (see SPARK-9435).
        List<Row> results = spark.sql("SELECT inc(x) FROM tmp GROUP BY inc(x)").collectAsList();
        Assert.assertEquals(10, results.size());
        long sum = 0;
        for (Row result : results) {
            sum += result.getLong(0);
        }
        Assert.assertEquals(55, sum);
    }
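
The final assertion holds because inc maps the inputs 0..9 to 1..10, GROUP BY inc(x) leaves one row per distinct result, and 1 + 2 + ... + 10 = 55. Since the UDF is registered with DataTypes.LongType, the result column is read back with getLong(0).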

Code example source: org.apache.spark/spark-sql_2.11 (identical in spark-sql_2.10)

    // Excerpt: each typed accessor agrees with the untyped get() at the same ordinal.
    Assert.assertEquals(intValue, simpleRow.getInt(5));
    Assert.assertEquals(intValue, simpleRow.get(5));
    Assert.assertEquals(longValue, simpleRow.getLong(6));
    Assert.assertEquals(longValue, simpleRow.get(6));
    Assert.assertEquals(longValue, simpleRow.getLong(7));
    Assert.assertEquals(longValue, simpleRow.get(7));
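
This excerpt from Spark's Java Row test suite presumably exercises a row built from both a primitive long and a boxed java.lang.Long in columns 6 and 7; getLong returns the primitive in both cases, while the untyped get returns the boxed value that compares equal to it.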
