Usage of org.apache.spark.sql.Row.getString() with Code Examples


This article collects Java code examples for the org.apache.spark.sql.Row.getString method, showing how Row.getString is used in practice. The snippets are drawn from selected projects on GitHub, Stack Overflow, Maven, and similar platforms, and should make a useful reference. Details of the method:
Package: org.apache.spark.sql
Class: Row
Method: getString

About Row.getString

Row.getString(int i) returns the value at position i of the row as a String. The position is 0-based. A ClassCastException is thrown if the column at that position does not hold a string; isNullAt(i) can be used to test for SQL NULL before reading (see the Apache Phoenix example below).
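
Before the collected snippets, here is a minimal, self-contained sketch of the method in action. It is not taken from any of the projects below; the class name, schema, and data are invented for illustration:

import java.util.Arrays;
import java.util.List;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;

public class GetStringDemo {
  public static void main(String[] args) {
    SparkSession spark = SparkSession.builder()
        .master("local[*]").appName("GetStringDemo").getOrCreate();

    // Two rows: a nullable string column and a non-null int column.
    List<Row> rows = Arrays.asList(
        RowFactory.create("Michael", 29),
        RowFactory.create(null, 28));
    StructType schema = DataTypes.createStructType(Arrays.asList(
        DataTypes.createStructField("name", DataTypes.StringType, true),
        DataTypes.createStructField("age", DataTypes.IntegerType, false)));

    Dataset<Row> df = spark.createDataFrame(rows, schema);
    for (Row row : df.collectAsList()) {
      // Guard with isNullAt before reading a possibly-NULL column.
      String name = row.isNullAt(0) ? "<null>" : row.getString(0);
      System.out.println(name + " / " + row.getInt(1));
    }
    spark.stop();
  }
}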

Code Examples

Code example source: databricks/learning-spark

public String call(Row row) {
  // Column 0 holds the tweet text.
  return row.getString(0);
}});
System.out.println(topTweetText.collect());

Code example source: mahmoudparsian/data-algorithms-book

@Override
public String call(Row row) {
  // Read the name (column 0) and city (column 1) as strings.
  return "Name: " + row.getString(0) + ", City: " + row.getString(1);
}
}).collect();

Code example source: mahmoudparsian/data-algorithms-book

@Override
public String call(Row row) {
  return "Name: " + row.getString(0);
}
}).collect();

Code example source: apache/phoenix

@Override
public String getString(int columnIndex) throws SQLException {
  // JDBC column indexes are 1-based; Spark Row positions are 0-based.
  wasNull = getCurrentRow().isNullAt(columnIndex - 1);
  return wasNull ? null : getCurrentRow().getString(columnIndex - 1);
}

Code example source: apache/phoenix

@Override
public URL getURL(int columnIndex) throws SQLException {
  try {
    // Read the column as a String, then parse it into a java.net.URL.
    return new URL(getCurrentRow().getString(columnIndex - 1));
  } catch (MalformedURLException e) {
    throw new SQLExceptionInfo.Builder(SQLExceptionCode.MALFORMED_URL).setRootCause(e)
        .build().buildException();
  }
}
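
Both Phoenix methods show the same adapter pattern: the JDBC ResultSet API is 1-based while Spark Row positions are 0-based, so the wrapper subtracts one before delegating, and getString checks isNullAt first so that JDBC's wasNull contract is honored.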

Code example source: org.apache.spark/spark-sql_2.11 (the identical test also appears in org.apache.spark/spark-sql)

@Test
public void testUDF() {
  // The UDF is declared to return StringType, so its output column
  // can be read back with getString(0).
  UserDefinedFunction foo = udf((Integer i, String s) -> i.toString() + s, DataTypes.StringType);
  Dataset<Row> df = spark.table("testData").select(foo.apply(col("key"), col("value")));
  String[] result = df.collectAsList().stream().map(row -> row.getString(0))
    .toArray(String[]::new);
  String[] expected = spark.table("testData").collectAsList().stream()
    .map(row -> row.get(0).toString() + row.getString(1)).toArray(String[]::new);
  Assert.assertArrayEquals(expected, result);
}

Code example source: org.datavec/datavec-spark_2.11 (the identical snippet also appears in org.datavec/datavec-spark)

@Override
public String call(Row row) throws Exception {
  return row.getString(0);
}
});

Code example source: phuonglh/vn.vitk

@Override
public String call(Row row) throws Exception {
  // columnIndex is a field captured from the enclosing class, so the
  // same function can extract any string column by position.
  return row.getString(columnIndex);
}
}
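
A function like this is handy when the column position is only known at run time; a typical call site would be something like df.toJavaRDD().map(new StringColumnGetter(1)) — the class name here is hypothetical, chosen for illustration.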

Code example source: mahmoudparsian/data-algorithms-book

// Query a Parquet-backed temp table, then read each Row's single
// string column (name) via getString(0).
teenagers2 = sqlContext.sql("SELECT name FROM parquetFile WHERE age >= 13 AND age <= 19");
teenagerNames = teenagers2.toJavaRDD()
    .map((Row row) -> "Name: " + row.getString(0)).collect();

Code example source: org.apache.spark/spark-sql (the identical test also appears in org.apache.spark/spark-sql_2.10 and org.apache.spark/spark-sql_2.11)

@Test
public void dataFrameRDDOperations() {
  List<Person> personList = new ArrayList<>(2);
  Person person1 = new Person();
  person1.setName("Michael");
  person1.setAge(29);
  personList.add(person1);
  Person person2 = new Person();
  person2.setName("Yin");
  person2.setAge(28);
  personList.add(person2);
  JavaRDD<Row> rowRDD = jsc.parallelize(personList).map(
    person -> RowFactory.create(person.getName(), person.getAge()));
  List<StructField> fields = new ArrayList<>(2);
  fields.add(DataTypes.createStructField("name", DataTypes.StringType, false));
  fields.add(DataTypes.createStructField("age", DataTypes.IntegerType, false));
  StructType schema = DataTypes.createStructType(fields);
  Dataset<Row> df = spark.createDataFrame(rowRDD, schema);
  df.createOrReplaceTempView("people");
  // Field 0 is StringType, so getString(0) is safe; field 1 is an
  // integer and is read with the untyped get(1).
  List<String> actual = spark.sql("SELECT * FROM people").toJavaRDD()
    .map(row -> row.getString(0) + "_" + row.get(1)).collect();
  List<String> expected = new ArrayList<>(2);
  expected.add("Michael_29");
  expected.add("Yin_28");
  Assert.assertEquals(expected, actual);
}

Code example source: Erik-ly/SprakProject

public Iterable<Tuple2<Long, Long>> call(Tuple2<String, Row> tuple)
    throws Exception {
  Row row = tuple._2;
  // Column 8 holds a comma-separated list of category IDs.
  String orderCategoryIds = row.getString(8);
  String[] orderCategoryIdsSplited = orderCategoryIds.split(",");

  List<Tuple2<Long, Long>> list = new ArrayList<Tuple2<Long, Long>>();
  for (String orderCategoryId : orderCategoryIdsSplited) {
    // Emit one (categoryId, 1) pair per ID.
    list.add(new Tuple2<Long, Long>(Long.valueOf(orderCategoryId), 1L));
  }
  return list;
}
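
This is a flatMap-style pair function: each emitted (categoryId, 1L) tuple can then be aggregated, for example with reduceByKey, to count orders per category; the aggregation step is an assumption about the surrounding code, which is not shown here.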

Code example source: org.apache.spark/spark-sql (the identical assertions also appear in org.apache.spark/spark-sql_2.11 and org.apache.spark/spark-sql_2.10)

Assert.assertEquals(booleanValue, simpleRow.getBoolean(14));
Assert.assertEquals(booleanValue, simpleRow.get(14));
// For a string column, the typed getString(i) and the untyped get(i)
// return the same String value.
Assert.assertEquals(stringValue, simpleRow.getString(15));
Assert.assertEquals(stringValue, simpleRow.get(15));
Assert.assertEquals(binaryValue, simpleRow.get(16));
