Usage of the org.apache.spark.sql.Row.getAs() method, with code examples


This article collects Java code examples for the org.apache.spark.sql.Row.getAs method, showing how it is used in practice. The examples are drawn from selected open-source projects published on GitHub, Stack Overflow, Maven, and similar platforms, and should serve as a useful reference. Details of Row.getAs:
Package path: org.apache.spark.sql.Row
Class: Row
Method: getAs

About Row.getAs

Row.getAs has two overloads: <T> T getAs(int i) returns the value at ordinal i cast to type T, and <T> T getAs(String fieldName) looks the value up by field name, which requires the Row to carry a schema. The cast is unchecked, so a mismatch between T and the column's runtime type only surfaces as a ClassCastException at the call site; an unknown field name raises IllegalArgumentException, and name-based access on a schema-less Row raises UnsupportedOperationException.
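Before turning to the collected examples, here is a minimal, self-contained sketch of both overloads. The GetAsDemo class and its toy name/age schema are illustrative assumptions, not code from the projects below:

import java.util.Arrays;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;

public class GetAsDemo {
  public static void main(String[] args) {
    SparkSession spark = SparkSession.builder()
      .appName("GetAsDemo").master("local[*]").getOrCreate();

    // Name-based getAs lookups need a schema on the Row.
    StructType schema = new StructType()
      .add("name", DataTypes.StringType)
      .add("age", DataTypes.IntegerType);
    Dataset<Row> people = spark.createDataFrame(
      Arrays.asList(RowFactory.create("alice", 30)), schema);

    Row first = people.first();
    String name = first.getAs("name");     // lookup by field name
    Integer age = first.<Integer>getAs(1); // lookup by ordinal, explicit type witness
    // The cast is unchecked: e.g. Long bad = first.getAs("name");
    // compiles but throws ClassCastException at runtime.
    System.out.println(name + " is " + age);

    spark.stop();
  }
}

The explicit type-witness form row.<Long>getAs(...), used in several of the examples below, simply pins the generic parameter when the compiler cannot infer it from the assignment context.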

Code examples

Code example source: cloudera-labs/envelope

@Override
public Row reduce(Row a, Row b) {
 return new RowWithSchema(SCHEMA, name,
   a.<Boolean>getAs("result") && b.<Boolean>getAs("result"));
}

Code example source: uk.gov.gchq.gaffer/parquet-store

private boolean isVisible(final Row e, final String visibility, final Authorisations auths) throws VisibilityParseException {
    if (e.getAs(visibility) != null) {
      final VisibilityEvaluator visibilityEvaluator = new VisibilityEvaluator(auths);
      final ElementVisibility elementVisibility = new ElementVisibility((String) e.getAs(visibility));
      return visibilityEvaluator.evaluate(elementVisibility);
    } else {
      return true;
    }
  }
}

Code example source: cloudera-labs/envelope

@Override
public Row getPrecedingTime(Row row) {
 Date date;
 try {
  date = format.parse(row.<String>getAs(field.name()));
 } catch (ParseException e) {
  throw new RuntimeException(e);
 }
 String precedingDateString = format.format(new Date(date.getTime() - 1));
 
 return new RowWithSchema(getSchema(), precedingDateString);
}

Code example source: psal/jstylo

@Override
  public LabeledPoint call(Row arg0) throws Exception {
    return new LabeledPoint(arg0.getDouble(1), arg0.getAs(0));
  }
}

Code example source: cloudera-labs/envelope

@Override
public int compare(Row first, Row second) {
 Timestamp ts1 = first.getAs(field.name());
 Timestamp ts2 = second.getAs(field.name());
 
 if (ts1.before(ts2)) {
  return -1;
 } else if (ts1.after(ts2)) {
  return 1;
 } else {
  return 0;
 }
}

Code example source: edu.usc.ir/age-predictor-cli

public void call(Row event) {
    SparseVector sp = (SparseVector) event.getAs("normFeature");
    
    double prediction = linModel.predict(Vectors.sparse(sp.size(), sp.indices(), sp.values()));
    System.out.println((String) event.getAs("document"));
    System.out.println("Prediction: " + prediction);
  }
});

Code example source: cloudera-labs/envelope

@Override
public Row getPrecedingTime(Row row) {
 Date date;
 try {
  date = format.parse(row.<String>getAs(field.name()));
 } catch (ParseException e) {
  throw new RuntimeException(e);
 }
 String precedingDateString = format.format(precedingDate(date));
 
 return new RowWithSchema(getSchema(), precedingDateString);
}

Code example source: org.apache.spark/spark-mllib (identical copies ship in the spark-mllib_2.10 and spark-mllib_2.11 artifacts)

public void validatePrediction(Dataset<Row> predictionAndLabels) {
 for (Row r : predictionAndLabels.collectAsList()) {
  double prediction = r.getAs(0);
  double label = r.getAs(1);
  assertEquals(label, prediction, 1E-5);
 }
}

Code example source: org.apache.spark/spark-sql (identical copies ship in the spark-sql_2.10 and spark-sql_2.11 artifacts)

// Truncated mid-method at the source; loop bodies and the buffer lookup are reconstructed.
Seq<Integer> result = first.getAs(1);
Assert.assertEquals(bean.getB().length, result.length());
for (int i = 0; i < result.length(); i++) {
  Assert.assertEquals(bean.getB()[i], result.apply(i).intValue());
}
Seq<Integer> outputBuffer = (Seq<Integer>) first.getJavaMap(2).get("hello");
Assert.assertArrayEquals(
  bean.getC().get("hello"),
  Ints.toArray(JavaConverters.seqAsJavaListConverter(outputBuffer).asJava()));
Seq<String> d = first.getAs(3);
Assert.assertEquals(bean.getD().size(), d.length());
for (int i = 0; i < d.length(); i++) {
  Assert.assertEquals(bean.getD().get(i), d.apply(i));
}

代码示例来源:origin: cloudera-labs/envelope

@Override
public Row getPrecedingTime(Row row) {
 return new RowWithSchema(getSchema(), row.<Long>getAs(field.name()) - 1);
}

Code example source: cloudera-labs/envelope

@Override
public Row getPrecedingTime(Row row) {
 Timestamp time = row.<Timestamp>getAs(field.name());
 
 return new RowWithSchema(getSchema(), getPrecedingTimestamp(time));
}

Code example source: cloudera-labs/envelope

@Test
public void testCopyTime() {
 Row row = new RowWithSchema(schemaWithTMs, 1000L, null);
 Row copied = PlannerUtils.copyTime(row, firstTM, row, secondTM);
 
 assertEquals(copied.<Long>getAs("second"), (Long)1000L);
}

Code example source: org.apache.spark/spark-mllib

@Test
 public void testSummarizer() {
  dataset.select(col("features"));
  Row result = dataset
   .select(Summarizer.metrics("mean", "max", "count").summary(col("features")))
   .first().getStruct(0);
  Vector meanVec = result.getAs("mean");
  Vector maxVec = result.getAs("max");
  long count = result.getAs("count");

  assertEquals(2L, count);
  assertArrayEquals(new double[]{2.0, 3.0}, meanVec.toArray(), 0.0);
  assertArrayEquals(new double[]{3.0, 4.0}, maxVec.toArray(), 0.0);
 }
}

Code example source: org.apache.spark/spark-mllib (identical copies ship in the spark-mllib_2.10 and spark-mllib_2.11 artifacts)

@Test
 public void verifyLibSVMDF() {
  Dataset<Row> dataset = spark.read().format("libsvm").option("vectorType", "dense")
   .load(path);
  Assert.assertEquals("label", dataset.columns()[0]);
  Assert.assertEquals("features", dataset.columns()[1]);
  Row r = dataset.first();
  Assert.assertEquals(1.0, r.getDouble(0), 1e-15);
  DenseVector v = r.getAs(1);
  Assert.assertEquals(Vectors.dense(1.0, 0.0, 2.0, 0.0, 3.0, 0.0), v);
 }
}
