org.apache.spark.sql.Row.getStruct()方法的使用及代码示例

x33g5p2x  于2022-01-28 转载在 其他  
字(4.5k)|赞(0)|评价(0)|浏览(259)

本文整理了Java中org.apache.spark.sql.Row.getStruct方法的一些代码示例,展示了Row.getStruct的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助你。Row.getStruct方法的具体详情如下:
包路径:org.apache.spark.sql.Row
类名称:Row
方法名:getStruct

Row.getStruct介绍

暂无

代码示例

代码示例来源:origin: cloudera-labs/envelope

@Override
public Row getStruct(int arg0) {
 // Thin delegation: reads the struct at ordinal arg0 from the wrapped row.
 // NOTE(review): presumably internalRow is a backing Row this class wraps — confirm in enclosing class.
 return internalRow.getStruct(arg0);
}

代码示例来源:origin: org.apache.spark/spark-sql

/**
 * Appends the value at {@code src[fieldIdx]} into {@code dst}, recursing into
 * nested struct and array types; scalars are delegated to the single-value overload.
 */
private static void appendValue(WritableColumnVector dst, DataType t, Row src, int fieldIdx) {
 if (t instanceof StructType) {
  StructType structType = (StructType) t;
  if (src.isNullAt(fieldIdx)) {
   dst.appendStruct(true);  // null struct marker
  } else {
   dst.appendStruct(false);
   Row nested = src.getStruct(fieldIdx);
   int numFields = structType.fields().length;
   // Recurse per child column, pairing each child vector with its field type.
   for (int child = 0; child < numFields; child++) {
    appendValue(dst.getChild(child), structType.fields()[child].dataType(), nested, child);
   }
  }
 } else if (t instanceof ArrayType) {
  ArrayType arrayType = (ArrayType) t;
  if (src.isNullAt(fieldIdx)) {
   dst.appendNull();
  } else {
   List<Object> elements = src.getList(fieldIdx);
   dst.appendArray(elements.size());
   for (Object element : elements) {
    appendValue(dst.arrayData(), arrayType.elementType(), element);
   }
  }
 } else {
  // Scalar: hand off to the three-argument overload.
  appendValue(dst, t, src.get(fieldIdx));
 }
}

代码示例来源:origin: org.apache.spark/spark-sql_2.10

/**
 * Appends the value at {@code src[fieldIdx]} into {@code dst}, recursing into
 * nested struct and array types; scalars are delegated to the single-value overload.
 */
private static void appendValue(ColumnVector dst, DataType t, Row src, int fieldIdx) {
 if (t instanceof StructType) {
  StructType structType = (StructType) t;
  if (src.isNullAt(fieldIdx)) {
   dst.appendStruct(true);  // null struct marker
  } else {
   dst.appendStruct(false);
   Row nested = src.getStruct(fieldIdx);
   int numFields = structType.fields().length;
   // Recurse per child column, pairing each child vector with its field type.
   for (int child = 0; child < numFields; child++) {
    appendValue(dst.getChildColumn(child), structType.fields()[child].dataType(), nested, child);
   }
  }
 } else if (t instanceof ArrayType) {
  ArrayType arrayType = (ArrayType) t;
  if (src.isNullAt(fieldIdx)) {
   dst.appendNull();
  } else {
   List<Object> elements = src.getList(fieldIdx);
   dst.appendArray(elements.size());
   for (Object element : elements) {
    appendValue(dst.arrayData(), arrayType.elementType(), element);
   }
  }
 } else {
  // Scalar: hand off to the three-argument overload.
  appendValue(dst, t, src.get(fieldIdx));
 }
}

代码示例来源:origin: org.apache.spark/spark-sql_2.11

/**
 * Appends the value at {@code src[fieldIdx]} into {@code dst}, recursing into
 * nested struct and array types; scalars are delegated to the single-value overload.
 */
private static void appendValue(WritableColumnVector dst, DataType t, Row src, int fieldIdx) {
 if (t instanceof StructType) {
  StructType structType = (StructType) t;
  if (src.isNullAt(fieldIdx)) {
   dst.appendStruct(true);  // null struct marker
  } else {
   dst.appendStruct(false);
   Row nested = src.getStruct(fieldIdx);
   int numFields = structType.fields().length;
   // Recurse per child column, pairing each child vector with its field type.
   for (int child = 0; child < numFields; child++) {
    appendValue(dst.getChild(child), structType.fields()[child].dataType(), nested, child);
   }
  }
 } else if (t instanceof ArrayType) {
  ArrayType arrayType = (ArrayType) t;
  if (src.isNullAt(fieldIdx)) {
   dst.appendNull();
  } else {
   List<Object> elements = src.getList(fieldIdx);
   dst.appendArray(elements.size());
   for (Object element : elements) {
    appendValue(dst.arrayData(), arrayType.elementType(), element);
   }
  }
 } else {
  // Scalar: hand off to the three-argument overload.
  appendValue(dst, t, src.get(fieldIdx));
 }
}

代码示例来源:origin: Netflix/iceberg

record.put(i, convert(field.schema(), row.getStruct(i)));
 break;
case ARRAY:

代码示例来源:origin: io.snappydata/snappy-spark-sql

/**
 * Appends the value at {@code src[fieldIdx]} into {@code dst}, recursing into
 * nested struct and array types; scalars are delegated to the single-value overload.
 */
private static void appendValue(ColumnVector dst, DataType t, Row src, int fieldIdx) {
 if (t instanceof StructType) {
  StructType structType = (StructType) t;
  if (src.isNullAt(fieldIdx)) {
   dst.appendStruct(true);  // null struct marker
  } else {
   dst.appendStruct(false);
   Row nested = src.getStruct(fieldIdx);
   int numFields = structType.fields().length;
   // Recurse per child column, pairing each child vector with its field type.
   for (int child = 0; child < numFields; child++) {
    appendValue(dst.getChildColumn(child), structType.fields()[child].dataType(), nested, child);
   }
  }
 } else if (t instanceof ArrayType) {
  ArrayType arrayType = (ArrayType) t;
  if (src.isNullAt(fieldIdx)) {
   dst.appendNull();
  } else {
   List<Object> elements = src.getList(fieldIdx);
   dst.appendArray(elements.size());
   for (Object element : elements) {
    appendValue(dst.arrayData(), arrayType.elementType(), element);
   }
  }
 } else {
  // Scalar: hand off to the three-argument overload.
  appendValue(dst, t, src.get(fieldIdx));
 }
}

代码示例来源:origin: Netflix/iceberg

Types.StructType st = (Types.StructType) childType;
assertEquals(prefix + "." + fieldName, st,
  expected.getStruct(c, st.fields().size()), actual.getStruct(c));
break;

代码示例来源:origin: org.apache.spark/spark-mllib_2.11

@Test
 public void testSummarizer() {
  dataset.select(col("features"));
  // Compute mean/max/count in a single pass and unpack the resulting summary struct.
  Row summary = dataset
   .select(Summarizer.metrics("mean", "max", "count").summary(col("features")))
   .first().getStruct(0);
  Vector mean = summary.getAs("mean");
  Vector max = summary.getAs("max");
  long rows = summary.getAs("count");

  assertEquals(2L, rows);
  assertArrayEquals(new double[]{2.0, 3.0}, mean.toArray(), 0.0);
  assertArrayEquals(new double[]{3.0, 4.0}, max.toArray(), 0.0);
 }
}

代码示例来源:origin: org.apache.spark/spark-mllib

@Test
 public void testSummarizer() {
  dataset.select(col("features"));
  // Compute mean/max/count in a single pass and unpack the resulting summary struct.
  Row summary = dataset
   .select(Summarizer.metrics("mean", "max", "count").summary(col("features")))
   .first().getStruct(0);
  Vector mean = summary.getAs("mean");
  Vector max = summary.getAs("max");
  long rows = summary.getAs("count");

  assertEquals(2L, rows);
  assertArrayEquals(new double[]{2.0, 3.0}, mean.toArray(), 0.0);
  assertArrayEquals(new double[]{3.0, 4.0}, max.toArray(), 0.0);
 }
}

相关文章