This article collects Java code examples of the org.apache.spark.sql.Row.getStruct method, showing how Row.getStruct is used in practice. The examples come from selected projects on GitHub, Stack Overflow, Maven, and similar platforms, so they have real reference value. Details of the Row.getStruct method:
Package path: org.apache.spark.sql.Row
Class name: Row
Method name: getStruct
Description: none provided
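Since the official description is empty, here is a minimal sketch of what getStruct does: it reads a field whose value is itself a struct and returns that field as a nested Row. The class name, schema, and values below are invented for illustration.

import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;

public class GetStructSketch {
  public static void main(String[] args) {
    // hypothetical data: field 0 holds a nested struct (name, age)
    Row person = RowFactory.create("Alice", 30);
    Row row = RowFactory.create(person, "extra");

    // getStruct(i) returns the struct-typed field i as a Row
    Row nested = row.getStruct(0);
    System.out.println(nested.getString(0)); // Alice
    System.out.println(nested.getInt(1));    // 30
  }
}

Note that InternalRow also has a two-argument getStruct(ordinal, numFields) variant, which appears in one of the Netflix/iceberg examples below; the single-argument form sketched here belongs to the public Row API.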
Code example source: cloudera-labs/envelope
@Override
public Row getStruct(int arg0) {
  return internalRow.getStruct(arg0);
}
Code example source: org.apache.spark/spark-sql (the identical code also appears in org.apache.spark/spark-sql_2.11)
private static void appendValue(WritableColumnVector dst, DataType t, Row src, int fieldIdx) {
  if (t instanceof ArrayType) {
    ArrayType at = (ArrayType) t;
    if (src.isNullAt(fieldIdx)) {
      dst.appendNull();
    } else {
      // append each array element into the vector's array data
      List<Object> values = src.getList(fieldIdx);
      dst.appendArray(values.size());
      for (Object o : values) {
        appendValue(dst.arrayData(), at.elementType(), o);
      }
    }
  } else if (t instanceof StructType) {
    StructType st = (StructType) t;
    if (src.isNullAt(fieldIdx)) {
      dst.appendStruct(true);
    } else {
      dst.appendStruct(false);
      // getStruct pulls the nested struct out as a Row; each of its
      // fields is then appended recursively into the child vectors
      Row c = src.getStruct(fieldIdx);
      for (int i = 0; i < st.fields().length; i++) {
        appendValue(dst.getChild(i), st.fields()[i].dataType(), c, i);
      }
    }
  } else {
    appendValue(dst, t, src.get(fieldIdx));
  }
}
Code example source: org.apache.spark/spark-sql_2.10 (the identical code also appears in io.snappydata/snappy-spark-sql)
// same logic as the example above; this older API takes a ColumnVector
// and uses getChildColumn instead of getChild
private static void appendValue(ColumnVector dst, DataType t, Row src, int fieldIdx) {
  if (t instanceof ArrayType) {
    ArrayType at = (ArrayType) t;
    if (src.isNullAt(fieldIdx)) {
      dst.appendNull();
    } else {
      List<Object> values = src.getList(fieldIdx);
      dst.appendArray(values.size());
      for (Object o : values) {
        appendValue(dst.arrayData(), at.elementType(), o);
      }
    }
  } else if (t instanceof StructType) {
    StructType st = (StructType) t;
    if (src.isNullAt(fieldIdx)) {
      dst.appendStruct(true);
    } else {
      dst.appendStruct(false);
      Row c = src.getStruct(fieldIdx);
      for (int i = 0; i < st.fields().length; i++) {
        appendValue(dst.getChildColumn(i), st.fields()[i].dataType(), c, i);
      }
    }
  } else {
    appendValue(dst, t, src.get(fieldIdx));
  }
}
Code example source: Netflix/iceberg
// truncated fragment of a switch over field types: the struct case
// converts the nested Row before storing it in the output record
record.put(i, convert(field.schema(), row.getStruct(i)));
break;
case ARRAY:
Code example source: Netflix/iceberg
// expected here appears to be an InternalRow, whose getStruct takes the
// ordinal and the field count, while actual is a Row needing only the ordinal
Types.StructType st = (Types.StructType) childType;
assertEquals(prefix + "." + fieldName, st,
    expected.getStruct(c, st.fields().size()), actual.getStruct(c));
break;
Code example source: org.apache.spark/spark-mllib_2.11 (the identical test also appears in org.apache.spark/spark-mllib)
@Test
public void testSummarizer() {
  dataset.select(col("features"));
  // summary(...) yields a single struct-typed column, so getStruct(0)
  // unwraps the metrics struct from the first result row
  Row result = dataset
    .select(Summarizer.metrics("mean", "max", "count").summary(col("features")))
    .first().getStruct(0);
  Vector meanVec = result.getAs("mean");
  Vector maxVec = result.getAs("max");
  long count = result.getAs("count");
  assertEquals(2L, count);
  assertArrayEquals(new double[]{2.0, 3.0}, meanVec.toArray(), 0.0);
  assertArrayEquals(new double[]{3.0, 4.0}, maxVec.toArray(), 0.0);
}