Usage of the org.apache.spark.sql.Row.size() Method, with Code Examples


This article collects Java code examples of the org.apache.spark.sql.Row.size method and shows how Row.size is used in practice. The examples are drawn from selected open-source projects found on GitHub, Stack Overflow, Maven, and similar platforms, and should make useful references. Details of the Row.size method:
Package: org.apache.spark.sql
Class: Row
Method: size

About Row.size

Returns the number of fields in the row; it is equivalent to Row.length().
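
A minimal sketch of the method in use, building a row with RowFactory (the field values here are purely illustrative):

import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;

// Build a three-field row; the values are illustrative.
Row row = RowFactory.create("alice", 42, 3.14);
System.out.println(row.size());    // 3 -- the number of fields in the row
System.out.println(row.length());  // 3 -- length() is equivalent to size()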

Code Examples

Code example from: apache/kylin (the same code is also published in the Maven artifact org.apache.kylin/kylin-engine-spark)

// Convert every field of a Row to its String representation, preserving nulls.
@Override
public String[] call(Row row) throws Exception {
  String[] result = new String[row.size()];
  for (int i = 0; i < row.size(); i++) {
    final Object o = row.get(i);
    if (o != null) {
      result[i] = o.toString();
    } else {
      result[i] = null;
    }
  }
  return result;
}
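
For context, here is a sketch of how such a callback is typically wired into a Spark job. The rowRdd variable and the surrounding Function wrapper are assumptions for illustration, not part of the original Kylin source:

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.Row;

// Map each Row of a (hypothetical) JavaRDD<Row> named rowRdd to a String[].
JavaRDD<String[]> stringRows = rowRdd.map(new Function<Row, String[]>() {
  @Override
  public String[] call(Row row) throws Exception {
    String[] result = new String[row.size()];
    for (int i = 0; i < row.size(); i++) {
      Object o = row.get(i);
      result[i] = (o != null) ? o.toString() : null;
    }
    return result;
  }
});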

Code example from: cloudera-labs/envelope

// A wrapper Row implementation that delegates size() to the Row it wraps.
@Override
public int size() {
  return internalRow.size();
}

Code example from: cloudera-labs/envelope

// Copy every field of the mutation Row into the values list.
for (int fieldIndex = 0; fieldIndex < mutation.size(); fieldIndex++) {
  values.add(mutation.get(fieldIndex));
}

Code example from: cloudera-labs/envelope

// Nest all 'from' rows into the single 'into' row as one extra trailing field.
@Override
public Row call(Tuple2<Iterable<Row>, Iterable<Row>> cogrouped) throws Exception {
  // There should only be one 'into' record per key
  Row intoRow = cogrouped._1().iterator().next();
  Row[] fromRows = Iterables.toArray(cogrouped._2(), Row.class);
  int intoRowNumFields = intoRow.size();
  Object[] nestedValues = new Object[intoRowNumFields + 1];
  for (int i = 0; i < intoRowNumFields; i++) {
    nestedValues[i] = intoRow.get(i);
  }
  // The extra slot holds the array of co-grouped 'from' rows.
  nestedValues[intoRowNumFields] = fromRows;
  return RowFactory.create(nestedValues);
}
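
Here size() supplies the arity of the incoming row, so the new row can be allocated with exactly one extra slot for the nested array.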

Code example from: org.datavec/datavec-spark_2.11 (the org.datavec/datavec-spark artifact contains identical code)

/**
 * Convert a list of rows to a matrix
 * @param rows the list of rows to convert
 * @return the converted matrix
 */
public static INDArray toMatrix(List<Row> rows) {
  INDArray ret = Nd4j.create(rows.size(), rows.get(0).size());
  for (int i = 0; i < ret.rows(); i++) {
    for (int j = 0; j < ret.columns(); j++) {
      ret.putScalar(i, j, rows.get(i).getDouble(j));
    }
  }
  return ret;
}
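
A brief usage sketch of toMatrix. Note that rows.get(i).getDouble(j) requires every column to hold a numeric value; the rows below are illustrative:

import java.util.Arrays;
import java.util.List;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.nd4j.linalg.api.ndarray.INDArray;

// Two rows of two double columns each produce a 2x2 matrix.
List<Row> rows = Arrays.asList(
    RowFactory.create(1.0, 2.0),
    RowFactory.create(3.0, 4.0));
INDArray matrix = toMatrix(rows);  // shape [2, 2]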

Code example from: org.datavec/datavec-spark_2.11 (the org.datavec/datavec-spark artifact contains identical code)

// Convert a Row into a list of Writables, validating its arity against the schema first.
@Override
public List<Writable> call(Row v1) throws Exception {
  List<Writable> ret = new ArrayList<>();
  if (v1.size() != schema.numColumns())
    throw new IllegalArgumentException("Invalid number of columns for row " + v1.size()
            + " should have matched schema columns " + schema.numColumns());
  for (int i = 0; i < v1.size(); i++) {
    if (v1.get(i) == null)
      throw new IllegalStateException("Row item " + i + " is null");
    switch (schema.getType(i)) {
      case Double:
        ret.add(new DoubleWritable(v1.getDouble(i)));
        break;
      case Float:
        ret.add(new FloatWritable(v1.getFloat(i)));
        break;
      case Integer:
        ret.add(new IntWritable(v1.getInt(i)));
        break;
      case Long:
        ret.add(new LongWritable(v1.getLong(i)));
        break;
      default:
        throw new IllegalStateException("Illegal type");
    }
  }
  return ret;
}
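
Checking size() against schema.numColumns() up front turns a schema mismatch into an immediate, descriptive failure rather than a stray index or cast error during the per-field conversion.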

Code example from: uk.gov.gchq.gaffer/parquet-store

// Pre-size the aggregated output row using the arity of the first input Row.
LOGGER.trace("First Row object to be aggregated: {}", v1);
LOGGER.trace("Second Row object to be aggregated: {}", v2);
ArrayList<Object> outputRow = new ArrayList<>(v1.size());
if (isEntity) {
  for (final String col : columnToPaths.get(ParquetStoreConstants.VERTEX)) {
    // ... (snippet truncated at the source)
  }
}

Code example from: org.datavec/datavec-spark_2.11 (the org.datavec/datavec-spark artifact contains identical code)

// Walk every field of the Row, dispatching on the schema's declared column type.
for (int i = 0; i < row.size(); i++) {
  switch (schema.getType(i)) {
    case Double:
      // ... (snippet truncated at the source)
  }
}

Code example from: cloudera-labs/envelope

// Validate that the input Row has exactly as many fields as the expected struct type.
StructField[] fields = ((StructType) type).fields();
if (fields.length != input.size()) {
  throw new RuntimeException(String.format("Type[%s] - Invalid size of input Row: %s", type, item));
}
