This article collects Java code examples of the org.apache.spark.sql.Row.getBoolean method and shows how Row.getBoolean is used in practice. The examples are drawn from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they serve as a practical reference. The details of the Row.getBoolean method are as follows:
Package path: org.apache.spark.sql.Row
Class name: Row
Method name: getBoolean
Description: none available
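Before the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of how getBoolean is typically called on a Row obtained from a Dataset; the schema, column names, and sample data are illustrative assumptions.

import java.util.Arrays;
import java.util.List;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;

public class RowGetBooleanSketch {

  public static void main(String[] args) {
    SparkSession spark = SparkSession.builder()
        .master("local[*]")
        .appName("Row.getBoolean sketch")
        .getOrCreate();

    // Illustrative schema: a string column and a nullable boolean column.
    StructType schema = new StructType()
        .add("name", DataTypes.StringType)
        .add("active", DataTypes.BooleanType);
    List<Row> data = Arrays.asList(
        RowFactory.create("a", true),
        RowFactory.create("b", null));

    Dataset<Row> df = spark.createDataFrame(data, schema);

    for (Row row : df.collectAsList()) {
      // getBoolean(i) returns the primitive boolean at ordinal i. As in the
      // examples below, a null value is guarded with isNullAt first, since the
      // primitive getters are not meant to be called on null values.
      boolean active = !row.isNullAt(1) && row.getBoolean(1);
      System.out.println(row.getString(0) + " -> " + active);
    }

    spark.stop();
  }
}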
Code example source: apache/phoenix

@Override
public boolean getBoolean(int columnIndex) throws SQLException {
  // JDBC column indexes are 1-based, while Spark Row ordinals are 0-based,
  // hence the columnIndex - 1. A SQL NULL is reported as false, with wasNull
  // recording whether the underlying value was null.
  wasNull = getCurrentRow().isNullAt(columnIndex - 1);
  return wasNull ? false : getCurrentRow().getBoolean(columnIndex - 1);
}
Code example source: cloudera-labs/envelope

@Override
public boolean getBoolean(int arg0) {
  // Delegates to the underlying row.
  return internalRow.getBoolean(arg0);
}
Code example source: cloudera-labs/envelope

private boolean evaluateStepByValueDecision(Set<Step> steps) {
  Optional<Step> optionalStep = StepUtils.getStepForName(stepByValueStepName, steps);

  if (!optionalStep.isPresent()) {
    throw new RuntimeException("Unknown decision step's value step: " + stepByValueStepName);
  }

  if (!(optionalStep.get() instanceof DataStep)) {
    throw new RuntimeException("Decision step's value step is not a data step: " + optionalStep.get().getName());
  }

  Dataset<Row> valueDataset = ((DataStep)optionalStep.get()).getData();

  if (valueDataset.schema().fields().length != 1 ||
      valueDataset.schema().fields()[0].dataType() != DataTypes.BooleanType ||
      valueDataset.count() != 1) {
    throw new RuntimeException("Decision step's value step must contain a single boolean column with a single row");
  }

  // The dataset has been validated to hold a single boolean column and a single
  // row, so the decision is the boolean at ordinal 0 of the only row.
  boolean decision = valueDataset.collectAsList().get(0).getBoolean(0);

  return decision;
}
Code example source: org.apache.spark/spark-sql_2.10 (the same test fragment appears verbatim in the spark-sql_2.11 and spark-sql artifacts)
Assert.assertEquals(doubleValue, simpleRow.get(11));
Assert.assertEquals(decimalValue, simpleRow.get(12));
Assert.assertEquals(booleanValue, simpleRow.getBoolean(13));
Assert.assertEquals(booleanValue, simpleRow.get(13));
Assert.assertEquals(booleanValue, simpleRow.getBoolean(14));
Assert.assertEquals(booleanValue, simpleRow.get(14));
Assert.assertEquals(stringValue, simpleRow.getString(15));
Code example source: cloudera-labs/envelope
boolean decision = decisionDataset.collectAsList().get(0).getBoolean(1);
Code example source: cerner/bunsen (the same method appears verbatim in the com.cerner.bunsen/bunsen-core artifact)
/**
 * Returns the latest versions of a given set of value sets.
 *
 * @param uris a set of URIs for which to retrieve the latest versions, or null to load them all
 * @param includeExperimental whether to include value sets marked as experimental
 * @return a map of value set URIs to the latest versions for them.
 */
public Map<String,String> getLatestVersions(final Set<String> uris, boolean includeExperimental) {

  // Reduce by the value set URI to return only the latest version
  // per value set. Spark's provided max aggregation function
  // only works on numeric types, so we jump into RDDs and perform
  // the reduce by hand.
  JavaRDD<UrlAndVersion> members = this.valueSets.select("url", "version", "experimental")
      .toJavaRDD()
      .filter(row -> (uris == null || uris.contains(row.getString(0)))
          && (includeExperimental || row.isNullAt(2) || !row.getBoolean(2)))
      .mapToPair(row -> new Tuple2<>(row.getString(0), row.getString(1)))
      .reduceByKey((leftVersion, rightVersion) ->
          leftVersion.compareTo(rightVersion) > 0 ? leftVersion : rightVersion)
      .map(tuple -> new UrlAndVersion(tuple._1, tuple._2));

  return spark.createDataset(members.rdd(), URL_AND_VERSION_ENCODER)
      .collectAsList()
      .stream()
      .collect(Collectors.toMap(UrlAndVersion::getUrl,
          UrlAndVersion::getVersion));
}
Code example source: com.cerner.bunsen/bunsen-core (the same method appears verbatim in the cerner/bunsen repository)
/**
 * Returns the latest versions of a given set of concept maps.
 *
 * @param urls a set of URLs to retrieve the latest version for, or null to load them all.
 * @param includeExperimental flag to include concept maps marked as experimental
 *
 * @return a map of concept map URLs to the latest version for them.
 */
public Map<String,String> getLatestVersions(final Set<String> urls,
    boolean includeExperimental) {

  // Reduce by the concept map URI to return only the latest version
  // per concept map. Spark's provided max aggregation function
  // only works on numeric types, so we jump into RDDs and perform
  // the reduce by hand.
  JavaRDD<UrlAndVersion> changes = this.conceptMaps.select(col("url"),
      col("version"),
      col("experimental"))
      .toJavaRDD()
      .filter(row -> (urls == null || urls.contains(row.getString(0)))
          && (includeExperimental || row.isNullAt(2) || !row.getBoolean(2)))
      .mapToPair(row -> new Tuple2<>(row.getString(0), row.getString(1)))
      .reduceByKey((leftVersion, rightVersion) ->
          leftVersion.compareTo(rightVersion) > 0 ? leftVersion : rightVersion)
      .map(tuple -> new UrlAndVersion(tuple._1, tuple._2));

  return this.spark.createDataset(changes.rdd(), URL_AND_VERSION_ENCODER)
      .collectAsList()
      .stream()
      .collect(Collectors.toMap(UrlAndVersion::getUrl,
          UrlAndVersion::getVersion));
}
Code example source: cloudera-labs/envelope
// Fragment of a switch over column types, copying a boolean field from the
// planned row into the Kudu row:
  break;
case BOOL:
  kuduRow.addBoolean(fieldName, plan.getBoolean(fieldIndex));
  break;
case BINARY:
Code example source: Netflix/iceberg
  return row.getBoolean(ord);
case INTEGER:
  return row.getInt(ord);