Usage of the org.apache.spark.rdd.RDD.count() method, with code examples

This article collects Java code examples of the org.apache.spark.rdd.RDD.count method and shows how RDD.count is used in practice. The examples were extracted from selected open-source projects found on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as a useful reference. Details of the RDD.count method follow:
Package path: org.apache.spark.rdd.RDD
Class name: RDD
Method name: count

About RDD.count

count() is a Spark action that returns the number of elements in the RDD as a long. Because it is an action, calling it triggers a job that evaluates the entire RDD.
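
A minimal, self-contained sketch of the basic usage (local-mode Spark assumed; the class and variable names here are illustrative, not taken from the projects quoted below):

import java.util.Arrays;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

public class CountExample {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("count-example").setMaster("local[*]");
        JavaSparkContext sc = new JavaSparkContext(conf);
        try {
            // Turn a small in-memory collection into an RDD.
            JavaRDD<Integer> numbers = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5));
            // count() is an action: it runs a job and returns the element count as a long.
            long n = numbers.count();           // 5
            // JavaRDD wraps the Scala org.apache.spark.rdd.RDD used in the snippets below.
            long sameN = numbers.rdd().count(); // also 5
            System.out.println(n + " " + sameN);
        } finally {
            sc.stop();
        }
    }
}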

Code examples

Code example source: ddf-project/DDF

@Test
public void testSampling() throws DDFException {
    DDF ddf2 = manager.sql2ddf("select * from airline", false);
    // getRandomSample(25) draws exactly 25 rows.
    Assert.assertEquals(25, ddf2.VIEWS.getRandomSample(25).size());
    // Sample roughly half of the rows without replacement, with seed 1.
    SparkDDF sampleDDF = (SparkDDF) ddf2.VIEWS.getRandomSample(0.5, false, 1);
    Assert.assertEquals(25, ddf2.VIEWS.getRandomSample(25).size());
    // count() is an action: it materializes the sampled RDD so its size can be checked.
    Assert.assertTrue(sampleDDF.getRDD(Object[].class).count() > 10);
}
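
The DDF getRandomSample call above presumably delegates to Spark-style sampling. With a plain RDD, the same size check can be written directly against Spark's sample(withReplacement, fraction, seed) (a sketch; numbers is any JavaRDD, as in the first example):

// Sample roughly half the elements without replacement, using a fixed seed.
JavaRDD<Integer> sampled = numbers.sample(false, 0.5, 1L);
// count() materializes the sample; its exact size varies around fraction * numbers.count().
long sampleSize = sampled.count();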

Code example source: Stratio/deep-spark

/**
 * Test filter EQ.
 *
 * @param <W> the type parameter
 */
@Test(alwaysRun = true, dependsOnGroups = { "FunctionalTests" })
protected <W> void testFilterEQ() {
    DeepSparkContext context = getDeepSparkContext();
    try {
        // Build an RDD that only reads rows whose "id" equals "TestDataSet".
        Filter filter = new Filter("id", FilterType.EQ, "TestDataSet");
        Filter[] filters = new Filter[] { filter };
        ExtractorConfig<W> inputConfigEntity2 = getFilterConfig(filters);
        RDD<W> inputRDDEntity2 = context.createRDD(inputConfigEntity2);
        // Exactly one row is expected to match the EQ filter.
        assertEquals(inputRDDEntity2.count(), 1);
    } finally {
        context.stop();
    }
}

Code example source: Stratio/deep-spark

/**
 * Test filter NEQ.
 *
 * @param <W> the type parameter
 */
@Test
protected <W> void testFilterNEQ() {
    DeepSparkContext context = getDeepSparkContext();
    try {
        // Build an RDD that excludes rows whose "id" equals "TestDataSet".
        Filter filter = new Filter("id", FilterType.NEQ, "TestDataSet");
        Filter[] filters = new Filter[] { filter };
        ExtractorConfig<W> inputConfigEntity = getFilterConfig(filters);
        RDD<W> inputRDDEntity = context.createRDD(inputConfigEntity);
        // Every row carries that id, so the NEQ filter leaves nothing to count.
        assertEquals(inputRDDEntity.count(), 0);
    } finally {
        context.stop();
    }
}

Code example source: Stratio/deep-spark

/**
 * Tests whether the extractor can read from the data store.
 *
 * @param <W> the type parameter
 */
@Test(alwaysRun = true, groups = { "FunctionalTests" })
public <W> void testRead() {
    DeepSparkContext context = getDeepSparkContext();
    try {
        ExtractorConfig<W> inputConfigEntity = getReadExtractorConfig(databaseExtractorName, tableRead,
                inputEntity);
        RDD<W> inputRDDEntity = context.createRDD(inputConfigEntity);
        // count() triggers a full read of the data store.
        Assert.assertEquals(READ_COUNT_EXPECTED, inputRDDEntity.count());
        // first() is also an action; each call below launches a separate job.
        if (inputConfigEntity.getEntityClass().isAssignableFrom(Cells.class)) {
            Assert.assertEquals(((Cells) inputRDDEntity.first()).getCellByName("message").getCellValue(),
                    READ_FIELD_EXPECTED);
            Assert.assertEquals(((Cells) inputRDDEntity.first()).getCellByName("id").getCellValue(),
                    ID_MESSAGE_EXPECTED);
        } else {
            Assert.assertEquals(((MessageTestEntity) inputRDDEntity.first()).getMessage(), READ_FIELD_EXPECTED);
            Assert.assertEquals(((MessageTestEntity) inputRDDEntity.first()).getId(), ID_MESSAGE_EXPECTED);
        }
    } finally {
        context.stop();
    }
}
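
Note that count() and each first() call above are separate Spark actions, so the extractor re-reads the data store several times. A small variation (same names as in the test, shown only as a sketch) caches the RDD once so the assertions reuse a single materialized read:

// Cache the RDD so count() and the first() assertions share one evaluation.
RDD<W> inputRDDEntity = context.createRDD(inputConfigEntity).cache();
Assert.assertEquals(READ_COUNT_EXPECTED, inputRDDEntity.count());
W first = inputRDDEntity.first();   // evaluated once, then reused by the assertions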

Code example source: Stratio/deep-spark

// (truncated excerpt)
assertEquals(1, inputRDDEntity.count());
// cache() marks the output RDD for in-memory storage; the following count()
// materializes it and returns the number of words.
Assert.assertEquals(WORD_COUNT_SPECTED.longValue(), ((Long) outputRDDEntity.cache().count()).longValue());

Code example source: Stratio/deep-spark

// (truncated excerpt from inside a try/finally block)
assertEquals(1, inputRDDEntity.count());
Assert.assertEquals(((Long) outputRDDEntity.cache().count()).longValue(),
        WORD_COUNT_SPECTED.longValue());
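
Both truncated excerpts above follow the same pattern: transform an input RDD, cache() the result, and use count() to assert the expected number of words. A self-contained sketch of that pattern (Spark 2.x Java API assumed, where flatMap returns an Iterator; the input data and names are made up for illustration):

import java.util.Arrays;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

public class CacheCountExample {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("cache-count").setMaster("local[*]");
        JavaSparkContext sc = new JavaSparkContext(conf);
        try {
            JavaRDD<String> lines = sc.parallelize(Arrays.asList("hello world", "hello spark"));
            // Split each line into words; flatMap flattens the per-line arrays.
            JavaRDD<String> words = lines.flatMap(line -> Arrays.asList(line.split(" ")).iterator());
            // cache() marks the RDD for reuse; count() materializes it and returns its size.
            long total = words.cache().count();
            System.out.println(total);  // 4
        } finally {
            sc.stop();
        }
    }
}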

Code example source: ai.h2o/sparkling-water-ml

training.cache();
// count() forces evaluation of the cached training data; if the Skip policy for
// missing values has filtered out every row, fail fast with a clear error.
if (training.count() == 0 &&
    MissingValuesHandling.Skip == _parms._missing_values_handling) {
  throw new H2OIllegalArgumentException("No rows left in the dataset after filtering out rows with missing values. Ignore columns with many NAs or set missing_values_handling to 'MeanImputation'.");
}
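
One caveat worth noting: count() always scans the whole RDD, so when only an emptiness check is needed, RDD.isEmpty() (available in Spark 1.3 and later) is typically cheaper because it can stop after finding a single element. A small sketch, assuming a JavaSparkContext named sc as in the earlier examples and a hypothetical input path:

JavaRDD<String> rows = sc.textFile("data.txt");  // "data.txt" is a made-up path
if (rows.isEmpty()) {                // cheap: can stop at the first element found
    throw new IllegalStateException("No rows left in the dataset.");
}
long n = rows.count();               // full scan: every element is counted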
