Usage of org.apache.hadoop.mapred.RecordReader.getProgress() with code examples


This article collects Java code examples of the org.apache.hadoop.mapred.RecordReader.getProgress method and shows how RecordReader.getProgress is used in practice. The examples are taken from selected open-source projects found on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of the RecordReader.getProgress method:
Package: org.apache.hadoop.mapred
Class: RecordReader
Method: getProgress

About RecordReader.getProgress

How much of the input has the RecordReader consumed, i.e. how much of it has already been processed? The value is returned as a float between 0.0 (nothing read yet) and 1.0 (the whole input consumed).
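
Before the project snippets, here is a minimal self-contained sketch of how an old-API (org.apache.hadoop.mapred) reader might implement getProgress() from its byte position. The class name, the field layout, and the fixed 128-byte record size are illustrative assumptions, not code taken from Hadoop itself:

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.RecordReader;

// Hypothetical reader over a fixed byte range: getProgress() reports the
// fraction of the range consumed so far, clamped to [0.0f, 1.0f].
public class ByteRangeRecordReader implements RecordReader<LongWritable, Text> {
 private final long start;   // first byte of the split
 private final long end;     // byte just past the end of the split
 private long pos;           // current position

 public ByteRangeRecordReader(long start, long length) {
  this.start = start;
  this.end = start + length;
  this.pos = start;
 }

 @Override
 public boolean next(LongWritable key, Text value) throws IOException {
  if (pos >= end) {
   return false;                  // nothing left in this split
  }
  key.set(pos);
  value.set("record at " + pos);  // placeholder payload
  pos += 128;                     // pretend every record is 128 bytes long
  return true;
 }

 @Override public LongWritable createKey() { return new LongWritable(); }
 @Override public Text createValue() { return new Text(); }
 @Override public long getPos() throws IOException { return pos; }
 @Override public void close() throws IOException { }

 @Override
 public float getProgress() throws IOException {
  if (start == end) {
   return 0.0f;                   // empty split: nothing to report
  }
  return Math.min(1.0f, (pos - start) / (float) (end - start));
 }
}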

Code examples

Code example source: prestodb/presto

@Override
  public float getProgress()
      throws IOException
  {
    return delegate.getProgress();
  }
}

Code example source: apache/hive

@Override
 public float getProgress() throws IOException {
  return rr.getProgress();
 }
};

Code example source: prestodb/presto

private void updateCompletedBytes()
{
  try {
    long newCompletedBytes = (long) (totalBytes * recordReader.getProgress());
    completedBytes = min(totalBytes, max(completedBytes, newCompletedBytes));
  }
  catch (IOException ignored) {
  }
}
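
Note the clamping in this Presto snippet: the min/max pair keeps completedBytes monotonically non-decreasing and never larger than totalBytes even if the delegate's progress estimate fluctuates, and an IOException from getProgress() is deliberately ignored rather than allowed to fail the read.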

Code example source: apache/hive

@Override
public float getProgress() {
 try {
  return baseRecordReader.getProgress();
 } catch (IOException e) {
  LOG.warn("Exception in HCatRecord reader", e);
 }
 return 0.0f; // errored
}

Code example source: apache/hive

@Override
public float getProgress() throws IOException {
 if (isSorted) {
  return super.getProgress();
 }
 return recordReader.getProgress();
}

Code example source: apache/hive

@Override
public float getProgress() throws IOException {
 if (isSorted) {
  return super.getProgress();
 }
 return recordReader.getProgress();
}

Code example source: apache/drill

@Override
public float getProgress() throws IOException {
 if (isSorted) {
  return super.getProgress();
 }
 return recordReader.getProgress();
}

Code example source: apache/drill

@Override
public float getProgress() throws IOException {
 if (isSorted) {
  return super.getProgress();
 }
 return recordReader.getProgress();
}

Code example source: apache/hive

@Override
public float getProgress() throws IOException {
 if (this.getIOContext().isBinarySearching()) {
  return 0;
 } else {
  return recordReader.getProgress();
 }
}

Code example source: apache/drill

@Override
public float getProgress() throws IOException {
 if (this.getIOContext().isBinarySearching()) {
  return 0;
 } else {
  return recordReader.getProgress();
 }
}
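
In both the Hive and Drill variants above, progress is pinned to 0 while the reader is in binary-search mode, presumably because the reader is seeking back and forth through the file at that point and a position-based estimate would be misleading.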

Code example source: apache/hive

// fragment 1 of a next(...) implementation: copy the vectorized batch from the
// base reader, then refresh progress before returning
value.selectedInUse = vectorizedRowBatchBase.selectedInUse;
copyFromBase(value);
progress = baseReader.getProgress();
return true;

// fragment 2: the same pattern on the path that attaches the record-id column
value.cols[ix] = recordIdColumnVector;
progress = baseReader.getProgress();
return true;

Code example source: apache/hive

// once the reader has consumed its input, progress reports 1.0
assertEquals(1.0, reader.getProgress(), 0.00001);
reader.close();

Code example source: cwensel/cascading

public float getProgress() throws IOException
 {
 return delegate.getProgress();
 }
}

Code example source: uber/hudi

@Override
 public float getProgress() throws IOException {
  return parquetReader.getProgress();
 }
}

Code example source: uber/hudi

@Override
 public float getProgress() throws IOException {
  return this.reader.getProgress();
 }
}

Code example source: uber/hudi

@Override
 public float getProgress() throws IOException {
  return parquetReader.getProgress();
 }
}

Code example source: io.hops/hadoop-mapreduce-client-core

/**
  * Report progress as the minimum of all child RR progress.
  */
 public float getProgress() throws IOException {
  float ret = 1.0f;
  for (RecordReader<K,? extends Writable> rr : kids) {
   ret = Math.min(ret, rr.getProgress());
  }
  return ret;
 }
}
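
Reporting the minimum across all child readers is the conservative choice for a composite reader: the overall read can be no further along than its slowest child.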

Code example source: org.apache.hive.hcatalog/hive-hcatalog-core

@Override
public float getProgress() {
 try {
  return baseRecordReader.getProgress();
 } catch (IOException e) {
  LOG.warn("Exception in HCatRecord reader", e);
 }
 return 0.0f; // errored
}

Code example source: uber/hudi

@Override
 public float getProgress() throws IOException {
  return Math.min(parquetReader.getProgress(), logRecordScanner.getProgress());
 }
}

Code example source: org.apache.hadoop/hadoop-mapreduce-client-core

/**
 * Test the DBInputFormat class. The class should split the result into chunks.
 * @throws Exception
 */
@Test(timeout = 10000)
public void testDBInputFormat() throws Exception {
 JobConf configuration = new JobConf();
 setupDriver(configuration);
 
 DBInputFormat<NullDBWritable> format = new DBInputFormat<NullDBWritable>();
 format.setConf(configuration);
 format.setConf(configuration);
 DBInputFormat.DBInputSplit splitter = new DBInputFormat.DBInputSplit(1, 10);
 Reporter reporter = mock(Reporter.class);
 RecordReader<LongWritable, NullDBWritable> reader = format.getRecordReader(
   splitter, configuration, reporter);
 configuration.setInt(MRJobConfig.NUM_MAPS, 3);
 InputSplit[] lSplits = format.getSplits(configuration, 3);
 assertEquals(5, lSplits[0].getLength());
 assertEquals(3, lSplits.length);
 // test reader .Some simple tests
 assertEquals(LongWritable.class, reader.createKey().getClass());
 assertEquals(0, reader.getPos());
 assertEquals(0, reader.getProgress(), 0.001);
 reader.close();
}
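
Finally, a usage sketch showing getProgress() from the caller's side. ProgressDemo is a made-up class and the input path comes from the command line; only the TextInputFormat/RecordReader calls are actual Hadoop API:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;

// Reads every split of a text file and logs getProgress() after each record.
public class ProgressDemo {
 public static void main(String[] args) throws IOException {
  JobConf conf = new JobConf();
  FileInputFormat.setInputPaths(conf, new Path(args[0]));
  TextInputFormat format = new TextInputFormat();
  format.configure(conf);

  for (InputSplit split : format.getSplits(conf, 1)) {
   RecordReader<LongWritable, Text> reader =
     format.getRecordReader(split, conf, Reporter.NULL);
   LongWritable key = reader.createKey();
   Text value = reader.createValue();
   while (reader.next(key, value)) {
    // fraction of this split consumed so far, between 0.0f and 1.0f
    System.out.printf("pos=%d progress=%.2f%n", reader.getPos(), reader.getProgress());
   }
   reader.close();
  }
 }
}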
