org.apache.hadoop.mapreduce.Counter.getValue(): usage and code examples


This article collects Java code examples for the org.apache.hadoop.mapreduce.Counter.getValue() method and shows how it is used in practice. The examples come from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of Counter.getValue():
Package: org.apache.hadoop.mapreduce
Class: Counter
Method: getValue

Counter.getValue overview

From the Javadoc: "What is the current value of this counter?" The method returns the counter's current value as a long.
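Before the project examples, here is a minimal, self-contained sketch of the typical pattern, assuming the standard Hadoop 2.x mapreduce API (the CounterExample class and MyCounters enum are illustrative, not taken from any of the projects below): a mapper increments a custom enum counter, and the driver reads the aggregated value back with getValue() once the job completes.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;

public class CounterExample {

  // Illustrative counter enum; any enum constant can serve as a counter key.
  enum MyCounters { RECORDS_SEEN }

  public static class CountingMapper
      extends Mapper<LongWritable, Text, Text, LongWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
        throws IOException, InterruptedException {
      // Increment the custom counter once per input record.
      context.getCounter(MyCounters.RECORDS_SEEN).increment(1);
    }
  }

  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "counter-example");
    job.setJarByClass(CounterExample.class);
    job.setMapperClass(CountingMapper.class);
    // Input/output paths and formats omitted for brevity.
    job.waitForCompletion(true);

    // After completion, the framework has aggregated counters across all tasks;
    // getValue() on the driver side returns the job-wide total.
    Counter counter = job.getCounters().findCounter(MyCounters.RECORDS_SEEN);
    System.out.println("Records seen: " + counter.getValue());
  }
}

Note that inside a running task, getValue() only reflects that task's local count; the job-wide total is only available from Job.getCounters() after aggregation.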

Code examples

Code example source: apache/incubator-gobblin

private static long getRecordCountFromCounter(TaskAttemptContext context, Enum<?> counterName) {
  try {
    // getCounter is invoked reflectively, presumably for compatibility across Hadoop
    // versions: the method is not declared on the TaskAttemptContext type in older
    // APIs, but is available on the runtime context object.
    Method getCounterMethod = context.getClass().getMethod("getCounter", Enum.class);
    return ((Counter) getCounterMethod.invoke(context, counterName)).getValue();
  } catch (Exception e) {
    throw new RuntimeException("Error reading record count counter", e);
  }
}
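For comparison, on the Hadoop 2.x mapreduce API, where TaskAttemptContext itself declares getCounter(Enum), the same read needs no reflection (a sketch, not Gobblin's actual code):

long count = context.getCounter(counterName).getValue();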

Code example source: thinkaurelius/titan

@Override
public long getContextCounter(TaskInputOutputContext context, String group, String name) {
  return context.getCounter(group, name).getValue();
}

Code example source: thinkaurelius/titan

@Override
public long getCustom(String metric) {
  return counters.getGroup(HadoopContextScanMetrics.CUSTOM_COUNTER_GROUP).findCounter(metric).getValue();
}

Code example source: apache/incubator-druid

@Override
public Map<String, Object> getStats()
{
 if (groupByJob == null) {
  return null;
 }
 try {
  Counters jobCounters = groupByJob.getCounters();
  Map<String, Object> metrics = TaskMetricsUtils.makeIngestionRowMetrics(
    jobCounters.findCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_PROCESSED_COUNTER).getValue(),
    jobCounters.findCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_PROCESSED_WITH_ERRORS_COUNTER).getValue(),
    jobCounters.findCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_UNPARSEABLE_COUNTER).getValue(),
    jobCounters.findCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_THROWN_AWAY_COUNTER).getValue()
  );
  return metrics;
 }
 catch (IllegalStateException ise) {
  log.debug("Couldn't get counters due to job state");
  return null;
 }
 catch (Exception e) {
  log.debug(e, "Encountered exception in getStats().");
  return null;
 }
}

Code example source: apache/incubator-druid

@Override
public Map<String, Object> getStats()
{
 if (job == null) {
  return null;
 }
 try {
  Counters jobCounters = job.getCounters();
  Map<String, Object> metrics = TaskMetricsUtils.makeIngestionRowMetrics(
    jobCounters.findCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_PROCESSED_COUNTER).getValue(),
    jobCounters.findCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_PROCESSED_WITH_ERRORS_COUNTER)
          .getValue(),
    jobCounters.findCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_UNPARSEABLE_COUNTER).getValue(),
    jobCounters.findCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_THROWN_AWAY_COUNTER).getValue()
  );
  return metrics;
 }
 catch (IllegalStateException ise) {
  log.debug("Couldn't get counters due to job state");
  return null;
 }
 catch (Exception e) {
  log.debug(e, "Encountered exception in getStats().");
  return null;
 }
}

Code example source: thinkaurelius/titan

@Override
public long get(Metric metric) {
  return counters.getGroup(HadoopContextScanMetrics.STANDARD_COUNTER_GROUP).findCounter(metric.name()).getValue();
}

Code example source: apache/ignite

/** {@inheritDoc} */
@Override public void incrAllCounters(CounterGroupBase<Counter> rightGroup) {
  for (final Counter counter : rightGroup)
    cntrs.findCounter(name, counter.getName()).increment(counter.getValue());
}

Code example source: apache/ignite

/** {@inheritDoc} */
@Override public synchronized void incrAllCounters(AbstractCounters<Counter, CounterGroup> other) {
  for (CounterGroup group : other) {
    for (Counter counter : group) {
      findCounter(group.getName(), counter.getName()).increment(counter.getValue());
    }
  }
}

Code example source: apache/incubator-druid

private void handleParseException(ParseException pe, Context context)
{
 context.getCounter(HadoopDruidIndexerConfig.IndexJobCounters.INVALID_ROW_COUNTER).increment(1);
 Counter unparseableCounter = context.getCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_UNPARSEABLE_COUNTER);
 Counter processedWithErrorsCounter = context.getCounter(HadoopDruidIndexerConfig.IndexJobCounters.ROWS_PROCESSED_WITH_ERRORS_COUNTER);
 if (pe.isFromPartiallyValidRow()) {
  processedWithErrorsCounter.increment(1);
 } else {
  unparseableCounter.increment(1);
 }
 if (config.isLogParseExceptions()) {
  log.error(pe, "Encountered parse exception: ");
 }
 long rowsUnparseable = unparseableCounter.getValue();
 long rowsProcessedWithError = processedWithErrorsCounter.getValue();
 if (rowsUnparseable + rowsProcessedWithError > config.getMaxParseExceptions()) {
  log.error("Max parse exceptions exceeded, terminating task...");
  throw new RuntimeException("Max parse exceptions exceeded, terminating task...", pe);
 }
}

Code example source: apache/hbase

@Override
protected Job doLoad(Configuration conf, HTableDescriptor htd) throws Exception {
 Job job = super.doLoad(conf, htd);
 this.numRowsLoadedWithExp1 = job.getCounters().findCounter(Counters.ROWS_VIS_EXP_1).getValue();
 this.numRowsLoadedWithExp2 = job.getCounters().findCounter(Counters.ROWS_VIS_EXP_2).getValue();
 this.numRowsLoadWithExp3 = job.getCounters().findCounter(Counters.ROWS_VIS_EXP_3).getValue();
 this.numRowsLoadWithExp4 = job.getCounters().findCounter(Counters.ROWS_VIS_EXP_4).getValue();
 System.out.println("Rows loaded with cell visibility " + VISIBILITY_EXPS[0] + " : "
   + this.numRowsLoadedWithExp1);
 System.out.println("Rows loaded with cell visibility " + VISIBILITY_EXPS[1] + " : "
   + this.numRowsLoadedWithExp2);
 System.out.println("Rows loaded with cell visibility " + VISIBILITY_EXPS[2] + " : "
   + this.numRowsLoadWithExp3);
 System.out.println("Rows loaded with cell visibility " + VISIBILITY_EXPS[3] + " : "
   + this.numRowsLoadWithExp4);
 return job;
}

Code example source: apache/hbase

/**
 * Run the RowCounter map reduce job and verify the row count.
 *
 * @param args the command line arguments to be used for rowcounter job.
 * @param expectedCount the expected row count (result of map reduce job).
 * @throws Exception
 */
private void runRowCount(String[] args, int expectedCount) throws Exception {
 Job job = RowCounter.createSubmittableJob(TEST_UTIL.getConfiguration(), args);
 long start = System.currentTimeMillis();
 job.waitForCompletion(true);
 long duration = System.currentTimeMillis() - start;
 LOG.debug("row count duration (ms): " + duration);
 assertTrue(job.isSuccessful());
 Counter counter = job.getCounters().findCounter(RowCounter.RowCounterMapper.Counters.ROWS);
 assertEquals(expectedCount, counter.getValue());
}
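The same pattern also works for the framework's built-in counters; for example, reading the number of map input records after a job finishes (a sketch using the standard TaskCounter enum, not part of the HBase test above):

long mapInputRecords = job.getCounters()
    .findCounter(org.apache.hadoop.mapreduce.TaskCounter.MAP_INPUT_RECORDS)
    .getValue();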

Code example source: apache/hbase

private void runVerifyReplication(String[] args, int expectedGoodRows, int expectedBadRows)
  throws IOException, InterruptedException, ClassNotFoundException {
 Job job = new VerifyReplication().createSubmittableJob(new Configuration(conf1), args);
 if (job == null) {
  fail("Job wasn't created, see the log");
 }
 if (!job.waitForCompletion(true)) {
  fail("Job failed, see the log");
 }
 assertEquals(expectedGoodRows,
  job.getCounters().findCounter(VerifyReplication.Verifier.Counters.GOODROWS).getValue());
 assertEquals(expectedBadRows,
  job.getCounters().findCounter(VerifyReplication.Verifier.Counters.BADROWS).getValue());
}

Code example source: apache/hbase

/**
 * Verify scan counters are emitted from the job
 * @param job
 * @throws IOException
 */
private void verifyJobCountersAreEmitted(Job job) throws IOException {
 Counters counters = job.getCounters();
 Counter counter
  = counters.findCounter(TableRecordReaderImpl.HBASE_COUNTER_GROUP_NAME, "RPC_CALLS");
 assertNotNull("Unable to find Job counter for HBase scan metrics, RPC_CALLS", counter);
 assertTrue("Counter value for RPC_CALLS should be larger than 0", counter.getValue() > 0);
}

Code example source: apache/hbase

@Test
public void testSyncTable() throws Exception {
 final TableName sourceTableName = TableName.valueOf(name.getMethodName() + "_source");
 final TableName targetTableName = TableName.valueOf(name.getMethodName() + "_target");
 Path testDir = TEST_UTIL.getDataTestDirOnTestFS("testSyncTable");
 writeTestData(sourceTableName, targetTableName);
 hashSourceTable(sourceTableName, testDir);
 Counters syncCounters = syncTables(sourceTableName, targetTableName, testDir);
 assertEqualTables(90, sourceTableName, targetTableName);
 assertEquals(60, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue());
 assertEquals(10, syncCounters.findCounter(Counter.SOURCEMISSINGROWS).getValue());
 assertEquals(10, syncCounters.findCounter(Counter.TARGETMISSINGROWS).getValue());
 assertEquals(50, syncCounters.findCounter(Counter.SOURCEMISSINGCELLS).getValue());
 assertEquals(50, syncCounters.findCounter(Counter.TARGETMISSINGCELLS).getValue());
 assertEquals(20, syncCounters.findCounter(Counter.DIFFERENTCELLVALUES).getValue());
 TEST_UTIL.deleteTable(sourceTableName);
 TEST_UTIL.deleteTable(targetTableName);
}

Code example source: apache/hbase

@Test
public void testSyncTableDoPutsFalse() throws Exception {
 final TableName sourceTableName = TableName.valueOf(name.getMethodName() + "_source");
 final TableName targetTableName = TableName.valueOf(name.getMethodName() + "_target");
 Path testDir = TEST_UTIL.getDataTestDirOnTestFS("testSyncTableDoPutsFalse");
 writeTestData(sourceTableName, targetTableName);
 hashSourceTable(sourceTableName, testDir);
 Counters syncCounters = syncTables(sourceTableName, targetTableName,
   testDir, "--doPuts=false");
 assertTargetDoPutsFalse(70, sourceTableName, targetTableName);
 assertEquals(60, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue());
 assertEquals(10, syncCounters.findCounter(Counter.SOURCEMISSINGROWS).getValue());
 assertEquals(10, syncCounters.findCounter(Counter.TARGETMISSINGROWS).getValue());
 assertEquals(50, syncCounters.findCounter(Counter.SOURCEMISSINGCELLS).getValue());
 assertEquals(50, syncCounters.findCounter(Counter.TARGETMISSINGCELLS).getValue());
 assertEquals(20, syncCounters.findCounter(Counter.DIFFERENTCELLVALUES).getValue());
 TEST_UTIL.deleteTable(sourceTableName);
 TEST_UTIL.deleteTable(targetTableName);
}

Code example source: apache/hbase

@Test
public void testSyncTableDoDeletesFalse() throws Exception {
 final TableName sourceTableName = TableName.valueOf(name.getMethodName() + "_source");
 final TableName targetTableName = TableName.valueOf(name.getMethodName() + "_target");
 Path testDir = TEST_UTIL.getDataTestDirOnTestFS("testSyncTableDoDeletesFalse");
 writeTestData(sourceTableName, targetTableName);
 hashSourceTable(sourceTableName, testDir);
 Counters syncCounters = syncTables(sourceTableName, targetTableName,
   testDir, "--doDeletes=false");
 assertTargetDoDeletesFalse(100, sourceTableName, targetTableName);
 assertEquals(60, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue());
 assertEquals(10, syncCounters.findCounter(Counter.SOURCEMISSINGROWS).getValue());
 assertEquals(10, syncCounters.findCounter(Counter.TARGETMISSINGROWS).getValue());
 assertEquals(50, syncCounters.findCounter(Counter.SOURCEMISSINGCELLS).getValue());
 assertEquals(50, syncCounters.findCounter(Counter.TARGETMISSINGCELLS).getValue());
 assertEquals(20, syncCounters.findCounter(Counter.DIFFERENTCELLVALUES).getValue());
 TEST_UTIL.deleteTable(sourceTableName);
 TEST_UTIL.deleteTable(targetTableName);
}
