org.apache.hadoop.tools.rumen.ZombieJob.getTaskInfo()方法的使用及代码示例

x33g5p2x  于2022-02-05 转载在 其他  
字(4.7k)|赞(0)|评价(0)|浏览(117)

本文整理了Java中org.apache.hadoop.tools.rumen.ZombieJob.getTaskInfo()方法的一些代码示例,展示了ZombieJob.getTaskInfo()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。ZombieJob.getTaskInfo()方法的具体详情如下:
包路径:org.apache.hadoop.tools.rumen.ZombieJob
类名称:ZombieJob
方法名:getTaskInfo

ZombieJob.getTaskInfo介绍

暂无

代码示例

代码示例来源:origin: ch.cern.hadoop/hadoop-rumen

/**
 * Looks up the logged task identified by the given type and index, then
 * delegates to the {@code getTaskInfo(LoggedTask)} overload to build the
 * corresponding {@link TaskInfo}.
 *
 * @param taskType the type of the requested task
 * @param taskNumber the index of the task within that type
 * @return the TaskInfo derived from the resolved logged task
 */
@Override
public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
 return getTaskInfo(getLoggedTask(taskType, taskNumber));
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-rumen

/**
 * Resolves the logged task for this task type and number and returns the
 * {@link TaskInfo} computed by the {@code getTaskInfo(LoggedTask)} overload.
 *
 * @param taskType the type of the task being queried
 * @param taskNumber the zero-based task index within the given type
 * @return the TaskInfo for the matching logged task
 */
@Override
public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
 return getTaskInfo(getLoggedTask(taskType, taskNumber));
}

代码示例来源:origin: ch.cern.hadoop/hadoop-rumen

long mapInputBytes = getTaskInfo(mapTask).getInputBytes();
if (mapInputBytes < 0) {
 LOG.warn("InputBytes for task "+mapTask.getTaskID()+" is not defined.");

代码示例来源:origin: com.github.jiayuhan-it/hadoop-rumen

long mapInputBytes = getTaskInfo(mapTask).getInputBytes();
if (mapInputBytes < 0) {
 LOG.warn("InputBytes for task "+mapTask.getTaskID()+" is not defined.");

代码示例来源:origin: ch.cern.hadoop/hadoop-rumen

if (loggedAttempt == null) {
 TaskInfo taskInfo = getTaskInfo(loggedTask);
 return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
   taskNumber, locality);
  TaskInfo taskInfo = getTaskInfo(loggedTask);
  return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
    taskNumber, locality);

代码示例来源:origin: com.github.jiayuhan-it/hadoop-rumen

if (loggedAttempt == null) {
 TaskInfo taskInfo = getTaskInfo(loggedTask);
 return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
   taskNumber, locality);
  TaskInfo taskInfo = getTaskInfo(loggedTask);
  return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
    taskNumber, locality);

代码示例来源:origin: ch.cern.hadoop/hadoop-rumen

if (loggedAttempt == null) {
 TaskInfo taskInfo = getTaskInfo(loggedTask);
 return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
   taskNumber, locality);
  TaskInfo taskInfo = getTaskInfo(loggedTask);
  return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
    taskNumber, locality);

代码示例来源:origin: com.github.jiayuhan-it/hadoop-rumen

if (loggedAttempt == null) {
 TaskInfo taskInfo = getTaskInfo(loggedTask);
 return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
   taskNumber, locality);
  TaskInfo taskInfo = getTaskInfo(loggedTask);
  return makeUpTaskAttemptInfo(taskType, taskInfo, taskAttemptNumber,
    taskNumber, locality);

代码示例来源:origin: com.github.jiayuhan-it/hadoop-rumen

/**
 * Builds a {@link TaskAttemptInfo} for a map attempt whose runtime is
 * rescaled from the logged locality level to the requested one.
 * Only MAP tasks are supported; any other type is rejected.
 *
 * @param loggedTask the task whose TaskInfo is reused
 * @param loggedAttempt the recorded attempt supplying state and timing
 * @param locality the locality level to simulate
 * @param loggedLocality the locality level the attempt actually ran at
 * @param rackLocalOverNodeLocal runtime ratio of rack-local to node-local
 * @param rackRemoteOverNodeLocal runtime ratio of off-rack to node-local
 * @return a MapTaskAttemptInfo with the locality-adjusted runtime
 * @throws IllegalArgumentException if the logged task is not a MAP task
 */
@SuppressWarnings("hiding") 
private TaskAttemptInfo scaleInfo(LoggedTask loggedTask,
  LoggedTaskAttempt loggedAttempt, int locality, int loggedLocality,
  double rackLocalOverNodeLocal, double rackRemoteOverNodeLocal) {
 TaskInfo info = getTaskInfo(loggedTask);
 // Relative runtime cost per locality level; index 0 is node-local.
 double[] localityCost = new double[] { 1.0, rackLocalOverNodeLocal,
   rackRemoteOverNodeLocal };
 double ratio = localityCost[locality] / localityCost[loggedLocality];
 State attemptState = convertState(loggedAttempt.getResult());
 if (loggedTask.getTaskType() == Values.MAP) {
  // A zero start time means the attempt's timing was never recorded,
  // so a synthetic runtime is made up for it instead.
  long runtime = (loggedAttempt.getStartTime() == 0)
    ? makeUpMapRuntime(attemptState, locality)
    : loggedAttempt.getFinishTime() - loggedAttempt.getStartTime();
  runtime = sanitizeTaskRuntime(runtime, loggedAttempt.getAttemptID());
  runtime *= ratio;
  return new MapTaskAttemptInfo(
    attemptState, info, runtime, loggedAttempt.allSplitVectors());
 } else {
  throw new IllegalArgumentException("taskType can only be MAP: "
    + loggedTask.getTaskType());
 }
}

代码示例来源:origin: ch.cern.hadoop/hadoop-rumen

/**
 * Produces a {@link TaskAttemptInfo} for a map attempt, scaling the
 * attempt's runtime by the ratio between the requested and the logged
 * locality factors. Non-MAP tasks are rejected.
 *
 * @param loggedTask task whose TaskInfo backs the attempt
 * @param loggedAttempt the recorded attempt providing result and timing
 * @param locality locality level being simulated
 * @param loggedLocality locality level from the trace
 * @param rackLocalOverNodeLocal rack-local vs node-local runtime factor
 * @param rackRemoteOverNodeLocal off-rack vs node-local runtime factor
 * @return a MapTaskAttemptInfo carrying the rescaled runtime
 * @throws IllegalArgumentException if the task type is not MAP
 */
@SuppressWarnings("hiding") 
private TaskAttemptInfo scaleInfo(LoggedTask loggedTask,
  LoggedTaskAttempt loggedAttempt, int locality, int loggedLocality,
  double rackLocalOverNodeLocal, double rackRemoteOverNodeLocal) {
 TaskInfo taskInfo = getTaskInfo(loggedTask);
 // Index 0 corresponds to node-local execution (factor 1.0).
 double[] weights = { 1.0, rackLocalOverNodeLocal, rackRemoteOverNodeLocal };
 double adjustment = weights[locality] / weights[loggedLocality];
 State result = convertState(loggedAttempt.getResult());
 if (loggedTask.getTaskType() != Values.MAP) {
  throw new IllegalArgumentException("taskType can only be MAP: "
    + loggedTask.getTaskType());
 }
 long elapsed;
 if (loggedAttempt.getStartTime() == 0) {
  // No recorded start time: fabricate a plausible map runtime.
  elapsed = makeUpMapRuntime(result, locality);
 } else {
  elapsed = loggedAttempt.getFinishTime() - loggedAttempt.getStartTime();
 }
 elapsed = sanitizeTaskRuntime(elapsed, loggedAttempt.getAttemptID());
 elapsed *= adjustment;
 return new MapTaskAttemptInfo(result, taskInfo, elapsed,
   loggedAttempt.allSplitVectors());
}

代码示例来源:origin: ch.cern.hadoop/hadoop-rumen

private TaskAttemptInfo getTaskAttemptInfo(LoggedTask loggedTask,
  LoggedTaskAttempt loggedAttempt) {
 TaskInfo taskInfo = getTaskInfo(loggedTask);

代码示例来源:origin: com.github.jiayuhan-it/hadoop-rumen

private TaskAttemptInfo getTaskAttemptInfo(LoggedTask loggedTask,
  LoggedTaskAttempt loggedAttempt) {
 TaskInfo taskInfo = getTaskInfo(loggedTask);

相关文章