本文整理了Java中org.apache.hadoop.tools.rumen.ZombieJob.sanitizeTaskRuntime()
方法的一些代码示例,展示了ZombieJob.sanitizeTaskRuntime()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。ZombieJob.sanitizeTaskRuntime()
方法的具体详情如下:
包路径:org.apache.hadoop.tools.rumen.ZombieJob
类名称:ZombieJob
方法名:sanitizeTaskRuntime
方法描述:暂无
代码示例来源:origin: com.github.jiayuhan-it/hadoop-rumen
/**
 * Fabricates a {@code TaskAttemptInfo} for an attempt that has no logged
 * counterpart. For MAP attempts a success/failure state is synthesized and a
 * runtime consistent with that state is made up and sanitized; for REDUCE
 * attempts the state is currently assumed successful and only the reduce
 * phase runtime is synthesized (shuffle and sort are reported as zero).
 *
 * @throws IllegalArgumentException if {@code taskType} is neither MAP nor REDUCE
 */
private TaskAttemptInfo makeUpTaskAttemptInfo(TaskType taskType, TaskInfo taskInfo,
    int taskAttemptNumber, int taskNumber, int locality) {
  if (taskType == TaskType.MAP) {
    // Invent an outcome for this attempt, then a runtime matching it.
    State mapState = makeUpState(taskAttemptNumber, job.getMapperTriesToSucceed());
    long mapRuntime = sanitizeTaskRuntime(
        makeUpMapRuntime(mapState, locality),
        makeTaskAttemptID(taskType, taskNumber, taskAttemptNumber));
    return new MapTaskAttemptInfo(mapState, taskInfo, mapRuntime, null);
  }
  if (taskType == TaskType.REDUCE) {
    // TODO make up state
    // state = makeUpState(taskAttemptNumber, job.getReducerTriesToSucceed());
    State reduceState = State.SUCCEEDED;
    long reducePhaseTime = makeUpReduceRuntime(reduceState);
    // Shuffle and sort phases are not modeled here; both reported as 0.
    return new ReduceTaskAttemptInfo(
        reduceState, taskInfo, 0L, 0L, reducePhaseTime, null);
  }
  throw new IllegalArgumentException("taskType is neither MAP nor REDUCE: "
      + taskType);
}
代码示例来源:origin: ch.cern.hadoop/hadoop-rumen
/**
 * Builds a synthetic {@code TaskAttemptInfo} for a task attempt with no
 * logged data. MAP attempts get a made-up state plus a sanitized made-up
 * runtime; REDUCE attempts assume success for now and only the reduce-phase
 * runtime is fabricated, with zero shuffle and sort time.
 *
 * @throws IllegalArgumentException for any task type other than MAP or REDUCE
 */
private TaskAttemptInfo makeUpTaskAttemptInfo(TaskType taskType, TaskInfo taskInfo,
    int taskAttemptNumber, int taskNumber, int locality) {
  if (taskType == TaskType.MAP) {
    // Decide whether this attempt succeeds, then invent a matching runtime.
    State attemptState =
        makeUpState(taskAttemptNumber, job.getMapperTriesToSucceed());
    long attemptRuntime = makeUpMapRuntime(attemptState, locality);
    attemptRuntime = sanitizeTaskRuntime(attemptRuntime,
        makeTaskAttemptID(taskType, taskNumber, taskAttemptNumber));
    return new MapTaskAttemptInfo(attemptState, taskInfo, attemptRuntime, null);
  } else if (taskType == TaskType.REDUCE) {
    State attemptState = State.SUCCEEDED;
    // TODO make up state
    // state = makeUpState(taskAttemptNumber, job.getReducerTriesToSucceed());
    long shuffleMs = 0;
    long sortMs = 0;
    long reduceMs = makeUpReduceRuntime(attemptState);
    return new ReduceTaskAttemptInfo(
        attemptState, taskInfo, shuffleMs, sortMs, reduceMs, null);
  }
  throw new IllegalArgumentException("taskType is neither MAP nor REDUCE: "
      + taskType);
}
代码示例来源:origin: com.github.jiayuhan-it/hadoop-rumen
@SuppressWarnings("hiding")
private TaskAttemptInfo scaleInfo(LoggedTask loggedTask,
LoggedTaskAttempt loggedAttempt, int locality, int loggedLocality,
double rackLocalOverNodeLocal, double rackRemoteOverNodeLocal) {
TaskInfo taskInfo = getTaskInfo(loggedTask);
double[] factors = new double[] { 1.0, rackLocalOverNodeLocal,
rackRemoteOverNodeLocal };
double scaleFactor = factors[locality] / factors[loggedLocality];
State state = convertState(loggedAttempt.getResult());
if (loggedTask.getTaskType() == Values.MAP) {
long taskTime = 0;
if (loggedAttempt.getStartTime() == 0) {
taskTime = makeUpMapRuntime(state, locality);
} else {
taskTime = loggedAttempt.getFinishTime() - loggedAttempt.getStartTime();
}
taskTime = sanitizeTaskRuntime(taskTime, loggedAttempt.getAttemptID());
taskTime *= scaleFactor;
return new MapTaskAttemptInfo
(state, taskInfo, taskTime, loggedAttempt.allSplitVectors());
} else {
throw new IllegalArgumentException("taskType can only be MAP: "
+ loggedTask.getTaskType());
}
}
代码示例来源:origin: ch.cern.hadoop/hadoop-rumen
@SuppressWarnings("hiding")
private TaskAttemptInfo scaleInfo(LoggedTask loggedTask,
LoggedTaskAttempt loggedAttempt, int locality, int loggedLocality,
double rackLocalOverNodeLocal, double rackRemoteOverNodeLocal) {
TaskInfo taskInfo = getTaskInfo(loggedTask);
double[] factors = new double[] { 1.0, rackLocalOverNodeLocal,
rackRemoteOverNodeLocal };
double scaleFactor = factors[locality] / factors[loggedLocality];
State state = convertState(loggedAttempt.getResult());
if (loggedTask.getTaskType() == Values.MAP) {
long taskTime = 0;
if (loggedAttempt.getStartTime() == 0) {
taskTime = makeUpMapRuntime(state, locality);
} else {
taskTime = loggedAttempt.getFinishTime() - loggedAttempt.getStartTime();
}
taskTime = sanitizeTaskRuntime(taskTime, loggedAttempt.getAttemptID());
taskTime *= scaleFactor;
return new MapTaskAttemptInfo
(state, taskInfo, taskTime, loggedAttempt.allSplitVectors());
} else {
throw new IllegalArgumentException("taskType can only be MAP: "
+ loggedTask.getTaskType());
}
}
代码示例来源:origin: ch.cern.hadoop/hadoop-rumen
taskTime = loggedAttempt.getFinishTime() - loggedAttempt.getStartTime();
taskTime = sanitizeTaskRuntime(taskTime, loggedAttempt.getAttemptID());
return new MapTaskAttemptInfo(state, taskInfo, taskTime, allSplitVectors);
} else if (loggedTask.getTaskType() == Values.REDUCE) {
long mergeTime = mergeDone - shuffleDone;
long reduceTime = finishTime - mergeDone;
reduceTime = sanitizeTaskRuntime(reduceTime, loggedAttempt.getAttemptID());
代码示例来源:origin: com.github.jiayuhan-it/hadoop-rumen
taskTime = loggedAttempt.getFinishTime() - loggedAttempt.getStartTime();
taskTime = sanitizeTaskRuntime(taskTime, loggedAttempt.getAttemptID());
return new MapTaskAttemptInfo(state, taskInfo, taskTime, allSplitVectors);
} else if (loggedTask.getTaskType() == Values.REDUCE) {
long mergeTime = mergeDone - shuffleDone;
long reduceTime = finishTime - mergeDone;
reduceTime = sanitizeTaskRuntime(reduceTime, loggedAttempt.getAttemptID());
内容来源于网络,如有侵权,请联系作者删除!