Usage of the org.apache.hadoop.mapred.Task.isMapTask() method, with code examples


This article collects Java code examples for the org.apache.hadoop.mapred.Task.isMapTask() method and shows how it is used in practice. The snippets are drawn from selected open-source projects published on GitHub, Stack Overflow, Maven, and similar platforms, so they are a useful reference for real-world usage. Details of Task.isMapTask() are as follows:

Package: org.apache.hadoop.mapred
Class: Task
Method: isMapTask

About Task.isMapTask

isMapTask() reports whether this task is a map task: MapTask overrides it to return true and ReduceTask to return false. Callers use it to branch between map-side and reduce-side handling, as the examples below show.
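
As a minimal sketch of the usual pattern (the TaskDispatcher class and its handleMap/handleReduce methods below are hypothetical, invented for illustration, not Hadoop APIs), a caller branches on the flag to choose the map-side or reduce-side path:

import org.apache.hadoop.mapred.Task;

// Hypothetical dispatcher: routes a task to map- or reduce-specific handling.
public class TaskDispatcher {
 public void dispatch(Task task) {
  if (task.isMapTask()) {
   handleMap(task);    // map-side path
  } else {
   handleReduce(task); // reduce-side path
  }
 }
 private void handleMap(Task task) { /* map-specific work */ }
 private void handleReduce(Task task) { /* reduce-specific work */ }
}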

Code examples

Code example source: com.facebook.hadoop/hadoop-core (the same snippet appears verbatim in org.apache.hadoop/hadoop-mapred, ch.cern.hadoop/hadoop-mapreduce-client-core, org.jvnet.hudson.hadoop/hadoop-core, and io.hops/hadoop-mapreduce-client-core)

public void write(DataOutput out) throws IOException {
 out.writeBoolean(shouldDie);
 if (t != null) {
  out.writeBoolean(true);
  // Record the task type so the reader knows which concrete
  // subclass (MapTask or ReduceTask) to instantiate.
  out.writeBoolean(t.isMapTask());
  t.write(out);
 } else {
  out.writeBoolean(false);
 }
}
public void readFields(DataInput in) throws IOException {
 shouldDie = in.readBoolean();
 boolean taskComing = in.readBoolean();
 if (taskComing) {
  // The isMapTask() flag written above selects the concrete subclass.
  boolean isMap = in.readBoolean();
  t = isMap ? new MapTask() : new ReduceTask();
  t.readFields(in);
 }
}
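
The boolean written by t.isMapTask() is what lets readFields reconstruct the right concrete task type before delegating to it. A minimal sketch of exercising such a write/readFields pair (the generic helper below is illustrative, not a Hadoop API; it works for any Writable, including the class these methods belong to):

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Writable;

public class RoundTrip {
 // Serializes `original`, then replays the bytes into `fresh`.
 static <T extends Writable> T roundTrip(T original, T fresh) throws IOException {
  DataOutputBuffer out = new DataOutputBuffer();
  original.write(out);                      // writes the isMapTask() type tag
  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), out.getLength()); // replay the serialized bytes
  fresh.readFields(in);                     // rebuilds MapTask vs ReduceTask
  return fresh;
 }
}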

Code example source: org.apache.hadoop/hadoop-mapred (also in com.facebook.hadoop/hadoop-core)

public void launchJvm(TaskRunner t, JvmEnv env) {
 // Map and reduce child JVMs are pooled separately;
 // isMapTask() selects which JvmManagerForType handles the task.
 if (t.getTask().isMapTask()) {
  mapJvmManager.reapJvm(t, env);
 } else {
  reduceJvmManager.reapJvm(t, env);
 }
}

Code example source: com.facebook.hadoop/hadoop-core

public void write(DataOutput out) throws IOException {
 super.write(out);
 // Tag the payload with the task type before serializing the task itself,
 // so the reader can instantiate the matching subclass.
 out.writeBoolean(task.isMapTask());
 task.write(out);
}

Code example source: com.facebook.hadoop/hadoop-core

public Task() {
 // The task-type flag decides which TaskStatus subclass is created
 // (a map status for map tasks, a reduce status otherwise).
 taskStatus = TaskStatus.createTaskStatus(isMapTask());
 taskId = new TaskAttemptID();
 spilledRecordsCounter = counters.findCounter(Counter.SPILLED_RECORDS);
}

Code example source: com.facebook.hadoop/hadoop-core

public void taskFinished(TaskRunner tr) {
 // Route the completion callback to the JVM manager for the task's type.
 if (tr.getTask().isMapTask()) {
  mapJvmManager.taskFinished(tr);
 } else {
  reduceJvmManager.taskFinished(tr);
 }
}

Code example source: ch.cern.hadoop/hadoop-mapreduce-client-core (the same snippet appears verbatim in com.github.jiayuhan-it/hadoop-mapreduce-client-core, io.prestosql.hadoop/hadoop-apache, and io.hops/hadoop-mapreduce-client-core)

/**
 * Localize the given JobConf to be specific for this task.
 */
public void localizeConfiguration(JobConf conf) throws IOException {
 conf.set(JobContext.TASK_ID, taskId.getTaskID().toString()); 
 conf.set(JobContext.TASK_ATTEMPT_ID, taskId.toString());
 conf.setBoolean(JobContext.TASK_ISMAP, isMapTask());
 conf.setInt(JobContext.TASK_PARTITION, partition);
 conf.set(JobContext.ID, taskId.getJobID().toString());
}
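
User code running inside the task can read these values back from its job configuration. A minimal sketch (in Hadoop 2.x the JobContext.TASK_ISMAP constant resolves to the key "mapreduce.task.ismap"; treat that literal as an assumption to verify against your release):

import org.apache.hadoop.conf.Configuration;

public class TaskSide {
 // Reads the flag that localizeConfiguration stored for this task attempt.
 static boolean runningAsMap(Configuration conf) {
  return conf.getBoolean("mapreduce.task.ismap", false);
 }
}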

Code example source: com.facebook.hadoop/hadoop-core (also in org.jvnet.hudson.hadoop/hadoop-core); this older variant writes the same information under the legacy string keys instead of the JobContext constants

/**
 * Localize the given JobConf to be specific for this task.
 */
public void localizeConfiguration(JobConf conf) throws IOException {
 conf.set("mapred.tip.id", taskId.getTaskID().toString()); 
 conf.set("mapred.task.id", taskId.toString());
 conf.setBoolean("mapred.task.is.map", isMapTask());
 conf.setInt("mapred.task.partition", partition);
 conf.set("mapred.job.id", taskId.getJobID().toString());
}

Code example source: ch.cern.hadoop/hadoop-mapreduce-client-core (the same snippet appears verbatim in org.apache.hadoop/hadoop-mapred, io.prestosql.hadoop/hadoop-apache, and io.hops/hadoop-mapreduce-client-core)

public Task() {
 // As above, isMapTask() picks the TaskStatus subclass; the counters
 // looked up here are cached so later code can increment them directly.
 taskStatus = TaskStatus.createTaskStatus(isMapTask());
 taskId = new TaskAttemptID();
 spilledRecordsCounter =
  counters.findCounter(TaskCounter.SPILLED_RECORDS);
 failedShuffleCounter =
  counters.findCounter(TaskCounter.FAILED_SHUFFLE);
 mergedMapOutputsCounter =
  counters.findCounter(TaskCounter.MERGED_MAP_OUTPUTS);
 gcUpdater = new GcTimeUpdater();
}
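
The counters fetched in this constructor are standard TaskCounter entries. A short sketch of looking one up and incrementing it (the standalone Counters usage below is illustrative only; inside a real job you would go through the running task's own counters):

import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapreduce.TaskCounter;

public class CounterDemo {
 public static void main(String[] args) {
  Counters counters = new Counters();
  // Same lookup the Task constructor performs, followed by a manual bump.
  Counters.Counter spilled = counters.findCounter(TaskCounter.SPILLED_RECORDS);
  spilled.increment(1);
  System.out.println(spilled.getDisplayName() + " = " + spilled.getValue());
 }
}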
