This article collects code examples for the Java method org.apache.hadoop.mapred.jobcontrol.Job.getAssignedJobID() and shows how Job.getAssignedJobID() is used in practice. The examples are drawn from selected open-source projects published on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as a useful reference. The details of the Job.getAssignedJobID() method are as follows:
Package path: org.apache.hadoop.mapred.jobcontrol.Job
Class name: Job
Method name: getAssignedJobID
Method description: none available
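
Before the collected examples, here is a minimal sketch (not taken from any of the projects below) of where getAssignedJobID() fits into the jobcontrol API: the method returns null until JobControl has actually submitted the wrapped job, and afterwards it returns the JobID assigned by the framework. The class name AssignedJobIdExample and the group name "example-group" are made up for illustration, and the JobConf would still need real mapper/input/output settings before this could run against a cluster.

import java.io.IOException;

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.jobcontrol.Job;
import org.apache.hadoop.mapred.jobcontrol.JobControl;

public class AssignedJobIdExample {
    public static void main(String[] args) throws IOException {
        // Wrap a JobConf in a jobcontrol Job. No JobID has been assigned yet,
        // so getAssignedJobID() returns null at this point.
        JobConf conf = new JobConf(); // a real run would also need mapper/input/output settings
        Job job = new Job(conf);
        System.out.println("Before submission: " + job.getAssignedJobID()); // prints "null"

        JobControl control = new JobControl("example-group");
        control.addJob(job);

        // JobControl implements Runnable; it submits and monitors jobs from a background thread.
        Thread thread = new Thread(control);
        thread.setDaemon(true);
        thread.start();

        // Once the job has been submitted, getAssignedJobID() returns the
        // JobID handed out by the framework.
        while (!control.allFinished()) {
            for (Job running : control.getRunningJobs()) {
                System.out.println("Running job id: " + running.getAssignedJobID());
            }
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                break;
            }
        }
        control.stop();
    }
}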
Code example origin: org.apache.pig/pig
@Override
public void kill() {
    try {
        if (jc != null && jc.getRunningJobs().size() > 0) {
            log.info("Received kill signal");
            for (Job job : jc.getRunningJobs()) {
                org.apache.hadoop.mapreduce.Job mrJob = job.getJob();
                try {
                    if (mrJob != null) {
                        mrJob.killJob();
                    }
                } catch (Exception ir) {
                    throw new IOException(ir);
                }
                log.info("Job " + job.getAssignedJobID() + " killed");
                String timeStamp = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
                        .format(Calendar.getInstance().getTime());
                System.err.println(timeStamp + " Job " + job.getAssignedJobID() + " killed");
            }
        }
    } catch (Exception e) {
        log.warn("Encounter exception on cleanup:" + e);
    }
}
Code example origin: org.apache.hadoop/hadoop-mapred-test
public void testGetAssignedJobId() throws Exception {
    JobConf jc = new JobConf();
    Job j = new Job(jc);
    // Just make sure no exception is thrown
    assertNull(j.getAssignedJobID());
    org.apache.hadoop.mapreduce.Job mockjob = mock(org.apache.hadoop.mapreduce.Job.class);
    org.apache.hadoop.mapreduce.JobID jid = new org.apache.hadoop.mapreduce.JobID("test", 0);
    when(mockjob.getJobID()).thenReturn(jid);
    j.setJob(mockjob);
    JobID expected = new JobID("test", 0);
    assertEquals(expected, j.getAssignedJobID());
    verify(mockjob).getJobID();
}
Code example origin: org.apache.pig/pig
MRJobStats addMRJobStats(Job job) {
    MapReduceOper mro = jobMroMap.get(job);
    if (mro == null) {
        LOG.warn("unable to get MR oper for job: " + job.toString());
        return null;
    }
    MRJobStats js = mroJobMap.get(mro);
    JobID jobId = job.getAssignedJobID();
    js.setId(jobId);
    js.setAlias(mro);
    js.setConf(job.getJobConf());
    return js;
}
Code example origin: org.apache.pig/pig
public static Iterator<TaskReport> getTaskReports(Job job, TaskType type) throws IOException {
    if (job.getJobConf().getBoolean(PigConfiguration.PIG_NO_TASK_REPORT, false)) {
        LOG.info("TaskReports are disabled for job: " + job.getAssignedJobID());
        return null;
    }
    Cluster cluster = new Cluster(job.getJobConf());
    try {
        org.apache.hadoop.mapreduce.Job mrJob = cluster.getJob(job.getAssignedJobID());
        if (mrJob == null) { // In local mode, mrJob will be null
            mrJob = job.getJob();
        }
        org.apache.hadoop.mapreduce.TaskReport[] reports = mrJob.getTaskReports(type);
        return Lists.newArrayList(reports).iterator();
    } catch (InterruptedException ir) {
        throw new IOException(ir);
    }
}
Code example origin: org.apache.pig/pig
@Private
public static void setBackendException(Job job, Exception e) {
    JobID jobId = job.getAssignedJobID();
    if (jobId == null) {
        return;
    }
    PigStats.get().setBackendException(jobId.toString(), e);
}
Code example origin: org.apache.pig/pig
public static Counters getCounters(Job job) throws IOException {
    try {
        Cluster cluster = new Cluster(job.getJobConf());
        org.apache.hadoop.mapreduce.Job mrJob = cluster.getJob(job.getAssignedJobID());
        if (mrJob == null) { // In local mode, mrJob will be null
            mrJob = job.getJob();
        }
        return new Counters(mrJob.getCounters());
    } catch (Exception ir) {
        throw new IOException(ir);
    }
}
Code example origin: org.apache.pig/pig
/**
 * Returns the count for the given counter name in the counter group
 * 'MultiStoreCounters'
 *
 * @param job the MR job
 * @param jobClient the Hadoop job client
 * @param counterName the counter name
 * @return the count of the given counter name
 */
public static long getMultiStoreCount(Job job, JobClient jobClient,
        String counterName) {
    long value = -1;
    try {
        RunningJob rj = jobClient.getJob(job.getAssignedJobID());
        if (rj != null) {
            Counters.Counter counter = rj.getCounters().getGroup(
                    MULTI_STORE_COUNTER_GROUP).getCounterForName(counterName);
            value = counter.getValue();
        }
    } catch (IOException e) {
        LOG.warn("Failed to get the counter for " + counterName, e);
    }
    return value;
}
Code example origin: ch.cern.hadoop/hadoop-mapreduce-client-jobclient
@Test(timeout = 30000)
public void testGetAssignedJobId() throws Exception {
    JobConf jc = new JobConf();
    Job j = new Job(jc);
    // Just make sure no exception is thrown
    assertNull(j.getAssignedJobID());
    org.apache.hadoop.mapreduce.Job mockjob = mock(org.apache.hadoop.mapreduce.Job.class);
    org.apache.hadoop.mapreduce.JobID jid = new org.apache.hadoop.mapreduce.JobID("test", 0);
    when(mockjob.getJobID()).thenReturn(jid);
    j.setJob(mockjob);
    JobID expected = new JobID("test", 0);
    assertEquals(expected, j.getAssignedJobID());
    verify(mockjob).getJobID();
}
Code example origin: org.apache.pig/pig
// Note: this snippet is truncated in the original listing; the lines below are
// disjoint fragments, each using getAssignedJobID().
if (job.getAssignedJobID() != null) {
    log.info("HadoopJobId: " + job.getAssignedJobID());
    // ...
}
// ...
        job.getAssignedJobID().toString());
// ...
Job j = (Job) object;
if (j != null) {
    msg.append(j.getAssignedJobID()).append(",");
    // ...
}
// ...
log.info("job " + job.getAssignedJobID() + " has failed! Stop running all dependent jobs");
Code example origin: org.apache.pig/pig
private void getStats(Job job, boolean errNotDbg,
        PigContext pigContext) throws ExecException {
    JobID MRJobID = job.getAssignedJobID();
    String jobMessage = job.getMessage();
    Exception backendException = null;
    // ... (snippet truncated in the original listing)