This article collects code examples for the Java class org.apache.hadoop.mapred.jobcontrol.Job and shows how the Job class is used in practice. The examples were extracted from selected projects on GitHub, Stack Overflow, Maven, and similar platforms, so they should serve as solid references. Details of the Job class:
Package path: org.apache.hadoop.mapred.jobcontrol.Job
Class name: Job
This class encapsulates a MapReduce job and its dependencies. It monitors the states of the depending jobs and updates the state of this job. A job starts in the WAITING state. If it does not have any depending jobs, or all of its depending jobs are in the SUCCESS state, the job state becomes READY. If any depending job fails, this job fails too. When in the READY state, the job can be submitted to Hadoop for execution, and its state changes to RUNNING. From RUNNING, the job moves into SUCCESS or FAILED, depending on the outcome of the job execution.
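A minimal driver sketch of this state machine, assuming confA and confB are two fully configured JobConf instances (the class name JobControlSketch and the runChain wrapper are illustrative, not part of Hadoop or this article):

import java.util.ArrayList;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.jobcontrol.Job;
import org.apache.hadoop.mapred.jobcontrol.JobControl;

public class JobControlSketch {
  public static void runChain(JobConf confA, JobConf confB) throws Exception {
    Job jobA = new Job(confA);            // starts in Job.WAITING
    ArrayList<Job> deps = new ArrayList<Job>();
    deps.add(jobA);
    Job jobB = new Job(confB, deps);      // becomes READY only after jobA reaches SUCCESS

    JobControl control = new JobControl("chain");
    control.addJob(jobA);
    control.addJob(jobB);

    Thread runner = new Thread(control);  // JobControl is Runnable
    runner.start();
    while (!control.allFinished()) {
      Thread.sleep(500);                  // poll until every job completes or fails
    }
    control.stop();

    // If jobA failed, jobB ends in Job.DEPENDENT_FAILED instead of running.
    System.out.println("failed jobs: " + control.getFailedJobs());
  }
}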
Code example from: org.apache.hadoop/hadoop-mapred-test
public void testGetAssignedJobId() throws Exception {
  JobConf jc = new JobConf();
  Job j = new Job(jc);
  // Just make sure no exception is thrown
  assertNull(j.getAssignedJobID());
  org.apache.hadoop.mapreduce.Job mockjob = mock(org.apache.hadoop.mapreduce.Job.class);
  org.apache.hadoop.mapreduce.JobID jid = new org.apache.hadoop.mapreduce.JobID("test", 0);
  when(mockjob.getJobID()).thenReturn(jid);
  j.setJob(mockjob);
  JobID expected = new JobID("test", 0);
  assertEquals(expected, j.getAssignedJobID());
  verify(mockjob).getJobID();
}
Code example from: org.jvnet.hudson.hadoop/hadoop-core
private void addToQueue(Job aJob) {
  Map<String, Job> queue = getQueue(aJob.getState());
  addToQueue(aJob, queue);
}
Code example from: ch.cern.hadoop/hadoop-mapreduce-client-core
/**
 * This is a no-op method kept for backward compatibility. It is a behavior
 * change from 1.x: job ids can no longer be changed from the job.
 *
 * @param mapredJobID
 *          the mapred job ID for this job.
 */
@Deprecated
public synchronized void setMapredJobID(String mapredJobID) {
  setAssignedJobID(JobID.forName(mapredJobID));
}
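A quick illustration of the no-op (a sketch, not from the article; the empty JobConf is hypothetical):

Job j = new Job(new JobConf());
j.setMapredJobID("job_200707121733_0003"); // silently ignored since 2.x
// The framework still owns the ID, so nothing was assigned:
assert j.getAssignedJobID() == null;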
Code example from: stackoverflow.com
public boolean print(Job aJob) {
  boolean result = false;
  if (isAvailable()) {
    currentJob = aJob;
    aJob.setPrinter(this);
    aJob.setState(Job.PRINTING);
    result = true;
  } else {
    System.err.println("Error"); // printer busy: report and return false
  }
  return result;
}
Code example from: org.jvnet.hudson.hadoop/hadoop-core
checkRunningState();
for (int i = 0; i < n; i++) {
  pred = this.dependingJobs.get(i);
  int s = pred.checkState();
  if (s == Job.WAITING || s == Job.READY || s == Job.RUNNING) {
    break; // a pred is still not completed, continue in WAITING
  }
  if (s == Job.FAILED || s == Job.DEPENDENT_FAILED) {
    this.state = Job.DEPENDENT_FAILED;
    this.message = "depending job " + i + " with jobID "
        + pred.getJobID() + " failed. " + pred.getMessage();
    break;
  }
}
Code example from: com.facebook.hadoop/hadoop-core
/**
 * Add a new job.
 * @param aJob the new job
 */
synchronized public String addJob(Job aJob) {
  String id = this.getNextJobID();
  aJob.setJobID(id);
  aJob.setState(Job.WAITING);
  this.addToQueue(aJob);
  return id;
}
Code example from: ch.cern.hadoop/hadoop-mapreduce-client-core
public static JobControl createValueAggregatorJobs(String args[],
    Class<? extends ValueAggregatorDescriptor>[] descriptors) throws IOException {
  JobControl theControl = new JobControl("ValueAggregatorJobs");
  ArrayList<Job> dependingJobs = new ArrayList<Job>();
  JobConf aJobConf = createValueAggregatorJob(args);
  if (descriptors != null) {
    setAggregatorDescriptors(aJobConf, descriptors);
  }
  Job aJob = new Job(aJobConf, dependingJobs);
  theControl.addJob(aJob);
  return theControl;
}
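Note that the returned JobControl has not been started: callers are expected to drive it themselves, for example on a separate thread polling allFinished(), as in the sketch near the top of this article.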
Code example from: ch.cern.hadoop/hadoop-mapreduce-client-jobclient
@Test(timeout = 30000)
public void testAddingDependingJob() throws Exception {
  Job job_1 = getCopyJob();
  ArrayList<Job> dependingJobs = new ArrayList<Job>();
  JobControl jc = new JobControl("Test");
  jc.addJob(job_1);
  Assert.assertEquals(Job.WAITING, job_1.getState());
  Assert.assertTrue(job_1.addDependingJob(new Job(job_1.getJobConf(),
      dependingJobs)));
}
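The addDependingJob call succeeds here only because job_1 is still in the WAITING state; once a job leaves WAITING, new dependencies are rejected and the method returns false.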
Code example from: org.apache.hadoop/hadoop-mapred-test
inPaths_1.add(indir);
JobConf jobConf_1 = JobControlTestUtils.createCopyJob(inPaths_1, outdir_1);
Job job_1 = new Job(jobConf_1, dependingJobs);
ArrayList<Path> inPaths_2 = new ArrayList<Path>();
inPaths_2.add(indir);
JobConf jobConf_2 = JobControlTestUtils.createCopyJob(inPaths_2, outdir_2);
Job job_2 = new Job(jobConf_2, dependingJobs);
dependingJobs.add(job_1);
dependingJobs.add(job_2);
Job job_3 = new Job(jobConf_3, dependingJobs);
dependingJobs = new ArrayList<Job>();
dependingJobs.add(job_3);
Job job_4 = new Job(jobConf_4, dependingJobs);
if (job_1.getState() != Job.FAILED &&
    job_1.getState() != Job.DEPENDENT_FAILED &&
    job_1.getState() != Job.SUCCESS) {
  String states = "job_1: " + job_1.getState() + "\n";
  throw new Exception("The state of job_1 is not in a complete state\n" + states);
}
if (job_2.getState() != Job.FAILED &&
    job_2.getState() != Job.DEPENDENT_FAILED &&
    job_2.getState() != Job.SUCCESS) {
  String states = "job_2: " + job_2.getState() + "\n";
  throw new Exception("The state of job_2 is not in a complete state\n" + states);
}
Code example from: ch.cern.hadoop/hadoop-mapreduce-client-jobclient
@SuppressWarnings("deprecation")
@Test(timeout = 30000)
public void testJobState() throws Exception {
  Job job_1 = getCopyJob();
  JobControl jc = new JobControl("Test");
  jc.addJob(job_1);
  Assert.assertEquals(Job.WAITING, job_1.getState());
  job_1.setState(Job.SUCCESS);
  Assert.assertEquals(Job.WAITING, job_1.getState());
  org.apache.hadoop.mapreduce.Job mockjob =
      mock(org.apache.hadoop.mapreduce.Job.class);
  org.apache.hadoop.mapreduce.JobID jid =
      new org.apache.hadoop.mapreduce.JobID("test", 0);
  when(mockjob.getJobID()).thenReturn(jid);
  job_1.setJob(mockjob);
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  job_1.setMapredJobID("job_test_0001");
  Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
  jc.stop();
}
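Both of the repeated assertions pass because setState and setMapredJobID are deliberate no-ops in 2.x: the framework owns the job state and the job ID, as the setMapredJobID javadoc above points out.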
Code example from: org.apache.pig/pig
private void getStats(Job job, boolean errNotDbg,
    PigContext pigContext) throws ExecException {
  JobID MRJobID = job.getAssignedJobID();
  String jobMessage = job.getMessage();
  Exception backendException = null;
  if (MRJobID == null) {
    if (job.getState() == Job.SUCCESS) {
Code example from: org.apache.pig/pig
public static Counters getCounters(Job job) throws IOException {
  try {
    Cluster cluster = new Cluster(job.getJobConf());
    org.apache.hadoop.mapreduce.Job mrJob = cluster.getJob(job.getAssignedJobID());
    if (mrJob == null) { // In local mode, mrJob will be null
      mrJob = job.getJob();
    }
    return new Counters(mrJob.getCounters());
  } catch (Exception ir) {
    throw new IOException(ir);
  }
}
Code example from: org.apache.pig/pig
MRJobStats addMRJobStats(Job job) {
  MapReduceOper mro = jobMroMap.get(job);
  if (mro == null) {
    LOG.warn("unable to get MR oper for job: " + job.toString());
    return null;
  }
  MRJobStats js = mroJobMap.get(mro);
  JobID jobId = job.getAssignedJobID();
  js.setId(jobId);
  js.setAlias(mro);
  js.setConf(job.getJobConf());
  return js;
}
Code example from: org.apache.pig/pig
JobConf jobConf = jobsWithoutIds.get(0).getJobConf();
String port = jobConf.get(MRConfiguration.JOB_TRACKER_HTTP_ADDRESS);
// ...
JobConf jobConfCopy = job.getJobConf();
jobConfCopy.set("pig.script.submitted.timestamp",
    Long.toString(scriptSubmittedTimestamp));
jobConfCopy.set("pig.job.submitted.timestamp",
    Long.toString(System.currentTimeMillis()));
job.setJobConf(jobConfCopy);
if (job.getAssignedJobID() != null) {
  log.info("HadoopJobId: " + job.getAssignedJobID());
}
// ... elsewhere in the same class, on failure:
log.info("job " + job.getAssignedJobID() + " has failed! Stop running all dependent jobs");
Code example from: org.apache.pig/pig
@Override
public void kill() {
  try {
    if (jc != null && jc.getRunningJobs().size() > 0) {
      log.info("Received kill signal");
      for (Job job : jc.getRunningJobs()) {
        org.apache.hadoop.mapreduce.Job mrJob = job.getJob();
        try {
          if (mrJob != null) {
            mrJob.killJob();
          }
        } catch (Exception ir) {
          throw new IOException(ir);
        }
        log.info("Job " + job.getAssignedJobID() + " killed");
        String timeStamp = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
            .format(Calendar.getInstance().getTime());
        System.err.println(timeStamp + " Job " + job.getAssignedJobID() + " killed");
      }
    }
  } catch (Exception e) {
    log.warn("Encounter exception on cleanup:" + e);
  }
}
Code example from: org.apache.pig/pig
@Private
public static void setBackendException(Job job, Exception e) {
  JobID jobId = job.getAssignedJobID();
  if (jobId == null) {
    return;
  }
  PigStats.get().setBackendException(jobId.toString(), e);
}
Code example from: org.apache.pig/pig
/**
 * Updates the statistics after a batch of jobs is done
 *
 * @param jc the job control
 */
public static void accumulateStats(JobControl jc) {
  SimplePigStats ps = (SimplePigStats) PigStats.get();
  MRScriptState ss = MRScriptState.get();
  for (Job job : jc.getSuccessfulJobs()) {
    MRJobStats js = addSuccessJobStats(ps, job);
    if (js != null) {
      ss.emitjobFinishedNotification(js);
    }
  }
  for (Job job : jc.getFailedJobs()) {
    MRJobStats js = addFailedJobStats(ps, job);
    if (js != null) {
      js.setErrorMsg(job.getMessage());
      ss.emitJobFailedNotification(js);
    }
  }
}
Code example from: io.hops/hadoop-mapreduce-client-core
public static JobControl createValueAggregatorJobs(String args[],
    Class<? extends ValueAggregatorDescriptor>[] descriptors) throws IOException {
  JobControl theControl = new JobControl("ValueAggregatorJobs");
  ArrayList<Job> dependingJobs = new ArrayList<Job>();
  JobConf aJobConf = createValueAggregatorJob(args);
  if (descriptors != null) {
    setAggregatorDescriptors(aJobConf, descriptors);
  }
  Job aJob = new Job(aJobConf, dependingJobs);
  theControl.addJob(aJob);
  return theControl;
}
Code example from: ch.cern.hadoop/hadoop-mapreduce-client-jobclient
inPaths_1.add(indir);
JobConf jobConf_1 = JobControlTestUtils.createCopyJob(inPaths_1, outdir_1);
Job job_1 = new Job(jobConf_1, dependingJobs);
ArrayList<Path> inPaths_2 = new ArrayList<Path>();
inPaths_2.add(indir);
JobConf jobConf_2 = JobControlTestUtils.createCopyJob(inPaths_2, outdir_2);
Job job_2 = new Job(jobConf_2, dependingJobs);
dependingJobs.add(job_1);
dependingJobs.add(job_2);
Job job_3 = new Job(jobConf_3, dependingJobs);
dependingJobs = new ArrayList<Job>();
dependingJobs.add(job_3);
Job job_4 = new Job(jobConf_4, dependingJobs);
if (job_1.getState() != Job.FAILED &&
    job_1.getState() != Job.DEPENDENT_FAILED &&
    job_1.getState() != Job.SUCCESS) {
  String states = "job_1: " + job_1.getState() + "\n";
  throw new Exception("The state of job_1 is not in a complete state\n" + states);
}
if (job_2.getState() != Job.FAILED &&
    job_2.getState() != Job.DEPENDENT_FAILED &&
    job_2.getState() != Job.SUCCESS) {
  String states = "job_2: " + job_2.getState() + "\n";
  throw new Exception("The state of job_2 is not in a complete state\n" + states);
}
Code example from: com.facebook.hadoop/hadoop-core
checkRunningState();
for (int i = 0; i < n; i++) {
  pred = this.dependingJobs.get(i);
  int s = pred.checkState();
  if (s == Job.WAITING || s == Job.READY || s == Job.RUNNING) {
    break; // a pred is still not completed, continue in WAITING
  }
  if (s == Job.FAILED || s == Job.DEPENDENT_FAILED) {
    this.state = Job.DEPENDENT_FAILED;
    this.message = "depending job " + i + " with jobID "
        + pred.getJobID() + " failed. " + pred.getMessage();
    break;
  }
}