本文整理了Java中org.apache.tez.dag.api.Vertex.setTaskEnvironment()
方法的一些代码示例,展示了Vertex.setTaskEnvironment()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Vertex.setTaskEnvironment()
方法的具体详情如下:
包路径:org.apache.tez.dag.api.Vertex
类名称:Vertex
方法名:setTaskEnvironment
[英]Set the Key-Value pairs of environment variables for tasks of this vertex. This method should be used if different vertices need different env. Else, set environment for all vertices via TezConfiguration#TEZ_TASK_LAUNCH_ENV
[中]为该顶点的任务设置环境变量的键值对。如果不同的顶点需要不同的环境变量,则应使用此方法;否则,通过 TezConfiguration#TEZ_TASK_LAUNCH_ENV 为所有顶点统一设置环境变量。
代码示例来源:origin: apache/drill
/**
 * Builds the Tez vertex for a reduce stage: serializes the reduce operator
 * plan into the processor payload, applies container environment, resources
 * and java opts, and registers the app jar plus any additional local
 * resources keyed by base name.
 *
 * NOTE(review): fs and ctx are accepted but not referenced in this snippet.
 */
private Vertex createVertex(JobConf conf, ReduceWork reduceWork,
    LocalResource appJarLr, List<LocalResource> additionalLr, FileSystem fs,
    Path mrScratchDir, Context ctx) throws Exception {
  // Publish the operator plan for this reduce stage through the job conf.
  conf.set(Utilities.INPUT_NAME, reduceWork.getName());
  Utilities.setReduceWork(conf, reduceWork, mrScratchDir, false);
  // FileSinkOperators expect their tmp directories to exist up front.
  Utilities.createTmpDirs(conf, reduceWork);

  VertexExecutionContext execCtx = createVertexExecutionContext(reduceWork);

  // With auto reduce parallelism, start at the maximum and let Tez shrink it.
  int parallelism = reduceWork.isAutoReduceParallelism()
      ? reduceWork.getMaxReduceTasks()
      : reduceWork.getNumReduceTasks();

  ProcessorDescriptor processor =
      ProcessorDescriptor.create(ReduceTezProcessor.class.getName())
          .setUserPayload(TezUtils.createUserPayloadFromConf(conf));

  Vertex reducer = Vertex.create(
      reduceWork.getName(), processor, parallelism, getContainerResource(conf));
  reducer.setTaskEnvironment(getContainerEnvironment(conf, false));
  reducer.setExecutionContext(execCtx);
  reducer.setTaskLaunchCmdOpts(getContainerJavaOpts(conf));

  // App jar first, then every additional resource, all keyed by base name.
  Map<String, LocalResource> resources = new HashMap<String, LocalResource>();
  resources.put(getBaseName(appJarLr), appJarLr);
  for (LocalResource resource : additionalLr) {
    resources.put(getBaseName(resource), resource);
  }
  reducer.addTaskLocalFiles(resources);
  return reducer;
}
代码示例来源:origin: apache/drill
.setUserPayload(serializedConf), numTasks, getContainerResource(conf));
map.setTaskEnvironment(getContainerEnvironment(conf, true));
map.setExecutionContext(executionContext);
map.setTaskLaunchCmdOpts(getContainerJavaOpts(conf));
代码示例来源:origin: apache/hive
/**
 * Builds the Tez vertex for a reduce stage: serializes the reduce operator
 * plan into the processor payload, applies container environment, resources
 * and java opts, and attaches the caller-supplied local resources.
 *
 * NOTE(review): fs and ctx are accepted but not referenced in this snippet.
 */
private Vertex createVertex(JobConf conf, ReduceWork reduceWork, FileSystem fs,
    Path mrScratchDir, Context ctx, Map<String, LocalResource> localResources)
    throws Exception {
  // Wire the operator plan for this reduce stage into the job conf.
  conf.set(Utilities.INPUT_NAME, reduceWork.getName());
  Utilities.setReduceWork(conf, reduceWork, mrScratchDir, false);
  // FileSinkOperators require their tmp directories to pre-exist.
  Utilities.createTmpDirs(conf, reduceWork);

  VertexExecutionContext execCtx = createVertexExecutionContext(reduceWork);

  // With auto reduce parallelism, start at the maximum and let Tez shrink it.
  int taskCount = reduceWork.isAutoReduceParallelism()
      ? reduceWork.getMaxReduceTasks()
      : reduceWork.getNumReduceTasks();

  ProcessorDescriptor processor =
      ProcessorDescriptor.create(ReduceTezProcessor.class.getName())
          .setUserPayload(TezUtils.createUserPayloadFromConf(conf));

  Vertex reducer = Vertex.create(
      reduceWork.getName(), processor, taskCount, getContainerResource(conf));
  reducer.setTaskEnvironment(getContainerEnvironment(conf, false));
  reducer.setExecutionContext(execCtx);
  reducer.setTaskLaunchCmdOpts(getContainerJavaOpts(conf));
  reducer.addTaskLocalFiles(localResources);
  return reducer;
}
代码示例来源:origin: apache/hive
.setUserPayload(serializedConf), numTasks, getContainerResource(conf));
map.setTaskEnvironment(getContainerEnvironment(conf, true));
map.setExecutionContext(executionContext);
map.setTaskLaunchCmdOpts(getContainerJavaOpts(conf));
代码示例来源:origin: cwensel/cascading
/**
 * Creates the Tez vertex that executes the given flow node via FlowProcessor,
 * optionally applying the planner-level task environment.
 */
private Vertex newVertex( FlowNode flowNode, Configuration conf, int parallelism )
  {
  // todo: pack into payload directly instead of routing through the conf
  conf.set( FlowNode.CASCADING_FLOW_NODE, pack( flowNode, conf ) );

  ProcessorDescriptor processor = ProcessorDescriptor
    .create( FlowProcessor.class.getName() )
    .setUserPayload( getPayload( conf ) );

  Vertex result = Vertex.create( flowNode.getID(), processor, parallelism );

  // only override the task env when one was configured for this planner
  if( environment != null )
    result.setTaskEnvironment( environment );

  return result;
  }
代码示例来源:origin: cascading/cascading-hadoop2-tez
/**
 * Builds a Tez vertex for the flow node, running FlowProcessor with the
 * serialized node as its payload; applies a custom task environment if set.
 */
private Vertex newVertex( FlowNode flowNode, Configuration conf, int parallelism )
  {
  // todo: pack into payload directly rather than via the conf
  conf.set( FlowNode.CASCADING_FLOW_NODE, pack( flowNode, conf ) );

  ProcessorDescriptor flowProcessor =
    ProcessorDescriptor.create( FlowProcessor.class.getName() );
  flowProcessor.setUserPayload( getPayload( conf ) );

  Vertex nodeVertex = Vertex.create( flowNode.getID(), flowProcessor, parallelism );

  // a null environment means inherit the default task env
  if( environment != null )
    nodeVertex.setTaskEnvironment( environment );

  return nodeVertex;
  }
代码示例来源:origin: org.apache.tez/tez-tests
mapVertex.addTaskLocalFiles(commonLocalResources)
.addDataSource("MRInput", dataSource)
.setTaskLaunchCmdOpts(MRHelpers.getJavaOptsForMRMapper(mapStageConf)).setTaskEnvironment(mapEnv);
vertices.add(mapVertex);
ivertex.addTaskLocalFiles(commonLocalResources)
.setTaskLaunchCmdOpts(MRHelpers.getJavaOptsForMRReducer(
intermediateReduceStageConfs[i])).setTaskEnvironment(reduceEnv);
vertices.add(ivertex);
finalReduceConf, NullOutputFormat.class).build())
.setTaskLaunchCmdOpts(MRHelpers.getJavaOptsForMRReducer(finalReduceConf))
.setTaskEnvironment(reduceEnv);
vertices.add(finalReduceVertex);
} else {
代码示例来源:origin: com.facebook.presto.hive/hive-apache
/**
 * Builds the Tez vertex for a reduce stage: serializes the reduce operator
 * plan into the processor payload, applies container environment, resources
 * and java opts, and registers the app jar plus any additional local
 * resources keyed by base name.
 *
 * NOTE(review): fs and ctx are accepted but not referenced in this snippet.
 */
private Vertex createVertex(JobConf conf, ReduceWork reduceWork,
    LocalResource appJarLr, List<LocalResource> additionalLr, FileSystem fs,
    Path mrScratchDir, Context ctx) throws Exception {
  // Publish the operator plan for this reduce stage through the job conf.
  conf.set(Utilities.INPUT_NAME, reduceWork.getName());
  Utilities.setReduceWork(conf, reduceWork, mrScratchDir, false);
  // FileSinkOperators expect their tmp directories to pre-exist.
  Utilities.createTmpDirs(conf, reduceWork);

  // With auto reduce parallelism, start at the maximum and let Tez shrink it.
  int parallelism = reduceWork.isAutoReduceParallelism()
      ? reduceWork.getMaxReduceTasks()
      : reduceWork.getNumReduceTasks();

  ProcessorDescriptor processor =
      ProcessorDescriptor.create(ReduceTezProcessor.class.getName())
          .setUserPayload(TezUtils.createUserPayloadFromConf(conf));

  Vertex reducer = Vertex.create(
      reduceWork.getName(), processor, parallelism, getContainerResource(conf));
  reducer.setTaskEnvironment(getContainerEnvironment(conf, false));
  reducer.setTaskLaunchCmdOpts(getContainerJavaOpts(conf));

  // App jar first, then every additional resource, all keyed by base name.
  Map<String, LocalResource> resources = new HashMap<String, LocalResource>();
  resources.put(getBaseName(appJarLr), appJarLr);
  for (LocalResource resource : additionalLr) {
    resources.put(getBaseName(resource), resource);
  }
  reducer.addTaskLocalFiles(resources);
  return reducer;
}
代码示例来源:origin: org.apache.tez/tez-tests
MRHelpers.getResourceForMRMapper(mapStageConf));
mapVertex.setTaskLaunchCmdOpts(MRHelpers.getJavaOptsForMRMapper(mapStageConf));
mapVertex.setTaskEnvironment(mapEnv);
intermediateNumReduceTasks, MRHelpers.getResourceForMRReducer(iReduceStageConf));
intermediateVertex.setTaskLaunchCmdOpts(MRHelpers.getJavaOptsForMRReducer(iReduceStageConf));
intermediateVertex.setTaskEnvironment(reduceEnv);
MRHelpers.getResourceForMRReducer(finalReduceConf));
finalReduceVertex.setTaskLaunchCmdOpts(MRHelpers.getJavaOptsForMRReducer(finalReduceConf));
finalReduceVertex.setTaskEnvironment(reduceEnv);
代码示例来源:origin: org.apache.tez/tez-mapreduce
: MRHelpers.getJavaOptsForMRReducer(stageConf);
vertex.setTaskEnvironment(taskEnv)
.addTaskLocalFiles(taskLocalResources)
.setLocationHint(VertexLocationHint.create(locations))
代码示例来源:origin: com.facebook.presto.hive/hive-apache
.setUserPayload(serializedConf), numTasks, getContainerResource(conf));
map.setTaskEnvironment(getContainerEnvironment(conf, true));
map.setTaskLaunchCmdOpts(getContainerJavaOpts(conf));
代码示例来源:origin: org.apache.pig/pig
vertex.setTaskEnvironment(mapTaskEnv);
} else {
vertex.setTaskLaunchCmdOpts(reduceTaskLaunchCmdOpts);
vertex.setTaskEnvironment(reduceTaskEnv);
内容来源于网络,如有侵权,请联系作者删除!