本文整理了Java中org.apache.tez.dag.api.Vertex.setTaskLaunchCmdOpts()
方法的一些代码示例,展示了Vertex.setTaskLaunchCmdOpts()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Vertex.setTaskLaunchCmdOpts()
方法的具体详情如下:
包路径:org.apache.tez.dag.api.Vertex
类名称:Vertex
方法名:setTaskLaunchCmdOpts
[英]Set the command opts for tasks of this vertex. This method should be used when different vertices have different opts. Else, set the launch opts for all vertices via TezConfiguration#TEZ_TASK_LAUNCH_CMD_OPTS
[中]为该顶点的任务设置命令行选项。当不同的顶点需要不同的选项时,应使用此方法。否则,请通过 TezConfiguration#TEZ_TASK_LAUNCH_CMD_OPTS 为所有顶点统一设置启动选项。
代码示例来源:origin: apache/drill
/**
 * Builds the Tez {@link Vertex} for a reduce stage: serializes the operator
 * plan into the job conf, pre-creates FileSinkOperator temp dirs, and attaches
 * environment, JVM opts, execution context, and task-local resources.
 */
private Vertex createVertex(JobConf conf, ReduceWork reduceWork,
    LocalResource appJarLr, List<LocalResource> additionalLr, FileSystem fs,
    Path mrScratchDir, Context ctx) throws Exception {
  // Wire this reduce stage's operator plan into the job configuration.
  conf.set(Utilities.INPUT_NAME, reduceWork.getName());
  Utilities.setReduceWork(conf, reduceWork, mrScratchDir, false);

  // FileSinkOperators expect their temp directories to already exist.
  Utilities.createTmpDirs(conf, reduceWork);

  VertexExecutionContext executionContext = createVertexExecutionContext(reduceWork);

  // With auto-parallelism, start at the max and let Tez shrink the task count.
  int parallelism = reduceWork.isAutoReduceParallelism()
      ? reduceWork.getMaxReduceTasks()
      : reduceWork.getNumReduceTasks();

  Vertex reduceVertex = Vertex.create(reduceWork.getName(),
      ProcessorDescriptor.create(ReduceTezProcessor.class.getName())
          .setUserPayload(TezUtils.createUserPayloadFromConf(conf)),
      parallelism, getContainerResource(conf));

  reduceVertex.setTaskEnvironment(getContainerEnvironment(conf, false));
  reduceVertex.setExecutionContext(executionContext);
  reduceVertex.setTaskLaunchCmdOpts(getContainerJavaOpts(conf));

  // Ship the application jar plus any additional session resources to each task.
  Map<String, LocalResource> taskResources = new HashMap<String, LocalResource>();
  taskResources.put(getBaseName(appJarLr), appJarLr);
  for (LocalResource resource : additionalLr) {
    taskResources.put(getBaseName(resource), resource);
  }
  reduceVertex.addTaskLocalFiles(taskResources);

  return reduceVertex;
}
代码示例来源:origin: apache/drill
map.setTaskLaunchCmdOpts(getContainerJavaOpts(conf));
代码示例来源:origin: apache/hive
/**
 * Creates the Tez {@link Vertex} for a reduce stage. Serializes the operator
 * plan into {@code conf}, ensures FileSinkOperator temp directories exist,
 * then configures parallelism, environment, JVM launch opts, execution
 * context, and the caller-supplied task-local resources.
 */
private Vertex createVertex(JobConf conf, ReduceWork reduceWork, FileSystem fs,
    Path mrScratchDir, Context ctx, Map<String, LocalResource> localResources)
    throws Exception {
  // Publish the operator plan for this stage under its vertex name.
  conf.set(Utilities.INPUT_NAME, reduceWork.getName());
  Utilities.setReduceWork(conf, reduceWork, mrScratchDir, false);

  // Temp dirs must exist before any FileSinkOperator runs.
  Utilities.createTmpDirs(conf, reduceWork);

  VertexExecutionContext execCtx = createVertexExecutionContext(reduceWork);

  ProcessorDescriptor processor =
      ProcessorDescriptor.create(ReduceTezProcessor.class.getName())
          .setUserPayload(TezUtils.createUserPayloadFromConf(conf));

  // Auto-parallelism seeds the vertex with the upper bound so Tez can reduce it.
  int taskCount = reduceWork.isAutoReduceParallelism()
      ? reduceWork.getMaxReduceTasks()
      : reduceWork.getNumReduceTasks();

  Vertex reduceVertex =
      Vertex.create(reduceWork.getName(), processor, taskCount, getContainerResource(conf));
  reduceVertex.setTaskEnvironment(getContainerEnvironment(conf, false));
  reduceVertex.setExecutionContext(execCtx);
  reduceVertex.setTaskLaunchCmdOpts(getContainerJavaOpts(conf));
  reduceVertex.addTaskLocalFiles(localResources);

  return reduceVertex;
}
代码示例来源:origin: apache/hive
map.setTaskLaunchCmdOpts(getContainerJavaOpts(conf));
代码示例来源:origin: cascading/cascading-hadoop2-tez
/**
 * Appends a JDWP remote-debug agent to the vertex's task launch opts when the
 * {@code test.debug.node} system property names this node (by source element
 * name or ordinal). The debugger address comes from {@code test.debug.address}.
 */
private void addRemoteDebug( FlowNode flowNode, Vertex vertex )
  {
  String targetNode = System.getProperty( "test.debug.node", null );

  if( Util.isEmpty( targetNode ) )
    return;

  boolean namedSource = flowNode.getSourceElementNames().contains( targetNode );

  // Match either a source element name or the node's ordinal position.
  if( !namedSource && asInt( targetNode ) != flowNode.getOrdinal() )
    return;

  LOG.warn( "remote debugging enabled with property: {}, on node: {}, with node id: {}", "test.debug.node", targetNode, flowNode.getID() );

  String currentOpts = vertex.getTaskLaunchCmdOpts();

  if( currentOpts == null )
    currentOpts = "";

  String debugAddress = System.getProperty( "test.debug.address", "localhost:5005" ).trim();

  // suspend=y, server=n: the task JVM halts and dials out to the waiting debugger.
  String agentOpt = " -agentlib:jdwp=transport=dt_socket,server=n,address=" + debugAddress + ",suspend=y";

  vertex.setTaskLaunchCmdOpts( currentOpts + agentOpt );
  }
代码示例来源:origin: cwensel/cascading
/**
 * Enables remote JVM debugging for a single node's tasks. Selection is driven
 * by the {@code test.debug.node} system property, which may name a source
 * element or give the node ordinal; the JDWP agent string is appended to any
 * launch opts already set on the vertex.
 */
private void addRemoteDebug( FlowNode flowNode, Vertex vertex )
  {
  String selector = System.getProperty( "test.debug.node", null );

  if( Util.isEmpty( selector ) )
    return;

  // Skip nodes the selector does not reference by source name or ordinal.
  if( !flowNode.getSourceElementNames().contains( selector ) && asInt( selector ) != flowNode.getOrdinal() )
    return;

  LOG.warn( "remote debugging enabled with property: {}, on node: {}, with node id: {}", "test.debug.node", selector, flowNode.getID() );

  String existing = vertex.getTaskLaunchCmdOpts();
  StringBuilder launchOpts = new StringBuilder( existing == null ? "" : existing );

  String address = System.getProperty( "test.debug.address", "localhost:5005" ).trim();

  launchOpts
    .append( " -agentlib:jdwp=transport=dt_socket,server=n,address=" )
    .append( address )
    .append( ",suspend=y" );

  vertex.setTaskLaunchCmdOpts( launchOpts.toString() );
  }
代码示例来源:origin: cwensel/cascading
/**
 * Turns on Java Flight Recorder profiling for a single node's tasks when the
 * {@code test.profile.node} system property selects this node (by source
 * element name or ordinal). Recordings are dumped under the directory given
 * by {@code test.profile.path}, keyed by node id.
 */
private void addRemoteProfiling( FlowNode flowNode, Vertex vertex )
  {
  String selector = System.getProperty( "test.profile.node", null );

  if( Util.isEmpty( selector ) )
    return;

  // Only profile the node the selector names, by source element or ordinal.
  if( !flowNode.getSourceElementNames().contains( selector ) && asInt( selector ) != flowNode.getOrdinal() )
    return;

  LOG.warn( "remote profiling enabled with property: {}, on node: {}, with node id: {}", "test.profile.node", selector, flowNode.getID() );

  String existing = vertex.getTaskLaunchCmdOpts();

  if( existing == null )
    existing = "";

  String recordingPath = System.getProperty( "test.profile.path", "/tmp/jfr/" );

  // Normalize to a trailing slash so the node id concatenates as a child path.
  if( !recordingPath.endsWith( "/" ) )
    recordingPath += "/";

  LOG.warn( "remote profiling property: {}, logging to path: {}", "test.profile.path", recordingPath );

  String jfrOpts = String.format( " -XX:+UnlockCommercialFeatures -XX:+FlightRecorder -XX:FlightRecorderOptions=defaultrecording=true,dumponexit=true,dumponexitpath=%1$s%2$s,disk=true,repository=%1$s%2$s", recordingPath, flowNode.getID() );

  vertex.setTaskLaunchCmdOpts( existing + jfrOpts );
  }
代码示例来源:origin: cascading/cascading-hadoop2-tez
/**
 * Appends Java Flight Recorder options to the vertex's task launch opts when
 * {@code test.profile.node} matches this node. The dump/repository location
 * is {@code test.profile.path} (default {@code /tmp/jfr/}) plus the node id.
 */
private void addRemoteProfiling( FlowNode flowNode, Vertex vertex )
  {
  String nodeProperty = System.getProperty( "test.profile.node", null );

  if( Util.isEmpty( nodeProperty ) )
    return;

  boolean matchesName = flowNode.getSourceElementNames().contains( nodeProperty );
  boolean matchesOrdinal = asInt( nodeProperty ) == flowNode.getOrdinal();

  if( !matchesName && !matchesOrdinal )
    return;

  LOG.warn( "remote profiling enabled with property: {}, on node: {}, with node id: {}", "test.profile.node", nodeProperty, flowNode.getID() );

  String baseOpts = vertex.getTaskLaunchCmdOpts();
  String opts = baseOpts == null ? "" : baseOpts;

  String path = System.getProperty( "test.profile.path", "/tmp/jfr/" );

  // Ensure the node id lands inside the directory, not glued to its name.
  if( !path.endsWith( "/" ) )
    path = path + "/";

  LOG.warn( "remote profiling property: {}, logging to path: {}", "test.profile.path", path );

  opts = opts + String.format( " -XX:+UnlockCommercialFeatures -XX:+FlightRecorder -XX:FlightRecorderOptions=defaultrecording=true,dumponexit=true,dumponexitpath=%1$s%2$s,disk=true,repository=%1$s%2$s", path, flowNode.getID() );

  vertex.setTaskLaunchCmdOpts( opts );
  }
代码示例来源:origin: org.apache.tez/tez-tests
mapVertex.addTaskLocalFiles(commonLocalResources)
.addDataSource("MRInput", dataSource)
.setTaskLaunchCmdOpts(MRHelpers.getJavaOptsForMRMapper(mapStageConf)).setTaskEnvironment(mapEnv);
vertices.add(mapVertex);
MRHelpers.getResourceForMRReducer(intermediateReduceStageConfs[i]));
ivertex.addTaskLocalFiles(commonLocalResources)
.setTaskLaunchCmdOpts(MRHelpers.getJavaOptsForMRReducer(
intermediateReduceStageConfs[i])).setTaskEnvironment(reduceEnv);
vertices.add(ivertex);
.addDataSink("MROutput", MROutputLegacy.createConfigBuilder(
finalReduceConf, NullOutputFormat.class).build())
.setTaskLaunchCmdOpts(MRHelpers.getJavaOptsForMRReducer(finalReduceConf))
.setTaskEnvironment(reduceEnv);
vertices.add(finalReduceVertex);
代码示例来源:origin: com.facebook.presto.hive/hive-apache
/**
 * Constructs the reduce-stage Tez {@link Vertex}: embeds the operator plan in
 * the conf, pre-creates FileSinkOperator temp dirs, sets environment and JVM
 * opts, and localizes the app jar plus any additional resources for tasks.
 */
private Vertex createVertex(JobConf conf, ReduceWork reduceWork,
    LocalResource appJarLr, List<LocalResource> additionalLr, FileSystem fs,
    Path mrScratchDir, Context ctx) throws Exception {
  // Register this stage's operator plan under the vertex name.
  conf.set(Utilities.INPUT_NAME, reduceWork.getName());
  Utilities.setReduceWork(conf, reduceWork, mrScratchDir, false);

  // FileSinkOperators require their temp directories ahead of time.
  Utilities.createTmpDirs(conf, reduceWork);

  // Auto-parallelism begins at the upper bound; Tez trims it at runtime.
  int numTasks = reduceWork.isAutoReduceParallelism()
      ? reduceWork.getMaxReduceTasks()
      : reduceWork.getNumReduceTasks();

  ProcessorDescriptor reduceProcessor =
      ProcessorDescriptor.create(ReduceTezProcessor.class.getName())
          .setUserPayload(TezUtils.createUserPayloadFromConf(conf));

  Vertex reduceVertex = Vertex.create(
      reduceWork.getName(), reduceProcessor, numTasks, getContainerResource(conf));
  reduceVertex.setTaskEnvironment(getContainerEnvironment(conf, false));
  reduceVertex.setTaskLaunchCmdOpts(getContainerJavaOpts(conf));

  // Localize the app jar and every extra resource for the task containers.
  Map<String, LocalResource> resources = new HashMap<String, LocalResource>();
  resources.put(getBaseName(appJarLr), appJarLr);
  for (LocalResource extra : additionalLr) {
    resources.put(getBaseName(extra), extra);
  }
  reduceVertex.addTaskLocalFiles(resources);

  return reduceVertex;
}
代码示例来源:origin: org.apache.tez/tez-tests
mapVertex = Vertex.create("initialmap", mapProcessorDescriptor, -1,
MRHelpers.getResourceForMRMapper(mapStageConf));
mapVertex.setTaskLaunchCmdOpts(MRHelpers.getJavaOptsForMRMapper(mapStageConf));
mapVertex.setTaskEnvironment(mapEnv);
intermediateVertex = Vertex.create("intermediate_reducer", iReduceProcessorDescriptor,
intermediateNumReduceTasks, MRHelpers.getResourceForMRReducer(iReduceStageConf));
intermediateVertex.setTaskLaunchCmdOpts(MRHelpers.getJavaOptsForMRReducer(iReduceStageConf));
intermediateVertex.setTaskEnvironment(reduceEnv);
finalReduceVertex = Vertex.create("finalreduce", finalReduceProcessorDescriptor, 1,
MRHelpers.getResourceForMRReducer(finalReduceConf));
finalReduceVertex.setTaskLaunchCmdOpts(MRHelpers.getJavaOptsForMRReducer(finalReduceConf));
finalReduceVertex.setTaskEnvironment(reduceEnv);
代码示例来源:origin: com.facebook.presto.hive/hive-apache
map.setTaskLaunchCmdOpts(getContainerJavaOpts(conf));
代码示例来源:origin: org.apache.tez/tez-mapreduce
.addTaskLocalFiles(taskLocalResources)
.setLocationHint(VertexLocationHint.create(locations))
.setTaskLaunchCmdOpts(taskJavaOpts);
代码示例来源:origin: org.apache.pig/pig
vertex.setTaskLaunchCmdOpts(mapTaskLaunchCmdOpts);
vertex.setTaskEnvironment(mapTaskEnv);
} else {
vertex.setTaskLaunchCmdOpts(reduceTaskLaunchCmdOpts);
vertex.setTaskEnvironment(reduceTaskEnv);
内容来源于网络,如有侵权,请联系作者删除!