Usage of the org.apache.hadoop.yarn.util.Records class, with code examples


This article collects a number of Java code examples for the org.apache.hadoop.yarn.util.Records class to show how it is used in practice. The examples were extracted from selected open-source projects found on GitHub, Stack Overflow, Maven and similar sources, and should serve as useful references. Details of the Records class:
Package: org.apache.hadoop.yarn.util
Class name: Records

About Records

Javadoc description: "Convenient API record utils". Its static newRecord(Class) method creates instances of YARN's record interfaces, as the examples below demonstrate.
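
All of the examples that follow share the same pattern: obtain an instance of a YARN record interface (Resource, Priority, ContainerLaunchContext, LocalResource, and so on) from Records.newRecord(Class), then populate it with setters. The minimal sketch below illustrates that pattern in isolation; the class name RecordsSketch and the memory/vCore values are illustrative only and are not taken from any of the quoted projects, and it assumes hadoop-yarn-api and hadoop-yarn-common are on the classpath.

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.Records;

public class RecordsSketch {
 public static void main(String[] args) {
  // Records.newRecord(...) returns the protobuf-backed implementation of the record interface.
  Resource capability = Records.newRecord(Resource.class);
  capability.setMemory(1024);    // illustrative value; newer Hadoop versions prefer setMemorySize(long)
  capability.setVirtualCores(1); // illustrative value

  Priority priority = Records.newRecord(Priority.class);
  priority.setPriority(0);

  System.out.println("capability=" + capability + ", priority=" + priority);
 }
}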

Code Examples

Code example source: Qihoo360/XLearning

// Excerpt from XLearning's YARN client: validate cached resources, then build the
// ApplicationMaster submission context (surrounding loop and setup code elided).
if (!pathRemote.getFileSystem(conf).exists(pathRemote)) {
 throw new IOException("cacheFile path " + pathRemote + " not existed!");
}
// ...
pathRemote = new Path(path);
if (!pathRemote.getFileSystem(conf).exists(pathRemote)) {
 throw new IOException("cacheArchive path " + pathRemote + " not existed!");
}
// ...
appMasterEnv.put(XLearningConstants.Environment.XLEARNING_CONTAINER_MAX_MEMORY.toString(), String.valueOf(newAppResponse.getMaximumResourceCapability().getMemory()));
appMasterLaunchcommands.add(command.toString());

// The AM resource request is a Records-created Resource filled from configuration.
Resource capability = Records.newRecord(Resource.class);
capability.setMemory(conf.getInt(XLearningConfiguration.XLEARNING_AM_MEMORY, XLearningConfiguration.DEFAULT_XLEARNING_AM_MEMORY));
capability.setVirtualCores(conf.getInt(XLearningConfiguration.XLEARNING_AM_CORES, XLearningConfiguration.DEFAULT_XLEARNING_AM_CORES));
applicationContext.setResource(capability);

ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance(
  localResources, appMasterEnv, appMasterLaunchcommands, null, null, null);

Priority priority = Records.newRecord(Priority.class);
priority.setPriority(conf.getInt(XLearningConfiguration.XLEARNING_APP_PRIORITY, XLearningConfiguration.DEFAULT_XLEARNING_APP_PRIORITY));
applicationContext.setPriority(priority);
applicationContext.setQueue(conf.get(XLearningConfiguration.XLEARNING_APP_QUEUE, XLearningConfiguration.DEFAULT_XLEARNING_APP_QUEUE));

Code example source: Qihoo360/XLearning

// Excerpt from XLearning's application master: read preferred hosts from a local file,
// then build worker/ps container requests (file-reading loop and error handling elided).
String line;
try {
 if (hostLocalPath.getFileSystem(xlConf).exists(hostLocalPath)) {
  FSDataInputStream in = hostLocalPath.getFileSystem(xlConf).open(hostLocalPath);
  BufferedReader br = new BufferedReader(new InputStreamReader(in));
  line = br.readLine();
  // ... parse the host list into hostLocals ...
 }
} catch (IOException e) {
 // ... elided ...
}

Priority priority = Records.newRecord(Priority.class);
priority.setPriority(appPriority);

Resource workerCapability = Records.newRecord(Resource.class);
workerCapability.setMemory(workerMemory);
workerCapability.setVirtualCores(workerVCores);
workerContainerRequest = new ContainerRequest(workerCapability, hostLocals, null, priority);
LOG.info("Create worker container request: " + workerContainerRequest.toString());

Resource psCapability = Records.newRecord(Resource.class);
psCapability.setMemory(psMemory);
psCapability.setVirtualCores(psVCores);
psContainerRequest = new ContainerRequest(psCapability, hostLocals, null, priority);

Code example source: apache/flink

// Excerpt from Flink's YARN cluster descriptor (parts of the scheme check and the warning text are elided).
final FileSystem fs = FileSystem.get(yarnConfiguration);
final Path homeDir = fs.getHomeDirectory();
if (/* ... */ fs.getScheme().startsWith("file")) {
  LOG.warn("The file system scheme is '" + fs.getScheme() + "'. This indicates that the "
      + "specified Hadoop configuration path is wrong and the system is using the default Hadoop configuration values."
      /* ... rest of the warning elided ... */);
}
// ...
amContainer.setLocalResources(localResources);
fs.close();
amContainer.setEnvironment(appMasterEnv);

// Resource request for the ApplicationMaster container.
Resource capability = Records.newRecord(Resource.class);
capability.setMemory(clusterSpecification.getMasterMemoryMB());
capability.setVirtualCores(1);

Code example source: apache/ignite

// Excerpt from Ignite's YARN client: build the AM container launch context and resource request
// (the full launch command and the security/token handling are elided).
FileSystem fs = FileSystem.get(conf);

ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
amContainer.setCommands(
  Collections.singletonList(
    Environment.JAVA_HOME.$() + "/bin/java -Xmx512m " + ApplicationMaster.class.getName()
      /* ... remaining command arguments elided ... */
  )
);
amContainer.setLocalResources(Collections.singletonMap(IgniteYarnUtils.JAR_NAME, appMasterJar));
amContainer.setEnvironment(appMasterEnv);

// When security is enabled, delegation tokens are also collected in the original code:
//   final Token<?> tokens[] = fs.addDelegationTokens(tokRenewer, creds);

Resource capability = Records.newRecord(Resource.class);
capability.setMemory(512);
capability.setVirtualCores(1);

Code example source: Qihoo360/XLearning

public static LocalResource createApplicationResource(FileSystem fs, Path path, LocalResourceType type)
  throws IOException {
 LocalResource localResource = Records.newRecord(LocalResource.class);
 FileStatus fileStatus = fs.getFileStatus(path);
 localResource.setResource(ConverterUtils.getYarnUrlFromPath(path));
 localResource.setSize(fileStatus.getLen());
 localResource.setTimestamp(fileStatus.getModificationTime());
 localResource.setType(type);
 localResource.setVisibility(LocalResourceVisibility.APPLICATION);
 return localResource;
}

Code example source: apache/ignite

/**
 * @param file Path.
 * @param fs File system.
 * @param type Local resource type.
 * @throws Exception If failed.
 */
public static LocalResource setupFile(Path file, FileSystem fs, LocalResourceType type)
  throws Exception {
  LocalResource resource = Records.newRecord(LocalResource.class);
  file = fs.makeQualified(file);
  FileStatus stat = fs.getFileStatus(file);
  resource.setResource(ConverterUtils.getYarnUrlFromPath(file));
  resource.setSize(stat.getLen());
  resource.setTimestamp(stat.getModificationTime());
  resource.setType(type);
  resource.setVisibility(LocalResourceVisibility.APPLICATION);
  return resource;
}

Code example source: apache/flink

private static LocalResource registerLocalResource(FileSystem fs, Path remoteRsrcPath) throws IOException {
  LocalResource localResource = Records.newRecord(LocalResource.class);
  FileStatus jarStat = fs.getFileStatus(remoteRsrcPath);
  localResource.setResource(ConverterUtils.getYarnUrlFromURI(remoteRsrcPath.toUri()));
  localResource.setSize(jarStat.getLen());
  localResource.setTimestamp(jarStat.getModificationTime());
  localResource.setType(LocalResourceType.FILE);
  localResource.setVisibility(LocalResourceVisibility.APPLICATION);
  return localResource;
}

Code example source: Qihoo360/XLearning

@Override
public CancelDelegationTokenResponse cancelDelegationToken(
  CancelDelegationTokenRequest request) throws IOException {
 if (!isAllowedDelegationTokenOp()) {
  throw new IOException(
    "Delegation Token can be cancelled only with kerberos authentication");
 }
 org.apache.hadoop.yarn.api.records.Token protoToken = request.getDelegationToken();
 Token<MRDelegationTokenIdentifier> token =
   new Token<MRDelegationTokenIdentifier>(
     protoToken.getIdentifier().array(), protoToken.getPassword()
     .array(), new Text(protoToken.getKind()), new Text(
     protoToken.getService()));
 String user = UserGroupInformation.getCurrentUser().getUserName();
 jhsDTSecretManager.cancelToken(token, user);
 return Records.newRecord(CancelDelegationTokenResponse.class);
}

Code example source: apache/incubator-gobblin

private void requestContainer(Optional<String> preferredNode) {
 Priority priority = Records.newRecord(Priority.class);
 priority.setPriority(0);
 Resource capability = Records.newRecord(Resource.class);
 int maxMemoryCapacity = this.maxResourceCapacity.get().getMemory();
 capability.setMemory(this.requestedContainerMemoryMbs <= maxMemoryCapacity ?
   this.requestedContainerMemoryMbs : maxMemoryCapacity);
 int maxCoreCapacity = this.maxResourceCapacity.get().getVirtualCores();
 capability.setVirtualCores(this.requestedContainerCores <= maxCoreCapacity ?
   this.requestedContainerCores : maxCoreCapacity);
 String[] preferredNodes = preferredNode.isPresent() ? new String[] {preferredNode.get()} : null;
 this.amrmClientAsync.addContainerRequest(
   new AMRMClient.ContainerRequest(capability, preferredNodes, null, priority));
}

Code example source: apache/ignite

// Excerpt from Ignite's ApplicationMaster: prepare a launch context for an allocated container
// (command construction, container start and logging are elided).
if (checkContainer(c)) {
  try {
    ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
    ctx.setTokens(allTokens.duplicate());

    Map<String, String> env = new HashMap<>(ctx.getEnvironment());
    env.put("JVM_OPTS", props.jvmOpts());
    ctx.setEnvironment(env);

    // ... the launch command is set and the container started; a log message reports
    //     c.getId(), c.getNodeId(), c.getResource().getVirtualCores() and c.getResource().getMemory() ...
  }
  catch (Exception e) {
    // ... error handling elided ...
  }
}

Code example source: apache/drill

/**
 * Given this generic description of an application, create the detailed YARN
 * application submission context required to launch the application.
 *
 * @param conf
 *          the YARN configuration obtained by reading the Hadoop
 *          configuration files
 * @return the completed application launch context for the given application
 * @throws IOException
 *           if localized resources are not found in the distributed file
 *           system (such as HDFS)
 */
public ContainerLaunchContext createLaunchContext(YarnConfiguration conf)
  throws IOException {
 // Set up the container launch context
 ContainerLaunchContext container = Records
   .newRecord(ContainerLaunchContext.class);
 // Set up the list of commands to run. Here, we assume that we run only
 // one command.
 container.setCommands(Collections.singletonList(getCommand()));
 // Add localized resources
 container.setLocalResources(resources);
 // Environment.
 container.setEnvironment(env);
 return container;
}

Code example source: apache/drill

public Resource getCapability() {
 // Set up resource type requirements for ApplicationMaster
 Resource capability = Records.newRecord(Resource.class);
 capability.setMemory(memoryMb);
 capability.setVirtualCores(vCores);
 DoYUtil.callSetDiskIfExists(capability, disks);
 return capability;
}

Code example source: com.cloudera.kitten/kitten-common

public Priority createPriority(int priority) {
 Priority p = Records.newRecord(Priority.class);
 p.setPriority(priority);
 return p;
}

Code example source: apache/incubator-gobblin

ContainerLaunchContext amContainerLaunchContext = Records.newRecord(ContainerLaunchContext.class);
amContainerLaunchContext.setLocalResources(appMasterLocalResources);
amContainerLaunchContext.setEnvironment(YarnHelixUtils.getEnvironmentVariables(this.yarnConfiguration));
amContainerLaunchContext.setCommands(Lists.newArrayList(buildApplicationMasterCommand(resource.getMemory())));
if (UserGroupInformation.isSecurityEnabled()) {
 setupSecurityTokens(amContainerLaunchContext);
}

appSubmissionContext.setResource(resource);
appSubmissionContext.setQueue(this.appQueueName);
appSubmissionContext.setPriority(Priority.newInstance(0));
appSubmissionContext.setAMContainerSpec(amContainerLaunchContext);

Code example source: org.apache.hadoop/hadoop-yarn-server-resourcemanager

public RMApp submitApp(int masterMemory, String name, String user,
  Map<ApplicationAccessType, String> acls, boolean unmanaged, String queue,
  int maxAppAttempts, Credentials ts, String appType,
  boolean waitForAccepted, boolean keepContainers) throws Exception {
 Resource resource = Records.newRecord(Resource.class);
 resource.setMemorySize(masterMemory);
 return submitApp(resource, name, user, acls, unmanaged, queue,
   maxAppAttempts, ts, appType, waitForAccepted, keepContainers,
   false, null, 0, null, true, Priority.newInstance(0));
}

Code example source: apache/drill

assert memoryMb != 0;
Priority priorityRec = Records.newRecord(Priority.class);
priorityRec.setPriority(priority);
Resource capability = Records.newRecord(Resource.class);
capability.setMemory(memoryMb);
capability.setVirtualCores(vCores);
DoYUtil.callSetDiskIfExists(capability, disks);

Code example source: org.apache.hadoop/hadoop-yarn-server-common

protected ResourceRequest createResourceRequest(long id, String resource,
  int memory, int vCores, int priority, ExecutionType execType,
  int containers) {
 ResourceRequest req = Records.newRecord(ResourceRequest.class);
 req.setAllocationRequestId(id);
 req.setResourceName(resource);
 req.setCapability(Resource.newInstance(memory, vCores));
 req.setPriority(Priority.newInstance(priority));
 req.setExecutionTypeRequest(ExecutionTypeRequest.newInstance(execType));
 req.setNumContainers(containers);
 return req;
}

Code example source: uber/AthenaX

// Excerpt from AthenaX's YARN deployment code: the AM launch command is assembled above this
// point (its trailing arguments include `false, (int) job.taskManagerMemoryMb()`), then the
// container spec and the AM resource request are filled in.
amContainer.setLocalResources(localResources);
amContainer.setEnvironment(appMasterEnv);

Resource capability = Records.newRecord(Resource.class);
capability.setMemory(getFlinkConfiguration()
  .getInteger(JobManagerOptions.JOB_MANAGER_HEAP_MEMORY));
capability.setVirtualCores(1);

Code example source: io.hops/hadoop-yarn-api

@Public
@Stable
public static Resource newInstance(long memory, int vCores, int gpus) {
 Resource resource = Records.newRecord(Resource.class);
 resource.setMemorySize(memory);
 resource.setVirtualCores(vCores);
 resource.setGPUs(gpus);
 return resource;
}

Code example source: com.github.jiayuhan-it/hadoop-yarn-server-nodemanager

public static ResourceLocalizationSpec newResourceLocalizationSpec(
  LocalResource rsrc, Path path) {
 URL local = ConverterUtils.getYarnUrlFromPath(path);
 ResourceLocalizationSpec resourceLocalizationSpec =
   Records.newRecord(ResourceLocalizationSpec.class);
 resourceLocalizationSpec.setDestinationDirectory(local);
 resourceLocalizationSpec.setResource(rsrc);
 return resourceLocalizationSpec;
}
