This article collects code examples for the Java method org.apache.hadoop.yarn.util.resource.Resources.componentwiseMin and shows how Resources.componentwiseMin is used in practice. The examples were extracted from selected projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and should serve as a useful reference. Details of the Resources.componentwiseMin method:
Package: org.apache.hadoop.yarn.util.resource.Resources
Class: Resources
Method: componentwiseMin
Description: none available
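Before the project examples, here is a minimal, self-contained sketch of what the method does: given two Resource objects, componentwiseMin returns a Resource whose memory and vcores are each the smaller of the corresponding components of the two inputs. The concrete resource values below are chosen purely for illustration:

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class ComponentwiseMinDemo {
  public static void main(String[] args) {
    // Two resource vectors where a different component is smaller in each.
    Resource queueMax = Resource.newInstance(8192, 4);    // 8192 MB, 4 vcores
    Resource clusterMax = Resource.newInstance(4096, 16); // 4096 MB, 16 vcores

    // Each component is taken independently, so the result here is
    // <memory:4096, vCores:4>.
    Resource effectiveMax = Resources.componentwiseMin(queueMax, clusterMax);
    System.out.println(effectiveMax);
  }
}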
Code example source: origin: com.github.jiayuhan-it/hadoop-yarn-server-resourcemanager
private void updateDemandForApp(FSAppAttempt sched, Resource maxRes) {
  sched.updateDemand();
  Resource toAdd = sched.getDemand();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Counting resource from " + sched.getName() + " " + toAdd
        + "; Total resource consumption for " + getName() + " now "
        + demand);
  }
  demand = Resources.add(demand, toAdd);
  demand = Resources.componentwiseMin(demand, maxRes);
}
Code example source: origin: ch.cern.hadoop/hadoop-yarn-server-resourcemanager
private void updateDemandForApp(FSAppAttempt sched, Resource maxRes) {
  sched.updateDemand();
  Resource toAdd = sched.getDemand();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Counting resource from " + sched.getName() + " " + toAdd
        + "; Total resource consumption for " + getName() + " now "
        + demand);
  }
  demand = Resources.add(demand, toAdd);
  demand = Resources.componentwiseMin(demand, maxRes);
}
Code example source: origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager
/**
 * Get the minimum capacity in the specified time range.
 *
 * @param interval the {@link ReservationInterval} to be searched
 * @return minimum resource allocation
 */
public Resource getMinimumCapacityInInterval(ReservationInterval interval) {
  Resource minCapacity =
      Resource.newInstance(Integer.MAX_VALUE, Integer.MAX_VALUE);
  long start = interval.getStartTime();
  long end = interval.getEndTime();
  NavigableMap<Long, Resource> capacityRange =
      getRangeOverlapping(start, end).getCumulative();
  if (!capacityRange.isEmpty()) {
    for (Map.Entry<Long, Resource> entry : capacityRange.entrySet()) {
      if (entry.getValue() != null) {
        minCapacity =
            Resources.componentwiseMin(minCapacity, entry.getValue());
      }
    }
  }
  return minCapacity;
}
Code example source: origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager
@Override
public Resource getMaximumResourceCapability(String queueName) {
  if (queueName == null || queueName.isEmpty()) {
    return getMaximumResourceCapability();
  }
  CSQueue queue = getQueue(queueName);
  if (queue == null) {
    LOG.error("Unknown queue: " + queueName);
    return getMaximumResourceCapability();
  }
  if (!(queue instanceof LeafQueue)) {
    LOG.error("queue " + queueName + " is not a leaf queue");
    return getMaximumResourceCapability();
  }
  // queue.getMaxAllocation returns the *configured* maximum allocation;
  // getMaximumResourceCapability() returns the maximum allocation that also
  // considers per-node maximum resources. So return the (component-wise) min
  // of the two.
  Resource queueMaxAllocation = ((LeafQueue) queue).getMaximumAllocation();
  Resource clusterMaxAllocationConsiderNodeMax =
      getMaximumResourceCapability();
  return Resources.componentwiseMin(queueMaxAllocation,
      clusterMaxAllocationConsiderNodeMax);
}
Code example source: origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager
@Override
public void updateDemand() {
  // Compute demand by iterating through apps in the queue
  // Limit demand to maxResources
  writeLock.lock();
  try {
    demand = Resources.createResource(0);
    for (FSQueue childQueue : childQueues) {
      childQueue.updateDemand();
      Resource toAdd = childQueue.getDemand();
      demand = Resources.add(demand, toAdd);
      if (LOG.isDebugEnabled()) {
        LOG.debug("Counting resource from " + childQueue.getName() + " " +
            toAdd + "; Total resource demand for " + getName() +
            " now " + demand);
      }
    }
    // Cap demand to maxShare to limit allocation to maxShare
    demand = Resources.componentwiseMin(demand, getMaxShare());
  } finally {
    writeLock.unlock();
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("The updated demand for " + getName() + " is " + demand +
        "; the max is " + getMaxShare());
  }
}
Code example source: origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager
@Override
public Resource getMaximumResourceCapability(String queueName) {
  if (queueName == null || queueName.isEmpty()) {
    return getMaximumResourceCapability();
  }
  FSQueue queue = queueMgr.getQueue(queueName);
  Resource schedulerLevelMaxResourceCapability =
      getMaximumResourceCapability();
  if (queue == null) {
    return schedulerLevelMaxResourceCapability;
  }
  Resource queueMaxResourceCapability = queue.getMaximumContainerAllocation();
  if (queueMaxResourceCapability.equals(Resources.unbounded())) {
    return schedulerLevelMaxResourceCapability;
  } else {
    return Resources.componentwiseMin(schedulerLevelMaxResourceCapability,
        queueMaxResourceCapability);
  }
}
Code example source: origin: ch.cern.hadoop/hadoop-yarn-server-resourcemanager
public synchronized Resource getTotalPendingResourcesConsideringUserLimit(
    Resource resources) {
  Map<String, Resource> userNameToHeadroom = new HashMap<String, Resource>();
  Resource pendingConsideringUserLimit = Resource.newInstance(0, 0);
  for (FiCaSchedulerApp app : activeApplications) {
    String userName = app.getUser();
    if (!userNameToHeadroom.containsKey(userName)) {
      User user = getUser(userName);
      Resource headroom = Resources.subtract(
          computeUserLimit(app, resources, minimumAllocation, user, null),
          user.getUsed());
      // Make sure none of the components of headroom is negative.
      headroom = Resources.componentwiseMax(headroom, Resources.none());
      userNameToHeadroom.put(userName, headroom);
    }
    Resource minpendingConsideringUserLimit =
        Resources.componentwiseMin(userNameToHeadroom.get(userName),
            app.getTotalPendingRequests());
    Resources.addTo(pendingConsideringUserLimit,
        minpendingConsideringUserLimit);
    Resources.subtractFrom(userNameToHeadroom.get(userName),
        minpendingConsideringUserLimit);
  }
  return pendingConsideringUserLimit;
}
Code example source: origin: com.github.jiayuhan-it/hadoop-yarn-server-resourcemanager
public synchronized Resource getTotalPendingResourcesConsideringUserLimit(
    Resource resources) {
  Map<String, Resource> userNameToHeadroom = new HashMap<String, Resource>();
  Resource pendingConsideringUserLimit = Resource.newInstance(0, 0);
  for (FiCaSchedulerApp app : activeApplications) {
    String userName = app.getUser();
    if (!userNameToHeadroom.containsKey(userName)) {
      User user = getUser(userName);
      Resource headroom = Resources.subtract(
          computeUserLimit(app, resources, minimumAllocation, user, null),
          user.getUsed());
      // Make sure none of the components of headroom is negative.
      headroom = Resources.componentwiseMax(headroom, Resources.none());
      userNameToHeadroom.put(userName, headroom);
    }
    Resource minpendingConsideringUserLimit =
        Resources.componentwiseMin(userNameToHeadroom.get(userName),
            app.getTotalPendingRequests());
    Resources.addTo(pendingConsideringUserLimit,
        minpendingConsideringUserLimit);
    Resources.subtractFrom(userNameToHeadroom.get(userName),
        minpendingConsideringUserLimit);
  }
  return pendingConsideringUserLimit;
}
Code example source: origin: ch.cern.hadoop/hadoop-yarn-server-resourcemanager
@Override
public void updateDemand() {
  // Compute demand by iterating through apps in the queue
  // Limit demand to maxResources
  Resource maxRes = scheduler.getAllocationConfiguration()
      .getMaxResources(getName());
  demand = Resources.createResource(0);
  for (FSQueue childQueue : childQueues) {
    childQueue.updateDemand();
    Resource toAdd = childQueue.getDemand();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Counting resource from " + childQueue.getName() + " " +
          toAdd + "; Total resource consumption for " + getName() +
          " now " + demand);
    }
    demand = Resources.add(demand, toAdd);
    demand = Resources.componentwiseMin(demand, maxRes);
    if (Resources.equals(demand, maxRes)) {
      break;
    }
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("The updated demand for " + getName() + " is " + demand +
        "; the max is " + maxRes);
  }
}
Code example source: origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager
@Override
public void updateDemand() {
  // Compute demand by iterating through apps in the queue
  // Limit demand to maxResources
  Resource tmpDemand = Resources.createResource(0);
  readLock.lock();
  try {
    for (FSAppAttempt sched : runnableApps) {
      sched.updateDemand();
      Resources.addTo(tmpDemand, sched.getDemand());
    }
    for (FSAppAttempt sched : nonRunnableApps) {
      sched.updateDemand();
      Resources.addTo(tmpDemand, sched.getDemand());
    }
  } finally {
    readLock.unlock();
  }
  // Cap demand to maxShare to limit allocation to maxShare
  demand = Resources.componentwiseMin(tmpDemand, getMaxShare());
  if (LOG.isDebugEnabled()) {
    LOG.debug("The updated demand for " + getName() + " is " + demand
        + "; the max is " + getMaxShare());
    LOG.debug("The updated fairshare for " + getName() + " is "
        + getFairShare());
  }
}
Code example source: origin: com.github.jiayuhan-it/hadoop-yarn-server-resourcemanager
@Override
public void updateDemand() {
  // Compute demand by iterating through apps in the queue
  // Limit demand to maxResources
  Resource maxRes = scheduler.getAllocationConfiguration()
      .getMaxResources(getName());
  demand = Resources.createResource(0);
  for (FSQueue childQueue : childQueues) {
    childQueue.updateDemand();
    Resource toAdd = childQueue.getDemand();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Counting resource from " + childQueue.getName() + " " +
          toAdd + "; Total resource consumption for " + getName() +
          " now " + demand);
    }
    demand = Resources.add(demand, toAdd);
    demand = Resources.componentwiseMin(demand, maxRes);
    if (Resources.equals(demand, maxRes)) {
      break;
    }
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("The updated demand for " + getName() + " is " + demand +
        "; the max is " + maxRes);
  }
}
Code example source: origin: ch.cern.hadoop/hadoop-yarn-server-resourcemanager
Resource queueMaxAvailableResources =
    Resources.subtract(queue.getMaxShare(), queueUsage);
Resource maxAvailableResource = Resources.componentwiseMin(
    clusterAvailableResources, queueMaxAvailableResources);
Code example source: origin: com.github.jiayuhan-it/hadoop-yarn-server-resourcemanager
Resource queueMaxAvailableResources =
    Resources.subtract(queue.getMaxShare(), queueUsage);
Resource maxAvailableResource = Resources.componentwiseMin(
    clusterAvailableResources, queueMaxAvailableResources);
Code example source: origin: ch.cern.hadoop/hadoop-yarn-server-resourcemanager
@Test(timeout=1000)
public void testComponentwiseMin() {
  assertEquals(createResource(1, 1),
      componentwiseMin(createResource(1, 1), createResource(2, 2)));
  assertEquals(createResource(1, 1),
      componentwiseMin(createResource(2, 2), createResource(1, 1)));
  assertEquals(createResource(1, 1),
      componentwiseMin(createResource(1, 2), createResource(2, 1)));
}
}
Code example source: origin: org.apache.hadoop/hadoop-yarn-common
@Test(timeout = 1000)
public void testComponentwiseMin() {
  assertEquals(createResource(1, 1),
      componentwiseMin(createResource(1, 1), createResource(2, 2)));
  assertEquals(createResource(1, 1),
      componentwiseMin(createResource(2, 2), createResource(1, 1)));
  assertEquals(createResource(1, 1),
      componentwiseMin(createResource(1, 2), createResource(2, 1)));
  assertEquals(createResource(1, 1, 1),
      componentwiseMin(createResource(1, 1, 1), createResource(2, 2, 2)));
  assertEquals(createResource(1, 1, 0),
      componentwiseMin(createResource(2, 2, 2), createResource(1, 1)));
  assertEquals(createResource(1, 1, 2),
      componentwiseMin(createResource(1, 2, 2), createResource(2, 1, 3)));
}
Code example source: origin: ch.cern.hadoop/hadoop-yarn-server-resourcemanager
private Resource getHeadroom(User user, Resource currentResourceLimit,
    Resource clusterResource, Resource userLimit) {
  /**
   * Headroom is:
   *   min(
   *     min(userLimit, queueMaxCap) - userConsumed,
   *     queueMaxCap - queueUsedResources
   *   )
   *
   * (which can be expressed as
   *   min(userLimit - userConsumed, queueMaxCap - userConsumed,
   *       queueMaxCap - queueUsedResources))
   *
   * Given that queueUsedResources >= userConsumed, this simplifies to
   *
   *   >> min(userLimit - userConsumed, queueMaxCap - queueUsedResources) <<
   */
  Resource headroom =
      Resources.componentwiseMin(
          Resources.subtract(userLimit, user.getUsed()),
          Resources.subtract(currentResourceLimit, queueUsage.getUsed()));
  // Normalize it before returning
  headroom =
      Resources.roundDown(resourceCalculator, headroom, minimumAllocation);
  return headroom;
}
Code example source: origin: com.github.jiayuhan-it/hadoop-yarn-server-resourcemanager
private Resource getHeadroom(User user, Resource currentResourceLimit,
    Resource clusterResource, Resource userLimit) {
  /**
   * Headroom is:
   *   min(
   *     min(userLimit, queueMaxCap) - userConsumed,
   *     queueMaxCap - queueUsedResources
   *   )
   *
   * (which can be expressed as
   *   min(userLimit - userConsumed, queueMaxCap - userConsumed,
   *       queueMaxCap - queueUsedResources))
   *
   * Given that queueUsedResources >= userConsumed, this simplifies to
   *
   *   >> min(userLimit - userConsumed, queueMaxCap - queueUsedResources) <<
   */
  Resource headroom =
      Resources.componentwiseMin(
          Resources.subtract(userLimit, user.getUsed()),
          Resources.subtract(currentResourceLimit, queueUsage.getUsed()));
  // Normalize it before returning
  headroom =
      Resources.roundDown(resourceCalculator, headroom, minimumAllocation);
  return headroom;
}
Code example source: origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager
/**
 * Helper method to compute the amount of minshare starvation.
 *
 * @return the extent of minshare starvation
 */
private Resource minShareStarvation() {
  // If demand < minshare, we should use demand to determine starvation
  Resource starvation =
      Resources.componentwiseMin(getMinShare(), getDemand());
  Resources.subtractFromNonNegative(starvation, getResourceUsage());
  boolean starved = !Resources.isNone(starvation);
  long now = scheduler.getClock().getTime();
  if (!starved) {
    // Record that the queue is not starved
    setLastTimeAtMinShare(now);
  }
  if (now - lastTimeAtMinShare < getMinSharePreemptionTimeout()) {
    // the queue is not starved for the preemption timeout
    starvation = Resources.clone(Resources.none());
  }
  return starvation;
}
Code example source: origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager
/**
 * Helper method that computes the extent of fairshare starvation.
 * @return freshly computed fairshare starvation
 */
Resource fairShareStarvation() {
  long now = scheduler.getClock().getTime();
  Resource threshold = Resources.multiply(
      getFairShare(), getQueue().getFairSharePreemptionThreshold());
  Resource fairDemand = Resources.componentwiseMin(threshold, demand);
  // Check if the queue is starved for fairshare
  boolean starved = isUsageBelowShare(getResourceUsage(), fairDemand);
  if (!starved) {
    lastTimeAtFairShare = now;
  }
  if (!starved ||
      now - lastTimeAtFairShare <
          getQueue().getFairSharePreemptionTimeout()) {
    fairshareStarvation = Resources.none();
  } else {
    // The app has been starved for longer than preemption-timeout.
    fairshareStarvation =
        Resources.subtractFromNonNegative(fairDemand, getResourceUsage());
  }
  return fairshareStarvation;
}
Code example source: origin: ch.cern.hadoop/hadoop-yarn-server-resourcemanager
maxResources = new ResourceInfo(queue.getMaxShare());
maxResources = new ResourceInfo(
    Resources.componentwiseMin(queue.getMaxShare(),
        scheduler.getClusterResource()));