| Modifier and Type | Method and Description |
|---|---|
void |
RMApplicationHistoryWriter.containerFinished(RMContainer container) |
void |
RMApplicationHistoryWriter.containerStarted(RMContainer container) |
| Modifier and Type | Method and Description |
|---|---|
void |
SystemMetricsPublisher.containerCreated(RMContainer container,
long createdTime) |
void |
SystemMetricsPublisher.containerFinished(RMContainer container,
long finishedTime) |
| Modifier and Type | Method and Description |
|---|---|
Map<RMContainer,Long> |
ProportionalCapacityPreemptionPolicy.getToPreemptContainers() |
Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> |
ReservedContainerCandidatesSelector.selectCandidates(Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> selectedCandidates,
org.apache.hadoop.yarn.api.records.Resource clusterResource,
org.apache.hadoop.yarn.api.records.Resource totalPreemptedResourceAllowed) |
abstract Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> |
PreemptionCandidatesSelector.selectCandidates(Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> selectedCandidates,
org.apache.hadoop.yarn.api.records.Resource clusterResource,
org.apache.hadoop.yarn.api.records.Resource totalPreemptedResourceAllowed)
Get preemption candidates from computed resource sharing and already
selected candidates.
|
Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> |
IntraQueueCandidatesSelector.selectCandidates(Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> selectedCandidates,
org.apache.hadoop.yarn.api.records.Resource clusterResource,
org.apache.hadoop.yarn.api.records.Resource totalPreemptedResourceAllowed) |
Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> |
FifoCandidatesSelector.selectCandidates(Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> selectedCandidates,
org.apache.hadoop.yarn.api.records.Resource clusterResource,
org.apache.hadoop.yarn.api.records.Resource totalPreemptionAllowed) |
| Modifier and Type | Method and Description |
|---|---|
static boolean |
CapacitySchedulerPreemptionUtils.isContainerAlreadySelected(RMContainer container,
Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> selectedCandidates) |
static boolean |
CapacitySchedulerPreemptionUtils.tryPreemptContainerAndDeductResToObtain(org.apache.hadoop.yarn.util.resource.ResourceCalculator rc,
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.CapacitySchedulerPreemptionContext context,
Map<String,org.apache.hadoop.yarn.api.records.Resource> resourceToObtainByPartitions,
RMContainer rmContainer,
org.apache.hadoop.yarn.api.records.Resource clusterResource,
Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> preemptMap,
org.apache.hadoop.yarn.api.records.Resource totalPreemptionAllowed)
Invoke this method to preempt container based on resToObtain.
|
| Modifier and Type | Method and Description |
|---|---|
void |
FifoIntraQueuePreemptionPlugin.computeAppsIdealAllocation(org.apache.hadoop.yarn.api.records.Resource clusterResource,
org.apache.hadoop.yarn.api.records.Resource partitionBasedResource,
TempQueuePerPartition tq,
Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> selectedCandidates,
org.apache.hadoop.yarn.api.records.Resource totalPreemptedResourceAllowed,
org.apache.hadoop.yarn.api.records.Resource queueReassignableResource,
float maxAllowablePreemptLimit) |
static void |
CapacitySchedulerPreemptionUtils.deductPreemptableResourcesBasedSelectedCandidates(org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.CapacitySchedulerPreemptionContext context,
Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> selectedCandidates) |
static boolean |
CapacitySchedulerPreemptionUtils.isContainerAlreadySelected(RMContainer container,
Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> selectedCandidates) |
Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> |
ReservedContainerCandidatesSelector.selectCandidates(Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> selectedCandidates,
org.apache.hadoop.yarn.api.records.Resource clusterResource,
org.apache.hadoop.yarn.api.records.Resource totalPreemptedResourceAllowed) |
abstract Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> |
PreemptionCandidatesSelector.selectCandidates(Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> selectedCandidates,
org.apache.hadoop.yarn.api.records.Resource clusterResource,
org.apache.hadoop.yarn.api.records.Resource totalPreemptedResourceAllowed)
Get preemption candidates from computed resource sharing and already
selected candidates.
|
Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> |
IntraQueueCandidatesSelector.selectCandidates(Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> selectedCandidates,
org.apache.hadoop.yarn.api.records.Resource clusterResource,
org.apache.hadoop.yarn.api.records.Resource totalPreemptedResourceAllowed) |
Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> |
FifoCandidatesSelector.selectCandidates(Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> selectedCandidates,
org.apache.hadoop.yarn.api.records.Resource clusterResource,
org.apache.hadoop.yarn.api.records.Resource totalPreemptionAllowed) |
static boolean |
CapacitySchedulerPreemptionUtils.tryPreemptContainerAndDeductResToObtain(org.apache.hadoop.yarn.util.resource.ResourceCalculator rc,
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.CapacitySchedulerPreemptionContext context,
Map<String,org.apache.hadoop.yarn.api.records.Resource> resourceToObtainByPartitions,
RMContainer rmContainer,
org.apache.hadoop.yarn.api.records.Resource clusterResource,
Map<org.apache.hadoop.yarn.api.records.ApplicationAttemptId,Set<RMContainer>> preemptMap,
org.apache.hadoop.yarn.api.records.Resource totalPreemptionAllowed)
Invoke this method to preempt container based on resToObtain.
|
| Modifier and Type | Method and Description |
|---|---|
void |
RMAppAttemptMetrics.updatePreemptionInfo(org.apache.hadoop.yarn.api.records.Resource resource,
RMContainer container) |
| Modifier and Type | Class and Description |
|---|---|
class |
RMContainerImpl |
| Modifier and Type | Method and Description |
|---|---|
int |
RMContainerImpl.compareTo(RMContainer o) |
| Modifier and Type | Field and Description |
|---|---|
protected Map<org.apache.hadoop.yarn.api.records.ContainerId,RMContainer> |
SchedulerApplicationAttempt.liveContainers |
protected List<RMContainer> |
SchedulerApplicationAttempt.newlyAllocatedContainers |
protected Map<org.apache.hadoop.yarn.api.records.ContainerId,RMContainer> |
SchedulerApplicationAttempt.newlyDecreasedContainers |
protected Map<org.apache.hadoop.yarn.api.records.ContainerId,RMContainer> |
SchedulerApplicationAttempt.newlyIncreasedContainers |
protected Map<org.apache.hadoop.yarn.api.records.Priority,Map<org.apache.hadoop.yarn.api.records.NodeId,RMContainer>> |
SchedulerApplicationAttempt.reservedContainers |
| Modifier and Type | Method and Description |
|---|---|
protected RMContainer |
SchedulerNode.getContainer(org.apache.hadoop.yarn.api.records.ContainerId containerId)
Get the container for the specified container ID.
|
RMContainer |
SchedulerNode.getReservedContainer() |
RMContainer |
SchedContainerChangeRequest.getRMContainer() |
RMContainer |
YarnScheduler.getRMContainer(org.apache.hadoop.yarn.api.records.ContainerId containerId)
Get the container for the given containerId.
|
RMContainer |
SchedulerApplicationAttempt.getRMContainer(org.apache.hadoop.yarn.api.records.ContainerId id) |
RMContainer |
AbstractYarnScheduler.getRMContainer(org.apache.hadoop.yarn.api.records.ContainerId containerId) |
RMContainer |
SchedulerApplicationAttempt.reserve(SchedulerNode node,
org.apache.hadoop.yarn.api.records.Priority priority,
RMContainer rmContainer,
org.apache.hadoop.yarn.api.records.Container container) |
| Modifier and Type | Method and Description |
|---|---|
List<RMContainer> |
SchedulerNode.getCopiedListOfRunningContainers() |
Collection<RMContainer> |
SchedulerAppReport.getLiveContainers()
Get the list of live containers
|
Collection<RMContainer> |
SchedulerApplicationAttempt.getLiveContainers()
Get the live containers of the application.
|
Map<org.apache.hadoop.yarn.api.records.ContainerId,RMContainer> |
SchedulerApplicationAttempt.getLiveContainersMap() |
Collection<RMContainer> |
SchedulerAppReport.getReservedContainers()
Get the list of reserved containers
|
List<RMContainer> |
SchedulerApplicationAttempt.getReservedContainers()
Get the list of reserved containers
|
| Modifier and Type | Method and Description |
|---|---|
void |
SchedulerNode.allocateContainer(RMContainer rmContainer)
The Scheduler has allocated containers on this node to the given
application.
|
void |
AbstractYarnScheduler.completedContainer(RMContainer rmContainer,
org.apache.hadoop.yarn.api.records.ContainerStatus containerStatus,
RMContainerEventType event) |
protected abstract void |
AbstractYarnScheduler.completedContainerInternal(RMContainer rmContainer,
org.apache.hadoop.yarn.api.records.ContainerStatus containerStatus,
RMContainerEventType event) |
void |
PreemptableResourceScheduler.killReservedContainer(RMContainer container)
If the scheduler supports container reservations, this method is used to
ask the scheduler to drop the reservation for the given container.
|
void |
PreemptableResourceScheduler.markContainerForKillable(RMContainer container)
Ask the scheduler to forcibly interrupt the container given as input
|
void |
PreemptableResourceScheduler.markContainerForPreemption(org.apache.hadoop.yarn.api.records.ApplicationAttemptId aid,
RMContainer container)
Ask the scheduler to obtain back the container from a specific application
by issuing a preemption request
|
void |
Queue.recoverContainer(org.apache.hadoop.yarn.api.records.Resource clusterResource,
SchedulerApplicationAttempt schedulerAttempt,
RMContainer rmContainer)
Recover the state of the queue for a given container.
|
void |
SchedulerNode.recoverContainer(RMContainer rmContainer) |
void |
AppSchedulingInfo.recoverContainer(RMContainer rmContainer) |
void |
SchedulerApplicationAttempt.recoverContainer(SchedulerNode node,
RMContainer rmContainer) |
RMContainer |
SchedulerApplicationAttempt.reserve(SchedulerNode node,
org.apache.hadoop.yarn.api.records.Priority priority,
RMContainer rmContainer,
org.apache.hadoop.yarn.api.records.Container container) |
boolean |
SchedulerApplicationAttempt.reserveIncreasedContainer(SchedulerNode node,
org.apache.hadoop.yarn.api.records.Priority priority,
RMContainer rmContainer,
org.apache.hadoop.yarn.api.records.Resource reservedResource) |
abstract void |
SchedulerNode.reserveResource(SchedulerApplicationAttempt attempt,
org.apache.hadoop.yarn.api.records.Priority priority,
RMContainer container)
Reserve container for the attempt on this node.
|
protected void |
SchedulerNode.setReservedContainer(RMContainer reservedContainer) |
| Constructor and Description |
|---|
SchedContainerChangeRequest(RMContext rmContext,
SchedulerNode schedulerNode,
RMContainer rmContainer,
org.apache.hadoop.yarn.api.records.Resource targetCapacity) |
| Modifier and Type | Method and Description |
|---|---|
RMContainer |
CSAssignment.getExcessReservation() |
| Modifier and Type | Method and Description |
|---|---|
List<RMContainer> |
CSAssignment.getContainersToKill() |
Map<String,TreeSet<RMContainer>> |
LeafQueue.getIgnoreExclusivityRMContainers() |
Iterator<RMContainer> |
AbstractCSQueue.getKillableContainers(String partition) |
| Modifier and Type | Method and Description |
|---|---|
void |
ParentQueue.attachContainer(org.apache.hadoop.yarn.api.records.Resource clusterResource,
FiCaSchedulerApp application,
RMContainer rmContainer) |
void |
LeafQueue.attachContainer(org.apache.hadoop.yarn.api.records.Resource clusterResource,
FiCaSchedulerApp application,
RMContainer rmContainer) |
void |
CSQueue.attachContainer(org.apache.hadoop.yarn.api.records.Resource clusterResource,
FiCaSchedulerApp application,
RMContainer container)
Attach a container to this queue
|
void |
ParentQueue.completedContainer(org.apache.hadoop.yarn.api.records.Resource clusterResource,
FiCaSchedulerApp application,
FiCaSchedulerNode node,
RMContainer rmContainer,
org.apache.hadoop.yarn.api.records.ContainerStatus containerStatus,
RMContainerEventType event,
CSQueue completedChildQueue,
boolean sortQueues) |
void |
LeafQueue.completedContainer(org.apache.hadoop.yarn.api.records.Resource clusterResource,
FiCaSchedulerApp application,
FiCaSchedulerNode node,
RMContainer rmContainer,
org.apache.hadoop.yarn.api.records.ContainerStatus containerStatus,
RMContainerEventType event,
CSQueue childQueue,
boolean sortQueues) |
void |
CSQueue.completedContainer(org.apache.hadoop.yarn.api.records.Resource clusterResource,
FiCaSchedulerApp application,
FiCaSchedulerNode node,
RMContainer container,
org.apache.hadoop.yarn.api.records.ContainerStatus containerStatus,
RMContainerEventType event,
CSQueue childQueue,
boolean sortQueues)
A container assigned to the queue has completed.
|
protected void |
CapacityScheduler.completedContainerInternal(RMContainer rmContainer,
org.apache.hadoop.yarn.api.records.ContainerStatus containerStatus,
RMContainerEventType event) |
void |
ParentQueue.detachContainer(org.apache.hadoop.yarn.api.records.Resource clusterResource,
FiCaSchedulerApp application,
RMContainer rmContainer) |
void |
LeafQueue.detachContainer(org.apache.hadoop.yarn.api.records.Resource clusterResource,
FiCaSchedulerApp application,
RMContainer rmContainer) |
void |
CSQueue.detachContainer(org.apache.hadoop.yarn.api.records.Resource clusterResource,
FiCaSchedulerApp application,
RMContainer container)
Detach a container from this queue
|
void |
CapacityScheduler.killReservedContainer(RMContainer container) |
void |
CapacityScheduler.markContainerForKillable(RMContainer killableContainer) |
void |
CapacityScheduler.markContainerForPreemption(org.apache.hadoop.yarn.api.records.ApplicationAttemptId aid,
RMContainer cont) |
void |
ParentQueue.recoverContainer(org.apache.hadoop.yarn.api.records.Resource clusterResource,
SchedulerApplicationAttempt attempt,
RMContainer rmContainer) |
void |
LeafQueue.recoverContainer(org.apache.hadoop.yarn.api.records.Resource clusterResource,
SchedulerApplicationAttempt attempt,
RMContainer rmContainer) |
void |
CSAssignment.setExcessReservation(RMContainer rmContainer) |
void |
ParentQueue.unreserveIncreasedContainer(org.apache.hadoop.yarn.api.records.Resource clusterResource,
FiCaSchedulerApp app,
FiCaSchedulerNode node,
RMContainer rmContainer) |
void |
LeafQueue.unreserveIncreasedContainer(org.apache.hadoop.yarn.api.records.Resource clusterResource,
FiCaSchedulerApp app,
FiCaSchedulerNode node,
RMContainer rmContainer) |
void |
CSQueue.unreserveIncreasedContainer(org.apache.hadoop.yarn.api.records.Resource clusterResource,
FiCaSchedulerApp app,
FiCaSchedulerNode node,
RMContainer rmContainer)
We have a reserved increased container in the queue, we need to unreserve
it.
|
| Modifier and Type | Method and Description |
|---|---|
void |
CSAssignment.setContainersToKill(List<RMContainer> containersToKill) |
| Constructor and Description |
|---|
CSAssignment(FiCaSchedulerApp application,
RMContainer excessReservation) |
CSAssignment(org.apache.hadoop.yarn.api.records.Resource resource,
NodeType type,
RMContainer excessReservation,
FiCaSchedulerApp application,
CSAssignment.SkippedType skipped,
boolean fulfilledReservation) |
| Modifier and Type | Method and Description |
|---|---|
RMContainer |
ContainerAllocation.getContainerToBeUnreserved() |
| Modifier and Type | Method and Description |
|---|---|
List<RMContainer> |
ContainerAllocation.getToKillContainers() |
| Modifier and Type | Method and Description |
|---|---|
CSAssignment |
RegularContainerAllocator.assignContainers(org.apache.hadoop.yarn.api.records.Resource clusterResource,
FiCaSchedulerNode node,
SchedulingMode schedulingMode,
ResourceLimits resourceLimits,
RMContainer reservedContainer) |
CSAssignment |
IncreaseContainerAllocator.assignContainers(org.apache.hadoop.yarn.api.records.Resource clusterResource,
FiCaSchedulerNode node,
SchedulingMode schedulingMode,
ResourceLimits resourceLimits,
RMContainer reservedContainer) |
CSAssignment |
ContainerAllocator.assignContainers(org.apache.hadoop.yarn.api.records.Resource clusterResource,
FiCaSchedulerNode node,
SchedulingMode schedulingMode,
ResourceLimits resourceLimits,
RMContainer reservedContainer) |
abstract CSAssignment |
AbstractContainerAllocator.assignContainers(org.apache.hadoop.yarn.api.records.Resource clusterResource,
FiCaSchedulerNode node,
SchedulingMode schedulingMode,
ResourceLimits resourceLimits,
RMContainer reservedContainer)
allocate needs to handle the following tasks:
Select request: select a request to allocate.
|
protected CSAssignment |
AbstractContainerAllocator.getCSAssignmentFromAllocateResult(org.apache.hadoop.yarn.api.records.Resource clusterResource,
ContainerAllocation result,
RMContainer rmContainer) |
| Modifier and Type | Method and Description |
|---|---|
void |
ContainerAllocation.setToKillContainers(List<RMContainer> toKillContainers) |
| Constructor and Description |
|---|
ContainerAllocation(RMContainer containerToBeUnreserved,
org.apache.hadoop.yarn.api.records.Resource resourceToBeAllocated,
AllocationState state) |
| Modifier and Type | Method and Description |
|---|---|
RMContainer |
KillableContainer.getRMContainer() |
| Modifier and Type | Method and Description |
|---|---|
Map<String,Map<org.apache.hadoop.yarn.api.records.ContainerId,RMContainer>> |
PreemptableQueue.getKillableContainers() |
Iterator<RMContainer> |
PreemptionManager.getKillableContainers(String queueName,
String partition) |
Map<org.apache.hadoop.yarn.api.records.ContainerId,RMContainer> |
PreemptionManager.getKillableContainersMap(String queueName,
String partition) |
| Constructor and Description |
|---|
KillableContainer(RMContainer container,
String partition,
String leafQueueName) |
| Constructor and Description |
|---|
PreemptableQueue(Map<String,org.apache.hadoop.yarn.api.records.Resource> totalKillableResources,
Map<String,Map<org.apache.hadoop.yarn.api.records.ContainerId,RMContainer>> killableContainers) |
| Modifier and Type | Method and Description |
|---|---|
RMContainer |
FiCaSchedulerApp.allocate(NodeType type,
FiCaSchedulerNode node,
org.apache.hadoop.yarn.api.records.Priority priority,
org.apache.hadoop.yarn.api.records.ResourceRequest request,
org.apache.hadoop.yarn.api.records.Container container) |
RMContainer |
FiCaSchedulerApp.findNodeToUnreserve(org.apache.hadoop.yarn.api.records.Resource clusterResource,
FiCaSchedulerNode node,
org.apache.hadoop.yarn.api.records.Priority priority,
org.apache.hadoop.yarn.api.records.Resource minimumUnreservedResource) |
| Modifier and Type | Method and Description |
|---|---|
Map<org.apache.hadoop.yarn.api.records.ContainerId,RMContainer> |
FiCaSchedulerNode.getKillableContainers() |
| Modifier and Type | Method and Description |
|---|---|
CSAssignment |
FiCaSchedulerApp.assignContainers(org.apache.hadoop.yarn.api.records.Resource clusterResource,
FiCaSchedulerNode node,
ResourceLimits currentResourceLimits,
SchedulingMode schedulingMode,
RMContainer reservedContainer) |
boolean |
FiCaSchedulerApp.containerCompleted(RMContainer rmContainer,
org.apache.hadoop.yarn.api.records.ContainerStatus containerStatus,
RMContainerEventType event,
String partition) |
void |
FiCaSchedulerApp.nodePartitionUpdated(RMContainer rmContainer,
String oldPartition,
String newPartition) |
void |
FiCaSchedulerApp.reserve(org.apache.hadoop.yarn.api.records.Priority priority,
FiCaSchedulerNode node,
RMContainer rmContainer,
org.apache.hadoop.yarn.api.records.Container container) |
boolean |
FiCaSchedulerApp.reserveIncreasedContainer(org.apache.hadoop.yarn.api.records.Priority priority,
FiCaSchedulerNode node,
RMContainer rmContainer,
org.apache.hadoop.yarn.api.records.Resource reservedResource) |
void |
FiCaSchedulerNode.reserveResource(SchedulerApplicationAttempt application,
org.apache.hadoop.yarn.api.records.Priority priority,
RMContainer container) |
boolean |
FiCaSchedulerApp.unreserve(org.apache.hadoop.yarn.api.records.Priority priority,
FiCaSchedulerNode node,
RMContainer rmContainer) |
| Modifier and Type | Method and Description |
|---|---|
RMContainer |
ContainerPreemptEvent.getContainer() |
| Constructor and Description |
|---|
ContainerPreemptEvent(org.apache.hadoop.yarn.api.records.ApplicationAttemptId aid,
RMContainer container,
SchedulerEventType type) |
| Modifier and Type | Method and Description |
|---|---|
RMContainer |
FSAppAttempt.allocate(NodeType type,
FSSchedulerNode node,
org.apache.hadoop.yarn.api.records.Priority priority,
org.apache.hadoop.yarn.api.records.ResourceRequest request,
org.apache.hadoop.yarn.api.records.Container container) |
RMContainer |
Schedulable.preemptContainer()
Preempt a container from this Schedulable if possible.
|
RMContainer |
FSParentQueue.preemptContainer() |
RMContainer |
FSLeafQueue.preemptContainer() |
RMContainer |
FSAppAttempt.preemptContainer()
Preempt a running container according to its priority.
|
| Modifier and Type | Method and Description |
|---|---|
Set<RMContainer> |
FSAppAttempt.getPreemptionContainers() |
| Modifier and Type | Method and Description |
|---|---|
void |
FSAppAttempt.addPreemption(RMContainer container,
long time) |
protected void |
FairScheduler.completedContainerInternal(RMContainer rmContainer,
org.apache.hadoop.yarn.api.records.ContainerStatus containerStatus,
RMContainerEventType event)
Clean up a completed container.
|
void |
FSAppAttempt.containerCompleted(RMContainer rmContainer,
org.apache.hadoop.yarn.api.records.ContainerStatus containerStatus,
RMContainerEventType event) |
Long |
FSAppAttempt.getContainerPreemptionTime(RMContainer container) |
void |
FSParentQueue.recoverContainer(org.apache.hadoop.yarn.api.records.Resource clusterResource,
SchedulerApplicationAttempt schedulerAttempt,
RMContainer rmContainer) |
void |
FSLeafQueue.recoverContainer(org.apache.hadoop.yarn.api.records.Resource clusterResource,
SchedulerApplicationAttempt schedulerAttempt,
RMContainer rmContainer) |
void |
FSSchedulerNode.reserveResource(SchedulerApplicationAttempt application,
org.apache.hadoop.yarn.api.records.Priority priority,
RMContainer container) |
protected void |
FairScheduler.warnOrKillContainer(RMContainer container) |
| Modifier and Type | Method and Description |
|---|---|
RMContainer |
FifoScheduler.getRMContainer(org.apache.hadoop.yarn.api.records.ContainerId containerId) |
| Modifier and Type | Method and Description |
|---|---|
protected void |
FifoScheduler.completedContainerInternal(RMContainer rmContainer,
org.apache.hadoop.yarn.api.records.ContainerStatus containerStatus,
RMContainerEventType event) |
| Modifier and Type | Method and Description |
|---|---|
void |
OrderingPolicy.containerAllocated(S schedulableEntity,
RMContainer r)
The passed SchedulableEntity has been allocated the passed Container;
take appropriate action (depending on the comparator, a reordering of the
SchedulableEntity may be required)
|
void |
FifoOrderingPolicyForPendingApps.containerAllocated(S schedulableEntity,
RMContainer r) |
void |
FifoOrderingPolicy.containerAllocated(S schedulableEntity,
RMContainer r) |
void |
FairOrderingPolicy.containerAllocated(S schedulableEntity,
RMContainer r) |
abstract void |
AbstractComparatorOrderingPolicy.containerAllocated(S schedulableEntity,
RMContainer r) |
void |
OrderingPolicy.containerReleased(S schedulableEntity,
RMContainer r)
The passed SchedulableEntity has released the passed Container;
take appropriate action (depending on the comparator, a reordering of the
SchedulableEntity may be required)
|
void |
FifoOrderingPolicyForPendingApps.containerReleased(S schedulableEntity,
RMContainer r) |
void |
FifoOrderingPolicy.containerReleased(S schedulableEntity,
RMContainer r) |
void |
FairOrderingPolicy.containerReleased(S schedulableEntity,
RMContainer r) |
abstract void |
AbstractComparatorOrderingPolicy.containerReleased(S schedulableEntity,
RMContainer r) |
Copyright © 2017 Apache Software Foundation. All Rights Reserved.