const Name …

const stateKey …

type stateData …

func (d *stateData) Clone() framework.StateData { … }

type informationForClaim …

type DynamicResources …

// New initializes a new plugin and returns it.
func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts feature.Features) (framework.Plugin, error) { … }

var _ …
var _ …
var _ …
var _ …
var _ …
var _ …
var _ …

// Name returns name of the plugin. It is used in logs, etc.
func (pl *DynamicResources) Name() string { … }

// EventsToRegister returns the possible events that may make a Pod
// failed by this plugin schedulable.
func (pl *DynamicResources) EventsToRegister(_ context.Context) ([]framework.ClusterEventWithHint, error) { … }

// PreEnqueue checks if there are known reasons why a pod currently cannot be
// scheduled. When this fails, one of the registered events can trigger another
// attempt.
func (pl *DynamicResources) PreEnqueue(ctx context.Context, pod *v1.Pod) (status *framework.Status) { … }

// isSchedulableAfterClaimChange is invoked for add and update claim events reported by
// an informer. It checks whether that change made a previously unschedulable
// pod schedulable. It errs on the side of letting a pod scheduling attempt
// happen. The delete claim event will not invoke it, so newObj will never be nil.
func (pl *DynamicResources) isSchedulableAfterClaimChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{ … }

// isSchedulableAfterPodChange is invoked for update pod events reported by
// an informer. It checks whether that change adds the ResourceClaim(s) that the
// pod has been waiting for.
func (pl *DynamicResources) isSchedulableAfterPodChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{ … }

// isSchedulableAfterResourceSliceChange is invoked for add and update slice events reported by
// an informer. Such changes can make an unschedulable pod schedulable when the pod requests a device
// and the change adds a suitable device.
//
// For the sake of faster execution and avoiding code duplication, isSchedulableAfterResourceSliceChange
// only checks whether the pod uses claims. All of the more detailed checks are done in the scheduling
// attempt.
//
// The delete slice event will not invoke it, so newObj will never be nil.
func (pl *DynamicResources) isSchedulableAfterResourceSliceChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{ … }

// podResourceClaims returns the ResourceClaims for all pod.Spec.PodResourceClaims.
func (pl *DynamicResources) podResourceClaims(pod *v1.Pod) ([]*resourceapi.ResourceClaim, error) { … }

// foreachPodResourceClaim checks that each ResourceClaim for the pod exists.
// It calls an optional handler for those claims that it finds.
func (pl *DynamicResources) foreachPodResourceClaim(pod *v1.Pod, cb func(podResourceName string, claim *resourceapi.ResourceClaim)) error { … }

// PreFilter is invoked at the prefilter extension point to check if the pod has
// all immediate claims bound. UnschedulableAndUnresolvable is returned if
// the pod cannot be scheduled at the moment on any node.
func (pl *DynamicResources) PreFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) { … }

type claimListerForAssumeCache …

func (cl *claimListerForAssumeCache) ListAllAllocated() ([]*resourceapi.ResourceClaim, error) { … }
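// Illustrative sketch, not the elided implementation: EventsToRegister above
// typically wires informer events to the queueing-hint callbacks declared in
// this file, roughly as shown here. The exact event set and action masks
// registered by the real plugin are elided; exampleEventsToRegister is a
// hypothetical name.
func exampleEventsToRegister(pl *DynamicResources) []framework.ClusterEventWithHint {
	return []framework.ClusterEventWithHint{
		// Re-evaluate waiting pods when a ResourceClaim is added or updated.
		{
			Event:          framework.ClusterEvent{Resource: framework.ResourceClaim, ActionType: framework.Add | framework.Update},
			QueueingHintFn: pl.isSchedulableAfterClaimChange,
		},
		// A pod update can add the generated ResourceClaim(s) the pod was waiting for.
		{
			Event:          framework.ClusterEvent{Resource: framework.Pod, ActionType: framework.Update},
			QueueingHintFn: pl.isSchedulableAfterPodChange,
		},
	}
}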
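// Illustrative sketch of the queueing-hint callback shape used above:
// framework.Queue means the event may make the pod schedulable, while
// framework.QueueSkip means it cannot; a returned error makes the scheduler
// fall back to queueing the pod, which is why these callbacks err on the side
// of a scheduling attempt. This hypothetical example only matches claims
// referenced by name and ignores claims generated from templates.
func exampleClaimHint(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
	claim, ok := newObj.(*resourceapi.ResourceClaim)
	if !ok {
		return framework.Queue, fmt.Errorf("unexpected object type %T", newObj)
	}
	for _, podClaim := range pod.Spec.ResourceClaims {
		if podClaim.ResourceClaimName != nil && *podClaim.ResourceClaimName == claim.Name {
			// The changed claim belongs to the pod, so a new attempt may succeed.
			return framework.Queue, nil
		}
	}
	return framework.QueueSkip, nil
}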
// PreFilterExtensions returns prefilter extensions, pod add and remove.
func (pl *DynamicResources) PreFilterExtensions() framework.PreFilterExtensions { … }

func getStateData(cs *framework.CycleState) (*stateData, error) { … }

// Filter is invoked at the filter extension point.
// It evaluates if a pod can fit based on the resources it requests,
// for both allocated and unallocated claims.
//
// For claims that are bound, it checks that the node affinity is
// satisfied by the given node.
//
// For claims that are unbound, it checks whether the claim might get allocated
// for the node.
func (pl *DynamicResources) Filter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { … }

// PostFilter checks whether there are allocated claims that could get
// deallocated to help get the Pod schedulable. If yes, it picks one and
// requests its deallocation. This only gets called when filtering found no
// suitable node.
func (pl *DynamicResources) PostFilter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, filteredNodeStatusMap framework.NodeToStatusReader) (*framework.PostFilterResult, *framework.Status) { … }

// Reserve reserves claims for the pod.
func (pl *DynamicResources) Reserve(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) (status *framework.Status) { … }

// Unreserve clears the ReservedFor field for all claims.
// It's idempotent, and does nothing if no state is found for the given pod.
func (pl *DynamicResources) Unreserve(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) { … }

// PreBind gets called in a separate goroutine after it has been determined
// that the pod should get bound to this node. Because Reserve did not actually
// reserve claims, we need to do it now. For claims with the builtin controller,
// we also handle the allocation.
//
// If anything fails, we return an error and the pod will have to go into the
// backoff queue. The scheduler will call Unreserve as part of the error
// handling.
func (pl *DynamicResources) PreBind(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status { … }

// bindClaim gets called by PreBind for a claim which is not reserved for the pod yet.
// It might not even be allocated. bindClaim then ensures that the allocation
// and reservation are recorded. This finishes the work started in Reserve.
func (pl *DynamicResources) bindClaim(ctx context.Context, state *stateData, index int, pod *v1.Pod, nodeName string) (patchedClaim *resourceapi.ResourceClaim, finalErr error) { … }

// statusUnschedulable ensures that there is a log message associated with the
// line where the status originated.
func statusUnschedulable(logger klog.Logger, reason string, kv ...interface{ … }

// statusError ensures that there is a log message associated with the
// line where the error originated.
func statusError(logger klog.Logger, err error, kv ...interface{ … }
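// Illustrative sketch, not the elided implementation: helpers like
// statusUnschedulable above typically pair the returned status with a log
// entry attributed to the caller's line via logr's call-stack helper. The
// verbosity level and message wording are assumptions;
// exampleStatusUnschedulable is a hypothetical name.
func exampleStatusUnschedulable(logger klog.Logger, reason string, kv ...interface{}) *framework.Status {
	if loggerV := logger.V(5); loggerV.Enabled() {
		helper, loggerV := loggerV.WithCallStackHelper()
		helper() // attribute the log entry to our caller, not to this helper
		kv = append(kv, "reason", reason)
		loggerV.Info("pod unschedulable", kv...)
	}
	return framework.NewStatus(framework.UnschedulableAndUnresolvable, reason)
}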
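// Illustrative sketch of the CycleState access pattern behind getStateData
// above: read the value stored under the plugin's state key and type-assert
// it back to *stateData. exampleGetStateData and errWrongStateType are
// hypothetical names.
var errWrongStateType = errors.New("unable to convert state into stateData")

func exampleGetStateData(cs *framework.CycleState) (*stateData, error) {
	state, err := cs.Read(stateKey) // fails if PreFilter did not run and store state
	if err != nil {
		return nil, err
	}
	s, ok := state.(*stateData)
	if !ok {
		return nil, errWrongStateType
	}
	return s, nil
}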