kubernetes/pkg/controller/nodelifecycle/node_lifecycle_controller.go

func init() {}

var UnreachableTaintTemplate

var NotReadyTaintTemplate

var nodeConditionToTaintKeyStatusMap

var taintKeyToNodeConditionMap

type ZoneState

const stateInitial

const stateNormal

const stateFullDisruption

const statePartialDisruption

const retrySleepTime

const nodeNameKeyIndex

const podUpdateWorkerSize

const nodeUpdateWorkerSize

const taintEvictionController

var labelReconcileInfo

type nodeHealthData

func (n *nodeHealthData) deepCopy() *nodeHealthData {}

type nodeHealthMap

func newNodeHealthMap() *nodeHealthMap {}

// getDeepCopy returns a copy of the node health data.
// It prevents the data from being changed after it is retrieved from the map.
func (n *nodeHealthMap) getDeepCopy(name string) *nodeHealthData {}

func (n *nodeHealthMap) set(name string, data *nodeHealthData) {}
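
// The pair of methods above suggests a lock-guarded map whose reads hand out
// deep copies, so callers can never mutate shared state after retrieval. A
// minimal sketch of that pattern follows; the struct layout and field names
// are assumptions (not the controller's real definitions), and "sync" is
// assumed to be imported.
type nodeHealthMapSketch struct {
	lock        sync.RWMutex
	nodeHealths map[string]*nodeHealthData // hypothetical backing map
}

func (s *nodeHealthMapSketch) getDeepCopy(name string) *nodeHealthData {
	s.lock.RLock()
	defer s.lock.RUnlock()
	// Handing out a copy means later set() calls cannot race with the caller's
	// use of the returned value (assumes deepCopy tolerates a nil receiver).
	return s.nodeHealths[name].deepCopy()
}

func (s *nodeHealthMapSketch) set(name string, data *nodeHealthData) {
	s.lock.Lock()
	defer s.lock.Unlock()
	s.nodeHealths[name] = data
}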

type podUpdateItem

type Controller

// NewNodeLifecycleController returns a new node lifecycle controller.
func NewNodeLifecycleController(
	ctx context.Context,
	leaseInformer coordinformers.LeaseInformer,
	podInformer coreinformers.PodInformer,
	nodeInformer coreinformers.NodeInformer,
	daemonSetInformer appsv1informers.DaemonSetInformer,
	kubeClient clientset.Interface,
	nodeMonitorPeriod time.Duration,
	nodeStartupGracePeriod time.Duration,
	nodeMonitorGracePeriod time.Duration,
	evictionLimiterQPS float32,
	secondaryEvictionLimiterQPS float32,
	largeClusterThreshold int32,
	unhealthyZoneThreshold float32,
) (*Controller, error) {}
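
// A hedged usage sketch: wiring the controller to a shared informer factory and
// starting it. The numeric arguments are illustrative only (roughly in the range
// of the kube-controller-manager defaults), and the sketch assumes the standard
// client-go imports ("k8s.io/client-go/informers" as informers, plus "time").
func runNodeLifecycleControllerSketch(ctx context.Context, kubeClient clientset.Interface) error {
	factory := informers.NewSharedInformerFactory(kubeClient, 0)
	nc, err := NewNodeLifecycleController(
		ctx,
		factory.Coordination().V1().Leases(),
		factory.Core().V1().Pods(),
		factory.Core().V1().Nodes(),
		factory.Apps().V1().DaemonSets(),
		kubeClient,
		5*time.Second,  // nodeMonitorPeriod
		1*time.Minute,  // nodeStartupGracePeriod
		40*time.Second, // nodeMonitorGracePeriod
		0.1,            // evictionLimiterQPS
		0.01,           // secondaryEvictionLimiterQPS
		50,             // largeClusterThreshold
		0.55,           // unhealthyZoneThreshold
	)
	if err != nil {
		return err
	}
	factory.Start(ctx.Done())
	go nc.Run(ctx)
	return nil
}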

// Run starts an asynchronous loop that monitors the status of cluster nodes.
func (nc *Controller) Run(ctx context.Context) {}
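
// A minimal sketch of the kind of asynchronous loop Run is described as starting,
// built on wait.UntilWithContext from k8s.io/apimachinery/pkg/util/wait; the
// nc.nodeMonitorPeriod field name is an assumption mirroring the constructor
// parameter, and the real Run also syncs informer caches and starts more workers.
func runMonitorLoopSketch(ctx context.Context, nc *Controller) {
	go wait.UntilWithContext(ctx, func(ctx context.Context) {
		if err := nc.monitorNodeHealth(ctx); err != nil {
			klog.FromContext(ctx).Error(err, "Error monitoring node health")
		}
	}, nc.nodeMonitorPeriod)
	<-ctx.Done()
}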

func (nc *Controller) doNodeProcessingPassWorker(ctx context.Context) {}

func (nc *Controller) doNoScheduleTaintingPass(ctx context.Context, nodeName string) error {}

func (nc *Controller) doNoExecuteTaintingPass(ctx context.Context) {}

// monitorNodeHealth verifies that node health is constantly updated by the kubelet and, if it is not, posts "NodeReady==ConditionUnknown".
// This function will
//   - add nodes which are not ready or not reachable for a long period of time to a rate-limited
//     queue so that NoExecute taints can be added by the goroutine running the doNoExecuteTaintingPass function,
//   - update the PodReady condition of Pods according to the state of the Node Ready condition.
func (nc *Controller) monitorNodeHealth(ctx context.Context) error {}
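
// A condensed sketch of the per-node flow described above, using only functions
// declared in this file; zone bookkeeping, eviction-rate handling, and the
// actual PodReady patch are elided, and the order of the values returned by
// tryUpdateNodeHealth is an assumption.
func monitorNodeHealthSketch(ctx context.Context, nc *Controller, nodes []*v1.Node) {
	for _, node := range nodes {
		_, observedReadyCondition, currentReadyCondition, err := nc.tryUpdateNodeHealth(ctx, node)
		if err != nil || currentReadyCondition == nil {
			continue // retried on the next monitor period
		}
		// Queue or un-queue the node for NoExecute tainting based on the observed
		// Ready condition; doNoExecuteTaintingPass later drains that queue.
		nc.processTaintBaseEviction(ctx, node, &observedReadyCondition)
		if currentReadyCondition.Status != v1.ConditionTrue {
			// Node is not Ready: the PodReady condition of Pods bound to this
			// node is updated accordingly (helper elided here).
		}
	}
}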

func (nc *Controller) processTaintBaseEviction(ctx context.Context, node *v1.Node, observedReadyCondition *v1.NodeCondition) {}

const labelNodeDisruptionExclusion

func isNodeExcludedFromDisruptionChecks(node *v1.Node) bool {}

// tryUpdateNodeHealth checks a given node's conditions and tries to update them. It returns the grace
// period to which the node is entitled, the current and the last observed Ready condition, and an error if one occurred.
func (nc *Controller) tryUpdateNodeHealth(ctx context.Context, node *v1.Node) (time.Duration, v1.NodeCondition, *v1.NodeCondition, error) {}
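
// A simplified sketch of the grace-period choice implied above: a node whose
// kubelet has never posted a Ready condition gets the startup grace period
// (measured from node creation), any other node gets the regular monitor grace
// period (measured from its last recorded health probe). The field names on nc
// are assumptions mirroring the constructor parameters.
func gracePeriodSketch(nc *Controller, currentReadyCondition *v1.NodeCondition) time.Duration {
	if currentReadyCondition == nil {
		// Kubelet has not posted node status yet.
		return nc.nodeStartupGracePeriod
	}
	return nc.nodeMonitorGracePeriod
}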

func (nc *Controller) handleDisruption(ctx context.Context, zoneToNodeConditions map[string][]*v1.NodeCondition, nodes []*v1.Node) {}

func (nc *Controller) podUpdated(oldPod, newPod *v1.Pod) {}

func (nc *Controller) doPodProcessingWorker(ctx context.Context) {}

// processPod handles events for pods that have been assigned to nodes. In particular:
// 1. for a NodeReady=true node, taint eviction for the pod is cancelled,
// 2. for a NodeReady=false or unknown node, taint eviction of the pod proceeds and the pod is marked as not ready,
// 3. if the node doesn't exist in the cache, the pod is skipped.
func (nc *Controller) processPod(ctx context.Context, podItem podUpdateItem) {}
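
// A sketch of the decision table enumerated above, acting on a node already
// fetched from the informer cache; the cache lookup and the actual PodReady
// patch are elided, and a nil node stands in for "node not found in cache".
func getReadyConditionSketch(node *v1.Node) *v1.NodeCondition {
	for i := range node.Status.Conditions {
		if node.Status.Conditions[i].Type == v1.NodeReady {
			return &node.Status.Conditions[i]
		}
	}
	return nil
}

func processPodSketch(node *v1.Node) string {
	if node == nil {
		return "skip" // case 3: node not in cache
	}
	ready := getReadyConditionSketch(node)
	if ready != nil && ready.Status == v1.ConditionTrue {
		return "cancel eviction" // case 1: node is Ready
	}
	return "mark pod not ready" // case 2: eviction proceeds, pod marked not ready
}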

func (nc *Controller) setLimiterInZone(zone string, zoneSize int, state ZoneState) {}

// classifyNodes classifies allNodes into three categories:
//  1. added: the nodes that are in 'allNodes' but not in 'knownNodeSet'
//  2. deleted: the nodes that are in 'knownNodeSet' but not in 'allNodes'
//  3. newZoneRepresentatives: the nodes that are in both 'knownNodeSet' and 'allNodes', but whose zones have no zone state yet
func (nc *Controller) classifyNodes(allNodes []*v1.Node) (added, deleted, newZoneRepresentatives []*v1.Node) {}
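
// A sketch of the set arithmetic described above, using plain maps; knownNodeSet
// and zoneStates stand in for controller fields whose exact types are assumptions,
// and the zone-key helper is a simplification based on the well-known topology labels.
func nodeZoneSketch(node *v1.Node) string {
	return node.Labels[v1.LabelTopologyRegion] + ":" + node.Labels[v1.LabelTopologyZone]
}

func classifyNodesSketch(allNodes []*v1.Node, knownNodeSet map[string]*v1.Node, zoneStates map[string]ZoneState) (added, deleted, newZoneRepresentatives []*v1.Node) {
	seen := map[string]bool{}
	for _, node := range allNodes {
		seen[node.Name] = true
		if _, known := knownNodeSet[node.Name]; !known {
			added = append(added, node) // in allNodes, not in knownNodeSet
		} else if _, hasZone := zoneStates[nodeZoneSketch(node)]; !hasZone {
			newZoneRepresentatives = append(newZoneRepresentatives, node) // known node, but its zone has no state yet
		}
	}
	for name, node := range knownNodeSet {
		if !seen[name] {
			deleted = append(deleted, node) // in knownNodeSet, not in allNodes
		}
	}
	return added, deleted, newZoneRepresentatives
}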

// HealthyQPSFunc returns the default value for the cluster eviction rate; nodeNum
// is taken only for consistency with ReducedQPSFunc.
func (nc *Controller) HealthyQPSFunc(nodeNum int) float32 {}

// ReducedQPSFunc returns the reduced eviction rate: when the cluster (zone) is
// large, evictions are slowed down; when it is small, evictions are stopped altogether.
func (nc *Controller) ReducedQPSFunc(nodeNum int) float32 {}
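
// A sketch of the rate selection the two comments above describe: the healthy
// rate is the flat evictionLimiterQPS, while the reduced rate applies only to
// zones larger than largeClusterThreshold and drops to zero (no evictions) for
// smaller zones. The field names on nc mirror the constructor parameters and
// are assumptions.
func healthyQPSSketch(nc *Controller, nodeNum int) float32 {
	return nc.evictionLimiterQPS
}

func reducedQPSSketch(nc *Controller, nodeNum int) float32 {
	if int32(nodeNum) > nc.largeClusterThreshold {
		return nc.secondaryEvictionLimiterQPS
	}
	return 0 // small zone: stop evictions altogether
}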

// addPodEvictorForNewZone checks whether a new zone has appeared and, if so, adds a new evictor for it.
func (nc *Controller) addPodEvictorForNewZone(logger klog.Logger, node *v1.Node) {}

func (nc *Controller) markNodeForTainting(node *v1.Node, status v1.ConditionStatus) bool {}

func (nc *Controller) markNodeAsReachable(ctx context.Context, node *v1.Node) (bool, error) {}

// ComputeZoneState computes the state of a zone from the NodeReady conditions of all Nodes
// in the zone, returning the number of not-Ready Nodes together with the ZoneState.
// The zone is considered:
// - fullyDisrupted if there are no Ready Nodes,
// - partiallyDisrupted if at least nc.unhealthyZoneThreshold percent of Nodes are not Ready,
// - normal otherwise
func (nc *Controller) ComputeZoneState(nodeReadyConditions []*v1.NodeCondition) (int, ZoneState) {}
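
// A sketch of the zone classification spelled out above; the "more than two
// not-Ready nodes" guard on the partial-disruption case is an assumption about
// how the threshold check avoids flapping in very small zones.
func computeZoneStateSketch(nc *Controller, readyConditions []*v1.NodeCondition) (int, ZoneState) {
	ready, notReady := 0, 0
	for _, c := range readyConditions {
		if c != nil && c.Status == v1.ConditionTrue {
			ready++
		} else {
			notReady++
		}
	}
	switch {
	case ready == 0 && notReady > 0:
		return notReady, stateFullDisruption // no Ready nodes at all
	case notReady > 2 && float32(notReady)/float32(ready+notReady) >= nc.unhealthyZoneThreshold:
		return notReady, statePartialDisruption // too large a fraction is not Ready
	default:
		return notReady, stateNormal
	}
}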

// reconcileNodeLabels reconciles node labels.
func (nc *Controller) reconcileNodeLabels(ctx context.Context, nodeName string) error {}