kubernetes/pkg/controller/disruption/disruption.go

const DeletionTimeout

const stalePodDisruptionTimeout

type updater

type DisruptionController

type controllerAndScale

type podControllerFinder

func NewDisruptionController(
	ctx context.Context,
	podInformer coreinformers.PodInformer,
	pdbInformer policyinformers.PodDisruptionBudgetInformer,
	rcInformer coreinformers.ReplicationControllerInformer,
	rsInformer appsv1informers.ReplicaSetInformer,
	dInformer appsv1informers.DeploymentInformer,
	ssInformer appsv1informers.StatefulSetInformer,
	kubeClient clientset.Interface,
	restMapper apimeta.RESTMapper,
	scaleNamespacer scaleclient.ScalesGetter,
	discoveryClient discovery.DiscoveryInterface,
) *DisruptionController {}
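
A hedged sketch of how the controller is typically wired up from a shared informer factory. The restMapper and scaleClient variables and the zero resync period are placeholders for dependencies the caller is assumed to have built already:

factory := informers.NewSharedInformerFactory(kubeClient, 0)
dc := disruption.NewDisruptionController(
	ctx,
	factory.Core().V1().Pods(),
	factory.Policy().V1().PodDisruptionBudgets(),
	factory.Core().V1().ReplicationControllers(),
	factory.Apps().V1().ReplicaSets(),
	factory.Apps().V1().Deployments(),
	factory.Apps().V1().StatefulSets(),
	kubeClient,
	restMapper,  // apimeta.RESTMapper, e.g. a deferred discovery REST mapper
	scaleClient, // scaleclient.ScalesGetter used for the generic /scale fallback
	kubeClient.Discovery(),
)
factory.Start(ctx.Done())
go dc.Run(ctx)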

// NewDisruptionControllerInternal additionally allows setting the clock and the
// stalePodDisruptionTimeout.
// It is only intended for use by tests.
func NewDisruptionControllerInternal(ctx context.Context,
	podInformer coreinformers.PodInformer,
	pdbInformer policyinformers.PodDisruptionBudgetInformer,
	rcInformer coreinformers.ReplicationControllerInformer,
	rsInformer appsv1informers.ReplicaSetInformer,
	dInformer appsv1informers.DeploymentInformer,
	ssInformer appsv1informers.StatefulSetInformer,
	kubeClient clientset.Interface,
	restMapper apimeta.RESTMapper,
	scaleNamespacer scaleclient.ScalesGetter,
	discoveryClient discovery.DiscoveryInterface,
	clock clock.WithTicker,
	stalePodDisruptionTimeout time.Duration,
) *DisruptionController {}

// The workload resources do implement the scale subresource, so it would
// be possible to only check the scale subresource here. But since there is no
// way to take advantage of listers with scale subresources, we use the workload
// resources directly and only fall back to the scale subresource when needed.
func (dc *DisruptionController) finders() []podControllerFinder {}
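
Going by the comment above, the finder chain presumably tries the built-in workload kinds first and falls back to the generic scale subresource last. A sketch, assuming podControllerFinder is a function from an owner reference and namespace to a *controllerAndScale (the exact type definition is not shown in this outline):

func (dc *DisruptionController) finders() []podControllerFinder {
	return []podControllerFinder{
		dc.getPodReplicationController,
		dc.getPodDeployment,
		dc.getPodReplicaSet,
		dc.getPodStatefulSet,
		dc.getScaleController, // generic /scale fallback for custom workload resources
	}
}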

var controllerKindRS

var controllerKindSS

var controllerKindRC

var controllerKindDep

// getPodReplicaSet finds a replicaset which has no matching deployments.
func (dc *DisruptionController) getPodReplicaSet(ctx context.Context, controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {}

// getPodStatefulSet returns the statefulset referenced by the provided controllerRef.
func (dc *DisruptionController) getPodStatefulSet(ctx context.Context, controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {}

// getPodDeployment finds the deployment for any replicaset which is being managed by a deployment.
func (dc *DisruptionController) getPodDeployment(ctx context.Context, controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {}
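
A hedged sketch of the two-step owner walk this implies: the pod's controllerRef points at a ReplicaSet, and that ReplicaSet's own controllerRef points at the Deployment whose replica count we want. The lister field names (rsLister, dLister) and the controllerAndScale fields are assumptions, since the outline does not show them:

func (dc *DisruptionController) getPodDeployment(ctx context.Context, controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {
	ok, err := verifyGroupKind(controllerRef, controllerKindRS.Kind, []string{"apps", "extensions"})
	if !ok || err != nil {
		return nil, err
	}
	rs, err := dc.rsLister.ReplicaSets(namespace).Get(controllerRef.Name) // assumed lister field
	if err != nil {
		return nil, nil // owner no longer exists; treat the pod as unmanaged
	}
	rsRef := metav1.GetControllerOf(rs)
	if rsRef == nil {
		return nil, nil // bare ReplicaSet; getPodReplicaSet handles this case
	}
	deployment, err := dc.dLister.Deployments(namespace).Get(rsRef.Name) // assumed lister field
	if err != nil {
		return nil, nil
	}
	return &controllerAndScale{deployment.UID, *deployment.Spec.Replicas}, nil
}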

func (dc *DisruptionController) getPodReplicationController(ctx context.Context, controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {}

func (dc *DisruptionController) getScaleController(ctx context.Context, controllerRef *metav1.OwnerReference, namespace string) (*controllerAndScale, error) {}

func (dc *DisruptionController) implementsScale(gvr schema.GroupVersionResource) (bool, error) {}

func verifyGroupKind(controllerRef *metav1.OwnerReference, expectedKind string, expectedGroups []string) (bool, error) {}
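
A self-contained sketch of what a kind/group check like this usually amounts to: parse the owner reference's APIVersion and compare the kind and group against the expected values (imports assumed: metav1 for OwnerReference, schema from apimachinery's runtime/schema):

func verifyGroupKind(controllerRef *metav1.OwnerReference, expectedKind string, expectedGroups []string) (bool, error) {
	gv, err := schema.ParseGroupVersion(controllerRef.APIVersion)
	if err != nil {
		return false, err
	}
	if controllerRef.Kind != expectedKind {
		return false, nil
	}
	for _, group := range expectedGroups {
		if group == gv.Group {
			return true, nil
		}
	}
	return false, nil
}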

func (dc *DisruptionController) Run(ctx context.Context) {}

func (dc *DisruptionController) addDB(logger klog.Logger, obj interface{}) {}

func (dc *DisruptionController) updateDB(logger klog.Logger, old, cur interface{}) {}

func (dc *DisruptionController) removeDB(logger klog.Logger, obj interface{}) {}

func (dc *DisruptionController) addPod(logger klog.Logger, obj interface{}) {}

func (dc *DisruptionController) updatePod(logger klog.Logger, _, cur interface{}) {}

func (dc *DisruptionController) deletePod(logger klog.Logger, obj interface{}) {}

func (dc *DisruptionController) enqueuePdb(logger klog.Logger, pdb *policy.PodDisruptionBudget) {}

func (dc *DisruptionController) enqueuePdbForRecheck(logger klog.Logger, pdb *policy.PodDisruptionBudget, delay time.Duration) {}

func (dc *DisruptionController) enqueueStalePodDisruptionCleanup(logger klog.Logger, pod *v1.Pod, d time.Duration) {}

func (dc *DisruptionController) getPdbForPod(logger klog.Logger, pod *v1.Pod) *policy.PodDisruptionBudget {}

// This function returns the pods covered by the PodDisruptionBudget object.
// IMPORTANT NOTE: the returned pods should NOT be modified.
func (dc *DisruptionController) getPodsForPdb(pdb *policy.PodDisruptionBudget) ([]*v1.Pod, error) {}
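
A hedged sketch of the lookup this note describes: convert the PDB's label selector and list matching pods from the shared informer cache, which is exactly why the result must not be mutated (podLister is an assumed field name):

func (dc *DisruptionController) getPodsForPdb(pdb *policy.PodDisruptionBudget) ([]*v1.Pod, error) {
	sel, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)
	if err != nil {
		return []*v1.Pod{}, err
	}
	// Objects returned by the lister are shared with the informer cache.
	pods, err := dc.podLister.Pods(pdb.Namespace).List(sel) // assumed lister field
	if err != nil {
		return []*v1.Pod{}, err
	}
	return pods, nil
}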

func (dc *DisruptionController) worker(ctx context.Context) {}

func (dc *DisruptionController) processNextWorkItem(ctx context.Context) bool {}
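
worker and processNextWorkItem presumably form the standard client-go workqueue loop. A sketch, assuming the controller keeps a rate-limited queue of namespace/name string keys in a field called queue:

func (dc *DisruptionController) processNextWorkItem(ctx context.Context) bool {
	key, quit := dc.queue.Get()
	if quit {
		return false
	}
	defer dc.queue.Done(key)

	if err := dc.sync(ctx, key); err != nil {
		utilruntime.HandleError(fmt.Errorf("error syncing PodDisruptionBudget %v, requeuing: %w", key, err))
		dc.queue.AddRateLimited(key) // retry with backoff
		return true
	}
	dc.queue.Forget(key) // success: reset the rate limiter for this key
	return true
}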

func (dc *DisruptionController) recheckWorker() {}

func (dc *DisruptionController) processNextRecheckWorkItem() bool {}

func (dc *DisruptionController) stalePodDisruptionWorker(ctx context.Context) {}

func (dc *DisruptionController) processNextStalePodDisruptionWorkItem(ctx context.Context) bool {}

func (dc *DisruptionController) sync(ctx context.Context, key string) error {}

func (dc *DisruptionController) trySync(ctx context.Context, pdb *policy.PodDisruptionBudget) error {}

func (dc *DisruptionController) syncStalePodDisruption(ctx context.Context, key string) error {}

func (dc *DisruptionController) getExpectedPodCount(ctx context.Context, pdb *policy.PodDisruptionBudget, pods []*v1.Pod) (expectedCount, desiredHealthy int32, unmanagedPods []string, err error) {}

func (dc *DisruptionController) getExpectedScale(ctx context.Context, pdb *policy.PodDisruptionBudget, pods []*v1.Pod) (expectedCount int32, unmanagedPods []string, err error) {}

func countHealthyPods(pods []*v1.Pod, disruptedPods map[string]metav1.Time, currentTime time.Time) (currentHealthy int32) {}
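
A hedged, self-contained sketch of the counting rule the signature suggests: a pod counts as healthy if it is ready, unless it already appears in disruptedPods with a disruption timestamp still within DeletionTimeout, in which case it is assumed to be on its way out (podutil here refers to the pod helper package under pkg/api/v1/pod):

func countHealthyPods(pods []*v1.Pod, disruptedPods map[string]metav1.Time, currentTime time.Time) (currentHealthy int32) {
	for _, pod := range pods {
		// A pod recently granted an eviction is treated as already disrupted
		// until DeletionTimeout has passed.
		if disruptionTime, found := disruptedPods[pod.Name]; found &&
			disruptionTime.Time.Add(DeletionTimeout).After(currentTime) {
			continue
		}
		if podutil.IsPodReady(pod) {
			currentHealthy++
		}
	}
	return
}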

// Builds a new map of disrupted pods, dropping entries that refer to pods which no longer
// exist, are already being deleted, or were never actually deleted within DeletionTimeout.
// Also returns the time at which this check should be repeated, if any.
func (dc *DisruptionController) buildDisruptedPodMap(logger klog.Logger, pods []*v1.Pod, pdb *policy.PodDisruptionBudget, currentTime time.Time) (map[string]metav1.Time, *time.Time) {}
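
A hedged sketch of the pruning described in the comment: keep only entries for pods that still exist, are not already terminating, and whose disruption timestamp is younger than DeletionTimeout; the earliest expiry among the kept entries doubles as the recheck time:

func (dc *DisruptionController) buildDisruptedPodMap(logger klog.Logger, pods []*v1.Pod, pdb *policy.PodDisruptionBudget, currentTime time.Time) (map[string]metav1.Time, *time.Time) {
	disruptedPods := pdb.Status.DisruptedPods
	result := make(map[string]metav1.Time)
	var recheckTime *time.Time

	for _, pod := range pods {
		if pod.DeletionTimestamp != nil {
			continue // already being deleted; nothing left to track
		}
		disruptionTime, found := disruptedPods[pod.Name]
		if !found {
			continue // pod was never marked as disrupted
		}
		expectedDeletion := disruptionTime.Time.Add(DeletionTimeout)
		if expectedDeletion.Before(currentTime) {
			// The pod should have been deleted by now but was not; drop the
			// entry so the pod counts against the budget again.
			continue
		}
		if recheckTime == nil || expectedDeletion.Before(*recheckTime) {
			recheckTime = &expectedDeletion
		}
		result[pod.Name] = disruptionTime
	}
	return result, recheckTime
}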

// failSafe is an attempt to at least update the DisruptionsAllowed field to
// 0 if everything else has failed. This is one place we
// implement the "fail open" part of the design since if we manage to update
// this field correctly, we will prevent the /evict handler from approving an
// eviction when it may be unsafe to do so.
func (dc *DisruptionController) failSafe(ctx context.Context, pdb *policy.PodDisruptionBudget, err error) error {}
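
A hedged sketch of the fallback described above: zero out DisruptionsAllowed, surface the failure on the DisruptionAllowed condition, and write the status back so the /evict handler stops approving evictions until a full sync succeeds. Writing through dc.kubeClient directly is an assumption; the real code may go through an injectable status updater:

func (dc *DisruptionController) failSafe(ctx context.Context, pdb *policy.PodDisruptionBudget, err error) error {
	newPdb := pdb.DeepCopy()
	newPdb.Status.DisruptionsAllowed = 0

	apimeta.SetStatusCondition(&newPdb.Status.Conditions, metav1.Condition{
		Type:               policy.DisruptionAllowedCondition,
		Status:             metav1.ConditionFalse,
		Reason:             policy.SyncFailedReason,
		Message:            err.Error(),
		ObservedGeneration: newPdb.Status.ObservedGeneration,
	})

	_, updateErr := dc.kubeClient.PolicyV1().PodDisruptionBudgets(newPdb.Namespace).
		UpdateStatus(ctx, newPdb, metav1.UpdateOptions{})
	return updateErr
}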

func (dc *DisruptionController) updatePdbStatus(ctx context.Context, pdb *policy.PodDisruptionBudget, currentHealthy, desiredHealthy, expectedCount int32,
	disruptedPods map[string]metav1.Time) error {}

func (dc *DisruptionController) writePdbStatus(ctx context.Context, pdb *policy.PodDisruptionBudget) error {}

func (dc *DisruptionController) nonTerminatingPodHasStaleDisruptionCondition(pod *v1.Pod) (bool, time.Duration) {}
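
A hedged sketch of what the name suggests: a pod that is not already terminating but still carries a true DisruptionTarget condition is considered stale once the condition has been true for longer than stalePodDisruptionTimeout; otherwise the remaining wait is returned so the cleanup can be re-queued. The dc.clock and dc.stalePodDisruptionTimeout fields and the condition helper are assumptions:

func (dc *DisruptionController) nonTerminatingPodHasStaleDisruptionCondition(pod *v1.Pod) (bool, time.Duration) {
	if pod.DeletionTimestamp != nil {
		return false, 0 // pod is already terminating; nothing to clean up
	}
	_, cond := apipod.GetPodCondition(&pod.Status, v1.DisruptionTarget) // helper from pkg/api/v1/pod
	if cond == nil || cond.Status != v1.ConditionTrue {
		return false, 0
	}
	waitFor := dc.stalePodDisruptionTimeout - dc.clock.Since(cond.LastTransitionTime.Time)
	if waitFor < 0 {
		waitFor = 0
	}
	return waitFor == 0, waitFor
}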