kubernetes/pkg/controller/disruption/disruption_test.go

type pdbStates

var alwaysReady

func (ps *pdbStates) Set(ctx context.Context, pdb *policy.PodDisruptionBudget) error {}

func (ps *pdbStates) Get(key string) policy.PodDisruptionBudget {}

func (ps *pdbStates) VerifyPdbStatus(t *testing.T, key string, disruptionsAllowed, currentHealthy, desiredHealthy, expectedPods int32, disruptedPodMap map[string]metav1.Time) {}

func (ps *pdbStates) VerifyDisruptionAllowed(t *testing.T, key string, disruptionsAllowed int32) {}

func (ps *pdbStates) VerifyNoStatusError(t *testing.T, key string) {}

type disruptionController

var customGVK

func newFakeDisruptionController(ctx context.Context) (*disruptionController, *pdbStates) {}

func newFakeDisruptionControllerWithTime(ctx context.Context, now time.Time) (*disruptionController, *pdbStates) {}

func fooBar() map[string]string {}

func newSel(labels map[string]string) *metav1.LabelSelector {}

func newSelFooBar() *metav1.LabelSelector {}

func newMinAvailablePodDisruptionBudget(t *testing.T, minAvailable intstr.IntOrString) (*policy.PodDisruptionBudget, string) {}

func newMaxUnavailablePodDisruptionBudget(t *testing.T, maxUnavailable intstr.IntOrString) (*policy.PodDisruptionBudget, string) {}

func updatePodOwnerToRc(t *testing.T, pod *v1.Pod, rc *v1.ReplicationController) {}

func updatePodOwnerToRs(t *testing.T, pod *v1.Pod, rs *apps.ReplicaSet) {}

func updatePodOwnerToSs(t *testing.T, pod *v1.Pod, ss *apps.StatefulSet) {}

func newPod(t *testing.T, name string) (*v1.Pod, string) {}

func newReplicationController(t *testing.T, size int32) (*v1.ReplicationController, string) {}

func newDeployment(t *testing.T, size int32) (*apps.Deployment, string) {}

func newReplicaSet(t *testing.T, size int32) (*apps.ReplicaSet, string) {}

func newStatefulSet(t *testing.T, size int32) (*apps.StatefulSet, string) {}

func update(t *testing.T, store cache.Store, obj interface{}) {}

func add(t *testing.T, store cache.Store, obj interface{}) {}

// Create a PDB with no selector and verify it matches all pods.
func TestNoSelector(t *testing.T) {}
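
// An illustrative sketch (not the test body) of the no-selector scenario:
// an empty selector on a PDB matches every pod in its namespace. It assumes
// the disruptionController fields (pdbStore, podStore) and the sync method
// used throughout these tests, which this outline does not show.
func sketchNoSelector(ctx context.Context, t *testing.T) {
	dc, ps := newFakeDisruptionController(ctx)
	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(3))
	pdb.Spec.Selector = &metav1.LabelSelector{} // empty selector matches all pods
	add(t, dc.pdbStore, pdb)
	pod, _ := newPod(t, "pod")
	add(t, dc.podStore, pod)
	dc.sync(ctx, pdbName)
	// One healthy pod against minAvailable=3: no disruptions allowed.
	ps.VerifyPdbStatus(t, pdbName, 0, 1, 3, 1, map[string]metav1.Time{})
}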

// Verify that available/expected counts go up as we add pods, then verify that
// available count goes down when we make a pod unavailable.
func TestUnavailable(t *testing.T) {}
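
// A sketch of the bookkeeping TestUnavailable describes: ready pods raise
// currentHealthy, and removing a pod's Ready condition lowers it. Store and
// field names follow the assumptions noted in sketchNoSelector above.
func sketchUnavailable(ctx context.Context, t *testing.T) {
	dc, ps := newFakeDisruptionController(ctx)
	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(3))
	add(t, dc.pdbStore, pdb)
	var pods []*v1.Pod
	for i := 0; i < 4; i++ {
		pod, _ := newPod(t, fmt.Sprintf("pod-%d", i))
		pods = append(pods, pod)
		add(t, dc.podStore, pod)
	}
	dc.sync(ctx, pdbName)
	// 4 healthy vs. minAvailable=3: one disruption is allowed.
	ps.VerifyPdbStatus(t, pdbName, 1, 4, 3, 4, map[string]metav1.Time{})

	pods[0].Status.Conditions = []v1.PodCondition{} // drop Ready => unhealthy
	update(t, dc.podStore, pods[0])
	dc.sync(ctx, pdbName)
	ps.VerifyPdbStatus(t, pdbName, 0, 3, 3, 4, map[string]metav1.Time{})
}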

// Verify that an integer MaxUnavailable won't
// allow a disruption for pods with no controller.
func TestIntegerMaxUnavailable(t *testing.T) {}
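
// Sketch: with an integer maxUnavailable and a pod that has no controller,
// the expected pod count can't be determined, so nothing may be disrupted.
// Same field-name assumptions as the sketches above.
func sketchIntegerMaxUnavailable(ctx context.Context, t *testing.T) {
	dc, ps := newFakeDisruptionController(ctx)
	pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(1))
	add(t, dc.pdbStore, pdb)
	pod, _ := newPod(t, "naked")
	add(t, dc.podStore, pod)
	dc.sync(ctx, pdbName)
	ps.VerifyDisruptionAllowed(t, pdbName, 0)
}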

// Verify that an integer MaxUnavailable will recompute allowed disruptions when the scale of
// the selected pod's controller is modified.
func TestIntegerMaxUnavailableWithScaling(t *testing.T) {}
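
// Sketch of the rescale case: the expected count comes from the owner's
// scale, so resizing the ReplicaSet recomputes desiredHealthy and
// expectedPods (disruptionsAllowed stays 0 here because only one of the
// expected pods is actually healthy). Field names are assumptions.
func sketchMaxUnavailableWithScaling(ctx context.Context, t *testing.T) {
	dc, ps := newFakeDisruptionController(ctx)
	pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(2))
	add(t, dc.pdbStore, pdb)
	rs, _ := newReplicaSet(t, 7)
	add(t, dc.rsStore, rs)
	pod, _ := newPod(t, "pod")
	updatePodOwnerToRs(t, pod, rs)
	add(t, dc.podStore, pod)
	dc.sync(ctx, pdbName)
	// expected=7, desiredHealthy=7-2=5, currentHealthy=1.
	ps.VerifyPdbStatus(t, pdbName, 0, 1, 5, 7, map[string]metav1.Time{})

	replicas := int32(3)
	rs.Spec.Replicas = &replicas
	update(t, dc.rsStore, rs)
	dc.sync(ctx, pdbName)
	// expected=3, desiredHealthy=3-2=1.
	ps.VerifyPdbStatus(t, pdbName, 0, 1, 1, 3, map[string]metav1.Time{})
}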

// Verify that a percentage MaxUnavailable will recompute allowed disruptions when the scale of
// the selected pod's controller is modified.
func TestPercentageMaxUnavailableWithScaling(t *testing.T) {}
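
// Sketch of the percentage variant: the percentage is resolved against the
// owner's scale at sync time. 50% is used here to sidestep rounding
// questions. Same field-name assumptions as the sketches above.
func sketchPercentageMaxUnavailable(ctx context.Context, t *testing.T) {
	dc, ps := newFakeDisruptionController(ctx)
	pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromString("50%"))
	add(t, dc.pdbStore, pdb)
	rc, _ := newReplicationController(t, 4)
	add(t, dc.rcStore, rc)
	pod, _ := newPod(t, "pod")
	updatePodOwnerToRc(t, pod, rc)
	add(t, dc.podStore, pod)
	dc.sync(ctx, pdbName)
	// 50% of 4 expected pods may be unavailable => desiredHealthy=2.
	ps.VerifyPdbStatus(t, pdbName, 0, 1, 2, 4, map[string]metav1.Time{})
}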

// Create a pod with no controller, and verify that a PDB with a percentage
// specified won't allow a disruption.
func TestNakedPod(t *testing.T) {}
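
// Sketch: with no owning controller there is no scale to resolve the
// percentage against, so every disruption is blocked. The "28%" value is an
// arbitrary illustrative choice.
func sketchNakedPod(ctx context.Context, t *testing.T) {
	dc, ps := newFakeDisruptionController(ctx)
	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
	add(t, dc.pdbStore, pdb)
	pod, _ := newPod(t, "naked")
	add(t, dc.podStore, pod)
	dc.sync(ctx, pdbName)
	ps.VerifyDisruptionAllowed(t, pdbName, 0)
}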

// Create a pod with an unsupported controller, and verify that a PDB with a percentage
// specified won't allow a disruption.
func TestUnsupportedControllerPod(t *testing.T) {}
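
// Sketch: an owner reference pointing at customGVK (declared above), for
// which no scale finder exists, also leaves the expected count unknown.
// The owner name is hypothetical.
func sketchUnsupportedController(ctx context.Context, t *testing.T) {
	dc, ps := newFakeDisruptionController(ctx)
	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
	add(t, dc.pdbStore, pdb)
	pod, _ := newPod(t, "pod")
	isController := true
	pod.OwnerReferences = append(pod.OwnerReferences, metav1.OwnerReference{
		APIVersion: customGVK.GroupVersion().String(),
		Kind:       customGVK.Kind,
		Name:       "custom-owner", // hypothetical name
		Controller: &isController,
	})
	add(t, dc.podStore, pod)
	dc.sync(ctx, pdbName)
	ps.VerifyDisruptionAllowed(t, pdbName, 0)
}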

// Verify that the disruption controller does not error when unmanaged pods are found.
func TestStatusForUnmanagedPod(t *testing.T) {}

// Verify that unmanaged pods are counted correctly.
func TestTotalUnmanagedPods(t *testing.T) {}
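
// For reference, "unmanaged" here means a pod with no controller owner
// reference; counting such pods might look like this (helper name is
// hypothetical):
func countUnmanagedPods(pods []*v1.Pod) int {
	n := 0
	for _, pod := range pods {
		if metav1.GetControllerOf(pod) == nil {
			n++
		}
	}
	return n
}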

// Verify that we count the scale of a ReplicaSet even when it has no Deployment.
func TestReplicaSet(t *testing.T) {}

func TestScaleResource(t *testing.T) {}

func TestScaleFinderNoResource(t *testing.T) {}

// Verify that a pod managed by multiple controllers doesn't allow the PDB to permit disruptions.
func TestMultipleControllers(t *testing.T) {}

func TestReplicationController(t *testing.T) {}

func TestStatefulSetController(t *testing.T) {}

func TestTwoControllers(t *testing.T) {}

// Verify that syncing a PDB that doesn't exist does not cause an error.
func TestPDBNotExist(t *testing.T) {}

func TestUpdateDisruptedPods(t *testing.T) {}

func TestBasicFinderFunctions(t *testing.T) {}

func TestDeploymentFinderFunction(t *testing.T) {}

// This test checks that the disruption controller does not write stale data to
// a PDB status during race conditions with the eviction handler. Specifically,
// failed updates due to ResourceVersion conflict should not cause a stale value
// of DisruptionsAllowed to be written.
//
// In this test, DisruptionsAllowed starts at 2.
// (A) We will delete 1 pod and trigger DisruptionController to set
// DisruptionsAllowed to 1.
// (B) As the DisruptionController attempts this write, we will evict the
// remaining 2 pods and update DisruptionsAllowed to 0. (The real eviction
// handler would allow this because it still sees DisruptionsAllowed=2.)
// (C) If the DisruptionController writes DisruptionsAllowed=1 despite the
// resource conflict error, then there is a bug.
func TestUpdatePDBStatusRetries(t *testing.T) {}
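
// The conflict in step (B) can be injected with a reactor on the fake
// clientset that fails the first PDB update with a ResourceVersion conflict.
// A minimal sketch; the coreClient field name and the import aliases (core
// for k8s.io/client-go/testing, apierrors for
// k8s.io/apimachinery/pkg/api/errors) are assumptions not shown in this
// outline. The PDB name is illustrative.
func sketchInjectConflict(dc *disruptionController) {
	failOnce := true
	dc.coreClient.PrependReactor("update", "poddisruptionbudgets",
		func(action core.Action) (bool, runtime.Object, error) {
			if failOnce {
				failOnce = false
				return true, nil, apierrors.NewConflict(
					policy.Resource("poddisruptionbudgets"), "foobar",
					fmt.Errorf("conflict injected for the test"))
			}
			return false, nil, nil // fall through to the default reactors
		})
}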

func TestInvalidSelectors(t *testing.T) {}

func TestStalePodDisruption(t *testing.T) {}

func TestKeepExistingPDBConditionDuringSync(t *testing.T) {}

// waitForCacheCount blocks until the given cache store has the desired number
// of items in it. This will return an error if the condition is not met after a
// 10-second timeout.
func waitForCacheCount(store cache.Store, n int) error {}
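
// One plausible implementation shape for waitForCacheCount, using the polling
// helpers from k8s.io/apimachinery/pkg/util/wait (the interval choice and the
// wait import are assumptions here):
func waitForCacheCountSketch(store cache.Store, n int) error {
	return wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) {
		return len(store.List()) == n, nil
	})
}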

func verifyEventEmitted(t *testing.T, dc *disruptionController, expectedEvent string) {}