kubernetes/pkg/controller/replicaset/replica_set_test.go

var informerSyncTimeout

func testNewReplicaSetControllerFromClient(tb testing.TB, client clientset.Interface, stopCh chan struct{}, burstReplicas int) (*ReplicaSetController, informers.SharedInformerFactory) {}

func skipListerFunc(verb string, url url.URL) bool {}

var alwaysReady

func newReplicaSet(replicas int, selectorMap map[string]string) *apps.ReplicaSet {}

// create a pod with the given phase for the given rs (same selectors and namespace)
func newPod(name string, rs *apps.ReplicaSet, status v1.PodPhase, lastTransitionTime *metav1.Time, properlyOwned bool) *v1.Pod {}
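// Illustrative sketch, not the file's actual body: one plausible way newPod can
// build such a pod, assuming the usual imports (apps "k8s.io/api/apps/v1",
// v1 "k8s.io/api/core/v1", metav1 "k8s.io/apimachinery/pkg/apis/meta/v1").
// The Ready-condition handling and the owner-reference shape are assumptions.
func newPod(name string, rs *apps.ReplicaSet, status v1.PodPhase, lastTransitionTime *metav1.Time, properlyOwned bool) *v1.Pod {
	var conditions []v1.PodCondition
	if status == v1.PodRunning {
		// Running pods are also reported Ready so they count as active replicas.
		cond := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
		if lastTransitionTime != nil {
			cond.LastTransitionTime = *lastTransitionTime
		}
		conditions = append(conditions, cond)
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: rs.Namespace,
			Labels:    rs.Spec.Selector.MatchLabels, // same selector labels as the rs
		},
		Status: v1.PodStatus{Phase: status, Conditions: conditions},
	}
	if properlyOwned {
		// Mark the rs as the pod's managing controller.
		pod.OwnerReferences = []metav1.OwnerReference{
			*metav1.NewControllerRef(rs, apps.SchemeGroupVersion.WithKind("ReplicaSet")),
		}
	}
	return pod
}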

// create count pods with the given phase for the given ReplicaSet (same selectors and namespace), and add them to the store.
func newPodList(store cache.Store, count int, status v1.PodPhase, labelMap map[string]string, rs *apps.ReplicaSet, name string) *v1.PodList {}

// processSync initiates a sync via processNextWorkItem() to test behavior that
// depends on both functions (such as re-queueing on sync error).
func processSync(ctx context.Context, rsc *ReplicaSetController, key string) error {}
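// Illustrative sketch, not the file's actual body: one way processSync can
// capture the sync error while still driving the work queue. It assumes the
// controller's syncHandler field and queue are reachable from the test package,
// which is how ReplicaSetController is wired in replica_set.go.
func processSync(ctx context.Context, rsc *ReplicaSetController, key string) error {
	// Swap in a syncHandler that records the error returned by the real one.
	oldSyncHandler := rsc.syncHandler
	defer func() { rsc.syncHandler = oldSyncHandler }()

	var syncErr error
	rsc.syncHandler = func(ctx context.Context, key string) error {
		syncErr = oldSyncHandler(ctx, key)
		return syncErr
	}
	// Enqueue the key and run a single worker iteration, so requeue-on-error
	// behavior goes through the same path as in production.
	rsc.queue.Add(key)
	rsc.processNextWorkItem(ctx)
	return syncErr
}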

func validateSyncReplicaSet(fakePodControl *controller.FakePodControl, expectedCreates, expectedDeletes, expectedPatches int) error {}

func TestSyncReplicaSetDoesNothing(t *testing.T) {}

func TestDeleteFinalStateUnknown(t *testing.T) {}

// Tell the rs to create 100 replicas, but simulate a limit (like a quota limit)
// of 10, and verify that the rs doesn't make 100 create calls per sync pass
func TestSyncReplicaSetCreateFailures(t *testing.T) {}
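// Why only a handful of create calls happen: pod creation goes through a
// slow-start batcher that starts small and doubles the batch only while calls
// keep succeeding, so a hard failure such as a quota limit stops the growth
// after the first failing batch. Below is a simplified, sequential sketch of
// that idea (an assumption about shape; the real slowStartBatch in
// replica_set.go runs each batch concurrently). Requires Go 1.21+ for min.
func slowStartSketch(count, initialBatchSize int, fn func() error) (successes int, err error) {
	remaining := count
	batch := min(initialBatchSize, remaining)
	for batch > 0 {
		for i := 0; i < batch; i++ {
			if err := fn(); err != nil {
				// Bail out on failure instead of issuing the remaining calls.
				return successes, err
			}
			successes++
		}
		remaining -= batch
		// Double the batch only after a fully successful batch.
		batch = min(2*batch, remaining)
	}
	return successes, nil
}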

func TestSyncReplicaSetDormancy(t *testing.T) {}

func TestGetReplicaSetsWithSameController(t *testing.T) {}

func BenchmarkGetReplicaSetsWithSameController(b *testing.B) {}

func TestPodControllerLookup(t *testing.T) {}

func TestRelatedPodsLookup(t *testing.T) {}

func TestWatchControllers(t *testing.T) {}

func TestWatchPods(t *testing.T) {}

func TestUpdatePods(t *testing.T) {}

func TestControllerUpdateRequeue(t *testing.T) {}

func TestControllerUpdateStatusWithFailure(t *testing.T) {}

// TODO: This test is too hairy for a unittest. It should be moved to an E2E suite.
func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) {}

func TestControllerBurstReplicas(t *testing.T) {}

type FakeRSExpectations

func (fe FakeRSExpectations) SatisfiedExpectations(logger klog.Logger, controllerKey string) bool {}

// TestRSSyncExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations.
func TestRSSyncExpectations(t *testing.T) {}
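// Illustrative sketch, not the file's actual definition: the likely shape of
// FakeRSExpectations (the field names here are assumptions). Embedding
// *controller.ControllerExpectations keeps the full expectations interface,
// while the expSatisfied hook fires at the exact moment SatisfiedExpectations
// is consulted. TestRSSyncExpectations can use that hook to slip one more pod
// into the informer store between "count active pods" and "check expectations"
// and then assert that the late-arriving pod does not skew what the sync does.
type FakeRSExpectations struct {
	*controller.ControllerExpectations
	satisfied    bool   // value to report back to the controller
	expSatisfied func() // hook invoked when expectations are checked
}

func (fe FakeRSExpectations) SatisfiedExpectations(logger klog.Logger, controllerKey string) bool {
	fe.expSatisfied()
	return fe.satisfied
}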

func TestDeleteControllerAndExpectations(t *testing.T) {}

func TestExpectationsOnRecreate(t *testing.T) {}

// shuffle returns a new, shuffled copy of the given list of ReplicaSets.
func shuffle(controllers []*apps.ReplicaSet) []*apps.ReplicaSet {}
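// Illustrative sketch, not the file's actual body: a math/rand permutation is
// all the helper needs in order to randomize the order in which the ReplicaSets
// are fed to the test.
func shuffle(controllers []*apps.ReplicaSet) []*apps.ReplicaSet {
	shuffled := make([]*apps.ReplicaSet, len(controllers))
	for i, j := range rand.Perm(len(controllers)) {
		// Place input element j at output position i.
		shuffled[i] = controllers[j]
	}
	return shuffled
}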

func TestOverlappingRSs(t *testing.T) {}

func TestDeletionTimestamp(t *testing.T) {}

// setupManagerWithGCEnabled creates a RS manager with a fakePodControl
func setupManagerWithGCEnabled(t *testing.T, stopCh chan struct{}, objs ...runtime.Object) (manager *ReplicaSetController, fakePodControl *controller.FakePodControl, informers informers.SharedInformerFactory) {}

func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {}

func TestPatchPodFails(t *testing.T) {}

// RS controller shouldn't adopt or create more pods if the rs is about to be
// deleted.
func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {}
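// Illustrative sketch, not the test's actual body, of the scenario described
// above, reusing the helpers listed earlier in this file; the informer wiring
// and the sync call are assumptions about how the pieces fit together.
func beingDeletedScenarioSketch(t *testing.T) {
	labelMap := map[string]string{"foo": "bar"}
	rs := newReplicaSet(2, labelMap)
	now := metav1.Now()
	rs.DeletionTimestamp = &now // the rs is already being deleted

	stopCh := make(chan struct{})
	defer close(stopCh)
	manager, fakePodControl, informers := setupManagerWithGCEnabled(t, stopCh, rs)
	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)

	// A matching but unowned pod that would normally be adopted.
	pod := newPod("pod1", rs, v1.PodRunning, nil, false)
	informers.Core().V1().Pods().Informer().GetIndexer().Add(pod)

	// One sync pass: the controller must neither adopt (patch) the pod nor
	// create a second replica while the rs carries a deletion timestamp.
	if err := manager.syncReplicaSet(context.TODO(), rs.Namespace+"/"+rs.Name); err != nil {
		t.Fatal(err)
	}
	if err := validateSyncReplicaSet(fakePodControl, 0, 0, 0); err != nil {
		t.Fatal(err)
	}
}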

func TestDoNotAdoptOrCreateIfBeingDeletedRace(t *testing.T) {}

var imagePullBackOff

var condImagePullBackOff

var condReplicaFailure

var condReplicaFailure2

var status

func TestGetCondition(t *testing.T) {}

func TestSetCondition(t *testing.T) {}

func TestRemoveCondition(t *testing.T) {}

func TestSlowStartBatch(t *testing.T) {}

func TestGetPodsToDelete(t *testing.T) {}

func TestGetPodKeys(t *testing.T) {}