// Source: kubernetes/test/e2e/apimachinery/garbage_collector.go

// estimateMaximumPods estimates how many pods the cluster can handle
// with some wiggle room, to prevent pods being unable to schedule due
// to max pod constraints.
//
// NOTE(review): the body is an empty stub ({}) although the signature
// declares an int32 result — this cannot compile ("missing return").
// The implementation appears to have been stripped from this listing;
// presumably it summed the nodes' allocatable pod capacity and clamped
// the result to [min, max] — restore from the upstream e2e source.
func estimateMaximumPods(ctx context.Context, c clientset.Interface, min, max int32) int32 {}

// getForegroundOptions returns DeleteOptions requesting foreground
// cascading deletion: the garbage collector deletes all dependents
// before the owner object itself is removed.
func getForegroundOptions() metav1.DeleteOptions {
	policy := metav1.DeletePropagationForeground
	return metav1.DeleteOptions{PropagationPolicy: &policy}
}

// getBackgroundOptions returns DeleteOptions requesting background
// cascading deletion: the owner is deleted immediately and the garbage
// collector removes its dependents afterwards.
func getBackgroundOptions() metav1.DeleteOptions {
	policy := metav1.DeletePropagationBackground
	return metav1.DeleteOptions{PropagationPolicy: &policy}
}

// getOrphanOptions returns DeleteOptions requesting orphan propagation:
// the owner is deleted but its dependents are left in place with their
// ownerReferences removed.
func getOrphanOptions() metav1.DeleteOptions {
	policy := metav1.DeletePropagationOrphan
	return metav1.DeleteOptions{PropagationPolicy: &policy}
}

// zero is an addressable int64 zero, handy where the API needs a *int64
// (e.g. a zero grace period in DeleteOptions — TODO confirm callers).
// The original declaration had no type or initializer (syntax error).
var zero = int64(0)

// lablecount is a monotonically increasing counter used by getUniqLabel
// to make each generated label key/value pair unique. The upstream
// spelling ("lable") is kept because callers reference it by this name.
// The original declaration had no type or initializer (syntax error).
var lablecount = int64(0)

// gcInformerResyncRetryTimeout bounds how long the tests wait for the
// garbage collector's informers to resync before giving up and retrying.
// The original declaration had no value (syntax error); one minute
// matches the upstream kubernetes e2e source — TODO confirm.
const gcInformerResyncRetryTimeout = 1 * time.Minute

// replicaSyncTimeout bounds how long the tests wait for a replication
// controller / replica set to reach its desired replica count.
// The original declaration had no value (syntax error); two minutes
// matches the upstream kubernetes e2e source — TODO confirm.
const replicaSyncTimeout = 2 * time.Minute

// getPodTemplateSpec builds a v1.PodTemplateSpec carrying the given labels.
// NOTE(review): empty stub ({}) with a declared return value — will not
// compile ("missing return"); implementation stripped from this listing.
func getPodTemplateSpec(labels map[string]string) v1.PodTemplateSpec {}

// newOwnerDeployment constructs a Deployment object (to act as a GC owner)
// with the given name and selector/template labels in the framework's
// test namespace — presumably; TODO confirm against the upstream source.
// NOTE(review): empty stub ({}) with a declared return value — will not
// compile ("missing return"); implementation stripped from this listing.
func newOwnerDeployment(f *framework.Framework, deploymentName string, labels map[string]string) *appsv1.Deployment {}

// newOwnerRC constructs a ReplicationController object (to act as a GC
// owner) with the given name, replica count, and labels.
// NOTE(review): empty stub ({}) with a declared return value — will not
// compile ("missing return"); implementation stripped from this listing.
func newOwnerRC(f *framework.Framework, name string, replicas int32, labels map[string]string) *v1.ReplicationController {}

// newGCPod constructs a minimal Pod object with the given name for use as
// a garbage-collection dependent in these tests.
// NOTE(review): empty stub ({}) with a declared return value — will not
// compile ("missing return"); implementation stripped from this listing.
func newGCPod(name string) *v1.Pod {}

// verifyRemainingObjects verifies that the number of remaining objects of
// each kind named in `objects` matches the expected count, returning true
// when they all match. It returns an error if the communication with the
// API server fails.
// NOTE(review): empty stub ({}) with declared return values — will not
// compile ("missing return"); implementation stripped from this listing.
func verifyRemainingObjects(ctx context.Context, f *framework.Framework, objects map[string]int) (bool, error) {}

// gatherMetrics collects metrics for the test — presumably garbage
// collector metrics from the controller manager; TODO confirm.
// NOTE(review): the body is an empty no-op ({}); since the function
// returns nothing this compiles, but the implementation appears to have
// been stripped from this listing.
func gatherMetrics(ctx context.Context, f *framework.Framework) {}

// newCronJob constructs a CronJob object with the given name and cron
// schedule for use as a GC owner in these tests.
// NOTE(review): empty stub ({}) with a declared return value — will not
// compile ("missing return"); implementation stripped from this listing.
func newCronJob(name, schedule string) *batchv1.CronJob {}

// getUniqLabel returns a UniqLabel based on labeLkey and labelvalue.
func getUniqLabel(labelkey, labelvalue string) map[string]string {}

// NOTE(review): incomplete declaration — `var _` has no type or
// initializer and is a syntax error. Presumably this was the test-suite
// registration (e.g. `var _ = SIGDescribe(...)`) whose initializer was
// stripped from this listing; restore it from the upstream source.
var _

// waitForReplicas blocks until the given ReplicationController reports
// its desired number of replicas (bounded by replicaSyncTimeout —
// presumably; TODO confirm).
// NOTE(review): the body is an empty no-op ({}); since the function
// returns nothing this compiles, but the implementation appears to have
// been stripped from this listing.
// TODO(106575): Migrate away from generic polling function.
func waitForReplicas(ctx context.Context, rc *v1.ReplicationController, rcClient clientv1.ReplicationControllerInterface) {}