kubernetes/test/integration/daemonset/daemonset_test.go

var zero = int64(0)

func setup(t *testing.T) (context.Context, kubeapiservertesting.TearDownFunc, *daemon.DaemonSetsController, informers.SharedInformerFactory, clientset.Interface) {}

func setupWithServerSetup(t *testing.T, serverSetup framework.TestServerSetup) (context.Context, kubeapiservertesting.TearDownFunc, *daemon.DaemonSetsController, informers.SharedInformerFactory, clientset.Interface) {}

func testLabels() map[string]string {}

func newDaemonSet(name, namespace string) *apps.DaemonSet {}
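
// A minimal sketch of the kind of object newDaemonSet is expected to build, written
// under a hypothetical name (sketchDaemonSet) so it does not collide with the
// declaration above. The container name/image, the OnDelete default strategy, and the
// metav1 import (k8s.io/apimachinery/pkg/apis/meta/v1) are assumptions for illustration.
func sketchDaemonSet(name, namespace string) *apps.DaemonSet {
	two := int32(2)
	return &apps.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      name,
		},
		Spec: apps.DaemonSetSpec{
			RevisionHistoryLimit: &two,
			Selector:             &metav1.LabelSelector{MatchLabels: testLabels()},
			UpdateStrategy: apps.DaemonSetUpdateStrategy{
				Type: apps.OnDeleteDaemonSetStrategyType,
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: testLabels()},
				Spec: v1.PodSpec{
					Containers:                    []v1.Container{{Name: "foo", Image: "bar"}},
					TerminationGracePeriodSeconds: &zero,
				},
			},
		},
	}
}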

func cleanupDaemonSets(t *testing.T, cs clientset.Interface, ds *apps.DaemonSet) {}

func newRollingUpdateStrategy() *apps.DaemonSetUpdateStrategy {}

func newOnDeleteStrategy() *apps.DaemonSetUpdateStrategy {}

func updateStrategies() []*apps.DaemonSetUpdateStrategy {}
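
// A hedged sketch of the two update-strategy constructors and the slice that feeds
// forEachStrategy, under hypothetical sketch* names to avoid colliding with the
// declarations above.
func sketchOnDeleteStrategy() *apps.DaemonSetUpdateStrategy {
	return &apps.DaemonSetUpdateStrategy{Type: apps.OnDeleteDaemonSetStrategyType}
}

func sketchRollingUpdateStrategy() *apps.DaemonSetUpdateStrategy {
	return &apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
}

func sketchUpdateStrategies() []*apps.DaemonSetUpdateStrategy {
	return []*apps.DaemonSetUpdateStrategy{sketchOnDeleteStrategy(), sketchRollingUpdateStrategy()}
}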

func allocatableResources(memory, cpu string) v1.ResourceList {}

func resourcePodSpec(nodeName, memory, cpu string) v1.PodSpec {}
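
// A minimal sketch of the resource helpers, assuming quantities are parsed with
// k8s.io/apimachinery/pkg/api/resource (imported as resource); the pod capacity and
// the container name/image are illustrative assumptions.
func sketchAllocatableResources(memory, cpu string) v1.ResourceList {
	return v1.ResourceList{
		v1.ResourceMemory: resource.MustParse(memory),
		v1.ResourceCPU:    resource.MustParse(cpu),
		v1.ResourcePods:   resource.MustParse("100"),
	}
}

func sketchResourcePodSpec(nodeName, memory, cpu string) v1.PodSpec {
	return v1.PodSpec{
		NodeName: nodeName,
		Containers: []v1.Container{{
			Name:  "foo",
			Image: "bar",
			Resources: v1.ResourceRequirements{
				Requests: sketchAllocatableResources(memory, cpu),
			},
		}},
		TerminationGracePeriodSeconds: &zero,
	}
}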

func newNode(name string, label map[string]string) *v1.Node {}

func addNodes(nodeClient corev1client.NodeInterface, startIndex, numNodes int, label map[string]string, t *testing.T) {}
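
// A hedged sketch of node creation: build Ready nodes carrying the given labels and
// some allocatable pod capacity, then create them through the typed client. The
// naming scheme, the capacity, and the fmt import are assumptions.
func sketchAddNodes(nodeClient corev1client.NodeInterface, startIndex, numNodes int, label map[string]string, t *testing.T) {
	for i := startIndex; i < startIndex+numNodes; i++ {
		node := &v1.Node{
			ObjectMeta: metav1.ObjectMeta{
				Name:   fmt.Sprintf("node-%d", i),
				Labels: label,
			},
			Status: v1.NodeStatus{
				Conditions:  []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}},
				Allocatable: v1.ResourceList{v1.ResourcePods: resource.MustParse("100")},
			},
		}
		if _, err := nodeClient.Create(context.TODO(), node, metav1.CreateOptions{}); err != nil {
			t.Fatalf("failed to create node %q: %v", node.Name, err)
		}
	}
}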

func validateDaemonSetPodsAndMarkReady(
	podClient corev1client.PodInterface,
	podInformer cache.SharedIndexInformer,
	numberPods int,
	t *testing.T,
) {}
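
// A hedged sketch of the "mark ready" half of the helper above: wait for the expected
// number of pods in the informer cache, then flip each one to Running/Ready through
// the status subresource. The real helper also validates pod ownership and node
// assignment, which is elided here; sketchWaitForPodsCreated is defined further below.
func sketchMarkDaemonSetPodsReady(podClient corev1client.PodInterface, podInformer cache.SharedIndexInformer, numberPods int, t *testing.T) {
	if err := sketchWaitForPodsCreated(podInformer, numberPods); err != nil {
		t.Fatalf("pods were not created: %v", err)
	}
	for _, obj := range podInformer.GetIndexer().List() {
		pod := obj.(*v1.Pod).DeepCopy()
		pod.Status.Phase = v1.PodRunning
		pod.Status.Conditions = append(pod.Status.Conditions, v1.PodCondition{
			Type:   v1.PodReady,
			Status: v1.ConditionTrue,
		})
		if _, err := podClient.UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}); err != nil {
			t.Fatalf("failed to mark pod %q ready: %v", pod.Name, err)
		}
	}
}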

func validateDaemonSetPodsActive(
	podClient corev1client.PodInterface,
	podInformer cache.SharedIndexInformer,
	numberPods int,
	t *testing.T,
) {}

func validateDaemonSetPodsTolerations(
	podClient corev1client.PodInterface,
	podInformer cache.SharedIndexInformer,
	expectedTolerations []v1.Toleration,
	prefix string,
	t *testing.T,
) {}

// podUnschedulable returns a condition function that returns true if the given pod
// is reported as unschedulable.
func podUnschedulable(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {}
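
// A minimal sketch of the condition function, assuming it inspects the PodScheduled
// condition directly rather than going through a shared scheduling test utility.
func sketchPodUnschedulable(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
		if err != nil {
			// Tolerate transient errors (including not-found) while waiting.
			return false, nil
		}
		for _, cond := range pod.Status.Conditions {
			if cond.Type == v1.PodScheduled {
				return cond.Status == v1.ConditionFalse && cond.Reason == v1.PodReasonUnschedulable, nil
			}
		}
		return false, nil
	}
}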

// waitForPodUnschedulableWithTimeout waits for a pod to fail scheduling and returns
// an error if it does not become unschedulable within the given timeout.
func waitForPodUnschedulableWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {}

// waitForPodUnschedulable waits for a pod to fail scheduling and returns
// an error if it does not become unschedulable within the timeout duration (30 seconds).
func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error {}
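
// A hedged sketch of the two wait helpers layered on the condition above; the 100ms
// poll interval is an assumption, the 30s default comes from the comment above.
func sketchWaitForPodUnschedulableWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
	return wait.PollImmediate(100*time.Millisecond, timeout, sketchPodUnschedulable(cs, pod.Namespace, pod.Name))
}

func sketchWaitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error {
	return sketchWaitForPodUnschedulableWithTimeout(cs, pod, 30*time.Second)
}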

// waitForPodsCreated waits until the given number of pods has been created.
func waitForPodsCreated(podInformer cache.SharedIndexInformer, num int) error {}
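
// A minimal sketch that polls the pod informer's store until it holds the expected
// number of objects; the interval and timeout values are assumptions.
func sketchWaitForPodsCreated(podInformer cache.SharedIndexInformer, num int) error {
	return wait.Poll(100*time.Millisecond, 10*time.Second, func() (bool, error) {
		return len(podInformer.GetIndexer().List()) == num, nil
	})
}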

func waitForDaemonSetAndControllerRevisionCreated(c clientset.Interface, name string, namespace string) error {}

func hashAndNameForDaemonSet(ds *apps.DaemonSet) (string, string) {}

func validateDaemonSetCollisionCount(dsClient appstyped.DaemonSetInterface, dsName string, expCount int32, t *testing.T) {}

func validateDaemonSetStatus(
	dsClient appstyped.DaemonSetInterface,
	dsName string,
	expectedNumberReady int32,
	t *testing.T) {}
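
// A hedged sketch of the status validation: poll the DaemonSet until Status.NumberReady
// matches the expectation and fail the test on timeout; the poll cadence is an assumption.
func sketchValidateDaemonSetStatus(dsClient appstyped.DaemonSetInterface, dsName string, expectedNumberReady int32, t *testing.T) {
	if err := wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
		ds, err := dsClient.Get(context.TODO(), dsName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return ds.Status.NumberReady == expectedNumberReady, nil
	}); err != nil {
		t.Fatalf("DaemonSet %q never reported %d ready pods: %v", dsName, expectedNumberReady, err)
	}
}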

func validateUpdatedNumberScheduled(
	ctx context.Context,
	dsClient appstyped.DaemonSetInterface,
	dsName string,
	expectedUpdatedNumberScheduled int32,
	t *testing.T) {}

func updateDS(t *testing.T, dsClient appstyped.DaemonSetInterface, dsName string, updateFunc func(*apps.DaemonSet)) *apps.DaemonSet {}
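
// A minimal sketch of a conflict-tolerant update loop, assuming
// k8s.io/client-go/util/retry is imported as retry.
func sketchUpdateDS(t *testing.T, dsClient appstyped.DaemonSetInterface, dsName string, updateFunc func(*apps.DaemonSet)) *apps.DaemonSet {
	var ds *apps.DaemonSet
	if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		newDS, err := dsClient.Get(context.TODO(), dsName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		updateFunc(newDS)
		ds, err = dsClient.Update(context.TODO(), newDS, metav1.UpdateOptions{})
		return err
	}); err != nil {
		t.Fatalf("failed to update DaemonSet %q: %v", dsName, err)
	}
	return ds
}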

func forEachStrategy(t *testing.T, tf func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy)) {}
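
// A hedged sketch: run the supplied test body once per update strategy as a named
// subtest, using the sketch constructors above; the fmt import is assumed.
func sketchForEachStrategy(t *testing.T, tf func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy)) {
	for _, strategy := range sketchUpdateStrategies() {
		strategy := strategy
		t.Run(fmt.Sprintf("%s strategy", strategy.Type), func(t *testing.T) {
			tf(t, strategy)
		})
	}
}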

func TestOneNodeDaemonLaunchesPod(t *testing.T) {}

func TestSimpleDaemonSetLaunchesPods(t *testing.T) {}
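
// A hedged sketch of the overall shape these launch tests tend to follow: start the
// control plane, informers, and DaemonSet controller via setup, create a namespace,
// a DaemonSet, and a couple of nodes, then expect one ready pod per node. The
// framework.CreateNamespaceOrDie/DeleteNamespaceOrDie helpers and the context-based
// dc.Run signature are assumptions; the sketch* helpers are the illustrative versions
// defined elsewhere in this file.
func sketchSimpleDaemonSetLaunchesPods(t *testing.T) {
	sketchForEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
		ctx, closeFn, dc, informers, clientset := setup(t)
		defer closeFn()

		ns := framework.CreateNamespaceOrDie(clientset, "simple-daemonset-test", t)
		defer framework.DeleteNamespaceOrDie(clientset, ns, t)

		dsClient := clientset.AppsV1().DaemonSets(ns.Name)
		podClient := clientset.CoreV1().Pods(ns.Name)
		nodeClient := clientset.CoreV1().Nodes()
		podInformer := informers.Core().V1().Pods().Informer()

		informers.Start(ctx.Done())
		go dc.Run(ctx, 2)

		ds := sketchDaemonSet("foo", ns.Name)
		ds.Spec.UpdateStrategy = *strategy
		if _, err := dsClient.Create(ctx, ds, metav1.CreateOptions{}); err != nil {
			t.Fatalf("failed to create DaemonSet: %v", err)
		}
		defer cleanupDaemonSets(t, clientset, ds)

		sketchAddNodes(nodeClient, 0, 2, nil, t)

		sketchMarkDaemonSetPodsReady(podClient, podInformer, 2, t)
		sketchValidateDaemonSetStatus(dsClient, ds.Name, 2, t)
	})
}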

func TestSimpleDaemonSetRestartsPodsOnTerminalPhase(t *testing.T) {}

func TestDaemonSetWithNodeSelectorLaunchesPods(t *testing.T) {}

func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {}

// TestInsufficientCapacityNode tests that the DaemonSet should create Pods for all the
// nodes regardless of the resources available on them, and that kube-scheduler should
// not schedule Pods onto nodes with insufficient resources.
func TestInsufficientCapacityNode(t *testing.T) {}

// TestLaunchWithHashCollision tests that a DaemonSet can be updated even if there is a
// hash collision with an existing ControllerRevision.
func TestLaunchWithHashCollision(t *testing.T) {}

// TestDSCUpdatesPodLabelAfterDedupCurHistories tests that the DaemonSet controller updates the
// pod's label after "DedupCurHistories". The scenario is:
// 1. Create another ControllerRevision owned by the DaemonSet, but with a higher revision number and a different hash.
// 2. Add a node to trigger a controller sync.
// 3. The DaemonSet controller is expected to PATCH the existing pod's label with the new hash and to
//    delete the old ControllerRevision once the update finishes.
func TestDSCUpdatesPodLabelAfterDedupCurHistories(t *testing.T) {}

// TestTaintedNode tests that a tainted node is not expected to have Pods scheduled.
func TestTaintedNode(t *testing.T) {}

// TestUnschedulableNodeDaemonDoesLaunchPod tests that DaemonSet Pods can still be scheduled
// onto nodes that are marked unschedulable.
func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {}

func TestUpdateStatusDespitePodCreationFailure(t *testing.T) {}

func TestDaemonSetRollingUpdateWithTolerations(t *testing.T) {}