kubernetes/test/integration/job/job_test.go

const waitInterval

const fastPodFailureBackoff

const sleepDurationForControllerLatency

const restConfigQPS

const restConfigBurst

type metricLabelsWithValue

func validateCounterMetric(ctx context.Context, t *testing.T, counterVec *basemetrics.CounterVec, wantMetric metricLabelsWithValue) {}

func validateTerminatedPodsTrackingFinalizerMetric(ctx context.Context, t *testing.T, want int) {}

// TestJobPodFailurePolicyWithFailedPodDeletedDuringControllerRestart verifies that the job is properly marked as Failed
// in a scenario where the job controller crashes between removing pod finalizers and marking the job as Failed (based on
// the pod failure policy). After the finalizer for the failed pod is removed, we delete the failed pod to simulate what
// PodGC would do. Then, the test spawns a second instance of the controller to check that it picks up the job state
// properly and marks the job as Failed, even though the pod that triggered the pod failure policy is already deleted.
// Note: this scenario requires the use of finalizers. Without finalizers there is no guarantee a failed pod would be
// checked against the pod failure policy rules before its removal by PodGC.
func TestJobPodFailurePolicyWithFailedPodDeletedDuringControllerRestart(t *testing.T) {}

// TestJobPodFailurePolicy tests handling of pod failures with respect to the
// configured pod failure policy rules.
func TestJobPodFailurePolicy(t *testing.T) {}
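// newJobWithPodFailurePolicy is a hypothetical helper (an illustrative sketch, not used by the
// tests above) showing the kind of spec these pod failure policy tests exercise: failures caused
// by node disruption are ignored, while an exit code of 42 fails the whole Job. The container
// name, image and numeric values are assumptions for illustration only.
func newJobWithPodFailurePolicy() *batchv1.Job {
	backoffLimit := int32(6)
	return &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "job-with-pod-failure-policy"},
		Spec: batchv1.JobSpec{
			BackoffLimit: &backoffLimit,
			PodFailurePolicy: &batchv1.PodFailurePolicy{
				Rules: []batchv1.PodFailurePolicyRule{
					{
						// Do not count failures caused by node disruption against the backoff limit.
						Action: batchv1.PodFailurePolicyActionIgnore,
						OnPodConditions: []batchv1.PodFailurePolicyOnPodConditionsPattern{
							{Type: v1.DisruptionTarget, Status: v1.ConditionTrue},
						},
					},
					{
						// Fail the Job as soon as any container exits with code 42.
						Action: batchv1.PodFailurePolicyActionFailJob,
						OnExitCodes: &batchv1.PodFailurePolicyOnExitCodesRequirement{
							Operator: batchv1.PodFailurePolicyOnExitCodesOpIn,
							Values:   []int32{42},
						},
					},
				},
			},
			Template: v1.PodTemplateSpec{
				Spec: v1.PodSpec{
					// podFailurePolicy requires restartPolicy: Never.
					RestartPolicy: v1.RestartPolicyNever,
					Containers:    []v1.Container{{Name: "main", Image: "busybox"}},
				},
			},
		},
	}
}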

// TestSuccessPolicy tests handling of a job and its pods when
// successPolicy is used.
func TestSuccessPolicy(t *testing.T) {}
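// newJobWithSuccessPolicy is a hypothetical helper sketching the shape of Job these successPolicy
// tests create: an Indexed Job that is declared successful as soon as index 0 succeeds, without
// waiting for the remaining indexes. The values are assumptions for illustration only.
func newJobWithSuccessPolicy() *batchv1.Job {
	parallelism := int32(2)
	completions := int32(2)
	completionMode := batchv1.IndexedCompletion
	succeededIndexes := "0"
	return &batchv1.Job{
		Spec: batchv1.JobSpec{
			Parallelism: &parallelism,
			Completions: &completions,
			// successPolicy requires the Indexed completion mode.
			CompletionMode: &completionMode,
			SuccessPolicy: &batchv1.SuccessPolicy{
				Rules: []batchv1.SuccessPolicyRule{
					{SucceededIndexes: &succeededIndexes},
				},
			},
			// Pod template omitted for brevity.
		},
	}
}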

// TestSuccessPolicy_ReEnabling tests handling of successful pods when
// re-enabling the JobSuccessPolicy feature.
func TestSuccessPolicy_ReEnabling(t *testing.T) {}
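// toggleJobSuccessPolicyForTest is a hypothetical sketch of the feature-gate toggling pattern
// that re-enabling tests like this rely on, assuming the usual imports (featuregatetesting
// "k8s.io/component-base/featuregate/testing", utilfeature "k8s.io/apiserver/pkg/util/feature"
// and "k8s.io/kubernetes/pkg/features").
func toggleJobSuccessPolicyForTest(t *testing.T, enabled bool) {
	// Flip the gate for the remainder of the test; the original value is restored on test cleanup.
	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobSuccessPolicy, enabled)
}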

// TestBackoffLimitPerIndex_DelayedPodDeletion tests that pod deletion is delayed
// until the replacement pod is created, so that the replacement pod has the
// index-failure-count annotation bumped, when BackoffLimitPerIndex is used.
func TestBackoffLimitPerIndex_DelayedPodDeletion(t *testing.T) {}
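// indexFailureCountOf is a hypothetical sketch of reading the annotation referenced above;
// batchv1.JobIndexFailureCountAnnotation is "batch.kubernetes.io/job-index-failure-count".
// The getIndexFailureCount helper below does the same, returning the value as an int.
func indexFailureCountOf(pod *v1.Pod) string {
	return pod.Annotations[batchv1.JobIndexFailureCountAnnotation]
}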

// TestBackoffLimitPerIndex_Reenabling tests handling of pod failures when
// reenabling the BackoffLimitPerIndex feature.
func TestBackoffLimitPerIndex_Reenabling(t *testing.T) {}

// TestBackoffLimitPerIndex_JobPodsCreatedWithExponentialBackoff tests that the
// pods are recreated with an exponential backoff delay computed independently
// per index. Scenario:
// - fail index 0
// - fail index 0
// - fail index 1
// - succeed index 0
// - fail index 1
// - succeed index 1
func TestBackoffLimitPerIndex_JobPodsCreatedWithExponentialBackoff(t *testing.T) {}
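// expectedPerIndexBackoff is a hypothetical helper illustrating the expectation behind the
// scenario above, assuming the controller's usual exponential backoff: the delay before retrying
// an index doubles with every failure of that index, and failures of other indexes do not
// contribute.
func expectedPerIndexBackoff(base time.Duration, failuresForIndex int) time.Duration {
	if failuresForIndex < 1 {
		// No failures for this index yet, so the pod is created without delay.
		return 0
	}
	return base * time.Duration(1<<(failuresForIndex-1))
}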

// TestDelayTerminalPhaseCondition tests the fix for the Job controller to delay
// setting the terminal phase conditions (Failed and Complete) until all Pods
// are terminal. Until then, the fate of the Job is indicated by the interim Job
// conditions FailureTarget or SuccessCriteriaMet.
func TestDelayTerminalPhaseCondition(t *testing.T) {}
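// jobFateDecided is a hypothetical helper illustrating the condition ordering described above:
// the interim FailureTarget / SuccessCriteriaMet conditions may be set while some Pods are still
// terminating, whereas the terminal Failed / Complete conditions are only added once all Pods
// are terminal.
func jobFateDecided(job *batchv1.Job) (decided, terminal bool) {
	for _, c := range job.Status.Conditions {
		if c.Status != v1.ConditionTrue {
			continue
		}
		switch c.Type {
		case batchv1.JobFailureTarget, batchv1.JobSuccessCriteriaMet:
			decided = true
		case batchv1.JobFailed, batchv1.JobComplete:
			decided = true
			terminal = true
		}
	}
	return decided, terminal
}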

// TestBackoffLimitPerIndex tests handling of a job and its pods when
// backoff limit per index is used.
func TestBackoffLimitPerIndex(t *testing.T) {}
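// newJobWithBackoffLimitPerIndex is a hypothetical helper sketching the shape of Job these
// per-index backoff tests create: an Indexed Job where every index has its own failure budget
// and the Job fails once the number of failed indexes exceeds maxFailedIndexes. The numbers are
// assumptions for illustration only.
func newJobWithBackoffLimitPerIndex() *batchv1.Job {
	parallelism := int32(4)
	completions := int32(4)
	completionMode := batchv1.IndexedCompletion
	backoffLimitPerIndex := int32(1)
	maxFailedIndexes := int32(1)
	return &batchv1.Job{
		Spec: batchv1.JobSpec{
			Parallelism: &parallelism,
			Completions: &completions,
			// backoffLimitPerIndex requires the Indexed completion mode.
			CompletionMode:       &completionMode,
			BackoffLimitPerIndex: &backoffLimitPerIndex,
			MaxFailedIndexes:     &maxFailedIndexes,
			// Pod template omitted for brevity.
		},
	}
}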

// TestManagedBy verifies the Job controller correctly makes a decision to
// reconcile or skip reconciliation of the Job depending on the Job's managedBy
// field, and the enablement of the JobManagedBy feature gate.
func TestManagedBy(t *testing.T) {}
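// newJobManagedByExternalController is a hypothetical helper sketching a Job that opts out of
// the built-in controller via the managedBy field; the controller name is an assumption for
// illustration only.
func newJobManagedByExternalController() *batchv1.Job {
	managedBy := "example.com/custom-job-controller"
	return &batchv1.Job{
		Spec: batchv1.JobSpec{
			// With JobManagedBy enabled, the built-in Job controller only reconciles Jobs whose
			// managedBy field is unset or equal to "kubernetes.io/job-controller".
			ManagedBy: &managedBy,
			// Pod template omitted for brevity.
		},
	}
}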

// TestManagedBy_Reenabling verifies how the Job controller handles a Job with a
// custom value of the managedBy field as the JobManagedBy feature gate is
// disabled and re-enabled. First, while the feature gate is enabled,
// synchronization of the Job is skipped; when the gate is disabled,
// synchronization starts; and it stops again once the feature gate is re-enabled.
func TestManagedBy_Reenabling(t *testing.T) {}

// TestManagedBy_RecreatedJob verifies that the Job controller skips
// reconciliation of a recreated job with the managedBy field set when there is
// still a pending sync queued for the previous job.
// In this scenario we first create a job without the managedBy field and mark
// its pod as succeeded. This queues the Job object sync with a 1s delay. Then,
// without waiting for the Job status update, we delete and recreate the job under
// the same name, but with the managedBy field set. The queued sync starts to
// execute against the new job, but is skipped.
func TestManagedBy_RecreatedJob(t *testing.T) {}

// TestManagedBy_UsingReservedJobFinalizers documents the behavior of the Job
// controller when a job with a custom value of the managedBy field creates
// pods with the batch.kubernetes.io/job-tracking finalizer. The built-in controller
// should not remove the finalizer. Note that the use of this finalizer in jobs
// managed by external controllers is discouraged, but may happen
// when one forks the controller and does not rename the finalizer.
func TestManagedBy_UsingReservedJobFinalizers(t *testing.T) {}
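// podWithReservedJobTrackingFinalizer is a hypothetical helper sketching a Pod carrying the
// reserved finalizer discussed above, as a forked external controller might create it; the Pod
// name is an assumption for illustration only.
func podWithReservedJobTrackingFinalizer() *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "external-controller-pod",
			// batchv1.JobTrackingFinalizer is the reserved "batch.kubernetes.io/job-tracking" finalizer.
			Finalizers: []string{batchv1.JobTrackingFinalizer},
		},
	}
}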

func getIndexFailureCount(p *v1.Pod) (int, error) {}

func completionModePtr(cm batchv1.CompletionMode) *batchv1.CompletionMode {}

// TestNonParallelJob tests a Job that only executes one Pod. The test
// recreates the Job controller at some points to make sure a new controller
// is able to pick it up.
func TestNonParallelJob(t *testing.T) {}

func TestParallelJob(t *testing.T) {}

func TestParallelJobChangingParallelism(t *testing.T) {}

func TestParallelJobWithCompletions(t *testing.T) {}

func TestIndexedJob(t *testing.T) {}

func TestJobPodReplacementPolicy(t *testing.T) {}

// TestJobPodReplacementPolicyFeatureToggling tests the feature enable -> disable -> enable path
// for PodReplacementPolicy. We verify that the Failed case works as expected when the feature is
// turned on, that disabling it reverts to the previous behavior, and that enabling it again
// matches the original Failed case.
func TestJobPodReplacementPolicyFeatureToggling(t *testing.T) {}
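// newJobWithPodReplacementPolicy is a hypothetical helper sketching the spec field exercised by
// the two tests above: with podReplacementPolicy: Failed, replacement Pods are created only once
// a failed Pod reaches a terminal phase, rather than as soon as it starts terminating.
func newJobWithPodReplacementPolicy() *batchv1.Job {
	policy := batchv1.Failed
	return &batchv1.Job{
		Spec: batchv1.JobSpec{
			PodReplacementPolicy: &policy,
			// Pod template omitted for brevity.
		},
	}
}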

func TestElasticIndexedJob(t *testing.T) {}

// BenchmarkLargeIndexedJob benchmarks the completion of an Indexed Job.
// We expect that large jobs are more commonly used as Indexed, and they are
// also faster to track, as they need fewer API calls.
func BenchmarkLargeIndexedJob(b *testing.B) {}

// BenchmarkLargeFailureHandling benchmarks the handling of numerous pod failures
// of an Indexed Job. We set a minimal backoff delay to make the job controller
// performance comparable for indexed jobs with a global backoffLimit and those
// with a per-index backoffLimit, despite the different patterns of handling failures.
func BenchmarkLargeFailureHandling(b *testing.B) {}

// cleanUp removes the specified pod finalizers, then deletes all pods and the job.
func cleanUp(ctx context.Context, clientSet clientset.Interface, jobObj *batchv1.Job, podFinalizersToRemove []string) error {}

func TestOrphanPodsFinalizersClearedWithGC(t *testing.T) {}

func TestFinalizersClearedWhenBackoffLimitExceeded(t *testing.T) {}

func TestJobPodsCreatedWithExponentialBackoff(t *testing.T) {}

func validateExpotentialBackoffDelay(t *testing.T, defaultPodFailureBackoff time.Duration, pods []*v1.Pod) {}

// TestJobFailedWithInterrupts tests that a job where one pod fails and the rest
// succeed is marked as Failed, even if the controller fails in the middle.
func TestJobFailedWithInterrupts(t *testing.T) {}

func validateNoOrphanPodsWithFinalizers(ctx context.Context, t *testing.T, clientSet clientset.Interface, jobObj *batchv1.Job) {}

func TestOrphanPodsFinalizersClearedOnRestart(t *testing.T) {}

func TestSuspendJob(t *testing.T) {}

func TestSuspendJobControllerRestart(t *testing.T) {}

func TestNodeSelectorUpdate(t *testing.T) {}

type podsByStatus

func validateJobsPodsStatusOnly(ctx context.Context, t testing.TB, clientSet clientset.Interface, jobObj *batchv1.Job, desired podsByStatus) {}

func validateJobsPodsStatusOnlyWithTimeout(ctx context.Context, t testing.TB, clientSet clientset.Interface, jobObj *batchv1.Job, desired podsByStatus, timeout time.Duration) {}

func validateJobStatus(ctx context.Context, t testing.TB, clientSet clientset.Interface, jobObj *batchv1.Job, wantStatus batchv1.JobStatus) {}

func validateJobPodsStatus(ctx context.Context, t testing.TB, clientSet clientset.Interface, jobObj *batchv1.Job, desired podsByStatus) {}

func getJobPods(ctx context.Context, t *testing.T, clientSet clientset.Interface, jobObj *batchv1.Job, filter func(v1.PodStatus) bool) ([]*v1.Pod, error) {}

func validateFinishedPodsNoFinalizer(ctx context.Context, t *testing.T, clientSet clientset.Interface, jobObj *batchv1.Job) {}

// validateIndexedJobPods validates indexes and hostname of
// active and completed Pods of an Indexed Job.
// Call it after validateJobPodsStatus.
func validateIndexedJobPods(ctx context.Context, t *testing.T, clientSet clientset.Interface, jobObj *batchv1.Job, wantActive sets.Set[int], gotCompleted string, wantFailed *string) {}

func waitForEvent(ctx context.Context, events watch.Interface, uid types.UID, reason string) error {}

func getJobConditionStatus(ctx context.Context, job *batchv1.Job, cType batchv1.JobConditionType) v1.ConditionStatus {}

func validateJobFailed(ctx context.Context, t *testing.T, clientSet clientset.Interface, jobObj *batchv1.Job) {}

func validateJobComplete(ctx context.Context, t testing.TB, clientSet clientset.Interface, jobObj *batchv1.Job) {}

func validateJobCondition(ctx context.Context, t testing.TB, clientSet clientset.Interface, jobObj *batchv1.Job, cond batchv1.JobConditionType) {}

func setJobPodsPhase(ctx context.Context, clientSet clientset.Interface, jobObj *batchv1.Job, phase v1.PodPhase, cnt int) (int, error) {}

func setJobPodsReady(ctx context.Context, clientSet clientset.Interface, jobObj *batchv1.Job, cnt int) (int, error) {}

func updateJobPodsStatus(ctx context.Context, clientSet clientset.Interface, jobObj *batchv1.Job, op func(*v1.Pod) bool, cnt int) (int, error) {}

func updatePodStatuses(ctx context.Context, clientSet clientset.Interface, updates []v1.Pod) (int, error) {}

func setJobPhaseForIndex(ctx context.Context, clientSet clientset.Interface, jobObj *batchv1.Job, phase v1.PodPhase, ix int) error {}

func getActivePodForIndex(ctx context.Context, clientSet clientset.Interface, jobObj *batchv1.Job, ix int) (*v1.Pod, error) {}

func getJobPodForIndex(ctx context.Context, clientSet clientset.Interface, jobObj *batchv1.Job, ix int, filter func(*v1.Pod) bool) (*v1.Pod, error) {}

func getJobPodsForIndex(ctx context.Context, clientSet clientset.Interface, jobObj *batchv1.Job, ix int, filter func(*v1.Pod) bool) ([]*v1.Pod, error) {}

func getCompletionIndex(lookupMap map[string]string) (int, error) {}

func createJobWithDefaults(ctx context.Context, clientSet clientset.Interface, ns string, jobObj *batchv1.Job) (*batchv1.Job, error) {}

func setup(t testing.TB, nsBaseName string) (framework.TearDownFunc, *restclient.Config, clientset.Interface, *v1.Namespace) {}

func startJobControllerAndWaitForCaches(tb testing.TB, restConfig *restclient.Config) (context.Context, context.CancelFunc) {}

func resetMetrics() {}

func createJobControllerWithSharedInformers(tb testing.TB, restConfig *restclient.Config, informerSet informers.SharedInformerFactory) (*jobcontroller.Controller, context.Context, context.CancelFunc) {}

func hasJobTrackingFinalizer(obj metav1.Object) bool {}

func setDuringTest(val *int, newVal int) func() {}

func setDurationDuringTest(val *time.Duration, newVal time.Duration) func() {}

func updateJob(ctx context.Context, jobClient typedv1.JobInterface, jobName string, updateFunc func(*batchv1.Job)) (*batchv1.Job, error) {}

func waitForPodsToBeActive(ctx context.Context, t *testing.T, jobClient typedv1.JobInterface, podCount int32, jobObj *batchv1.Job) {}

func deletePods(ctx context.Context, t *testing.T, clientSet clientset.Interface, namespace string) {}

func removePodsFinalizers(ctx context.Context, clientSet clientset.Interface, namespace string, finalizersNames []string) error {}

func updatePod(ctx context.Context, clientSet clientset.Interface, pods []v1.Pod, updateFunc func(*v1.Pod)) error {}

func failTerminatingPods(ctx context.Context, t *testing.T, clientSet clientset.Interface, namespace string) {}