kubernetes/pkg/controller/ttlafterfinished/ttlafterfinished_controller.go

// Controller watches for changes of Jobs API objects. Triggered by Job creation
// and updates, it enqueues Jobs that have a non-nil `.spec.ttlSecondsAfterFinished`
// field to be cleaned up when the TTL expires.
type Controller

// New creates an instance of Controller
func New(ctx context.Context, jobInformer batchinformers.JobInformer, client clientset.Interface) *Controller {}

// Run starts the workers to clean up Jobs.
func (tc *Controller) Run(ctx context.Context, workers int) {}
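For context, a minimal sketch of how a process might wire this controller up, assuming standard client-go plumbing; the kubeconfig path, resync period, and worker count are illustrative, not values taken from the file above.

```go
package main

import (
	"context"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"

	"k8s.io/kubernetes/pkg/controller/ttlafterfinished"
)

func main() {
	ctx := context.Background()

	// Build a client from a kubeconfig (path is illustrative).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// The shared informer factory supplies the Job informer that New expects.
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	tc := ttlafterfinished.New(ctx, factory.Batch().V1().Jobs(), client)

	factory.Start(ctx.Done())
	tc.Run(ctx, 5) // blocks until ctx is cancelled
}
```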

// addJob enqueues a newly added Job so its TTL can be evaluated.
func (tc *Controller) addJob(logger klog.Logger, obj interface{}) {}

// updateJob enqueues an updated Job so its TTL can be re-evaluated.
func (tc *Controller) updateJob(logger klog.Logger, old, cur interface{}) {}

func (tc *Controller) enqueue(logger klog.Logger, job *batch.Job) {}

func (tc *Controller) enqueueAfter(job *batch.Job, after time.Duration) {}
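Both helpers reduce a Job to its namespace/name cache key and hand it to the controller's rate-limited workqueue, immediately for enqueue and delayed for enqueueAfter. A sketch against an illustrative stand-in type whose field names are assumptions, not the ones in the file above; the snippets after this one reuse the same type.

```go
package ttlafterfinished

import (
	"fmt"
	"time"

	batch "k8s.io/api/batch/v1"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientset "k8s.io/client-go/kubernetes"
	batchlisters "k8s.io/client-go/listers/batch/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog/v2"
	"k8s.io/utils/clock"
)

// controller is an illustrative stand-in for the real Controller; these
// field names are assumptions.
type controller struct {
	client    clientset.Interface
	jobLister batchlisters.JobLister
	queue     workqueue.RateLimitingInterface
	clock     clock.Clock
}

// enqueue adds the Job's cache key to the queue for immediate processing.
func (tc *controller) enqueue(logger klog.Logger, job *batch.Job) {
	logger.V(4).Info("Adding Job to cleanup queue", "job", klog.KObj(job))
	key, err := cache.MetaNamespaceKeyFunc(job)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("couldn't get key for Job %#v: %v", job, err))
		return
	}
	tc.queue.Add(key)
}

// enqueueAfter delays delivery until the TTL is expected to have expired.
func (tc *controller) enqueueAfter(job *batch.Job, after time.Duration) {
	key, err := cache.MetaNamespaceKeyFunc(job)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("couldn't get key for Job %#v: %v", job, err))
		return
	}
	tc.queue.AddAfter(key, after)
}
```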

func (tc *Controller) worker(ctx context.Context) {}

func (tc *Controller) processNextWorkItem(ctx context.Context) bool {}

func (tc *Controller) handleErr(err error, key string) {}
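worker, processNextWorkItem, and handleErr follow the standard client-go controller loop: pop a key, process it, retry with rate-limited backoff on failure, and clear the backoff history on success. A sketch reusing the illustrative type above (it additionally needs the "context" import):

```go
// worker drains the queue until it is shut down.
func (tc *controller) worker(ctx context.Context) {
	for tc.processNextWorkItem(ctx) {
	}
}

// processNextWorkItem pops one key and returns false only on queue shutdown.
func (tc *controller) processNextWorkItem(ctx context.Context) bool {
	key, quit := tc.queue.Get()
	if quit {
		return false
	}
	defer tc.queue.Done(key)

	err := tc.processJob(ctx, key.(string))
	tc.handleErr(err, key.(string))
	return true
}

// handleErr requeues failed keys with rate-limited backoff and forgets keys
// that were processed cleanly.
func (tc *controller) handleErr(err error, key string) {
	if err == nil {
		tc.queue.Forget(key)
		return
	}
	utilruntime.HandleError(fmt.Errorf("error cleaning up Job %v, requeuing: %v", key, err))
	tc.queue.AddRateLimited(key)
}
```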

// processJob checks the Job's state and TTL and deletes the Job once it has
// finished and its TTL-after-finished has expired. If the Job hasn't finished
// or its TTL hasn't expired, the Job is re-added to the queue at the time the
// TTL is expected to expire.
// This function is not meant to be invoked concurrently with the same key.
func (tc *Controller) processJob(ctx context.Context, key string) error {}
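A condensed sketch of that flow: check the cached copy first, then re-read the live object and re-check the TTL before deleting, so a stale cache never triggers a deletion. Field and helper names follow the illustrative type above (add the apierrors "k8s.io/apimachinery/pkg/api/errors" and metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" imports):

```go
func (tc *controller) processJob(ctx context.Context, key string) error {
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return err
	}
	logger := klog.FromContext(ctx)

	// Cheap check against the informer cache first.
	job, err := tc.jobLister.Jobs(namespace).Get(name)
	if apierrors.IsNotFound(err) {
		return nil // already gone
	}
	if err != nil {
		return err
	}
	if expiredAt, err := tc.processTTL(logger, job); err != nil || expiredAt == nil {
		return err
	}

	// The cache may be stale, so fetch the live Job and re-check the TTL
	// before deleting anything.
	fresh, err := tc.client.BatchV1().Jobs(namespace).Get(ctx, name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		return nil
	}
	if err != nil {
		return err
	}
	if expiredAt, err := tc.processTTL(logger, fresh); err != nil || expiredAt == nil {
		return err
	}

	// Foreground propagation deletes the Job's Pods first; the UID
	// precondition protects a recreated Job with the same name.
	policy := metav1.DeletePropagationForeground
	opts := metav1.DeleteOptions{
		PropagationPolicy: &policy,
		Preconditions:     &metav1.Preconditions{UID: &fresh.UID},
	}
	return tc.client.BatchV1().Jobs(namespace).Delete(ctx, name, opts)
}
```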

// processTTL checks whether a given Job's TTL has expired. If it has not
// expired yet, the Job is re-added to the queue to be checked again once the
// TTL is expected to have expired.
func (tc *Controller) processTTL(logger klog.Logger, job *batch.Job) (expiredAt *time.Time, err error) {}
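A sketch of the decision itself: expiredAt is non-nil only when the Job should be deleted now; otherwise the Job is requeued for roughly when the TTL will expire. needsCleanup and timeLeft are sketched below; the clock field is the assumption noted earlier.

```go
func (tc *controller) processTTL(logger klog.Logger, job *batch.Job) (*time.Time, error) {
	// Ignore Jobs that are being deleted, haven't finished, or carry no TTL.
	if job.DeletionTimestamp != nil || !needsCleanup(job) {
		return nil, nil
	}

	now := tc.clock.Now()
	remaining, expireAt, err := timeLeft(logger, job, &now)
	if err != nil {
		return nil, err
	}

	// TTL has already expired: signal the caller to delete.
	if *remaining <= 0 {
		return expireAt, nil
	}

	// Not yet: check again when the TTL is expected to expire.
	tc.enqueueAfter(job, *remaining)
	return nil, nil
}
```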

// needsCleanup checks whether a Job has finished and has a TTL set.
func needsCleanup(j *batch.Job) bool {}
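This check plausibly reduces to one line over the spec plus a terminal-condition scan; the isJobFinished helper here is a local stand-in for whatever helper the real file uses (add the corev1 "k8s.io/api/core/v1" import):

```go
// needsCleanup reports whether the Job both carries a TTL and has reached a
// terminal state.
func needsCleanup(j *batch.Job) bool {
	return j.Spec.TTLSecondsAfterFinished != nil && isJobFinished(j)
}

// isJobFinished is a local stand-in: a Job is finished once it has a Complete
// or Failed condition with status True.
func isJobFinished(j *batch.Job) bool {
	for _, c := range j.Status.Conditions {
		if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}
```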

func getFinishAndExpireTime(j *batch.Job) (*time.Time, *time.Time, error) {}

func timeLeft(logger klog.Logger, j *batch.Job, since *time.Time) (*time.Duration, *time.Time, error) {}
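Together these turn the finish timestamp plus the TTL into a deadline and a remaining duration. A sketch of that arithmetic; jobFinishTime is sketched after this:

```go
// getFinishAndExpireTime returns when the Job finished and when its TTL expires.
func getFinishAndExpireTime(j *batch.Job) (*time.Time, *time.Time, error) {
	if !needsCleanup(j) {
		return nil, nil, fmt.Errorf("job %s/%s should not be cleaned up", j.Namespace, j.Name)
	}
	t, err := jobFinishTime(j)
	if err != nil {
		return nil, nil, err
	}
	finishAt := t.Time
	expireAt := finishAt.Add(time.Duration(*j.Spec.TTLSecondsAfterFinished) * time.Second)
	return &finishAt, &expireAt, nil
}

// timeLeft measures the remaining TTL from `since` (normally the current time).
func timeLeft(logger klog.Logger, j *batch.Job, since *time.Time) (*time.Duration, *time.Time, error) {
	finishAt, expireAt, err := getFinishAndExpireTime(j)
	if err != nil {
		return nil, nil, err
	}
	if finishAt.After(*since) {
		// A finish time in the future usually means clock skew somewhere.
		logger.Info("Found Job finished in the future, this is likely due to time skew", "job", klog.KObj(j))
	}
	remaining := expireAt.Sub(*since)
	return &remaining, expireAt, nil
}
```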

// jobFinishTime takes an already finished Job and returns the time it finished.
func jobFinishTime(finishedJob *batch.Job) (metav1.Time, error) {}
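The finish time can be read off the terminal condition's LastTransitionTime; a sketch:

```go
// jobFinishTime returns the LastTransitionTime of the Job's terminal condition.
func jobFinishTime(finishedJob *batch.Job) (metav1.Time, error) {
	for _, c := range finishedJob.Status.Conditions {
		if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == corev1.ConditionTrue {
			if c.LastTransitionTime.IsZero() {
				return metav1.Time{}, fmt.Errorf("unable to find the time when the Job %s/%s finished", finishedJob.Namespace, finishedJob.Name)
			}
			return c.LastTransitionTime, nil
		}
	}
	// Unreachable for Jobs that passed needsCleanup.
	return metav1.Time{}, fmt.Errorf("unable to find the status of the finished Job %s/%s", finishedJob.Namespace, finishedJob.Name)
}
```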