kubernetes/test/e2e_node/eviction_test.go

// NOTE(review): all constant values are elided in this outline; the comments
// below are inferred from the names and must be confirmed against the full
// source.

// postTestConditionMonitoringPeriod — how long node conditions keep being
// monitored after a test completes (presumably a time.Duration).
const postTestConditionMonitoringPeriod

// evictionPollInterval — polling interval used while waiting for evictions
// to occur (presumably a time.Duration).
const evictionPollInterval

// pressureDisappearTimeout — how long to wait for a node pressure condition
// to clear after the test workload is removed (presumably a time.Duration).
const pressureDisappearTimeout

// pressureDelay — delay presumably applied around pressure-condition
// transitions; exact semantics elided.
const pressureDelay

// testContextFmt — format string presumably used to build test context
// descriptions.
const testContextFmt

// noPressure — sentinel node-condition value meaning "expect no pressure
// condition" (presumably an empty v1.NodeConditionType).
const noPressure

// lotsOfDisk — disk quantity used by disk-consuming test pods (units elided;
// presumably MB).
const lotsOfDisk

// lotsOfFiles — file count used by inode-consuming test pods.
const lotsOfFiles

// resourceInodes — resource name for inodes, used when checking starved
// resources (presumably a v1.ResourceName).
const resourceInodes

// noStarvedResource — sentinel meaning "no starved resource expected"
// (presumably an empty v1.ResourceName).
const noStarvedResource

// NOTE(review): the following package-level blank-identifier declarations are
// elided in this outline. In Kubernetes e2e_node tests these are typically
// Ginkgo suite registrations (e.g. var _ = SIGDescribe("...Eviction...",
// func() { ... })), one per eviction scenario — confirm against the full
// source.
var _

var _

var _

var _

var _

var _

var _

var _

var _

var _

var _

// podEvictSpec describes one pod participating in an eviction test. Fields
// are elided in this outline; based on how the spec is used by runEvictionTest
// and verifyEvictionOrdering it presumably carries the pod itself and its
// evictionPriority (0 = must not be evicted) — confirm against the full
// source.
type podEvictSpec

// runEvictionTest sets up a testing environment given the provided pods, and checks a few things:
//
//	It ensures that the desired expectedNodeCondition is actually triggered.
//	It ensures that evictionPriority 0 pods are not evicted
//	It ensures that lower evictionPriority pods are always evicted before higher evictionPriority pods (2 evicted before 1, etc.)
//	It ensures that all pods with non-zero evictionPriority are eventually evicted.
//
// runEvictionTest then cleans up the testing environment by deleting provided pods, and ensures that expectedNodeCondition no longer exists
//
// pressureTimeout presumably bounds how long to wait for
// expectedNodeCondition to appear, and logFunc is a diagnostic-logging
// callback (e.g. one of the log*Metrics helpers below).
// NOTE(review): body elided in this outline — behavior above is taken from
// the pre-existing doc comment and the signature; confirm against the full
// source.
func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expectedNodeCondition v1.NodeConditionType, expectedStarvedResource v1.ResourceName, logFunc func(ctx context.Context), testSpecs []podEvictSpec) {}

// verifyEvictionOrdering returns an error if all non-zero priority pods have not been evicted, nil otherwise
// This function panics (via Expect) if eviction ordering is violated, or if a priority-zero pod fails.
// NOTE(review): body elided in this outline — contract above is from the
// pre-existing doc comment; confirm against the full source.
func verifyEvictionOrdering(ctx context.Context, f *framework.Framework, testSpecs []podEvictSpec) error {}

// verifyEvictionPeriod checks eviction timing for the given specs relative to
// nodeUnreadyTime. NOTE(review): body elided in this outline — purpose
// inferred from the name and signature; confirm against the full source.
func verifyEvictionPeriod(ctx context.Context, f *framework.Framework, testSpecs []podEvictSpec, nodeUnreadyTime time.Time) {}

// verifyPodConditions checks the pod conditions (e.g. DisruptionTarget) of
// the pods described by testSpecs. NOTE(review): body elided in this outline
// — purpose inferred from the name and signature; confirm against the full
// source.
func verifyPodConditions(ctx context.Context, f *framework.Framework, testSpecs []podEvictSpec) {}

// verifyEvictionEvents checks that eviction events were emitted for the pods
// in testSpecs, presumably asserting that the event references
// expectedStarvedResource. NOTE(review): body elided in this outline —
// purpose inferred from the name and signature; confirm against the full
// source.
func verifyEvictionEvents(ctx context.Context, f *framework.Framework, testSpecs []podEvictSpec, expectedStarvedResource v1.ResourceName) {}

// hasNodeCondition returns true if the node has the given node condition,
// false otherwise.
func hasNodeCondition(ctx context.Context, f *framework.Framework, expectedNodeCondition v1.NodeConditionType) bool {}

// logInodeMetrics logs inode usage metrics for debugging eviction tests.
// NOTE(review): body elided in this outline — presumably reads the kubelet
// stats summary; confirm against the full source.
func logInodeMetrics(ctx context.Context) {}

// logDiskMetrics logs disk usage metrics for debugging eviction tests.
// NOTE(review): body elided in this outline — presumably reads the kubelet
// stats summary; confirm against the full source.
func logDiskMetrics(ctx context.Context) {}

// logMemoryMetrics logs memory usage metrics for debugging eviction tests.
// NOTE(review): body elided in this outline — presumably reads the kubelet
// stats summary; confirm against the full source.
func logMemoryMetrics(ctx context.Context) {}

// logPidMetrics logs process-ID usage metrics for debugging eviction tests.
// NOTE(review): body elided in this outline — presumably reads the kubelet
// stats summary; confirm against the full source.
func logPidMetrics(ctx context.Context) {}

// eventuallyGetSummary returns a kubelet stats summary; the name suggests it
// retries until one is successfully fetched. The named result s carries the
// first successfully obtained Summary. NOTE(review): body elided in this
// outline — behavior inferred from the name and signature; confirm against
// the full source.
func eventuallyGetSummary(ctx context.Context) (s *kubeletstatsv1alpha1.Summary) {}

// innocentPod returns a pod that does not use any resources.
func innocentPod() *v1.Pod {}

// volumeMountPath — mount path used inside resource-consuming test pods
// (value elided in this outline — confirm against the full source).
const volumeMountPath

// volumeName — name of the volume attached to resource-consuming test pods
// (value elided in this outline — confirm against the full source).
const volumeName

// inodeConsumingPod returns a pod that consumes inodes, presumably by
// creating numFiles files in the given volumeSource. NOTE(review): body
// elided in this outline — inferred from the name and parameters; confirm
// against the full source.
func inodeConsumingPod(name string, numFiles int, volumeSource *v1.VolumeSource) *v1.Pod {}

// diskConsumingPod returns a pod that consumes disk space, presumably writing
// diskConsumedMB megabytes into the given volumeSource, with the supplied
// resource requirements. NOTE(review): body elided in this outline —
// inferred from the name and parameters; confirm against the full source.
func diskConsumingPod(name string, diskConsumedMB int, volumeSource *v1.VolumeSource, resources v1.ResourceRequirements) *v1.Pod {}

// pidConsumingPod returns a pod that consumes process IDs, presumably by
// spawning numProcesses processes. NOTE(review): body elided in this outline
// — inferred from the name and parameters; confirm against the full source.
func pidConsumingPod(name string, numProcesses int) *v1.Pod {}

// podWithCommand returns a pod with the provided volumeSource and resourceRequirements.
// NOTE(review): body elided in this outline — presumably the pod runs the
// given command for the given number of iterations under the given name;
// confirm against the full source.
func podWithCommand(volumeSource *v1.VolumeSource, resources v1.ResourceRequirements, iterations int, name, command string) *v1.Pod {}

// getMemhogPod returns a pod with the given pod and container names and
// resource requirements, presumably running a memory-consuming ("memhog")
// container. NOTE(review): body elided in this outline — inferred from the
// name and parameters; the non-idiomatic Get prefix is kept since callers
// depend on the existing name.
func getMemhogPod(podName string, ctnName string, res v1.ResourceRequirements) *v1.Pod {}