kubernetes/test/e2e/storage/testsuites/multivolume.go

type multiVolumeTestSuite struct {
	tsInfo storageframework.TestSuiteInfo
}

// Compile-time check that multiVolumeTestSuite implements the TestSuite interface.
var _ storageframework.TestSuite = &multiVolumeTestSuite{}

// InitCustomMultiVolumeTestSuite returns a multiVolumeTestSuite that implements the TestSuite interface,
// using the given custom test patterns
func InitCustomMultiVolumeTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {}

// InitMultiVolumeTestSuite returns a multiVolumeTestSuite that implements the TestSuite interface,
// using the test suite's default patterns
func InitMultiVolumeTestSuite() storageframework.TestSuite {}
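
// A hedged usage sketch (not part of the upstream file): a driver's e2e test could
// register this suite with either the default patterns or an explicit subset.
// storageframework.FsVolModeDynamicPV is assumed to be one of the predefined test
// patterns; substitute whatever patterns the driver actually supports.
func exampleRegisterMultiVolumeSuite() []func() storageframework.TestSuite {
	suites := []func() storageframework.TestSuite{
		// Run with the suite's default patterns.
		InitMultiVolumeTestSuite,
	}
	// Or restrict the suite to explicitly chosen patterns.
	custom := []storageframework.TestPattern{storageframework.FsVolModeDynamicPV}
	suites = append(suites, func() storageframework.TestSuite {
		return InitCustomMultiVolumeTestSuite(custom)
	})
	return suites
}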

func (t *multiVolumeTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {}

func (t *multiVolumeTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {}

func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {}

// testAccessMultipleVolumes tests access to multiple volumes from a single pod on the specified node.
// If readSeedBase > 0, read tests are run before the write/read tests, assuming data has already been written.
func testAccessMultipleVolumes(ctx context.Context, f *framework.Framework, cs clientset.Interface, ns string,
	node e2epod.NodeSelection, pvcs []*v1.PersistentVolumeClaim, readSeedBase int64, writeSeedBase int64) string {}
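
// Hedged sketch of how a caller might chain two runs so the second re-reads what the
// first wrote: pass a non-positive readSeedBase on the first run (write/read only),
// then reuse its write seed as the read seed for a pod pinned to the same node.
// The time-based seed choice is an illustrative assumption, not necessarily the
// file's own convention.
func exampleChainedAccess(ctx context.Context, f *framework.Framework, cs clientset.Interface, ns string,
	node e2epod.NodeSelection, pvcs []*v1.PersistentVolumeClaim) {
	writeSeedBase := time.Now().UTC().UnixNano()
	// First pod: no pre-existing data, so skip the initial read pass.
	nodeName := testAccessMultipleVolumes(ctx, f, cs, ns, node, pvcs, -1 /* readSeedBase */, writeSeedBase)
	// Second pod on the same node: verify the first pod's data, then write fresh data.
	e2epod.SetAffinity(&node, nodeName)
	testAccessMultipleVolumes(ctx, f, cs, ns, node, pvcs, writeSeedBase, time.Now().UTC().UnixNano())
}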

// TestAccessMultipleVolumesAcrossPodRecreation tests access to multiple volumes from a single pod,
// then recreates the pod on the same or a different node depending on requiresSameNode,
// and rechecks access to the volumes from the recreated pod
func TestAccessMultipleVolumesAcrossPodRecreation(ctx context.Context, f *framework.Framework, cs clientset.Interface, ns string,
	node e2epod.NodeSelection, pvcs []*v1.PersistentVolumeClaim, requiresSameNode bool) {}
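
// Hedged usage sketch: verify that data written before pod recreation is still readable
// afterwards, both when the new pod must land on the same node and, on multi-node
// clusters, when it may move to a different one.
func exampleAcrossPodRecreation(ctx context.Context, f *framework.Framework, cs clientset.Interface, ns string,
	node e2epod.NodeSelection, pvcs []*v1.PersistentVolumeClaim, multiNode bool) {
	// Recreate the pod on the same node and recheck access.
	TestAccessMultipleVolumesAcrossPodRecreation(ctx, f, cs, ns, node, pvcs, true /* requiresSameNode */)
	if multiNode {
		// Recreate the pod on a different node, exercising detach/attach across nodes.
		TestAccessMultipleVolumesAcrossPodRecreation(ctx, f, cs, ns, node, pvcs, false)
	}
}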

// TestConcurrentAccessToSingleVolume tests access to a single volume from multiple pods,
// then deletes the last pod and rechecks access to the volume from the remaining pods to
// verify that the deletion does not affect them. Pods are deployed on the same node or on
// different nodes depending on requiresSameNode. Read/write checks are done across pods:
// pod{n} reads back both what pod{n-1} wrote and what pod{n} itself wrote.
func TestConcurrentAccessToSingleVolume(ctx context.Context, f *framework.Framework, cs clientset.Interface, ns string,
	node e2epod.NodeSelection, pvc *v1.PersistentVolumeClaim, numPods int, requiresSameNode bool,
	readOnly bool) {}
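
// Hedged usage sketch: exercise read-write access from two pods forced onto the same
// node, then read-only access from four pods whose placement is left to the scheduler.
// The pod counts are arbitrary examples; the read-only case assumes the volume was
// prepared beforehand (see initializeVolume below).
func exampleConcurrentSingleVolume(ctx context.Context, f *framework.Framework, cs clientset.Interface, ns string,
	node e2epod.NodeSelection, pvc *v1.PersistentVolumeClaim) {
	// Two read-write pods on the same node: concurrent writes to one volume.
	TestConcurrentAccessToSingleVolume(ctx, f, cs, ns, node, pvc, 2 /* numPods */, true /* requiresSameNode */, false /* readOnly */)
	// Four read-only pods, possibly spread across nodes.
	TestConcurrentAccessToSingleVolume(ctx, f, cs, ns, node, pvc, 4, false, true)
}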

// TestConcurrentAccessToRelatedVolumes tests access to multiple volumes from multiple pods.
// Each provided PVC is used by a single pod. The test ensures that volumes created from
// another volume (i.e. clones) or from a volume snapshot can be used together with the original volume.
func TestConcurrentAccessToRelatedVolumes(ctx context.Context, f *framework.Framework, cs clientset.Interface, ns string,
	node e2epod.NodeSelection, pvcs []*v1.PersistentVolumeClaim, expectedContent string) {}
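
// Hedged usage sketch: the caller prepares PVCs that are related to each other, e.g. an
// original volume plus a clone of it (a PVC whose spec.dataSource references the original
// PVC) or a PVC restored from its snapshot, and then checks that pods using them
// concurrently all find the expected content. Variable names here are illustrative.
func exampleRelatedVolumes(ctx context.Context, f *framework.Framework, cs clientset.Interface, ns string,
	node e2epod.NodeSelection, originalPVC, clonedPVC *v1.PersistentVolumeClaim, expectedContent string) {
	// Each PVC gets its own pod; both pods must be able to read expectedContent.
	TestConcurrentAccessToRelatedVolumes(ctx, f, cs, ns, node,
		[]*v1.PersistentVolumeClaim{originalPVC, clonedPVC}, expectedContent)
}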

// getCurrentTopologiesNumber goes through all Nodes and returns the unique driver topologies and the count of Nodes per topology
func getCurrentTopologiesNumber(cs clientset.Interface, nodes *v1.NodeList, keys []string) ([]topology, []int, error) {}
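
// Hedged sketch of the counting described above, assuming topology is a map of topology
// key to node label value as used elsewhere in this package; error handling is omitted
// and the real implementation may differ (uses reflect.DeepEqual, so it needs "reflect").
func exampleCountTopologies(nodes *v1.NodeList, keys []string) ([]topology, []int) {
	var topos []topology
	var counts []int
	for _, n := range nodes.Items {
		// Build this node's topology from the requested label keys.
		topo := topology{}
		for _, k := range keys {
			if v, ok := n.Labels[k]; ok {
				topo[k] = v
			}
		}
		// Count nodes per unique topology.
		found := false
		for i, t := range topos {
			if reflect.DeepEqual(t, topo) {
				counts[i]++
				found = true
				break
			}
		}
		if !found {
			topos = append(topos, topo)
			counts = append(counts, 1)
		}
	}
	return topos, counts
}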

// ensureTopologyRequirements checks that there are enough nodes in the cluster for a test and
// sets the nodeSelection affinity according to the given topology keys for drivers that provide them.
func ensureTopologyRequirements(ctx context.Context, nodeSelection *e2epod.NodeSelection, cs clientset.Interface, driverInfo *storageframework.DriverInfo, minCount int) error {}
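
// Hedged usage sketch: before a test that needs pods on two different nodes, require at
// least two nodes within one driver topology and pin nodeSelection to it. The error
// handling shown is illustrative.
func exampleRequireTwoNodesInTopology(ctx context.Context, node *e2epod.NodeSelection, cs clientset.Interface,
	driverInfo *storageframework.DriverInfo) {
	err := ensureTopologyRequirements(ctx, node, cs, driverInfo, 2 /* minCount */)
	framework.ExpectNoError(err, "ensuring topology requirements")
}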

// initializeVolume creates a filesystem on the given volume so it can be used read-only later
func initializeVolume(ctx context.Context, cs clientset.Interface, t *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {}
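
// Hedged sketch of the idea behind initializeVolume: mount the PVC read-write once in a
// short-lived pod so the volume gets formatted, then delete the pod; afterwards the same
// volume can be mounted read-only. The e2epod.Config fields and helper calls below are
// assumptions about the e2e framework helpers available to this package.
func exampleInitializeVolume(ctx context.Context, cs clientset.Interface, t *framework.TimeoutContext, ns string,
	pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
	podConfig := e2epod.Config{
		NS:            ns,
		PVCs:          []*v1.PersistentVolumeClaim{pvc},
		NodeSelection: node,
	}
	// The first read-write mount initializes the filesystem on the volume.
	pod, err := e2epod.CreateSecPodWithNodeSelection(ctx, cs, &podConfig, t.PodStart)
	framework.ExpectNoError(err)
	// Tear the pod down; the initialized volume can now be used read-only.
	framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, cs, pod))
}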