Unverified Commit a1db52c5 authored by Travis Nielsen

ceph: move integration test to csi driver


The integration tests have mostly been running on the flex driver,
with only a newer test on the CSI driver. With the CSI driver being
the preferred driver going forward, the integration tests now all
run with the CSI driver, with the exception of a test suite that is
dedicated solely to the flex driver.

A number of other test improvements are also made for code
readability, test stability, and removal of unused options.
Signed-off-by: Travis Nielsen <tnielsen@redhat.com>
Showing with 536 additions and 875 deletions
......@@ -22,6 +22,7 @@ import (
"github.com/rook/rook/pkg/daemon/ceph/client"
"github.com/rook/rook/tests/framework/installer"
"github.com/rook/rook/tests/framework/utils"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// BlockOperation is a wrapper for k8s rook block operations
......@@ -38,11 +39,6 @@ type BlockImage struct {
MountPoint string `json:"mountPoint"`
}
var (
writeDataToBlockPod = []string{"sh", "-c", "WRITE_DATA_CMD"}
readDataFromBlockPod = []string{"cat", "READ_DATA_CMD"}
)
// CreateBlockOperation - Constructor to create BlockOperation - client to perform rook Block operations on k8s
func CreateBlockOperation(k8shelp *utils.K8sHelper, manifests installer.CephManifests) *BlockOperation {
return &BlockOperation{k8shelp, manifests}
......@@ -64,22 +60,32 @@ func (b *BlockOperation) Create(manifest string, size int) (string, error) {
}
func (b *BlockOperation) CreatePvc(claimName, storageClassName, mode, size string) error {
return b.k8sClient.ResourceOperation("apply", b.manifests.GetBlockPvcDef(claimName, storageClassName, mode, size))
func (b *BlockOperation) CreateStorageClassAndPVC(csi bool, pvcNamespace, clusterNamespace, systemNamespace, poolName, storageClassName, reclaimPolicy, blockName, mode string) error {
if err := b.k8sClient.ResourceOperation("apply", b.manifests.GetBlockPoolDef(poolName, clusterNamespace, "1")); err != nil {
return err
}
if err := b.k8sClient.ResourceOperation("apply", b.manifests.GetBlockStorageClassDef(csi, poolName, storageClassName, reclaimPolicy, clusterNamespace, systemNamespace)); err != nil {
return err
}
return b.k8sClient.ResourceOperation("apply", b.manifests.GetBlockPVCDef(blockName, pvcNamespace, storageClassName, mode, "1M"))
}
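For reference, a minimal usage sketch of the new consolidated helper. The import path is assumed to be this repository's tests/framework/clients package, and every literal argument below is a hypothetical placeholder; only the signature comes from this change.

package example

import "github.com/rook/rook/tests/framework/clients"

// provisionCSIBlock shows the csi=true path: the pool, CSI storage class,
// and PVC are created with a single call. All values are hypothetical.
func provisionCSIBlock(block *clients.BlockOperation) error {
	return block.CreateStorageClassAndPVC(
		true,               // csi: use the RBD CSI driver instead of flex
		"default",          // pvcNamespace (hypothetical)
		"rook-ceph",        // clusterNamespace (hypothetical)
		"rook-ceph-system", // systemNamespace (hypothetical)
		"replicapool",      // poolName (hypothetical)
		"rook-ceph-block",  // storageClassName (hypothetical)
		"Delete",           // reclaimPolicy
		"block-pvc",        // blockName (hypothetical)
		"ReadWriteOnce",    // mode
	)
}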
func (b *BlockOperation) CreateStorageClass(poolName, storageClassName, reclaimPolicy, namespace string, varClusterName bool) error {
return b.k8sClient.ResourceOperation("apply", b.manifests.GetBlockStorageClassDef(poolName, storageClassName, reclaimPolicy, namespace, varClusterName))
func (b *BlockOperation) CreatePVC(namespace, claimName, storageClassName, mode, size string) error {
return b.k8sClient.ResourceOperation("apply", b.manifests.GetBlockPVCDef(claimName, namespace, storageClassName, mode, size))
}
func (b *BlockOperation) DeletePvc(claimName, storageClassName, mode, size string) error {
err := b.k8sClient.ResourceOperation("delete", b.manifests.GetBlockPvcDef(claimName, storageClassName, mode, size))
return err
func (b *BlockOperation) CreateStorageClass(csi bool, poolName, storageClassName, reclaimPolicy, namespace string) error {
return b.k8sClient.ResourceOperation("apply", b.manifests.GetBlockStorageClassDef(csi, poolName, storageClassName, reclaimPolicy, namespace, installer.SystemNamespace(namespace)))
}
func (b *BlockOperation) DeleteStorageClass(poolName, storageClassName, reclaimPolicy, namespace string) error {
err := b.k8sClient.ResourceOperation("delete", b.manifests.GetBlockStorageClassDef(poolName, storageClassName, reclaimPolicy, namespace, false))
return err
func (b *BlockOperation) DeletePVC(namespace, claimName string) error {
logger.Infof("deleting pvc %q from namespace %q", claimName, namespace)
return b.k8sClient.Clientset.CoreV1().PersistentVolumeClaims(namespace).Delete(claimName, &metav1.DeleteOptions{})
}
func (b *BlockOperation) DeleteStorageClass(storageClassName string) error {
logger.Infof("deleting storage class %q", storageClassName)
return b.k8sClient.Clientset.StorageV1().StorageClasses().Delete(storageClassName, &metav1.DeleteOptions{})
}
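The matching teardown now goes through the Kubernetes clientset by name instead of re-applying manifests with delete. A hedged sketch, with the same assumed import path and hypothetical names:

package example

import "github.com/rook/rook/tests/framework/clients"

// cleanupCSIBlock deletes the PVC and then the storage class by name.
// Namespace and resource names are hypothetical placeholders.
func cleanupCSIBlock(block *clients.BlockOperation) error {
	if err := block.DeletePVC("default", "block-pvc"); err != nil {
		return err
	}
	return block.DeleteStorageClass("rook-ceph-block")
}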
// BlockDelete Function to delete a Block using Rook
......@@ -134,17 +140,7 @@ func (b *BlockOperation) DeleteBlockImage(image BlockImage, namespace string) er
return client.DeleteImage(context, namespace, image.Name, image.PoolName)
}
// BlockMap Function to map a Block using Rook
// Input parameters -
// manifest - Pod definition - pod should be defined to use a pvc that was created earlier
// Output - k8s create pod operation output and/or error
func (b *BlockOperation) BlockMap(manifest string) (string, error) {
args := []string{"apply", "-f", "-"}
result, err := b.k8sClient.KubectlWithStdin(manifest, args...)
if err != nil {
return "", fmt.Errorf("Unable to map block -- : %s", err)
}
return result, nil
// CreateClientPod starts a pod that should have a block PVC.
func (b *BlockOperation) CreateClientPod(manifest string) error {
return b.k8sClient.ResourceOperation("apply", manifest)
}
......@@ -55,6 +55,11 @@ func (f *FilesystemOperation) Create(name, namespace string, activeCount int) er
return nil
}
// CreateStorageClass creates a storage class for CephFS clients
func (f *FilesystemOperation) CreateStorageClass(fsName, namespace, storageClassName string) error {
return f.k8sh.ResourceOperation("apply", f.manifests.GetFileStorageClassDef(fsName, storageClassName, namespace))
}
// ScaleDown scales down the number of active metadata servers of a filesystem in Rook
func (f *FilesystemOperation) ScaleDown(name, namespace string) error {
logger.Infof("scaling down the number of filesystem active metadata servers via CRD")
......
......@@ -109,30 +109,15 @@ func (p *PoolOperation) CephPoolExists(namespace, name string) (bool, error) {
return false, nil
}
func (p *PoolOperation) CreateStorageClassAndPvc(namespace, poolName, storageClassName, reclaimPolicy, blockName, mode string) error {
return p.k8sh.ResourceOperation("apply", p.manifests.GetBlockPoolStorageClassAndPvcDef(namespace, poolName, storageClassName, reclaimPolicy, blockName, mode))
}
func (p *PoolOperation) DeleteStorageClass(storageClassName string) error {
return p.k8sh.Clientset.StorageV1().StorageClasses().Delete(storageClassName, &metav1.DeleteOptions{})
}
func (p *PoolOperation) DeletePool(blockClient *BlockOperation, namespace, poolName string) error {
// Delete all the images in a pool
blockImagesList, _ := blockClient.List(namespace)
for _, blockImage := range blockImagesList {
if poolName == blockImage.PoolName {
logger.Infof("force deleting block image %q in pool %q", blockImage, poolName)
blockClient.DeleteBlockImage(blockImage, namespace)
}
}
return p.k8sh.RookClientset.CephV1().CephBlockPools(namespace).Delete(poolName, &metav1.DeleteOptions{})
}
func (p *PoolOperation) DeletePvc(namespace, pvcName string) error {
return p.k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).Delete(pvcName, &metav1.DeleteOptions{})
}
func (p *PoolOperation) CreateStorageClass(namespace, poolName, storageClassName, reclaimPolicy string) error {
return p.k8sh.ResourceOperation("apply", p.manifests.GetBlockPoolStorageClass(namespace, poolName, storageClassName, reclaimPolicy))
}
......@@ -23,7 +23,6 @@ import (
"math/rand"
"os"
"path"
"strings"
"testing"
"time"
......@@ -67,49 +66,6 @@ type CephInstaller struct {
T func() *testing.T
}
func (h *CephInstaller) CreateCephCRDs() error {
var resources string
logger.Info("Creating Rook CRDs")
resources = h.Manifests.GetRookCRDs()
var err error
for i := 0; i < 5; i++ {
if i > 0 {
logger.Infof("waiting 10s...")
time.Sleep(10 * time.Second)
}
_, err = h.k8shelper.KubectlWithStdin(resources, createFromStdinArgs...)
if err == nil {
return nil
}
// If the CRD already exists, the previous test must not have completed cleanup yet.
// Delete the CRDs and attempt to wait for the cleanup.
if strings.Index(err.Error(), "AlreadyExists") == -1 {
return err
}
// ensure all the cluster CRDs are removed
if err = h.purgeClusters(); err != nil {
logger.Warningf("could not purge cluster crds. %+v", err)
}
// remove the finalizer from the cluster CRD
if _, err := h.k8shelper.Kubectl("patch", "crd", "cephclusters.ceph.rook.io", "-p", `{"metadata":{"finalizers": []}}`, "--type=merge"); err != nil {
logger.Warningf("could not remove finalizer from cluster crd. %+v", err)
}
logger.Warningf("CRDs were not cleaned up from a previous test. Deleting them to try again...")
if _, err := h.k8shelper.KubectlWithStdin(resources, deleteFromStdinArgs...); err != nil {
logger.Infof("deleting the crds returned an error: %+v", err)
}
}
return err
}
// CreateCephOperator creates rook-operator via kubectl
func (h *CephInstaller) CreateCephOperator(namespace string) (err error) {
logger.Infof("Starting Rook Operator")
......@@ -117,7 +73,9 @@ func (h *CephInstaller) CreateCephOperator(namespace string) (err error) {
h.k8shelper.CreateAnonSystemClusterBinding()
// creating rook resources
if err = h.CreateCephCRDs(); err != nil {
logger.Info("Creating Rook CRDs")
resources := h.Manifests.GetRookCRDs()
if _, err = h.k8shelper.KubectlWithStdin(resources, createFromStdinArgs...); err != nil {
return err
}
......@@ -126,11 +84,19 @@ func (h *CephInstaller) CreateCephOperator(namespace string) (err error) {
h.k8shelper.ChangeHostnames()
}
rookOperator := h.Manifests.GetRookOperator(namespace)
err = h.k8shelper.CreateNamespace(namespace)
if err != nil {
if errors.IsAlreadyExists(err) {
logger.Warningf("Namespace %q already exists!!!", namespace)
} else {
return fmt.Errorf("failed to create namespace %q. %v", namespace, err)
}
}
rookOperator := h.Manifests.GetRookOperator(namespace)
_, err = h.k8shelper.KubectlWithStdin(rookOperator, createFromStdinArgs...)
if err != nil {
return fmt.Errorf("Failed to create rook-operator pod : %v ", err)
return fmt.Errorf("Failed to create rook-operator pod: %v ", err)
}
logger.Infof("Rook Operator started")
......@@ -382,11 +348,6 @@ func (h *CephInstaller) InstallRookOnK8sWithHostPathAndDevicesOrPVC(namespace, s
mon cephv1.MonSpec, startWithAllNodes bool, rbdMirrorWorkers int) (bool, error) {
var err error
// flag used for local debugging purpose, when rook is pre-installed
if Env.SkipInstallRook {
return true, nil
}
k8sversion := h.k8shelper.GetK8sServerVersion()
logger.Infof("Installing rook on k8s %s", k8sversion)
......@@ -455,10 +416,6 @@ func (h *CephInstaller) UninstallRook(namespace string, gatherLogs bool) {
// UninstallRookFromMultipleNS uninstalls rook from multiple namespaces in k8s
func (h *CephInstaller) UninstallRookFromMultipleNS(gatherLogs bool, systemNamespace string, namespaces ...string) {
// flag used for local debugging purpose, when rook is pre-installed
if Env.SkipInstallRook {
return
}
if gatherLogs {
// Gather logs after status checks
h.GatherAllRookLogs(h.T().Name(), append([]string{systemNamespace}, namespaces...)...)
......@@ -495,6 +452,9 @@ func (h *CephInstaller) UninstallRookFromMultipleNS(gatherLogs bool, systemNames
checkError(h.T(), err, fmt.Sprintf("cannot delete namespace %s", namespace))
}
err = h.k8shelper.DeleteResourceAndWait(false, "namespace", systemNamespace)
checkError(h.T(), err, fmt.Sprintf("cannot delete system namespace %s", systemNamespace))
logger.Infof("removing the operator from namespace %s", systemNamespace)
err = h.k8shelper.DeleteResource(
"crd",
......@@ -513,8 +473,10 @@ func (h *CephInstaller) UninstallRookFromMultipleNS(gatherLogs bool, systemNames
if h.useHelm {
err = h.helmHelper.DeleteLocalRookHelmChart(helmDeployName)
} else {
logger.Infof("Deleting all the resources in the operator manifest")
rookOperator := h.Manifests.GetRookOperator(systemNamespace)
_, err = h.k8shelper.KubectlWithStdin(rookOperator, deleteFromStdinArgs...)
logger.Infof("DONE deleting all the resources in the operator manifest")
}
checkError(h.T(), err, "cannot uninstall rook-operator")
......@@ -575,6 +537,17 @@ func (h *CephInstaller) UninstallRookFromMultipleNS(gatherLogs bool, systemNames
// revert the hostname labels for the test
h.k8shelper.RestoreHostnames()
}
// wait a bit longer for the system namespace to be cleaned up after their deletion
for i := 0; i < 15; i++ {
_, err := h.k8shelper.Clientset.CoreV1().Namespaces().Get(systemNamespace, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
logger.Infof("system namespace %q removed", systemNamespace)
break
}
logger.Infof("system namespace %q still found...", systemNamespace)
time.Sleep(5 * time.Second)
}
}
func (h *CephInstaller) removeClusterFinalizers(namespace, name string) {
......
......@@ -39,18 +39,17 @@ type CephManifests interface {
GetRookExternalCluster(settings *ClusterExternalSettings) string
GetRookToolBox(namespace string) string
GetCleanupPod(node, removalDir string) string
GetBlockPoolDef(poolName string, namespace string, replicaSize string) string
GetBlockStorageClassDef(poolName string, storageClassName string, reclaimPolicy string, namespace string, varClusterName bool) string
GetBlockPvcDef(claimName string, storageClassName string, accessModes string, size string) string
GetBlockPoolStorageClassAndPvcDef(namespace string, poolName string, storageClassName string, reclaimPolicy string, blockName string, accessMode string) string
GetBlockPoolStorageClass(namespace string, poolName string, storageClassName string, reclaimPolicy string) string
GetBlockPoolDef(poolName, namespace, replicaSize string) string
GetBlockStorageClassDef(csi bool, poolName, storageClassName, reclaimPolicy, namespace, systemNamespace string) string
GetFileStorageClassDef(fsName, storageClassName, namespace string) string
GetBlockPVCDef(claimName, namespace, storageClassName, accessModes, size string) string
GetFilesystem(namepace, name string, activeCount int) string
GetNFS(namepace, name, pool string, daemonCount int) string
GetObjectStore(namespace, name string, replicaCount, port int) string
GetObjectStoreUser(namespace, name string, displayName string, store string) string
GetBucketStorageClass(namespace string, storeName string, storageClassName string, reclaimPolicy string, region string) string
GetObc(obcName string, storageClassName string, bucketName string, createBucket bool) string
GetClient(namespace string, name string, caps map[string]string) string
GetObjectStoreUser(namespace, name, displayName, store string) string
GetBucketStorageClass(namespace, storeName, storageClassName, reclaimPolicy, region string) string
GetObc(obcName, storageClassName, bucketName string, createBucket bool) string
GetClient(namespace, name string, caps map[string]string) string
}
type ClusterSettings struct {
......@@ -584,11 +583,7 @@ spec:
// GetRookOperator returns rook Operator manifest
func (m *CephManifestsMaster) GetRookOperator(namespace string) string {
return `kind: Namespace
apiVersion: v1
metadata:
name: ` + namespace + `
---
return `
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
......@@ -2000,11 +1995,26 @@ spec:
targetSizeRatio: .5`
}
func (m *CephManifestsMaster) GetBlockStorageClassDef(poolName string, storageClassName string, reclaimPolicy string, namespace string, varClusterName bool) string {
namespaceParameter := "clusterNamespace"
if varClusterName {
namespaceParameter = "clusterName"
func (m *CephManifestsMaster) GetBlockStorageClassDef(csi bool, poolName, storageClassName, reclaimPolicy, namespace, systemNamespace string) string {
// Create a CSI driver storage class
if csi {
return `
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: ` + storageClassName + `
provisioner: ` + systemNamespace + `.rbd.csi.ceph.com
reclaimPolicy: ` + reclaimPolicy + `
parameters:
pool: ` + poolName + `
clusterID: ` + namespace + `
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: ` + namespace + `
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: ` + namespace + `
`
}
// Create a FLEX driver storage class
return `apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
......@@ -2014,14 +2024,34 @@ allowVolumeExpansion: true
reclaimPolicy: ` + reclaimPolicy + `
parameters:
blockPool: ` + poolName + `
` + namespaceParameter + `: ` + namespace
clusterNamespace: ` + namespace
}
func (m *CephManifestsMaster) GetBlockPvcDef(claimName, storageClassName, accessModes, size string) string {
func (m *CephManifestsMaster) GetFileStorageClassDef(fsName, storageClassName, namespace string) string {
// Create a CSI driver storage class
return `
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: ` + storageClassName + `
provisioner: ` + SystemNamespace(namespace) + `.cephfs.csi.ceph.com
parameters:
clusterID: ` + namespace + `
fsName: ` + fsName + `
pool: ` + fsName + `-data0
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: ` + namespace + `
csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
csi.storage.k8s.io/node-stage-secret-namespace: ` + namespace + `
`
}
func (m *CephManifestsMaster) GetBlockPVCDef(claimName, namespace, storageClassName, accessModes, size string) string {
return `apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: ` + claimName + `
namespace: ` + namespace + `
annotations:
volume.beta.kubernetes.io/storage-class: ` + storageClassName + `
spec:
......@@ -2032,15 +2062,6 @@ spec:
storage: ` + size
}
func (m *CephManifestsMaster) GetBlockPoolStorageClassAndPvcDef(namespace string, poolName string, storageClassName string, reclaimPolicy string, blockName string, accessMode string) string {
return concatYaml(m.GetBlockPoolDef(poolName, namespace, "1"),
concatYaml(m.GetBlockStorageClassDef(poolName, storageClassName, reclaimPolicy, namespace, false), m.GetBlockPvcDef(blockName, storageClassName, accessMode, "1M")))
}
func (m *CephManifestsMaster) GetBlockPoolStorageClass(namespace string, poolName string, storageClassName string, reclaimPolicy string) string {
return concatYaml(m.GetBlockPoolDef(poolName, namespace, "1"), m.GetBlockStorageClassDef(poolName, storageClassName, reclaimPolicy, namespace, false))
}
// GetFilesystem returns the manifest to create a Rook filesystem resource with the given config.
func (m *CephManifestsMaster) GetFilesystem(namespace, name string, activeCount int) string {
return `apiVersion: ceph.rook.io/v1
......
......@@ -447,11 +447,7 @@ spec:
// GetRookOperator returns rook Operator manifest
func (m *CephManifestsV1_1) GetRookOperator(namespace string) string {
return `kind: Namespace
apiVersion: v1
metadata:
name: ` + namespace + `
---
return `
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
......@@ -1712,11 +1708,25 @@ spec:
size: ` + replicaSize
}
func (m *CephManifestsV1_1) GetBlockStorageClassDef(poolName string, storageClassName string, reclaimPolicy string, namespace string, varClusterName bool) string {
namespaceParameter := "clusterNamespace"
if varClusterName {
namespaceParameter = "clusterName"
func (m *CephManifestsV1_1) GetBlockStorageClassDef(csi bool, poolName, storageClassName, reclaimPolicy, namespace, systemNamespace string) string {
// Create a CSI driver storage class
if csi {
return `
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: ` + storageClassName + `
provisioner: ` + systemNamespace + `.rbd.csi.ceph.com
parameters:
pool: ` + poolName + `
clusterID: ` + namespace + `
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: ` + namespace + `
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: ` + namespace + `
`
}
// Create a FLEX driver storage class
return `apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
......@@ -1726,14 +1736,36 @@ allowVolumeExpansion: true
reclaimPolicy: ` + reclaimPolicy + `
parameters:
blockPool: ` + poolName + `
` + namespaceParameter + `: ` + namespace
clusterNamespace: ` + namespace
}
func (m *CephManifestsV1_1) GetFileStorageClassDef(fsName, storageClassName, namespace string) string {
// Create a CSI driver storage class
csiCephFSNodeSecret := "rook-csi-cephfs-node"
csiCephFSProvisionerSecret := "rook-csi-cephfs-provisioner"
return `
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: ` + storageClassName + `
provisioner: ` + SystemNamespace(namespace) + `.cephfs.csi.ceph.com
parameters:
clusterID: ` + namespace + `
fsName: ` + fsName + `
pool: ` + fsName + `-data0
csi.storage.k8s.io/provisioner-secret-name: ` + csiCephFSProvisionerSecret + `
csi.storage.k8s.io/provisioner-secret-namespace: ` + namespace + `
csi.storage.k8s.io/node-stage-secret-name: ` + csiCephFSNodeSecret + `
csi.storage.k8s.io/node-stage-secret-namespace: ` + namespace + `
`
}
func (m *CephManifestsV1_1) GetBlockPvcDef(claimName, storageClassName, accessModes, size string) string {
func (m *CephManifestsV1_1) GetBlockPVCDef(claimName, namespace, storageClassName, accessModes, size string) string {
return `apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: ` + claimName + `
namespace: ` + namespace + `
annotations:
volume.beta.kubernetes.io/storage-class: ` + storageClassName + `
spec:
......@@ -1744,15 +1776,6 @@ spec:
storage: ` + size
}
func (m *CephManifestsV1_1) GetBlockPoolStorageClassAndPvcDef(namespace string, poolName string, storageClassName string, reclaimPolicy string, blockName string, accessMode string) string {
return concatYaml(m.GetBlockPoolDef(poolName, namespace, "1"),
concatYaml(m.GetBlockStorageClassDef(poolName, storageClassName, reclaimPolicy, namespace, false), m.GetBlockPvcDef(blockName, storageClassName, accessMode, "1M")))
}
func (m *CephManifestsV1_1) GetBlockPoolStorageClass(namespace string, poolName string, storageClassName string, reclaimPolicy string) string {
return concatYaml(m.GetBlockPoolDef(poolName, namespace, "1"), m.GetBlockStorageClassDef(poolName, storageClassName, reclaimPolicy, namespace, false))
}
// GetFilesystem returns the manifest to create a Rook filesystem resource with the given config.
func (m *CephManifestsV1_1) GetFilesystem(namespace, name string, activeCount int) string {
return `apiVersion: ceph.rook.io/v1
......
......@@ -27,7 +27,6 @@ type EnvironmentManifest struct {
RookImageName string
ToolboxImageName string
BaseTestDir string
SkipInstallRook bool
LoadVolumeNumber int
LoadConcurrentRuns int
LoadTime int
......@@ -45,7 +44,6 @@ func init() {
flag.StringVar(&Env.RookImageName, "rook_image", "rook/ceph", "Docker image name for the rook container to install, must be in docker hub or local environment")
flag.StringVar(&Env.ToolboxImageName, "toolbox_image", "rook/ceph", "Docker image name of the toolbox container to install, must be in docker hub or local environment")
flag.StringVar(&Env.BaseTestDir, "base_test_dir", "/data", "Base test directory, for use only when kubernetes master is running on localhost")
flag.BoolVar(&Env.SkipInstallRook, "skip_install_rook", false, "Indicate if Rook need to installed - false if tests are being running at Rook that is pre-installed")
flag.IntVar(&Env.LoadConcurrentRuns, "load_parallel_runs", 20, "number of routines for load test")
flag.IntVar(&Env.LoadVolumeNumber, "load_volumes", 1, "number of volumes(file,object or block) to be created for load test")
flag.IntVar(&Env.LoadTime, "load_time", 1800, "number of seconds each thread perform operations for")
......
......@@ -95,9 +95,3 @@ func checkError(t *testing.T, err error, message string) {
}
assert.NoError(t, err, "%s. %+v", message, err)
}
func concatYaml(first, second string) string {
return first + `
---
` + second
}
......@@ -371,11 +371,7 @@ func (k8sh *K8sHelper) WaitForCustomResourceDeletion(namespace string, checkerFu
// If wait is false, a flag will be passed to indicate the delete should return immediately
func (k8sh *K8sHelper) DeleteResourceAndWait(wait bool, args ...string) error {
if !wait {
// new flag in k8s 1.11
v := version.MustParseSemantic(k8sh.GetK8sServerVersion())
if v.AtLeast(version.MustParseSemantic("1.11.0")) {
args = append(args, "--wait=false")
}
args = append(args, "--wait=false")
}
args = append([]string{"delete"}, args...)
_, err := k8sh.Kubectl(args...)
......@@ -421,8 +417,8 @@ func (k8sh *K8sHelper) CountPodsWithLabel(label string, namespace string) (int,
// WaitForPodCount waits until the desired number of pods with the label are started
func (k8sh *K8sHelper) WaitForPodCount(label, namespace string, count int) error {
options := metav1.ListOptions{LabelSelector: label}
inc := 0
for inc < RetryLoop {
for i := 0; i < RetryLoop; i++ {
pods, err := k8sh.Clientset.CoreV1().Pods(namespace).List(options)
if err != nil {
return fmt.Errorf("failed to find pod with label %s. %+v", label, err)
......@@ -432,10 +428,9 @@ func (k8sh *K8sHelper) WaitForPodCount(label, namespace string, count int) error
logger.Infof("found %d pods with label %s", count, label)
return nil
}
inc++
time.Sleep(RetryInterval * time.Second)
logger.Infof("waiting for %d pods (found %d) with label %s in namespace %s", count, len(pods.Items), label, namespace)
logger.Infof("waiting for %d pods (found %d) with label %s in namespace %s", count, len(pods.Items), label, namespace)
time.Sleep(RetryInterval * time.Second)
}
return fmt.Errorf("Giving up waiting for pods with label %s in namespace %s", label, namespace)
}
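Most of the stability-related changes in the k8s helper follow this same shape: the manually incremented inc counter is replaced with a bounded for loop. A hedged sketch of that pattern, assuming it sits alongside the existing helpers so the package's RetryLoop and RetryInterval constants are in scope; the condition callback is hypothetical:

package utils

import (
	"fmt"
	"time"
)

// waitFor retries a hypothetical condition up to RetryLoop times, sleeping
// RetryInterval seconds between attempts, mirroring the refactored loops above.
func waitFor(condition func() bool) error {
	for i := 0; i < RetryLoop; i++ {
		if condition() {
			return nil
		}
		time.Sleep(RetryInterval * time.Second)
	}
	return fmt.Errorf("giving up after %d attempts", RetryLoop)
}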
......@@ -586,18 +581,15 @@ func (k8sh *K8sHelper) PrintEventsForNamespace(namespace string) {
// IsPodRunning returns true if a Pod is running status or goes to Running status within 90s else returns false
func (k8sh *K8sHelper) IsPodRunning(name string, namespace string) bool {
getOpts := metav1.GetOptions{}
inc := 0
for inc < RetryLoop {
for i := 0; i < 20; i++ {
pod, err := k8sh.Clientset.CoreV1().Pods(namespace).Get(name, getOpts)
if err == nil {
if pod.Status.Phase == "Running" {
return true
}
}
inc++
time.Sleep(RetryInterval * time.Second)
logger.Infof("waiting for pod %s in namespace %s to be running", name, namespace)
}
pod, _ := k8sh.Clientset.CoreV1().Pods(namespace).Get(name, getOpts)
k8sh.PrintPodDescribe(namespace, pod.Name)
......@@ -613,8 +605,8 @@ func (k8sh *K8sHelper) IsPodTerminated(name string, namespace string) bool {
// IsPodTerminatedWithOpts returns true if a Pod is terminated status or goes to Terminated status
// within 90s else returns false\
func (k8sh *K8sHelper) IsPodTerminatedWithOpts(name string, namespace string, getOpts metav1.GetOptions) bool {
inc := 0
for inc < RetryLoop {
for i := 0; i < RetryLoop; i++ {
pod, err := k8sh.Clientset.CoreV1().Pods(namespace).Get(name, getOpts)
if err != nil {
k8slogger.Infof("Pod %s in namespace %s terminated ", name, namespace)
......@@ -622,7 +614,7 @@ func (k8sh *K8sHelper) IsPodTerminatedWithOpts(name string, namespace string, ge
}
k8slogger.Infof("waiting for Pod %s in namespace %s to terminate, status : %+v", name, namespace, pod.Status)
time.Sleep(RetryInterval * time.Second)
inc++
}
k8slogger.Infof("Pod %s in namespace %s did not terminate", name, namespace)
return false
......@@ -631,8 +623,8 @@ func (k8sh *K8sHelper) IsPodTerminatedWithOpts(name string, namespace string, ge
// IsServiceUp returns true if a service is up or comes up within 150s, else returns false
func (k8sh *K8sHelper) IsServiceUp(name string, namespace string) bool {
getOpts := metav1.GetOptions{}
inc := 0
for inc < RetryLoop {
for i := 0; i < RetryLoop; i++ {
_, err := k8sh.Clientset.CoreV1().Services(namespace).Get(name, getOpts)
if err == nil {
k8slogger.Infof("Service: %s in namespace: %s is up", name, namespace)
......@@ -640,7 +632,7 @@ func (k8sh *K8sHelper) IsServiceUp(name string, namespace string) bool {
}
k8slogger.Infof("waiting for Service %s in namespace %s ", name, namespace)
time.Sleep(RetryInterval * time.Second)
inc++
}
k8slogger.Infof("Giving up waiting for service: %s in namespace %s ", name, namespace)
return false
......@@ -660,15 +652,15 @@ func (k8sh *K8sHelper) GetService(servicename string, namespace string) (*v1.Ser
func (k8sh *K8sHelper) IsCRDPresent(crdName string) bool {
cmdArgs := []string{"get", "crd", crdName}
inc := 0
for inc < RetryLoop {
for i := 0; i < RetryLoop; i++ {
_, err := k8sh.Kubectl(cmdArgs...)
if err == nil {
k8slogger.Infof("Found the CRD resource: " + crdName)
return true
}
time.Sleep(RetryInterval * time.Second)
inc++
}
return false
......@@ -798,8 +790,7 @@ func (k8sh *K8sHelper) waitForVolume(namespace, volumeName string, exist bool) e
action = "not " + action
}
inc := 0
for inc < RetryLoop {
for i := 0; i < 10; i++ {
isExist, err := k8sh.isVolumeExist(namespace, volumeName)
if err != nil {
return fmt.Errorf("Errors encountered while getting Volume %s/%s: %v", namespace, volumeName, err)
......@@ -810,8 +801,6 @@ func (k8sh *K8sHelper) waitForVolume(namespace, volumeName string, exist bool) e
k8slogger.Infof("waiting for Volume %s in namespace %s to %s", volumeName, namespace, action)
time.Sleep(RetryInterval * time.Second)
inc++
}
k8sh.printVolumes(namespace, volumeName)
......@@ -953,8 +942,8 @@ func (k8sh *K8sHelper) GetPodEvents(podNamePattern string, namespace string) (*v
// IsPodInError returns true if a Pod is in error status with the given reason and contains the given message
func (k8sh *K8sHelper) IsPodInError(podNamePattern, namespace, reason, containingMessage string) bool {
inc := 0
for inc < RetryLoop {
for i := 0; i < RetryLoop; i++ {
events, err := k8sh.GetPodEvents(podNamePattern, namespace)
if err != nil {
k8slogger.Errorf("Cannot get Pod events for %s in namespace %s: %+v ", podNamePattern, namespace, err)
......@@ -968,7 +957,6 @@ func (k8sh *K8sHelper) IsPodInError(podNamePattern, namespace, reason, containin
}
k8slogger.Infof("waiting for Pod %s in namespace %s to error with reason %s and containing the message: %s", podNamePattern, namespace, reason, containingMessage)
time.Sleep(RetryInterval * time.Second)
inc++
}
k8slogger.Infof("Pod %s in namespace %s did not error with reason %s", podNamePattern, namespace, reason)
......@@ -1035,8 +1023,8 @@ func (k8sh *K8sHelper) CheckPvcCountAndStatus(podName string, namespace string,
pvcCountCheck := false
actualPvcCount := 0
inc := 0
for inc < RetryLoop {
for i := 0; i < RetryLoop; i++ {
pvcList, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).List(listOpts)
if err != nil {
logger.Errorf("Cannot get pvc for app : %v in namespace %v, err: %v", podName, namespace, err)
......@@ -1047,7 +1035,7 @@ func (k8sh *K8sHelper) CheckPvcCountAndStatus(podName string, namespace string,
pvcCountCheck = true
break
}
inc++
time.Sleep(RetryInterval * time.Second)
}
......@@ -1056,8 +1044,7 @@ func (k8sh *K8sHelper) CheckPvcCountAndStatus(podName string, namespace string,
return false
}
inc = 0
for inc < RetryLoop {
for i := 0; i < RetryLoop; i++ {
checkAllPVCsStatus := true
pl, _ := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).List(listOpts)
for _, pvc := range pl.Items {
......@@ -1069,7 +1056,7 @@ func (k8sh *K8sHelper) CheckPvcCountAndStatus(podName string, namespace string,
if checkAllPVCsStatus {
return true
}
inc++
time.Sleep(RetryInterval * time.Second)
}
......@@ -1130,8 +1117,8 @@ func (k8sh *K8sHelper) GetPV(name string) (*v1.PersistentVolume, error) {
// If the pod is in expected state within 90s true is returned, if not false
func (k8sh *K8sHelper) IsPodInExpectedState(podNamePattern string, namespace string, state string) bool {
listOpts := metav1.ListOptions{LabelSelector: "app=" + podNamePattern}
inc := 0
for inc < RetryLoop {
for i := 0; i < RetryLoop; i++ {
podList, err := k8sh.Clientset.CoreV1().Pods(namespace).List(listOpts)
if err == nil {
if len(podList.Items) >= 1 {
......@@ -1140,7 +1127,7 @@ func (k8sh *K8sHelper) IsPodInExpectedState(podNamePattern string, namespace str
}
}
}
inc++
time.Sleep(RetryInterval * time.Second)
}
......@@ -1152,8 +1139,8 @@ func (k8sh *K8sHelper) CheckPodCountAndState(podName string, namespace string, m
listOpts := metav1.ListOptions{LabelSelector: "app=" + podName}
podCountCheck := false
actualPodCount := 0
inc := 0
for inc < RetryLoop {
for i := 0; i < RetryLoop; i++ {
podList, err := k8sh.Clientset.CoreV1().Pods(namespace).List(listOpts)
if err != nil {
logger.Errorf("Cannot list pods for app=%s in namespace %s, err: %+v", podName, namespace, err)
......@@ -1166,7 +1153,6 @@ func (k8sh *K8sHelper) CheckPodCountAndState(podName string, namespace string, m
break
}
inc++
logger.Infof("waiting for %d pods with label app=%s, found %d", minExpected, podName, actualPodCount)
time.Sleep(RetryInterval * time.Second)
}
......@@ -1199,14 +1185,13 @@ func (k8sh *K8sHelper) CheckPodCountAndState(podName string, namespace string, m
// WaitUntilPodInNamespaceIsDeleted waits for 90s for a pod in a namespace to be terminated
// If the pod disappears within 90s true is returned, if not false
func (k8sh *K8sHelper) WaitUntilPodInNamespaceIsDeleted(podNamePattern string, namespace string) bool {
inc := 0
for inc < RetryLoop {
for i := 0; i < RetryLoop; i++ {
out, _ := k8sh.GetResource("-n", namespace, "pods", "-l", "app="+podNamePattern)
if !strings.Contains(out, podNamePattern) {
return true
}
inc++
time.Sleep(RetryInterval * time.Second)
}
logger.Infof("Pod %s in namespace %s not deleted", podNamePattern, namespace)
......@@ -1216,14 +1201,13 @@ func (k8sh *K8sHelper) WaitUntilPodInNamespaceIsDeleted(podNamePattern string, n
// WaitUntilPodIsDeleted waits for 90s for a pod to be terminated
// If the pod disappears within 90s true is returned, if not false
func (k8sh *K8sHelper) WaitUntilPodIsDeleted(name, namespace string) bool {
inc := 0
for inc < RetryLoop {
for i := 0; i < RetryLoop; i++ {
_, err := k8sh.Clientset.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return true
}
inc++
logger.Infof("pod %s in namespace %s is not deleted yet", name, namespace)
time.Sleep(RetryInterval * time.Second)
}
......@@ -1234,8 +1218,7 @@ func (k8sh *K8sHelper) WaitUntilPodIsDeleted(name, namespace string) bool {
// if PVC goes to Bound state within 90s True is returned, if not false
func (k8sh *K8sHelper) WaitUntilPVCIsBound(namespace string, pvcname string) bool {
inc := 0
for inc < RetryLoop {
for i := 0; i < RetryLoop; i++ {
out, err := k8sh.GetPVCStatus(namespace, pvcname)
if err == nil {
if out == v1.PersistentVolumeClaimPhase(v1.ClaimBound) {
......@@ -1244,7 +1227,7 @@ func (k8sh *K8sHelper) WaitUntilPVCIsBound(namespace string, pvcname string) boo
}
}
logger.Infof("waiting for PVC %s to be bound. current=%s. err=%+v", pvcname, out, err)
inc++
time.Sleep(RetryInterval * time.Second)
}
return false
......@@ -1253,8 +1236,8 @@ func (k8sh *K8sHelper) WaitUntilPVCIsBound(namespace string, pvcname string) boo
// WaitUntilPVCIsExpanded waits for a PVC to be resized for specified value
func (k8sh *K8sHelper) WaitUntilPVCIsExpanded(namespace, pvcname, size string) bool {
getOpts := metav1.GetOptions{}
inc := 0
for inc < RetryLoop {
for i := 0; i < RetryLoop; i++ {
// PVC specs changes immediately, but status will change only if resize process is successfully completed.
pvc, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).Get(pvcname, getOpts)
if err == nil {
......@@ -1267,7 +1250,7 @@ func (k8sh *K8sHelper) WaitUntilPVCIsExpanded(namespace, pvcname, size string) b
} else {
logger.Infof("error while getting PVC specs: %+v", err)
}
inc++
time.Sleep(RetryInterval * time.Second)
}
return false
......@@ -1275,14 +1258,14 @@ func (k8sh *K8sHelper) WaitUntilPVCIsExpanded(namespace, pvcname, size string) b
func (k8sh *K8sHelper) WaitUntilPVCIsDeleted(namespace string, pvcname string) bool {
getOpts := metav1.GetOptions{}
inc := 0
for inc < RetryLoop {
for i := 0; i < RetryLoop; i++ {
_, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).Get(pvcname, getOpts)
if err != nil {
return true
}
logger.Infof("waiting for PVC %s to be deleted.", pvcname)
inc++
time.Sleep(RetryInterval * time.Second)
}
return false
......@@ -1297,8 +1280,8 @@ func (k8sh *K8sHelper) DeletePvcWithLabel(namespace string, podName string) bool
logger.Errorf("cannot deleted PVCs for pods with label app=%s", podName)
return false
}
inc := 0
for inc < RetryLoop {
for i := 0; i < RetryLoop; i++ {
pvcs, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).List(listOpts)
if err == nil {
if len(pvcs.Items) == 0 {
......@@ -1306,7 +1289,7 @@ func (k8sh *K8sHelper) DeletePvcWithLabel(namespace string, podName string) bool
}
}
logger.Infof("waiting for PVCs for pods with label=%s to be deleted.", podName)
inc++
time.Sleep(RetryInterval * time.Second)
}
return false
......@@ -1316,14 +1299,14 @@ func (k8sh *K8sHelper) DeletePvcWithLabel(namespace string, podName string) bool
// If namespace is deleted True is returned, if not false.
func (k8sh *K8sHelper) WaitUntilNameSpaceIsDeleted(namespace string) bool {
getOpts := metav1.GetOptions{}
inc := 0
for inc < RetryLoop {
for i := 0; i < RetryLoop; i++ {
ns, err := k8sh.Clientset.CoreV1().Namespaces().Get(namespace, getOpts)
if err != nil {
return true
}
logger.Infof("Namespace %s %v", namespace, ns.Status.Phase)
inc++
time.Sleep(RetryInterval * time.Second)
}
......@@ -1565,14 +1548,14 @@ func (k8sh *K8sHelper) CreateAnonSystemClusterBinding() {
}
logger.Infof("anon-user-access creation completed, waiting for it to exist in API")
inc := 0
for inc < RetryLoop {
for i := 0; i < RetryLoop; i++ {
var err error
if _, err = k8sh.Clientset.RbacV1beta1().ClusterRoleBindings().Get("anon-user-access", metav1.GetOptions{}); err == nil {
break
}
logger.Warningf("failed to get anon-user-access clusterrolebinding, will try again: %+v", err)
inc++
time.Sleep(RetryInterval * time.Second)
}
}
......
......@@ -51,15 +51,12 @@ func (h *MySQLHelper) CloseConnection() {
// PingSuccess function is used check connection to a database
func (h *MySQLHelper) PingSuccess() bool {
inc := 0
for inc < 30 {
for i := 0; i < 30; i++ {
err := h.DB.Ping()
if err == nil {
return true
}
inc++
time.Sleep(3 * time.Second)
}
......
......@@ -40,7 +40,7 @@ const (
// These versions are for running a minimal test suite for more efficient tests across different versions of K8s
// instead of running all suites on all versions
// To run on multiple versions, add a comma separate list such as 1.16.0,1.17.0
blockMinimalTestVersion = "1.13.0"
flexDriverMinimalTestVersion = "1.13.0"
multiClusterMinimalTestVersion = "1.14.0"
helmMinimalTestVersion = "1.15.0"
upgradeMinimalTestVersion = "1.16.0"
......
......@@ -42,7 +42,11 @@ const (
// Smoke Test for File System Storage - Test check the following operations on Filesystem Storage in order
// Create,Mount,Write,Read,Unmount and Delete.
func runFileE2ETest(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, namespace string, filesystemName string) {
func runFileE2ETest(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, namespace, filesystemName string, useCSI bool) {
if useCSI {
checkSkipCSITest(s, k8sh)
}
defer fileTestDataCleanUp(helper, k8sh, s, filePodName, namespace, filesystemName)
logger.Infof("Running on Rook Cluster %s", namespace)
logger.Infof("File Storage End To End Integration Test - create, mount, write to, read from, and unmount")
......@@ -50,30 +54,38 @@ func runFileE2ETest(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.S
createFilesystem(helper, k8sh, s, namespace, filesystemName, activeCount)
// Create a test pod where CephFS is consumed without user creds
createFilesystemConsumerPod(helper, k8sh, s, namespace, filesystemName)
// Create a test pod where CephFS is consumed with a mountUser and mountSecret specified.
createFilesystemMountCephCredentials(helper, k8sh, s, namespace, filesystemName)
createFilesystemMountUserConsumerPod(helper, k8sh, s, namespace, filesystemName)
storageClassName := "cephfs-storageclass"
err := helper.FSClient.CreateStorageClass(filesystemName, namespace, storageClassName)
assert.NoError(s.T(), err)
createFilesystemConsumerPod(helper, k8sh, s, namespace, filesystemName, storageClassName, useCSI)
// Test reading and writing to the first pod
err := writeAndReadToFilesystem(helper, k8sh, s, namespace, filePodName, "test_file")
err = writeAndReadToFilesystem(helper, k8sh, s, namespace, filePodName, "test_file")
assert.NoError(s.T(), err)
// Test reading and writing to the second pod
err = writeAndReadToFilesystem(helper, k8sh, s, namespace, fileMountUserPodName, "canttouchthis")
assert.Error(s.T(), err, "we should not be able to write to file canttouchthis on CephFS `/`")
err = writeAndReadToFilesystem(helper, k8sh, s, namespace, fileMountUserPodName, "foo/test_file")
assert.NoError(s.T(), err, "we should be able to write to the `/foo` directory on CephFS")
// TODO: Also mount with user credentials with the CSI driver
if !useCSI {
// Create a test pod where CephFS is consumed with a mountUser and mountSecret specified.
createFilesystemMountCephCredentials(helper, k8sh, s, namespace, filesystemName)
createFilesystemMountUserConsumerPod(helper, k8sh, s, namespace, filesystemName, storageClassName)
// Test reading and writing to the second pod
err = writeAndReadToFilesystem(helper, k8sh, s, namespace, fileMountUserPodName, "canttouchthis")
assert.Error(s.T(), err, "we should not be able to write to file canttouchthis on CephFS `/`")
err = writeAndReadToFilesystem(helper, k8sh, s, namespace, fileMountUserPodName, "foo/test_file")
assert.NoError(s.T(), err, "we should be able to write to the `/foo` directory on CephFS")
cleanupFilesystemConsumer(helper, k8sh, s, namespace, fileMountUserPodName)
}
// Start the NFS daemons
testNFSDaemons(helper, k8sh, s, namespace, filesystemName)
// Cleanup the filesystem and its clients
cleanupFilesystemConsumer(k8sh, s, namespace, filePodName)
cleanupFilesystemConsumer(k8sh, s, namespace, fileMountUserPodName)
cleanupFilesystemConsumer(helper, k8sh, s, namespace, filePodName)
downscaleMetadataServers(helper, k8sh, s, namespace, filesystemName)
cleanupFilesystem(helper, k8sh, s, namespace, filesystemName)
helper.BlockClient.DeleteStorageClass(storageClassName)
}
func testNFSDaemons(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, namespace string, filesystemName string) {
......@@ -85,8 +97,8 @@ func testNFSDaemons(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.S
assert.Nil(s.T(), err)
}
func createFilesystemConsumerPod(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, namespace string, filesystemName string) {
err := createPodWithFilesystem(k8sh, s, filePodName, namespace, filesystemName, false)
func createFilesystemConsumerPod(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, namespace, filesystemName, storageClassName string, useCSI bool) {
err := createPodWithFilesystem(k8sh, s, filePodName, namespace, filesystemName, storageClassName, false, useCSI)
require.NoError(s.T(), err)
filePodRunning := k8sh.IsPodRunning(filePodName, namespace)
require.True(s.T(), filePodRunning, "make sure file-test pod is in running state")
......@@ -109,7 +121,7 @@ func downscaleMetadataServers(helper *clients.TestClient, k8sh *utils.K8sHelper,
require.Nil(s.T(), err)
}
func cleanupFilesystemConsumer(k8sh *utils.K8sHelper, s suite.Suite, namespace string, podName string) {
func cleanupFilesystemConsumer(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, namespace string, podName string) {
logger.Infof("Delete file System consumer")
err := k8sh.DeletePod(namespace, podName)
assert.Nil(s.T(), err)
......@@ -117,6 +129,7 @@ func cleanupFilesystemConsumer(k8sh *utils.K8sHelper, s suite.Suite, namespace s
k8sh.PrintPodDescribe(namespace, podName)
assert.Fail(s.T(), fmt.Sprintf("make sure %s pod is terminated", podName))
}
helper.BlockClient.DeletePVC(namespace, podName)
logger.Infof("File system consumer deleted")
}
......@@ -154,16 +167,21 @@ func fileTestDataCleanUp(helper *clients.TestClient, k8sh *utils.K8sHelper, s su
helper.FSClient.Delete(filesystemName, namespace)
}
func createPodWithFilesystem(k8sh *utils.K8sHelper, s suite.Suite, podName, namespace, filesystemName string, mountUser bool) error {
driverName := installer.SystemNamespace(namespace)
testPodManifest := getFilesystemTestPod(podName, namespace, filesystemName, driverName, mountUser)
func createPodWithFilesystem(k8sh *utils.K8sHelper, s suite.Suite, podName, namespace, filesystemName, storageClassName string, mountUser, useCSI bool) error {
var testPodManifest string
if useCSI {
testPodManifest = getFilesystemCSITestPod(podName, namespace, storageClassName)
} else {
driverName := installer.SystemNamespace(namespace)
testPodManifest = getFilesystemFlexTestPod(podName, namespace, filesystemName, driverName, mountUser)
}
if err := k8sh.ResourceOperation("create", testPodManifest); err != nil {
return fmt.Errorf("failed to create pod -- %s. %+v", testPodManifest, err)
}
return nil
}
func getFilesystemTestPod(podName, namespace, filesystemName, driverName string, mountUser bool) string {
func getFilesystemFlexTestPod(podName, namespace, filesystemName, driverName string, mountUser bool) string {
mountUserInsert := ""
if mountUser {
mountUserInsert = `
......@@ -203,6 +221,49 @@ spec:
`
}
func getFilesystemCSITestPod(podName, namespace, storageClassName string) string {
claimName := podName
return `
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: ` + claimName + `
namespace: ` + namespace + `
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: ` + storageClassName + `
---
apiVersion: v1
kind: Pod
metadata:
name: ` + podName + `
namespace: ` + namespace + `
spec:
containers:
- name: ` + podName + `
image: busybox
command:
- sh
- "-c"
- "touch ` + utils.TestMountPath + `/csi.test && sleep 3600"
imagePullPolicy: IfNotPresent
env:
volumeMounts:
- mountPath: ` + utils.TestMountPath + `
name: csivol
volumes:
- name: csivol
persistentVolumeClaim:
claimName: ` + claimName + `
readOnly: false
restartPolicy: Never
`
}
func createFilesystemMountCephCredentials(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, namespace string, filesystemName string) {
// Create agent binding for access to Secrets
err := k8sh.ResourceOperation("apply", getFilesystemAgentMountSecretsBinding(namespace))
......@@ -247,8 +308,10 @@ func createFilesystemMountCephCredentials(helper *clients.TestClient, k8sh *util
logger.Info("Created Ceph credentials Secret in Kubernetes")
}
func createFilesystemMountUserConsumerPod(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, namespace string, filesystemName string) {
mtfsErr := createPodWithFilesystem(k8sh, s, fileMountUserPodName, namespace, filesystemName, true)
func createFilesystemMountUserConsumerPod(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, namespace, filesystemName, storageClassName string) {
// TODO: Mount with user credentials for the CSI driver
useCSI := false
mtfsErr := createPodWithFilesystem(k8sh, s, fileMountUserPodName, namespace, filesystemName, storageClassName, true, useCSI)
require.Nil(s.T(), mtfsErr)
filePodRunning := k8sh.IsPodRunning(fileMountUserPodName, namespace)
require.True(s.T(), filePodRunning, "make sure file-mountuser-test pod is in running state")
......
......@@ -30,15 +30,14 @@ import (
)
var (
userid = "rook-user"
userdisplayname = "A rook RGW user"
bucketname = "smokebkt"
objBody = "Test Rook Object Data"
objectKey = "rookObj1"
contentType = "plain/text"
storageClassName = "rook-smoke-delete-bucket"
obcName = "smoke-delete-bucket"
region = "us-east-1"
userid = "rook-user"
userdisplayname = "A rook RGW user"
bucketname = "smokebkt"
objBody = "Test Rook Object Data"
objectKey = "rookObj1"
contentType = "plain/text"
obcName = "smoke-delete-bucket"
region = "us-east-1"
)
// Smoke Test for ObjectStore - Test check the following operations on ObjectStore in order
......@@ -94,9 +93,10 @@ func runObjectE2ETest(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite
// Testing creation/deletion of objects using Object Bucket Claim
logger.Infof("Step 3 : Create Object Bucket Claim with reclaim policy delete")
cobErr := helper.BucketClient.CreateBucketStorageClass(namespace, storeName, storageClassName, "Delete", region)
bucketStorageClassName := "rook-smoke-delete-bucket"
cobErr := helper.BucketClient.CreateBucketStorageClass(namespace, storeName, bucketStorageClassName, "Delete", region)
require.Nil(s.T(), cobErr)
cobcErr := helper.BucketClient.CreateObc(obcName, storageClassName, bucketname, true)
cobcErr := helper.BucketClient.CreateObc(obcName, bucketStorageClassName, bucketname, true)
require.Nil(s.T(), cobcErr)
for i = 0; i < 4 && !helper.BucketClient.CheckOBC(obcName, "created"); i++ {
......@@ -144,7 +144,7 @@ func runObjectE2ETest(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite
logger.Infof("Object deleted on bucket successfully")
logger.Infof("Step 8 : Delete Object Bucket Claim")
dobcErr := helper.BucketClient.DeleteObc(obcName, storageClassName, bucketname, true)
dobcErr := helper.BucketClient.DeleteObc(obcName, bucketStorageClassName, bucketname, true)
require.Nil(s.T(), dobcErr)
logger.Infof("Checking to see if the obc, secret and cm have all been deleted")
for i = 0; i < 4 && !helper.BucketClient.CheckOBC(obcName, "deleted"); i++ {
......@@ -166,7 +166,7 @@ func runObjectE2ETest(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite
assert.NotEqual(s.T(), i, 4)
assert.Equal(s.T(), rgwErr, rgw.RGWErrorNotFound)
dobErr := helper.BucketClient.DeleteBucketStorageClass(namespace, storeName, storageClassName, "Delete", region)
dobErr := helper.BucketClient.DeleteBucketStorageClass(namespace, storeName, bucketStorageClassName, "Delete", region)
assert.Nil(s.T(), dobErr)
logger.Infof("Delete Object Bucket Claim successfully")
......
/*
Copyright 2019 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"time"
"github.com/stretchr/testify/require"
)
// Smoke Test for Client CRD
func (suite *SmokeSuite) TestCreateClient() {
logger.Infof("Create Client Smoke Test")
clientName := "client1"
caps := map[string]string{
"mon": "allow rwx",
"mgr": "allow rwx",
"osd": "allow rwx",
}
err := suite.helper.UserClient.Create(clientName, suite.namespace, caps)
require.Nil(suite.T(), err)
clientFound := false
for i := 0; i < 30; i++ {
clients, _ := suite.helper.UserClient.Get(suite.namespace, "client."+clientName)
if clients != "" {
clientFound = true
}
if clientFound {
break
}
logger.Infof("Waiting for client to appear")
time.Sleep(2 * time.Second)
}
require.Equal(suite.T(), true, clientFound, "client not found")
logger.Infof("Update Client Smoke Test")
newcaps := map[string]string{
"mon": "allow r",
"mgr": "allow rw",
"osd": "allow *",
}
caps, _ = suite.helper.UserClient.Update(suite.namespace, clientName, newcaps)
require.Equal(suite.T(), "allow r", caps["mon"], "wrong caps")
require.Equal(suite.T(), "allow rw", caps["mgr"], "wrong caps")
require.Equal(suite.T(), "allow *", caps["osd"], "wrong caps")
}
/*
Copyright 2019 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"testing"
"github.com/rook/rook/tests/framework/clients"
"github.com/rook/rook/tests/framework/installer"
"github.com/rook/rook/tests/framework/utils"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
csiRBDNodeSecret = "rook-csi-rbd-node"
csiRBDProvisionerSecret = "rook-csi-rbd-provisioner"
csiCephFSNodeSecret = "rook-csi-cephfs-node"
csiCephFSProvisionerSecret = "rook-csi-cephfs-provisioner"
csiSCRBD = "ceph-csi-rbd"
csiSCCephFS = "ceph-csi-cephfs"
csiPoolRBD = "csi-rbd"
csiPoolCephFS = "csi-cephfs"
csiTestRBDPodName = "csi-test-rbd"
csiTestCephFSPodName = "csi-test-cephfs"
)
func runCephCSIE2ETest(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, t *testing.T, namespace string) {
if !k8sh.VersionAtLeast("v1.13.0") {
logger.Info("Skipping csi tests as kube version is less than 1.13.0")
t.Skip()
}
logger.Info("test Ceph CSI driver")
createCephPools(helper, s, namespace)
createCSIStorageClass(k8sh, s, namespace)
createAndDeleteCSIRBDTestPod(k8sh, s, namespace)
createAndDeleteCSICephFSTestPod(k8sh, s, namespace)
//cleanup resources created
deleteCephPools(helper, namespace)
deleteCSIStorageClass(k8sh, namespace)
}
func createCephPools(helper *clients.TestClient, s suite.Suite, namespace string) {
err := helper.PoolClient.Create(csiPoolRBD, namespace, 1)
require.Nil(s.T(), err)
activeCount := 1
err = helper.FSClient.Create(csiPoolCephFS, namespace, activeCount)
require.Nil(s.T(), err)
}
func deleteCephPools(helper *clients.TestClient, namespace string) {
err := helper.PoolClient.Delete(csiPoolRBD, namespace)
if err != nil {
logger.Errorf("failed to delete rbd pool %s with error %v", csiPoolRBD, err)
}
err = helper.FSClient.Delete(csiPoolCephFS, namespace)
if err != nil {
logger.Errorf("failed to delete cephfs pool %s with error %v", csiPoolCephFS, err)
return
}
logger.Info("Deleted Ceph Pools")
}
func createCSIStorageClass(k8sh *utils.K8sHelper, s suite.Suite, namespace string) {
rbdSC := `
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: ` + csiSCRBD + `
provisioner: ` + installer.SystemNamespace(namespace) + `.rbd.csi.ceph.com
parameters:
pool: ` + csiPoolRBD + `
clusterID: ` + namespace + `
csi.storage.k8s.io/provisioner-secret-name: ` + csiRBDProvisionerSecret + `
csi.storage.k8s.io/provisioner-secret-namespace: ` + namespace + `
csi.storage.k8s.io/node-stage-secret-name: ` + csiRBDNodeSecret + `
csi.storage.k8s.io/node-stage-secret-namespace: ` + namespace + `
`
cephFSSC := `
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: ` + csiSCCephFS + `
provisioner: ` + installer.SystemNamespace(namespace) + `.cephfs.csi.ceph.com
parameters:
clusterID: ` + namespace + `
fsName: ` + csiPoolCephFS + `
pool: ` + csiPoolCephFS + `-data0
csi.storage.k8s.io/provisioner-secret-name: ` + csiCephFSProvisionerSecret + `
csi.storage.k8s.io/provisioner-secret-namespace: ` + namespace + `
csi.storage.k8s.io/node-stage-secret-name: ` + csiCephFSNodeSecret + `
csi.storage.k8s.io/node-stage-secret-namespace: ` + namespace + `
`
err := k8sh.ResourceOperation("apply", rbdSC)
require.Nil(s.T(), err)
err = k8sh.ResourceOperation("apply", cephFSSC)
require.Nil(s.T(), err)
}
func deleteCSIStorageClass(k8sh *utils.K8sHelper, namespace string) {
err := k8sh.Clientset.StorageV1().StorageClasses().Delete(csiSCRBD, &metav1.DeleteOptions{})
if err != nil {
logger.Errorf("failed to delete rbd storage class %s with error %v", csiSCRBD, err)
}
err = k8sh.Clientset.StorageV1().StorageClasses().Delete(csiSCCephFS, &metav1.DeleteOptions{})
if err != nil {
logger.Errorf("failed to delete cephfs storage class %s with error %v", csiSCCephFS, err)
return
}
logger.Info("Deleted rbd and cephfs storageclass")
}
func createAndDeleteCSIRBDTestPod(k8sh *utils.K8sHelper, s suite.Suite, namespace string) {
pod := `
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: rbd-pvc-csi
namespace: ` + namespace + `
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: ` + csiSCRBD + `
---
apiVersion: v1
kind: Pod
metadata:
name: ` + csiTestRBDPodName + `
namespace: ` + namespace + `
spec:
containers:
- name: ` + csiTestRBDPodName + `
image: busybox
command:
- sh
- "-c"
- "touch /test/csi.test && sleep 3600"
imagePullPolicy: IfNotPresent
env:
volumeMounts:
- mountPath: /test
name: csivol
volumes:
- name: csivol
persistentVolumeClaim:
claimName: rbd-pvc-csi
readOnly: false
restartPolicy: Never
`
err := k8sh.ResourceOperation("apply", pod)
require.Nil(s.T(), err)
isPodRunning := k8sh.IsPodRunning(csiTestRBDPodName, namespace)
if !isPodRunning {
k8sh.PrintPodDescribe(namespace, csiTestRBDPodName)
k8sh.PrintPodStatus(namespace)
}
// cleanup the pod and pvc
err = k8sh.ResourceOperation("delete", pod)
assert.NoError(s.T(), err)
assert.True(s.T(), isPodRunning, "csi rbd test pod fails to run")
}
func createAndDeleteCSICephFSTestPod(k8sh *utils.K8sHelper, s suite.Suite, namespace string) {
pod := `
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: cephfs-pvc-csi
namespace: ` + namespace + `
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: ` + csiSCCephFS + `
---
apiVersion: v1
kind: Pod
metadata:
name: ` + csiTestCephFSPodName + `
namespace: ` + namespace + `
spec:
containers:
- name: ` + csiTestCephFSPodName + `
image: busybox
command:
- sh
- "-c"
- "touch /test/csi.test && sleep 3600"
imagePullPolicy: IfNotPresent
env:
volumeMounts:
- mountPath: /test
name: csivol
volumes:
- name: csivol
persistentVolumeClaim:
claimName: cephfs-pvc-csi
readOnly: false
restartPolicy: Never
`
err := k8sh.ResourceOperation("apply", pod)
require.Nil(s.T(), err)
isPodRunning := k8sh.IsPodRunning(csiTestCephFSPodName, namespace)
if !isPodRunning {
k8sh.PrintPodDescribe(namespace, csiTestCephFSPodName)
k8sh.PrintPodStatus(namespace)
}
// cleanup the pod and pvc
err = k8sh.ResourceOperation("delete", pod)
assert.NoError(s.T(), err)
assert.True(s.T(), isPodRunning, "csi cephfs test pod fails to run")
}
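// The helpers above are meant to be composed into a single CSI smoke check: create the pool and
// filesystem, create the storage classes, exercise an RBD pod and a CephFS pod, then tear everything
// down. A minimal sketch of that flow (the wrapper name runCSISmokeSketch is hypothetical; only the
// helpers it calls are defined above):
func runCSISmokeSketch(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, namespace string) {
createCephPools(helper, s, namespace)            // CephBlockPool + CephFilesystem
createCSIStorageClass(k8sh, s, namespace)        // RBD and CephFS storage classes
createAndDeleteCSIRBDTestPod(k8sh, s, namespace) // RBD PVC mounted, written to, then deleted
createAndDeleteCSICephFSTestPod(k8sh, s, namespace)
deleteCephPools(helper, namespace)
deleteCSIStorageClass(k8sh, namespace)
}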
......@@ -17,6 +17,7 @@ limitations under the License.
package integration
import (
"strconv"
"testing"
"fmt"
......@@ -28,21 +29,12 @@ import (
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/version"
)
// ******************************************************
// *** Scenarios tested by the BlockMountUnMountSuite ***
// Setup
// - via the cluster CRD
// - set up Block PVC - With ReadWriteOnce
// - set up Block PVC - with ReadWriteMany
// - Mount Volume on a pod and write some data
// - UnMount Volume
// Monitors
// - one mon in the cluster
// OSDs
// - Bluestore running on directory
// Block Mount & Unmount scenarios - repeat for each PVC
// *** Scenarios tested by the TestCephFlexSuite ***
// Flex driver block scenarios - repeat for each PVC
// 1. ReadWriteOnce
// a. Mount Volume on a new pod - make sure persisted data is present and write new data
// b. Mount volume on two pods - mount should be successful only on the first pod
......@@ -54,25 +46,25 @@ import (
// b. Scale down pods
// c. Failover pods
// d. Delete StatefulSet
//
// Flex driver file system scenarios
// 1. Create a filesystem
// a. Mount a directory with the static mount
// ******************************************************
// NOTE: This suite needs to be last.
// There is an issue on k8s 1.7 where the CRD controller will frequently fail to create a cluster after this suite is run.
// The error is "the server does not allow this method on the requested resource (post cephclusters.ceph.rook.io)".
// Everything appears to have been cleaned up successfully in this test, so it is still unclear what is causing the issue between tests.
func TestCephBlockSuite(t *testing.T) {
func TestCephFlexSuite(t *testing.T) {
if installer.SkipTestSuite(installer.CephTestSuite) {
t.Skip()
}
s := new(CephBlockSuite)
defer func(s *CephBlockSuite) {
s := new(CephFlexDriverSuite)
defer func(s *CephFlexDriverSuite) {
HandlePanics(recover(), s.op, s.T)
}(s)
suite.Run(t, s)
}
type CephBlockSuite struct {
type CephFlexDriverSuite struct {
suite.Suite
testClient *clients.TestClient
bc *clients.BlockOperation
......@@ -84,24 +76,29 @@ type CephBlockSuite struct {
op *TestCluster
}
func (s *CephBlockSuite) SetupSuite() {
func (s *CephFlexDriverSuite) SetupSuite() {
s.namespace = "block-test-ns"
s.namespace = "flex-ns"
s.pvcNameRWO = "block-persistent-rwo"
s.pvcNameRWX = "block-persistent-rwx"
useHelm := false
mons := 1
rbdMirrorWorkers := 1
s.op, s.kh = StartTestCluster(s.T, blockMinimalTestVersion, s.namespace, "bluestore", useHelm, false, "", mons, rbdMirrorWorkers, installer.VersionMaster, installer.NautilusVersion)
s.op, s.kh = StartTestCluster(s.T, flexDriverMinimalTestVersion, s.namespace, "bluestore", useHelm, false, "", mons, rbdMirrorWorkers, installer.VersionMaster, installer.NautilusVersion)
s.testClient = clients.CreateTestClient(s.kh, s.op.installer.Manifests)
s.bc = s.testClient.BlockClient
}
func (s *CephBlockSuite) AfterTest(suiteName, testName string) {
func (s *CephFlexDriverSuite) AfterTest(suiteName, testName string) {
s.op.installer.CollectOperatorLog(suiteName, testName, installer.SystemNamespace(s.namespace))
}
func (s *CephBlockSuite) TestBlockStorageMountUnMountForStatefulSets() {
func (s *CephFlexDriverSuite) TestFileSystem() {
useCSI := false
runFileE2ETest(s.testClient, s.kh, s.Suite, s.namespace, "smoke-test-fs", useCSI)
}
func (s *CephFlexDriverSuite) TestBlockStorageMountUnMountForStatefulSets() {
poolName := "stspool"
storageClassName := "stssc"
reclaimPolicy := "Delete"
......@@ -112,7 +109,9 @@ func (s *CephBlockSuite) TestBlockStorageMountUnMountForStatefulSets() {
logger.Infof("Test case when block persistent volumes are scaled up and down along with StatefulSet")
logger.Info("Step 1: Create pool and storageClass")
err := s.testClient.PoolClient.CreateStorageClass(s.namespace, poolName, storageClassName, reclaimPolicy)
err := s.testClient.PoolClient.Create(poolName, s.namespace, 1)
assert.Nil(s.T(), err)
err = s.testClient.BlockClient.CreateStorageClass(false, poolName, storageClassName, reclaimPolicy, s.namespace)
assert.Nil(s.T(), err)
logger.Info("Step 2 : Deploy statefulSet with 1X replication")
service, statefulset := getBlockStatefulSetAndServiceDefinition(defaultNamespace, statefulSetName, statefulPodsName, storageClassName)
......@@ -148,7 +147,7 @@ func (s *CephBlockSuite) TestBlockStorageMountUnMountForStatefulSets() {
require.True(s.T(), s.kh.CheckPvcCountAndStatus(statefulSetName, defaultNamespace, 2, "Bound"))
}
func (s *CephBlockSuite) statefulSetDataCleanup(namespace, poolName, storageClassName, reclaimPolicy, statefulSetName, statefulPodsName string) {
func (s *CephFlexDriverSuite) statefulSetDataCleanup(namespace, poolName, storageClassName, reclaimPolicy, statefulSetName, statefulPodsName string) {
delOpts := metav1.DeleteOptions{}
listOpts := metav1.ListOptions{LabelSelector: "app=" + statefulSetName}
// Delete stateful set
......@@ -159,41 +158,37 @@ func (s *CephBlockSuite) statefulSetDataCleanup(namespace, poolName, storageClas
s.kh.DeletePvcWithLabel(defaultNamespace, statefulSetName)
// Delete storageclass and pool
s.testClient.PoolClient.DeletePool(s.testClient.BlockClient, s.namespace, poolName)
s.testClient.PoolClient.DeleteStorageClass(storageClassName)
s.testClient.BlockClient.DeleteStorageClass(storageClassName)
}
func (s *CephBlockSuite) setupPVCs() {
func (s *CephFlexDriverSuite) setupPVCs() {
logger.Infof("creating the test PVCs")
poolNameRWO := "block-pool-rwo"
storageClassNameRWO := "rook-ceph-block-rwo"
systemNamespace := installer.SystemNamespace(s.namespace)
// Create PVCs
cbErr := s.testClient.PoolClient.CreateStorageClassAndPvc(s.namespace, poolNameRWO, storageClassNameRWO, "Delete", s.pvcNameRWO, "ReadWriteOnce")
require.Nil(s.T(), cbErr)
useCSI := false
err := s.testClient.BlockClient.CreateStorageClassAndPVC(useCSI, defaultNamespace, s.namespace, systemNamespace, poolNameRWO, storageClassNameRWO, "Delete", s.pvcNameRWO, "ReadWriteOnce")
require.Nil(s.T(), err)
require.True(s.T(), s.kh.WaitUntilPVCIsBound(defaultNamespace, s.pvcNameRWO), "Make sure PVC is Bound")
cbErr2 := s.testClient.BlockClient.CreatePvc(s.pvcNameRWX, storageClassNameRWO, "ReadWriteMany", "1M")
require.Nil(s.T(), cbErr2)
err = s.testClient.BlockClient.CreatePVC(defaultNamespace, s.pvcNameRWX, storageClassNameRWO, "ReadWriteMany", "1M")
require.Nil(s.T(), err)
require.True(s.T(), s.kh.WaitUntilPVCIsBound(defaultNamespace, s.pvcNameRWX), "Make sure PVC is Bound")
// Mount PVC on a pod and write some data.
_, mtErr := s.bc.BlockMap(getBlockPodDefinition("setup-block-rwo", s.pvcNameRWO, false))
require.Nil(s.T(), mtErr)
err = s.bc.CreateClientPod(getFlexBlockPodDefinition("setup-block-rwo", s.pvcNameRWO, false))
require.Nil(s.T(), err)
crdName, err := s.kh.GetVolumeResourceName(defaultNamespace, s.pvcNameRWO)
require.Nil(s.T(), err)
rwoVolumePresent := s.kh.IsVolumeResourcePresent(installer.SystemNamespace(s.namespace), crdName)
if !rwoVolumePresent {
s.kh.PrintPodDescribe(defaultNamespace, "setup-block-rwo")
s.kh.PrintPodStatus(s.namespace)
s.kh.PrintPodStatus(installer.SystemNamespace(s.namespace))
}
require.True(s.T(), rwoVolumePresent, fmt.Sprintf("make sure rwo Volume %s is created", crdName))
s.kh.IsVolumeResourcePresent(systemNamespace, crdName)
_, mtErr1 := s.bc.BlockMap(getBlockPodDefinition("setup-block-rwx", s.pvcNameRWX, false))
require.Nil(s.T(), mtErr1)
crdName1, err1 := s.kh.GetVolumeResourceName(defaultNamespace, s.pvcNameRWX)
require.Nil(s.T(), err1)
require.True(s.T(), s.kh.IsVolumeResourcePresent(installer.SystemNamespace(s.namespace), crdName1), fmt.Sprintf("make sure rwx Volume %s is created", crdName))
err = s.bc.CreateClientPod(getFlexBlockPodDefinition("setup-block-rwx", s.pvcNameRWX, false))
require.Nil(s.T(), err)
crdName, err = s.kh.GetVolumeResourceName(defaultNamespace, s.pvcNameRWX)
require.Nil(s.T(), err)
s.kh.IsVolumeResourcePresent(systemNamespace, crdName)
require.True(s.T(), s.kh.IsPodRunning("setup-block-rwo", defaultNamespace), "make sure setup-block-rwo pod is in running state")
require.True(s.T(), s.kh.IsPodRunning("setup-block-rwx", defaultNamespace), "make sure setup-block-rwx pod is in running state")
......@@ -212,41 +207,41 @@ func (s *CephBlockSuite) setupPVCs() {
require.True(s.T(), s.kh.IsPodTerminated("setup-block-rwx", defaultNamespace), "make sure setup-block-rwx pod is terminated")
}
func (s *CephBlockSuite) TearDownSuite() {
func (s *CephFlexDriverSuite) TearDownSuite() {
logger.Infof("Cleaning up block storage")
s.kh.DeletePods(
"setup-block-rwo", "setup-block-rwx", "rwo-block-rw-one", "rwo-block-rw-two", "rwo-block-ro-one",
"rwo-block-ro-two", "rwx-block-rw-one", "rwx-block-rw-two", "rwx-block-ro-one", "rwx-block-ro-two")
s.testClient.PoolClient.DeletePvc(s.namespace, s.pvcNameRWO)
s.testClient.PoolClient.DeletePvc(s.namespace, s.pvcNameRWX)
s.testClient.PoolClient.DeleteStorageClass("rook-ceph-block-rwo")
s.testClient.PoolClient.DeleteStorageClass("rook-ceph-block-rwx")
s.testClient.BlockClient.DeletePVC(s.namespace, s.pvcNameRWO)
s.testClient.BlockClient.DeletePVC(s.namespace, s.pvcNameRWX)
s.testClient.BlockClient.DeleteStorageClass("rook-ceph-block-rwo")
s.testClient.BlockClient.DeleteStorageClass("rook-ceph-block-rwx")
s.testClient.PoolClient.DeletePool(s.testClient.BlockClient, s.namespace, "block-pool-rwo")
s.testClient.PoolClient.DeletePool(s.testClient.BlockClient, s.namespace, "block-pool-rwx")
s.op.Teardown()
}
func (s *CephBlockSuite) TestBlockStorageMountUnMountForDifferentAccessModes() {
func (s *CephFlexDriverSuite) TestBlockStorageMountUnMountForDifferentAccessModes() {
s.setupPVCs()
logger.Infof("Test case when existing RWO PVC is mounted and unmounted on pods with various accessModes")
logger.Infof("Step 1.1: Mount existing ReadWriteOnce and ReadWriteMany PVC on a Pod with RW access")
// mount PVC with RWO access on a pod with readonly set to false
_, err := s.bc.BlockMap(getBlockPodDefinition("rwo-block-rw-one", s.pvcNameRWO, false))
err := s.bc.CreateClientPod(getFlexBlockPodDefinition("rwo-block-rw-one", s.pvcNameRWO, false))
require.Nil(s.T(), err)
// mount PVC with RWX access on a pod with readonly set to false
_, err = s.bc.BlockMap(getBlockPodDefinition("rwx-block-rw-one", s.pvcNameRWX, false))
err = s.bc.CreateClientPod(getFlexBlockPodDefinition("rwx-block-rw-one", s.pvcNameRWX, false))
require.Nil(s.T(), err)
crdName1, err1 := s.kh.GetVolumeResourceName(defaultNamespace, s.pvcNameRWO)
crdName2, err2 := s.kh.GetVolumeResourceName(defaultNamespace, s.pvcNameRWX)
assert.Nil(s.T(), err1)
assert.Nil(s.T(), err2)
assert.True(s.T(), s.kh.IsVolumeResourcePresent(installer.SystemNamespace(s.namespace), crdName1), fmt.Sprintf("make sure Volume %s is created", crdName1))
crdName, err := s.kh.GetVolumeResourceName(defaultNamespace, s.pvcNameRWO)
assert.Nil(s.T(), err)
assert.True(s.T(), s.kh.IsVolumeResourcePresent(installer.SystemNamespace(s.namespace), crdName), fmt.Sprintf("make sure Volume %s is created", crdName))
assert.True(s.T(), s.kh.IsPodRunning("rwo-block-rw-one", defaultNamespace), "make sure block-rw-one pod is in running state")
assert.True(s.T(), s.kh.IsVolumeResourcePresent(installer.SystemNamespace(s.namespace), crdName2), fmt.Sprintf("make sure Volume %s is created", crdName2))
crdName, err = s.kh.GetVolumeResourceName(defaultNamespace, s.pvcNameRWX)
assert.Nil(s.T(), err)
assert.True(s.T(), s.kh.IsVolumeResourcePresent(installer.SystemNamespace(s.namespace), crdName), fmt.Sprintf("make sure Volume %s is created", crdName))
assert.True(s.T(), s.kh.IsPodRunning("rwx-block-rw-one", defaultNamespace), "make sure rwx-block-rw-one pod is in running state")
logger.Infof("Step 2: Check if previously persisted data is readable from ReadWriteOnce and ReadWriteMany PVC")
......@@ -278,10 +273,10 @@ func (s *CephBlockSuite) TestBlockStorageMountUnMountForDifferentAccessModes() {
// Mount another Pod with RW access on same PVC
logger.Infof("Step 4: Mount existing ReadWriteOnce and ReadWriteMany PVC on a new Pod with RW access")
// Mount RWO PVC on a new pod with ReadOnly set to false
_, err = s.bc.BlockMap(getBlockPodDefinition("rwo-block-rw-two", s.pvcNameRWO, false))
err = s.bc.CreateClientPod(getFlexBlockPodDefinition("rwo-block-rw-two", s.pvcNameRWO, false))
assert.Nil(s.T(), err)
// Mount RWX PVC on a new pod with ReadOnly set to false
_, err = s.bc.BlockMap(getBlockPodDefinition("rwx-block-rw-two", s.pvcNameRWX, false))
err = s.bc.CreateClientPod(getFlexBlockPodDefinition("rwx-block-rw-two", s.pvcNameRWX, false))
assert.Nil(s.T(), err)
assert.True(s.T(), s.kh.IsPodInError("rwo-block-rw-two", defaultNamespace, "FailedMount", "Volume is already attached by pod"), "make sure rwo-block-rw-two pod errors out while mounting the volume")
assert.True(s.T(), s.kh.IsPodInError("rwx-block-rw-two", defaultNamespace, "FailedMount", "Volume is already attached by pod"), "make sure rwx-block-rw-two pod errors out while mounting the volume")
......@@ -292,10 +287,10 @@ func (s *CephBlockSuite) TestBlockStorageMountUnMountForDifferentAccessModes() {
logger.Infof("Step 5: Mount existing ReadWriteOnce and ReadWriteMany PVC on a new Pod with RO access")
// Mount RWO PVC on a new pod with ReadOnly set to true
_, err = s.bc.BlockMap(getBlockPodDefinition("rwo-block-ro-one", s.pvcNameRWO, true))
err = s.bc.CreateClientPod(getFlexBlockPodDefinition("rwo-block-ro-one", s.pvcNameRWO, true))
assert.Nil(s.T(), err)
// Mount RWX PVC on a new pod with ReadOnly set to true
_, err = s.bc.BlockMap(getBlockPodDefinition("rwx-block-ro-one", s.pvcNameRWX, true))
err = s.bc.CreateClientPod(getFlexBlockPodDefinition("rwx-block-ro-one", s.pvcNameRWX, true))
assert.Nil(s.T(), err)
assert.True(s.T(), s.kh.IsPodInError("rwo-block-ro-one", defaultNamespace, "FailedMount", "Volume is already attached by pod"), "make sure rwo-block-ro-one pod errors out while mounting the volume")
assert.True(s.T(), s.kh.IsPodInError("rwx-block-ro-one", defaultNamespace, "FailedMount", "Volume is already attached by pod"), "make sure rwx-block-ro-one pod errors out while mounting the volume")
......@@ -311,16 +306,18 @@ func (s *CephBlockSuite) TestBlockStorageMountUnMountForDifferentAccessModes() {
assert.True(s.T(), s.kh.IsPodTerminated("rwx-block-rw-one", defaultNamespace), "make sure rwx-lock-rw-one pod is terminated")
logger.Infof("Step 7: Mount ReadWriteOnce and ReadWriteMany PVC on two different pods with ReadOnlyMany with Readonly Access")
// Mount RWO PVC on 2 pods with ReadOnly set to True
_, err1 = s.bc.BlockMap(getBlockPodDefinition("rwo-block-ro-one", s.pvcNameRWO, true))
_, err2 = s.bc.BlockMap(getBlockPodDefinition("rwo-block-ro-two", s.pvcNameRWO, true))
assert.Nil(s.T(), err1)
assert.Nil(s.T(), err2)
err = s.bc.CreateClientPod(getFlexBlockPodDefinition("rwo-block-ro-one", s.pvcNameRWO, true))
assert.Nil(s.T(), err)
err = s.bc.CreateClientPod(getFlexBlockPodDefinition("rwo-block-ro-two", s.pvcNameRWO, true))
assert.Nil(s.T(), err)
// Mount RWX PVC on 2 pods with ReadOnly set to True
_, err1 = s.bc.BlockMap(getBlockPodDefinition("rwx-block-ro-one", s.pvcNameRWX, true))
_, err2 = s.bc.BlockMap(getBlockPodDefinition("rwx-block-ro-two", s.pvcNameRWX, true))
assert.Nil(s.T(), err1)
assert.Nil(s.T(), err2)
err = s.bc.CreateClientPod(getFlexBlockPodDefinition("rwx-block-ro-one", s.pvcNameRWX, true))
assert.Nil(s.T(), err)
err = s.bc.CreateClientPod(getFlexBlockPodDefinition("rwx-block-ro-two", s.pvcNameRWX, true))
assert.Nil(s.T(), err)
assert.True(s.T(), s.kh.IsPodRunning("rwo-block-ro-one", defaultNamespace), "make sure rwo-block-ro-one pod is in running state")
assert.True(s.T(), s.kh.IsPodRunning("rwo-block-ro-two", defaultNamespace), "make sure rwo-block-ro-two pod is in running state")
assert.True(s.T(), s.kh.IsPodRunning("rwx-block-ro-one", defaultNamespace), "make sure rwx-block-ro-one pod is in running state")
......@@ -354,4 +351,46 @@ func (s *CephBlockSuite) TestBlockStorageMountUnMountForDifferentAccessModes() {
assert.True(s.T(), s.kh.IsPodTerminated("rwo-block-ro-two", defaultNamespace), "make sure rwo-block-ro-two pod is terminated")
assert.True(s.T(), s.kh.IsPodTerminated("rwx-block-ro-one", defaultNamespace), "make sure rwx-lock-ro-one pod is terminated")
assert.True(s.T(), s.kh.IsPodTerminated("rwx-block-ro-two", defaultNamespace), "make sure rwx-block-ro-two pod is terminated")
// Test volume expansion
v := version.MustParseSemantic(s.kh.GetK8sServerVersion())
if v.AtLeast(version.MustParseSemantic("1.15.0")) {
logger.Infof("additional step: Expand block storage")
// Expanding the image by applying new PVC specs
err := s.testClient.BlockClient.CreatePVC(defaultNamespace, s.pvcNameRWO, "rook-ceph-block-rwo", "ReadWriteOnce", "2M")
require.Nil(s.T(), err)
// Once the pod using the volume is terminated and recreated, the filesystem is expanded and the size of the PVC is increased (see the StorageClass note after this test).
expandedPodName := "setup-block-rwo"
err = s.kh.DeletePod(defaultNamespace, expandedPodName)
require.Nil(s.T(), err)
err = s.bc.CreateClientPod(getFlexBlockPodDefinition(expandedPodName, s.pvcNameRWO, false))
require.Nil(s.T(), err)
require.True(s.T(), s.kh.IsPodRunning(expandedPodName, defaultNamespace), "Make sure new pod is running")
require.True(s.T(), s.kh.WaitUntilPVCIsExpanded(defaultNamespace, s.pvcNameRWO, "2M"), "Make sure PVC is expanded")
logger.Infof("Block Storage successfully expanded")
}
}
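// The expansion step above only succeeds if the StorageClass created in setupPVCs allows resizing:
// Kubernetes rejects a PVC size increase unless the StorageClass sets allowVolumeExpansion: true.
// GetBlockStorageClassDef is not shown in this hunk, so the sketch below is illustrative only; the
// provisioner and parameter keys are assumptions, not the generator's exact output:
//
// apiVersion: storage.k8s.io/v1
// kind: StorageClass
// metadata:
//   name: rook-ceph-block-rwo
// provisioner: ceph.rook.io/block
// allowVolumeExpansion: true
// parameters:
//   blockPool: block-pool-rwo
//   clusterNamespace: flex-ns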
func getFlexBlockPodDefinition(podName, blockName string, readOnly bool) string {
return `apiVersion: v1
kind: Pod
metadata:
name: ` + podName + `
spec:
containers:
- image: busybox
name: block-test1
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
volumeMounts:
- name: block-persistent-storage
mountPath: ` + utils.TestMountPath + `
volumes:
- name: block-persistent-storage
persistentVolumeClaim:
claimName: ` + blockName + `
readOnly: ` + strconv.FormatBool(readOnly) + `
restartPolicy: Never`
}
......@@ -82,19 +82,17 @@ func (hs *HelmSuite) AfterTest(suiteName, testName string) {
}
// Test to make sure all rook components are installed and Running
func (hs *HelmSuite) TestRookInstallViaHelm() {
func (hs *HelmSuite) TestARookInstallViaHelm() {
checkIfRookClusterIsInstalled(hs.Suite, hs.kh, hs.namespace, hs.namespace, 1)
}
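// Note: testify's suite runner discovers Test* methods via reflection, which returns them in lexical
// order, so prefixing a name (the "A" above, or the "Z" previously used for the file store test) is a
// lightweight way to pin a test to the start or end of the suite. A minimal sketch with hypothetical
// method names:
//
// func (hs *HelmSuite) TestAInstallRunsFirst() {}
// func (hs *HelmSuite) TestZCleanupRunsLast()  {}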
// Test BlockCreation on Rook that was installed via Helm
func (hs *HelmSuite) TestBlockStoreOnRookInstalledViaHelm() {
runBlockE2ETestLite(hs.helper, hs.kh, hs.Suite, hs.namespace, hs.op.installer.CephVersion)
runBlockCSITestLite(hs.helper, hs.kh, hs.Suite, hs.namespace, hs.namespace, hs.op.installer.CephVersion)
}
// Test File System Creation on Rook that was installed via helm
// The test func name has `Z` in its name so it runs as the last test; this is
// needed because there were issues where the operator "disappeared".
func (hs *HelmSuite) TestZFileStoreOnRookInstalledViaHelm() {
func (hs *HelmSuite) TestFileStoreOnRookInstalledViaHelm() {
runFileE2ETestLite(hs.helper, hs.kh, hs.Suite, hs.namespace, "testfs")
}
......
/*
Copyright 2016 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"strings"
"time"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
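// getNonCanaryMonDeployments lists the mon deployments in the cluster namespace and filters out the
// short-lived "-canary" deployments that the operator uses for mon scheduling, so the failover checks
// below only count the real monitors.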
func (suite *SmokeSuite) getNonCanaryMonDeployments() ([]appsv1.Deployment, error) {
opts := metav1.ListOptions{LabelSelector: "app=rook-ceph-mon"}
deployments, err := suite.k8sh.Clientset.AppsV1().Deployments(suite.namespace).List(opts)
if err != nil {
return nil, err
}
nonCanaryMonDeployments := []appsv1.Deployment{}
for _, deployment := range deployments.Items {
if !strings.HasSuffix(deployment.GetName(), "-canary") {
nonCanaryMonDeployments = append(nonCanaryMonDeployments, deployment)
}
}
return nonCanaryMonDeployments, nil
}
// Smoke Test for Mon failover - the test performs the following operations for the mon failover in order:
// delete a mon deployment, then wait for a new mon deployment to be created
func (suite *SmokeSuite) TestMonFailover() {
logger.Infof("Mon Failover Smoke Test")
deployments, err := suite.getNonCanaryMonDeployments()
require.Nil(suite.T(), err)
require.Equal(suite.T(), 3, len(deployments))
monToKill := deployments[0].Name
logger.Infof("Killing mon %s", monToKill)
propagation := metav1.DeletePropagationForeground
delOptions := &metav1.DeleteOptions{PropagationPolicy: &propagation}
err = suite.k8sh.Clientset.AppsV1().Deployments(suite.namespace).Delete(monToKill, delOptions)
require.Nil(suite.T(), err)
// Wait for the health check to start a new monitor
originalMonDeleted := false
for i := 0; i < 30; i++ {
deployments, err := suite.getNonCanaryMonDeployments()
require.Nil(suite.T(), err)
// Make sure the old mon is not still alive
foundOldMon := false
for _, mon := range deployments {
if mon.Name == monToKill {
foundOldMon = true
}
}
// Check if we have three monitors
if foundOldMon {
if originalMonDeleted {
// Depending on the state of the orchestration, the operator might trigger
// re-creation of the deleted mon. In this case, consider the test successful
// rather than wait for the failover which will never occur.
logger.Infof("Original mon created again, no need to wait for mon failover")
return
}
logger.Infof("Waiting for old monitor to stop")
} else {
logger.Infof("Waiting for a new monitor to start")
originalMonDeleted = true
if len(deployments) == 3 {
var newMons []string
for _, mon := range deployments {
newMons = append(newMons, mon.Name)
}
logger.Infof("Found a new monitor! monitors=%v", newMons)
return
}
assert.Equal(suite.T(), 2, len(deployments))
}
time.Sleep(5 * time.Second)
}
require.Fail(suite.T(), "giving up waiting for a new monitor")
}
......@@ -172,7 +172,7 @@ func (o MCTestOperations) Teardown() {
func (o MCTestOperations) startCluster(namespace, store string) error {
logger.Infof("starting cluster %s", namespace)
err := o.installer.CreateK8sRookClusterWithHostPathAndDevicesOrPVC(namespace, o.systemNamespace, store, o.testOverPVC, o.storageClassName,
cephv1.MonSpec{Count: 3, AllowMultiplePerNode: true}, true, 1, installer.NautilusVersion)
cephv1.MonSpec{Count: 1, AllowMultiplePerNode: true}, true, 1, installer.NautilusVersion)
if err != nil {
o.T().Fail()
o.installer.GatherAllRookLogs(o.T().Name(), namespace, o.systemNamespace)
......