Unverified Commit c12aa6c7 authored by Blaine Gardner

ceph rbd-mirror: configure entirely in operator


Following the pattern already used for mon, mgr, mds, and rgw, convert the
rbd-mirror daemon to be configured entirely from the operator. This is a
straightforward conversion with one functional addition: the rbd-mirror
daemon stores *no* data and has no default data dir, so the concept of a
`Dataless` daemon is introduced here.
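
To make the new concept concrete, here is a minimal, self-contained Go sketch
(not the Rook package itself) of the three kinds of data-path mapping the
operator now distinguishes. Field and helper names mirror the `DataPathMap`
changes in the diff below; the host path used in the stateful case is purely
illustrative.

```go
// Simplified sketch of the operator's DataPathMap concept; not the actual
// Rook types. Field names follow the DataPathMap struct in the diff below.
package main

import (
	"fmt"
	"path"
)

// DataPathMap describes where a daemon keeps its data, if anywhere.
type DataPathMap struct {
	PersistData      bool   // persist the container data dir to the host
	HostDataDir      string // host path backing the data (stateful daemons only)
	NoData           bool   // the daemon stores no data at all ("dataless")
	ContainerDataDir string // data dir inside the container
}

// cephDataDir mirrors Ceph's default layout: /var/lib/ceph/<type>/ceph-<id>.
func cephDataDir(daemonType, daemonID string) string {
	return path.Join("/var/lib/ceph", daemonType, "ceph-"+daemonID)
}

func main() {
	// stateful (e.g. mon "a"): the data dir is persisted under dataDirHostPath
	// ("/var/lib/rook/mon-a/data" is an illustrative host path)
	stateful := DataPathMap{
		PersistData:      true,
		HostDataDir:      "/var/lib/rook/mon-a/data",
		ContainerDataDir: cephDataDir("mon", "a"),
	}
	// stateless (e.g. an rgw): a data dir exists in the container but is ephemeral
	stateless := DataPathMap{ContainerDataDir: cephDataDir("rgw", "my-store")}
	// dataless (rbd-mirror): no data dir anywhere, so no data volume is mounted
	dataless := DataPathMap{NoData: true}

	fmt.Printf("stateful:  %+v\n", stateful)
	fmt.Printf("stateless: %+v\n", stateless)
	fmt.Printf("dataless:  %+v\n", dataless)
}
```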
Signed-off-by: Blaine Gardner <blaine.gardner@suse.com>
parent 504bec1d
Showing with 198 additions and 183 deletions
......@@ -29,10 +29,11 @@
- Rook will no longer create a directory-based osd in the `dataDirHostPath` if no directories or
devices are specified or if there are no disks on the host.
- Containers in `mon`, `mgr`, `mds`, and `rgw` pods have been removed and/or changed names.
- Containers in `mon`, `mgr`, `mds`, `rgw`, and `rbd-mirror` pods have been removed and/or changed names.
- Config paths in `mon`, `mgr`, `mds` and `rgw` containers are now always under
`/etc/ceph` or `/var/lib/ceph` and as close to Ceph's default path as possible regardless of the
`dataDirHostPath` setting.
- The `rbd-mirror` pod labels now read `rbd-mirror` instead of `rbdmirror` for consistency.
## Known Issues
......
......@@ -190,7 +190,7 @@ func (c *cluster) createInstance(rookImage string) error {
}
// Start the rbd mirroring daemon(s)
rbdmirror := rbd.New(c.context, c.Namespace, rookImage, c.Spec.CephVersion, cephv1.GetRBDMirrorPlacement(c.Spec.Placement),
rbdmirror := rbd.New(c.Info, c.context, c.Namespace, rookImage, c.Spec.CephVersion, cephv1.GetRBDMirrorPlacement(c.Spec.Placement),
c.Spec.Network.HostNetwork, c.Spec.RBDMirroring, cephv1.GetRBDMirrorResources(c.Spec.Resources), c.ownerRef)
err = rbdmirror.Start()
if err != nil {
......
......@@ -22,7 +22,6 @@ import (
"strings"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
"github.com/rook/rook/pkg/operator/ceph/config"
"github.com/rook/rook/pkg/operator/ceph/config/keyring"
opspec "github.com/rook/rook/pkg/operator/ceph/spec"
"github.com/rook/rook/pkg/operator/k8sutil"
......@@ -162,7 +161,7 @@ func (c *Cluster) makeMgrDaemonContainer(mgrConfig *mgrConfig) v1.Container {
"ceph-mgr",
},
Args: append(
opspec.DaemonFlags(c.clusterInfo, config.MgrType, mgrConfig.DaemonID),
opspec.DaemonFlags(c.clusterInfo, mgrConfig.DaemonID),
"--foreground",
),
Image: c.cephVersion.Image,
......
......@@ -19,15 +19,14 @@ package mgr
import (
"testing"
cephconfig "github.com/rook/rook/pkg/daemon/ceph/config"
"github.com/rook/rook/pkg/operator/ceph/config"
"github.com/stretchr/testify/assert"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
rookalpha "github.com/rook/rook/pkg/apis/rook.io/v1alpha2"
"github.com/rook/rook/pkg/clusterd"
cephconfig "github.com/rook/rook/pkg/daemon/ceph/config"
"github.com/rook/rook/pkg/operator/ceph/config"
cephtest "github.com/rook/rook/pkg/operator/ceph/test"
optest "github.com/rook/rook/pkg/operator/test"
"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
......
......@@ -136,7 +136,7 @@ func (c *Cluster) makeMonFSInitContainer(monConfig *monConfig) v1.Container {
cephMonCommand,
},
Args: append(
opspec.DaemonFlags(c.clusterInfo, config.MonType, monConfig.DaemonName),
opspec.DaemonFlags(c.clusterInfo, monConfig.DaemonName),
// needed so we can generate an initial monmap
// otherwise the mkfs will say: "0  no local addrs match monmap"
config.NewFlag("public-addr", monConfig.PublicIP),
......@@ -159,7 +159,7 @@ func (c *Cluster) makeMonDaemonContainer(monConfig *monConfig) v1.Container {
cephMonCommand,
},
Args: append(
opspec.DaemonFlags(c.clusterInfo, config.MonType, monConfig.DaemonName),
opspec.DaemonFlags(c.clusterInfo, monConfig.DaemonName),
"--foreground",
config.NewFlag("public-addr", monConfig.PublicIP),
config.NewFlag("public-bind-addr", opspec.ContainerEnvVarReference(podIPEnvVar)),
......
/*
Copyright 2019 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"fmt"
"github.com/rook/rook/pkg/operator/ceph/config"
"github.com/rook/rook/pkg/operator/ceph/config/keyring"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
keyringTemplate = `
[client.rbd-mirror.%s]
key = %s
caps mon = "profile rbd-mirror"
caps osd = "profile rbd"
`
)
// daemonConfig for a single rbd mirror
type daemonConfig struct {
ResourceName string // the name rook gives to mirror resources in k8s metadata
DaemonID string // the ID of the Ceph daemon ("a", "b", ...)
DataPathMap *config.DataPathMap // location to store data in container
}
func (m *Mirroring) generateKeyring(daemonConfig *daemonConfig) error {
user := fullDaemonName(daemonConfig.DaemonID)
access := []string{"mon", "profile rbd-mirror", "osd", "profile rbd"}
s := keyring.GetSecretStore(m.context, m.Namespace, &m.ownerRef)
key, err := s.GenerateKey(daemonConfig.ResourceName, user, access)
if err != nil {
return err
}
// Delete legacy key store for upgrade from Rook v0.9.x to v1.0.x
err = m.context.Clientset.CoreV1().Secrets(m.Namespace).Delete(daemonConfig.ResourceName, &metav1.DeleteOptions{})
if err != nil {
if errors.IsNotFound(err) {
logger.Debugf("legacy rbd-mirror key %s is already removed", daemonConfig.ResourceName)
} else {
logger.Warningf("legacy rbd-mirror key %s could not be removed: %+v", daemonConfig.ResourceName, err)
}
}
keyring := fmt.Sprintf(keyringTemplate, daemonConfig.DaemonID, key)
return s.CreateOrUpdate(daemonConfig.ResourceName, keyring)
}
func fullDaemonName(daemonID string) string {
return fmt.Sprintf("client.rbd-mirror.%s", daemonID)
}
......@@ -24,7 +24,8 @@ import (
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
rookalpha "github.com/rook/rook/pkg/apis/rook.io/v1alpha2"
"github.com/rook/rook/pkg/clusterd"
"github.com/rook/rook/pkg/operator/ceph/spec"
cephconfig "github.com/rook/rook/pkg/daemon/ceph/config"
"github.com/rook/rook/pkg/operator/ceph/config"
"github.com/rook/rook/pkg/operator/k8sutil"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
......@@ -37,8 +38,9 @@ const (
appName = "rook-ceph-rbd-mirror"
)
// Cluster represents the Rook and environment configuration settings needed to set up rbd mirroring.
// Mirroring represents the Rook and environment configuration settings needed to set up rbd mirroring.
type Mirroring struct {
ClusterInfo *cephconfig.ClusterInfo
Namespace string
placement rookalpha.Placement
context *clusterd.Context
......@@ -51,9 +53,19 @@ type Mirroring struct {
}
// New creates an instance of the rbd mirroring
func New(context *clusterd.Context, namespace, rookVersion string, cephVersion cephv1.CephVersionSpec, placement rookalpha.Placement, hostNetwork bool,
spec cephv1.RBDMirroringSpec, resources v1.ResourceRequirements, ownerRef metav1.OwnerReference) *Mirroring {
func New(
cluster *cephconfig.ClusterInfo,
context *clusterd.Context,
namespace, rookVersion string,
cephVersion cephv1.CephVersionSpec,
placement rookalpha.Placement,
hostNetwork bool,
spec cephv1.RBDMirroringSpec,
resources v1.ResourceRequirements,
ownerRef metav1.OwnerReference,
) *Mirroring {
return &Mirroring{
ClusterInfo: cluster,
context: context,
Namespace: namespace,
placement: placement,
......@@ -70,22 +82,25 @@ func New(context *clusterd.Context, namespace, rookVersion string, cephVersion c
func (m *Mirroring) Start() error {
logger.Infof("configure rbd mirroring with %d workers", m.spec.Workers)
access := []string{"mon", "profile rbd-mirror", "osd", "profile rbd"}
for i := 0; i < m.spec.Workers; i++ {
daemonName := k8sutil.IndexToName(i)
username := fullDaemonName(daemonName)
resourceName := fmt.Sprintf("%s-%s", appName, daemonName)
cfg := spec.KeyringConfig{Namespace: m.Namespace, ResourceName: resourceName, DaemonName: daemonName, OwnerRef: m.ownerRef, Username: username, Access: access}
if err := spec.CreateKeyring(m.context, cfg); err != nil {
return fmt.Errorf("failed to create %s keyring. %+v", resourceName, err)
daemonID := k8sutil.IndexToName(i)
resourceName := fmt.Sprintf("%s-%s", appName, daemonID)
daemonConf := &daemonConfig{
DaemonID: daemonID,
ResourceName: resourceName,
DataPathMap: config.NewDatalessDaemonDataPathMap(),
}
if err := m.generateKeyring(daemonConf); err != nil {
return fmt.Errorf("failed to generate keyring for %s. %+v", resourceName, err)
}
// Start the deployment
deployment := m.makeDeployment(resourceName, daemonName)
deployment := m.makeDeployment(daemonConf)
if _, err := m.context.Clientset.Apps().Deployments(m.Namespace).Create(deployment); err != nil {
if !errors.IsAlreadyExists(err) {
return fmt.Errorf("failed to create %s deployment. %+v", resourceName, err)
/* TODO: update rbd mirrors if they already exist */
}
logger.Infof("%s deployment already exists", resourceName)
} else {
......@@ -139,7 +154,3 @@ func (m *Mirroring) removeExtraMirrors() error {
}
return nil
}
func fullDaemonName(daemonName string) string {
return fmt.Sprintf("client.rbd-mirror.%s", daemonName)
}
......@@ -22,6 +22,7 @@ import (
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
rookalpha "github.com/rook/rook/pkg/apis/rook.io/v1alpha2"
"github.com/rook/rook/pkg/clusterd"
cephconfig "github.com/rook/rook/pkg/daemon/ceph/config"
testop "github.com/rook/rook/pkg/operator/test"
exectest "github.com/rook/rook/pkg/util/exec/test"
"github.com/stretchr/testify/assert"
......@@ -43,7 +44,9 @@ func TestRBDMirror(t *testing.T) {
return "", nil
}
c := New(&clusterd.Context{Clientset: clientset, Executor: executor},
c := New(
&cephconfig.ClusterInfo{FSID: "myfsid"},
&clusterd.Context{Clientset: clientset, Executor: executor},
"ns",
"rook/rook:myversion",
cephv1.CephVersionSpec{Image: "ceph/ceph:myceph"},
......
......@@ -14,11 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Package rbd for mirroring
package rbd
import (
opmon "github.com/rook/rook/pkg/operator/ceph/cluster/mon"
"github.com/rook/rook/pkg/operator/ceph/config"
opspec "github.com/rook/rook/pkg/operator/ceph/spec"
"github.com/rook/rook/pkg/operator/k8sutil"
apps "k8s.io/api/apps/v1"
......@@ -26,21 +25,19 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func (m *Mirroring) makeDeployment(resourceName, daemonName string) *apps.Deployment {
func (m *Mirroring) makeDeployment(daemonConfig *daemonConfig) *apps.Deployment {
podSpec := v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: resourceName,
Labels: opspec.PodLabels(appName, m.Namespace, "rbdmirror", daemonName),
Name: daemonConfig.ResourceName,
Labels: opspec.PodLabels(appName, m.Namespace, string(config.RbdMirrorType), daemonConfig.DaemonID),
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
m.makeConfigInitContainer(resourceName, daemonName),
},
InitContainers: []v1.Container{},
Containers: []v1.Container{
m.makeMirroringDaemonContainer(daemonName),
m.makeMirroringDaemonContainer(daemonConfig),
},
RestartPolicy: v1.RestartPolicyAlways,
Volumes: opspec.PodVolumes(""),
Volumes: opspec.DaemonVolumes(daemonConfig.DataPathMap, daemonConfig.ResourceName),
HostNetwork: m.hostNetwork,
},
}
......@@ -52,8 +49,9 @@ func (m *Mirroring) makeDeployment(resourceName, daemonName string) *apps.Deploy
replicas := int32(1)
d := &apps.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: resourceName,
Name: daemonConfig.ResourceName,
Namespace: m.Namespace,
Labels: opspec.PodLabels(appName, m.Namespace, string(config.RbdMirrorType), daemonConfig.DaemonID),
},
Spec: apps.DeploymentSpec{
Selector: &metav1.LabelSelector{
......@@ -67,47 +65,20 @@ func (m *Mirroring) makeDeployment(resourceName, daemonName string) *apps.Deploy
return d
}
func (m *Mirroring) makeConfigInitContainer(resourceName, daemonName string) v1.Container {
return v1.Container{
Name: opspec.ConfigInitContainerName,
Args: []string{
"ceph",
"config-init",
},
Image: k8sutil.MakeRookImage(m.rookVersion),
Env: []v1.EnvVar{
{Name: "ROOK_USERNAME", Value: fullDaemonName(daemonName)},
{Name: "ROOK_KEYRING",
ValueFrom: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{Name: resourceName},
Key: opspec.KeyringSecretKeyName,
}}},
k8sutil.PodIPEnvVar(k8sutil.PrivateIPEnvVar),
k8sutil.PodIPEnvVar(k8sutil.PublicIPEnvVar),
opmon.EndpointEnvVar(),
k8sutil.ConfigOverrideEnvVar(),
},
VolumeMounts: opspec.RookVolumeMounts(),
Resources: m.resources,
}
}
func (m *Mirroring) makeMirroringDaemonContainer(daemonName string) v1.Container {
func (m *Mirroring) makeMirroringDaemonContainer(daemonConfig *daemonConfig) v1.Container {
container := v1.Container{
Name: "rbdmirror",
Name: "rbd-mirror",
Command: []string{
"rbd-mirror",
},
Args: []string{
Args: append(
opspec.DaemonFlags(m.ClusterInfo, daemonConfig.DaemonID),
"--foreground",
"-n", fullDaemonName(daemonName),
"--conf", "/etc/ceph/ceph.conf",
"--keyring", "/etc/ceph/keyring",
},
"--name="+fullDaemonName(daemonConfig.DaemonID),
),
Image: m.cephVersion.Image,
VolumeMounts: opspec.CephVolumeMounts(),
Env: k8sutil.ClusterDaemonEnvVars(m.cephVersion.Image),
VolumeMounts: opspec.DaemonVolumeMounts(daemonConfig.DataPathMap, daemonConfig.ResourceName),
Env: opspec.DaemonEnvVars(m.cephVersion.Image),
Resources: m.resources,
}
return container
......
......@@ -22,34 +22,50 @@ import (
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
rookalpha "github.com/rook/rook/pkg/apis/rook.io/v1alpha2"
"github.com/rook/rook/pkg/clusterd"
cephconfig "github.com/rook/rook/pkg/daemon/ceph/config"
"github.com/rook/rook/pkg/operator/ceph/config"
cephtest "github.com/rook/rook/pkg/operator/ceph/test"
optest "github.com/rook/rook/pkg/operator/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestPodSpec(t *testing.T) {
c := New(&clusterd.Context{},
c := New(
&cephconfig.ClusterInfo{FSID: "myfsid"},
&clusterd.Context{Clientset: optest.New(1)},
"ns",
"rook/rook:myversion",
cephv1.CephVersionSpec{Image: "ceph/ceph:myceph"},
rookalpha.Placement{},
false,
cephv1.RBDMirroringSpec{Workers: 2},
v1.ResourceRequirements{},
v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewQuantity(100.0, resource.BinarySI),
},
Requests: v1.ResourceList{
v1.ResourceMemory: *resource.NewQuantity(1337.0, resource.BinarySI),
},
},
metav1.OwnerReference{},
)
daemonConf := daemonConfig{
DaemonID: "a",
ResourceName: "rook-ceph-rbd-mirror-a",
DataPathMap: config.NewDatalessDaemonDataPathMap(),
}
d := c.makeDeployment("rname", "dname")
assert.Equal(t, "rname", d.Name)
spec := d.Spec.Template.Spec
require.Equal(t, 1, len(spec.Containers))
assert.Equal(t, 1, len(spec.InitContainers))
assert.Equal(t, 3, len(spec.Volumes))
cont := spec.Containers[0]
assert.Equal(t, "rbd-mirror", cont.Command[0])
assert.Equal(t, 7, len(cont.Args))
assert.Equal(t, "--foreground", cont.Args[0])
assert.Equal(t, "-n", cont.Args[1])
assert.Equal(t, "client.rbd-mirror.dname", cont.Args[2])
d := c.makeDeployment(&daemonConf)
assert.Equal(t, "rook-ceph-rbd-mirror-a", d.Name)
// Deployment should have Ceph labels
cephtest.AssertLabelsContainCephRequirements(t, d.ObjectMeta.Labels,
config.RbdMirrorType, "a", appName, "ns")
podTemplate := cephtest.NewPodTemplateSpecTester(t, &d.Spec.Template)
podTemplate.RunFullSuite(config.RbdMirrorType, "a", appName, "ns", "ceph/ceph:myceph",
"100", "1337" /* resources */)
}
......@@ -49,6 +49,8 @@ const (
// RgwType defines the rgw DaemonType
RgwType DaemonType = "rgw"
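// RbdMirrorType defines the rbd-mirror DaemonType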
RbdMirrorType DaemonType = "rbd-mirror"
)
// VarLibCephDir is simply "/var/lib/ceph". It is made overwriteable only for unit tests where it
......
......@@ -32,6 +32,9 @@ type DataPathMap struct {
// daemon's data is stored.
HostDataDir string
// If NoData is true, the daemon has no data to store.
NoData bool
// ContainerDataDir should be set to the path in the container where the specific daemon's data
// is stored.
ContainerDataDir string
......@@ -49,6 +52,7 @@ func NewStatefulDaemonDataPathMap(
return &DataPathMap{
PersistData: true,
HostDataDir: path.Join(dataDirHostPath, daemonDataDirHostRelativePath),
NoData: false,
ContainerDataDir: cephDataDir(daemonType, daemonID),
}
}
......@@ -61,10 +65,20 @@ func NewStatelessDaemonDataPathMap(
return &DataPathMap{
PersistData: false,
HostDataDir: "",
NoData: false,
ContainerDataDir: cephDataDir(daemonType, daemonID),
}
}
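// NewDatalessDaemonDataPathMap returns a DataPathMap for a daemon that stores
// no data at all: nothing is persisted to the host, and no data dir is set up
// in the container (e.g., rbd-mirror).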
func NewDatalessDaemonDataPathMap() *DataPathMap {
return &DataPathMap{
PersistData: false,
HostDataDir: "",
NoData: true,
ContainerDataDir: "",
}
}
func cephDataDir(daemonType DaemonType, daemonID string) string {
// daemons' default data dirs are: /var/lib/ceph/<daemon-type>/ceph-<daemon-id>
return path.Join(VarLibCephDir, string(daemonType), "ceph-"+daemonID)
......
......@@ -80,7 +80,7 @@ func (c *Cluster) makeDeployment(mdsConfig *mdsConfig) *apps.Deployment {
func (c *Cluster) makeMdsDaemonContainer(mdsConfig *mdsConfig) v1.Container {
args := append(
opspec.DaemonFlags(c.clusterInfo, config.MdsType, mdsConfig.DaemonID),
opspec.DaemonFlags(c.clusterInfo, mdsConfig.DaemonID),
"--foreground",
)
......
......@@ -159,7 +159,7 @@ func (c *clusterConfig) makeDaemonContainer() v1.Container {
},
Args: append(
append(
opspec.DaemonFlags(c.clusterInfo, cephconfig.RgwType, c.store.Name),
opspec.DaemonFlags(c.clusterInfo, c.store.Name),
"--foreground",
"--name=client.radosgw.gateway",
cephconfig.NewFlag("host", opspec.ContainerEnvVarReference("POD_NAME")),
......
/*
Copyright 2018 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package spec provides Kubernetes controller/pod/container spec items used for many Ceph daemons
package spec
import (
"fmt"
"github.com/rook/rook/pkg/clusterd"
"github.com/rook/rook/pkg/daemon/ceph/client"
"github.com/rook/rook/pkg/operator/k8sutil"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
KeyringSecretKeyName = "keyring"
)
type KeyringConfig struct {
Namespace string
ResourceName string
DaemonName string
OwnerRef metav1.OwnerReference
Username string
Access []string
}
func CreateKeyring(context *clusterd.Context, config KeyringConfig) error {
_, err := context.Clientset.CoreV1().Secrets(config.Namespace).Get(config.ResourceName, metav1.GetOptions{})
if err == nil {
logger.Infof("the keyring %s was already generated", config.ResourceName)
return nil
}
if !errors.IsNotFound(err) {
return fmt.Errorf("failed to get secret %s. %+v", config.ResourceName, err)
}
// get-or-create-key for the user account
keyring, err := client.AuthGetOrCreateKey(context, config.Namespace, config.Username, config.Access)
if err != nil {
return fmt.Errorf("failed to get or create auth key for %s. %+v", config.Username, err)
}
// Store the keyring in a secret
secrets := map[string]string{
KeyringSecretKeyName: keyring,
}
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: config.ResourceName,
Namespace: config.Namespace,
},
StringData: secrets,
Type: k8sutil.RookType,
}
k8sutil.SetOwnerRef(context.Clientset, config.Namespace, &secret.ObjectMeta, &config.OwnerRef)
_, err = context.Clientset.CoreV1().Secrets(config.Namespace).Create(secret)
if err != nil {
return fmt.Errorf("failed to save mirroring secret. %+v", err)
}
return nil
}
......@@ -68,31 +68,37 @@ func RookVolumeMounts() []v1.VolumeMount {
// DaemonVolumes returns the pod volumes used by all Ceph daemons.
func DaemonVolumes(dataPaths *config.DataPathMap, keyringResourceName string) []v1.Volume {
var dataDirSource v1.VolumeSource
if dataPaths.PersistData {
dataDirSource = v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: dataPaths.HostDataDir}}
} else {
dataDirSource = v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}
}
return []v1.Volume{
{Name: "ceph-daemon-data", VolumeSource: dataDirSource},
vols := []v1.Volume{
config.StoredFileVolume(),
keyring.Volume().Resource(keyringResourceName),
}
if dataPaths.NoData {
return vols
}
src := v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}
if dataPaths.PersistData {
src = v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: dataPaths.HostDataDir}}
}
return append(vols, v1.Volume{Name: "ceph-daemon-data", VolumeSource: src})
}
// DaemonVolumeMounts returns volume mounts which correspond to the DaemonVolumes. These
// volume mounts are shared by most all Ceph daemon containers, both init and standard.
func DaemonVolumeMounts(dataPaths *config.DataPathMap, keyringResourceName string) []v1.VolumeMount {
return []v1.VolumeMount{
{Name: "ceph-daemon-data", MountPath: dataPaths.ContainerDataDir},
mounts := []v1.VolumeMount{
config.StoredFileVolumeMount(),
keyring.VolumeMount().Resource(keyringResourceName),
}
if dataPaths.NoData {
return mounts
}
return append(mounts,
v1.VolumeMount{Name: "ceph-daemon-data", MountPath: dataPaths.ContainerDataDir},
)
}
// DaemonFlags returns the command line flags used by all Ceph daemons.
func DaemonFlags(cluster *cephconfig.ClusterInfo, daemonType config.DaemonType, daemonID string) []string {
func DaemonFlags(cluster *cephconfig.ClusterInfo, daemonID string) []string {
return append(
config.DefaultFlags(cluster.FSID, keyring.VolumeMount().KeyringFilePath()),
config.NewFlag("id", daemonID),
......
......@@ -71,7 +71,10 @@ func (ps *PodSpecTester) AssertVolumesMeetCephRequirements(
if daemonType == config.MonType {
keyringSecretName = "rook-ceph-mons-keyring" // mons share a keyring
}
requiredVols := []string{"ceph-daemon-data", "rook-ceph-config", keyringSecretName}
requiredVols := []string{"rook-ceph-config", keyringSecretName}
if daemonType != config.RbdMirrorType {
requiredVols = append(requiredVols, "ceph-daemon-data")
}
vols := []string{}
for _, v := range ps.spec.Volumes {
......