Unverified Commit 5f01178e authored by travisn

mon: set names based on chars instead of integers

Signed-off-by: travisn <tnielsen@redhat.com>
parent 7121418c
Showing with 248 additions and 119 deletions
......@@ -12,6 +12,7 @@
- The minimum version of Kubernetes supported by Rook changed from `1.7` to `1.8`.
## Breaking Changes
- Mons are now [named consistently](https://github.com/rook/rook/issues/1751) with the other daemons, using the letters a, b, c, etc. (a short sketch of the mapping follows this hunk).
- The Rook container images are no longer published to quay.io; they are published only to Docker Hub. The manifests have referenced Docker Hub for several releases already, so we do not expect this change to directly affect any users.
- Rook no longer supports Kubernetes `1.7`. Users running Kubernetes `1.7` are recommended to upgrade to Kubernetes `1.8` or higher. If you are using `kubeadm`, you can follow this [guide](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-8/) to upgrade from Kubernetes `1.7` to `1.8`. If you are using `kops` or `kubespray` to manage your Kubernetes cluster, follow the respective project's `upgrade` guide.
......
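Editor's note: for readers skimming the diff, here is a minimal standalone sketch of the letter-based naming convention this commit adopts (index 0 maps to "a", 25 to "z", 26 to "aa", and so on, matching the expectations in the new mon tests further down). The function name is the editor's, not part of the commit:

```go
package main

import "fmt"

// monLetters converts a zero-based mon index to its letter name,
// rolling over to multi-letter names after "z" (a, b, ..., z, aa, ab, ...).
func monLetters(index int) string {
	name := ""
	for {
		name = string(rune('a'+index%26)) + name
		if index < 26 {
			break
		}
		// shift down one so the multi-letter sequence starts at "aa", not "ba"
		index = index/26 - 1
	}
	return name
}

func main() {
	for _, i := range []int{0, 1, 25, 26, 51, 52, 702} {
		fmt.Printf("%d -> %s\n", i, monLetters(i)) // a, b, z, aa, az, ba, aaa
	}
}
```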
......@@ -66,7 +66,7 @@ func startMon(cmd *cobra.Command, args []string) error {
return fmt.Errorf("missing mon port")
}
if err := compareMonSecret(clusterInfo.MonitorSecret, path.Join(cfg.dataDir, monName)); err != nil {
if err := compareMonSecret(clusterInfo.MonitorSecret, mon.GetMonRunDirPath(cfg.dataDir, monName)); err != nil {
rook.TerminateFatal(err)
}
......
......@@ -28,7 +28,13 @@ KEYRING_FILE="/etc/ceph/keyring"
# without specifying any arguments
write_endpoints() {
endpoints=$(cat ${MON_CONFIG})
mon_endpoints=$(echo ${endpoints} | sed 's/rook-ceph-mon[0-9]\+=//g')
# filter out the mon names
mon_endpoints=$(echo ${endpoints} | sed 's/[a-z]\+=//g')
# filter out the legacy mon names
mon_endpoints=$(echo ${mon_endpoints} | sed 's/rook-ceph-mon[0-9]\+=//g')
DATE=$(date)
echo "$DATE writing mon endpoints to ${CEPH_CONFIG}: ${endpoints}"
cat <<EOF > ${CEPH_CONFIG}
......
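Editor's note: a hedged sketch (not part of the commit) of what the two filters above do to the endpoint string stored in the configmap; the sample values mirror the test strings later in this diff. One entry uses the new letter name and one uses the legacy name, and only the bare host:port pairs survive for mon_host:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// one new-style entry ("a=...") and one legacy entry ("rook-ceph-mon2=...")
	endpoints := "a=1.2.3.1:6790,rook-ceph-mon2=1.2.3.2:6790"

	// equivalent of: sed 's/[a-z]\+=//g'  (strip the new letter names)
	filtered := regexp.MustCompile(`[a-z]+=`).ReplaceAllString(endpoints, "")

	// equivalent of: sed 's/rook-ceph-mon[0-9]\+=//g'  (strip the legacy names)
	filtered = regexp.MustCompile(`rook-ceph-mon[0-9]+=`).ReplaceAllString(filtered, "")

	fmt.Println(filtered) // 1.2.3.1:6790,1.2.3.2:6790
}
```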
......@@ -26,7 +26,7 @@ func MonInQuorumResponse() string {
resp := client.MonStatusResponse{Quorum: []int{0}}
resp.MonMap.Mons = []client.MonMapEntry{
{
Name: "rook-ceph-mon1",
Name: "rook-ceph-mon-a",
Rank: 0,
Address: "1.2.3.1",
},
......
......@@ -99,18 +99,22 @@ type GlobalConfig struct {
}
// get the path of a given monitor's run dir
func getMonRunDirPath(configDir, monName string) string {
func GetMonRunDirPath(configDir, monName string) string {
if strings.Index(monName, "mon") == -1 {
// if the mon name doesn't have "mon" in it, include it in the directory
return path.Join(configDir, "mon-"+monName)
}
return path.Join(configDir, monName)
}
// get the path of a given monitor's keyring
func getMonKeyringPath(configDir, monName string) string {
return filepath.Join(getMonRunDirPath(configDir, monName), defaultKeyringFile)
return filepath.Join(GetMonRunDirPath(configDir, monName), defaultKeyringFile)
}
// get the path of a given monitor's data dir
func getMonDataDirPath(configDir, monName string) string {
return filepath.Join(getMonRunDirPath(configDir, monName), "data")
return filepath.Join(GetMonRunDirPath(configDir, monName), "data")
}
// get the path of a given monitor's config file
......@@ -190,7 +194,7 @@ func GenerateConfigFile(context *clusterd.Context, cluster *ClusterInfo, pathRoo
globalConfig *cephConfig, clientSettings map[string]string) (string, error) {
if pathRoot == "" {
pathRoot = getMonRunDirPath(context.ConfigDir, getFirstMonitor(cluster))
pathRoot = GetMonRunDirPath(context.ConfigDir, getFirstMonitor(cluster))
}
// create the config directory
......
......@@ -89,7 +89,7 @@ func generateConfigFiles(context *clusterd.Context, config *Config) (string, str
}
// write the config file to disk
confFilePath, err := GenerateConnectionConfigFile(context, config.Cluster, getMonRunDirPath(context.ConfigDir, config.Name),
confFilePath, err := GenerateConnectionConfigFile(context, config.Cluster, GetMonRunDirPath(context.ConfigDir, config.Name),
"admin", getMonKeyringPath(context.ConfigDir, config.Name))
if err != nil {
return "", "", err
......@@ -114,7 +114,7 @@ func startMon(context *clusterd.Context, config *Config, confFilePath, monDataDi
logger.Infof("initializing mon")
// generate the monmap
monmapPath, err := generateMonMap(context, config.Cluster, getMonRunDirPath(context.ConfigDir, config.Name))
monmapPath, err := generateMonMap(context, config.Cluster, GetMonRunDirPath(context.ConfigDir, config.Name))
if err != nil {
return err
}
......
......@@ -233,8 +233,8 @@ func (c *Cluster) failoverMon(name string) error {
logger.Infof("Failing over monitor %s", name)
// Start a new monitor
m := &monConfig{Name: fmt.Sprintf("%s%d", appName, c.maxMonID+1), Port: int32(mon.DefaultPort)}
logger.Infof("starting new mon %s", m.Name)
m := newMonConfig(c.maxMonID + 1)
logger.Infof("starting new mon: %+v", m)
// Create the service endpoint
serviceIP, err := c.createService(m)
......@@ -258,7 +258,7 @@ func (c *Cluster) failoverMon(name string) error {
} else {
m.PublicIP = serviceIP
}
c.clusterInfo.Monitors[m.Name] = mon.ToCephMon(m.Name, m.PublicIP, m.Port)
c.clusterInfo.Monitors[m.Name] = mon.ToCephMon(m.DaemonName, m.PublicIP, m.Port)
// Start the pod
if err = c.startPods(mConf); err != nil {
......
......@@ -55,30 +55,30 @@ func TestCheckHealth(t *testing.T) {
c := New(context, "ns", "", "myversion", cephv1beta1.MonSpec{Count: 3, AllowMultiplePerNode: true},
rookalpha.Placement{}, false, v1.ResourceRequirements{}, metav1.OwnerReference{})
c.clusterInfo = test.CreateConfigDir(1)
logger.Infof("initial mons: %v", c.clusterInfo.Monitors)
c.waitForStart = false
defer os.RemoveAll(c.context.ConfigDir)
c.mapping.Node["rook-ceph-mon1"] = &NodeInfo{
c.mapping.Node["rook-ceph-mon-f"] = &NodeInfo{
Name: "node0",
Address: "",
}
c.mapping.Port["node0"] = cephmon.DefaultPort
c.maxMonID = 10
c.maxMonID = 4
err := c.checkHealth()
assert.Nil(t, err)
logger.Infof("mons after checkHealth: %v", c.clusterInfo.Monitors)
err = c.failoverMon("rook-ceph-mon1")
err = c.failoverMon("f")
assert.Nil(t, err)
newMons := []string{
"rook-ceph-mon11",
"rook-ceph-mon12",
"rook-ceph-mon13",
"rook-ceph-mon-g",
}
for _, monName := range newMons {
_, ok := c.clusterInfo.Monitors[monName]
assert.True(t, ok, fmt.Sprintf("mon %s not found in monitor list", monName))
assert.True(t, ok, fmt.Sprintf("mon %s not found in monitor list. %v", monName, c.clusterInfo.Monitors))
}
}
......@@ -102,39 +102,34 @@ func TestCheckHealthNotFound(t *testing.T) {
c.waitForStart = false
defer os.RemoveAll(c.context.ConfigDir)
c.mapping.Node["rook-ceph-mon1"] = &NodeInfo{
Name: "node0",
Address: "",
c.mapping.Node["rook-ceph-mon-a"] = &NodeInfo{
Name: "node0",
}
c.mapping.Node["rook-ceph-mon2"] = &NodeInfo{
Name: "node0",
Address: "",
c.mapping.Node["rook-ceph-mon-b"] = &NodeInfo{
Name: "node0",
}
c.mapping.Port["node0"] = cephmon.DefaultPort
c.maxMonID = 10
c.maxMonID = 4
c.saveMonConfig()
// Check if the two mons are found in the configmap
cm, err := c.context.Clientset.CoreV1().ConfigMaps(c.Namespace).Get(EndpointConfigMapName, metav1.GetOptions{})
assert.Nil(t, err)
if cm.Data[EndpointDataKey] == "rook-ceph-mon1=1.2.3.1:6790,rook-ceph-mon2=1.2.3.2:6790" {
assert.Equal(t, "rook-ceph-mon1=1.2.3.1:6790,rook-ceph-mon2=1.2.3.2:6790", cm.Data[EndpointDataKey])
} else {
assert.Equal(t, "rook-ceph-mon2=1.2.3.2:6790,rook-ceph-mon1=1.2.3.1:6790", cm.Data[EndpointDataKey])
if cm.Data[EndpointDataKey] != "a=1.2.3.1:6790,b=1.2.3.2:6790" {
assert.Equal(t, "b=1.2.3.2:6790,a=1.2.3.1:6790", cm.Data[EndpointDataKey])
}
// Because mon2 isn't in the MonInQuorumResponse() this will create a mon11
// Because the mon a isn't in the MonInQuorumResponse() this will create a new mon
delete(c.mapping.Node, "rook-ceph-mon-a")
err = c.checkHealth()
assert.Nil(t, err)
// recheck that the "not found" mon has been replaced with a new one
cm, err = c.context.Clientset.CoreV1().ConfigMaps(c.Namespace).Get(EndpointConfigMapName, metav1.GetOptions{})
assert.Nil(t, err)
if cm.Data[EndpointDataKey] == "rook-ceph-mon1=1.2.3.1:6790,rook-ceph-mon11=:6790" {
assert.Equal(t, "rook-ceph-mon1=1.2.3.1:6790,rook-ceph-mon11=:6790", cm.Data[EndpointDataKey])
} else {
assert.Equal(t, "rook-ceph-mon11=:6790,rook-ceph-mon1=1.2.3.1:6790", cm.Data[EndpointDataKey])
if cm.Data[EndpointDataKey] != "b=1.2.3.2:6790,f=:6790" {
assert.Equal(t, "f=:6790,b=1.2.3.2:6790", cm.Data[EndpointDataKey])
}
}
......@@ -146,12 +141,12 @@ func TestCheckHealthTwoMonsOneNode(t *testing.T) {
resp := client.MonStatusResponse{Quorum: []int{0}}
resp.MonMap.Mons = []client.MonMapEntry{
{
Name: "rook-ceph-mon1",
Name: "a",
Rank: 0,
Address: "1.2.3.4",
},
{
Name: "rook-ceph-mon3",
Name: "c",
Rank: 0,
Address: "1.2.3.4",
},
......@@ -177,24 +172,27 @@ func TestCheckHealthTwoMonsOneNode(t *testing.T) {
defer os.RemoveAll(c.context.ConfigDir)
// add two mons to the mapping on node0
c.mapping.Node["rook-ceph-mon1"] = &NodeInfo{
c.mapping.Node["a"] = &NodeInfo{
Name: "node0",
Hostname: "mynode0",
Address: "0.0.0.0",
}
c.mapping.Node["rook-ceph-mon2"] = &NodeInfo{
c.mapping.Node["b"] = &NodeInfo{
Name: "node0",
Hostname: "mynode0",
Address: "0.0.0.0",
}
c.maxMonID = 2
c.maxMonID = 1
c.saveMonConfig()
for i := 1; i <= 2; i++ {
rs := c.makeReplicaSet(&monConfig{Name: fmt.Sprintf("mon%d", i)}, "node0")
monNames := []string{"a", "b"}
for i := 0; i < len(monNames); i++ {
prefix := appName + "-"
name := monNames[i]
rs := c.makeReplicaSet(&monConfig{Name: prefix + name, DaemonName: name}, "node0")
_, err := clientset.ExtensionsV1beta1().ReplicaSets(c.Namespace).Create(rs)
assert.Nil(t, err)
po := c.makeMonPod(&monConfig{Name: fmt.Sprintf("mon%d", i)}, "node0")
po := c.makeMonPod(&monConfig{Name: prefix + name, DaemonName: name}, "node0")
_, err = clientset.CoreV1().Pods(c.Namespace).Create(po)
assert.Nil(t, err)
}
......@@ -202,10 +200,10 @@ func TestCheckHealthTwoMonsOneNode(t *testing.T) {
// initial health check should already see that there is more than one mon on one node (node1)
_, err := c.checkMonsOnSameNode()
assert.Nil(t, err)
assert.Equal(t, "node0", c.mapping.Node["rook-ceph-mon1"].Name)
assert.Equal(t, "node0", c.mapping.Node["rook-ceph-mon2"].Name)
assert.Equal(t, "mynode0", c.mapping.Node["rook-ceph-mon1"].Hostname)
assert.Equal(t, "mynode0", c.mapping.Node["rook-ceph-mon2"].Hostname)
assert.Equal(t, "node0", c.mapping.Node["a"].Name)
assert.Equal(t, "node0", c.mapping.Node["b"].Name)
assert.Equal(t, "mynode0", c.mapping.Node["a"].Hostname)
assert.Equal(t, "mynode0", c.mapping.Node["b"].Hostname)
// add new node and check if the second mon gets failovered to it
n := &v1.Node{
......@@ -229,9 +227,10 @@ func TestCheckHealthTwoMonsOneNode(t *testing.T) {
_, err = c.checkMonsOnSameNode()
assert.Nil(t, err)
// check that mon rook-ceph-mon3 exists
assert.NotNil(t, c.mapping.Node["rook-ceph-mon3"])
assert.Equal(t, "node2", c.mapping.Node["rook-ceph-mon3"].Name)
// check that mon rook-ceph-mon-c exists
logger.Infof("mapping: %+v", c.mapping.Node)
assert.NotNil(t, c.mapping.Node["rook-ceph-mon-c"])
assert.Equal(t, "node2", c.mapping.Node["rook-ceph-mon-c"].Name)
// check if mon2 has been deleted
var rsses *v1beta1.ReplicaSetList
......@@ -240,13 +239,13 @@ func TestCheckHealthTwoMonsOneNode(t *testing.T) {
deleted := false
for _, rs := range rsses.Items {
if rs.Name == "rook-ceph-mon1" || rs.Name == "rook-ceph-mon3" {
if rs.Name == "rook-ceph-mon-a" || rs.Name == "rook-ceph-mon-c" {
deleted = true
} else {
deleted = false
}
}
assert.Equal(t, true, deleted, "rook-ceph-mon2 not failovered/deleted after health check")
assert.Equal(t, true, deleted, "rook-ceph-mon-b not failovered/deleted after health check")
// enable different ceph mon map output
executorNextMons = true
......@@ -260,7 +259,7 @@ func TestCheckHealthTwoMonsOneNode(t *testing.T) {
for _, rs := range rsses.Items {
// both mons should always be on the same node as in this test due to the order
//the mons are processed in the loop
if (rs.Name == "rook-ceph-mon1" && rs.Spec.Template.Spec.NodeSelector[apis.LabelHostname] == "node1") || (rs.Name != "rook-ceph-mon3" && rs.Spec.Template.Spec.NodeSelector[apis.LabelHostname] == "node2") {
if (rs.Name == "rook-ceph-mon-a" && rs.Spec.Template.Spec.NodeSelector[apis.LabelHostname] == "node1") || (rs.Name != "rook-ceph-mon-c" && rs.Spec.Template.Spec.NodeSelector[apis.LabelHostname] == "node2") {
assert.Fail(t, fmt.Sprintf("mon %s shouldn't exist", rs.Name))
}
}
......@@ -287,14 +286,15 @@ func TestCheckMonsValid(t *testing.T) {
defer os.RemoveAll(c.context.ConfigDir)
// add two mons to the mapping on node0
c.mapping.Node["rook-ceph-mon1"] = &NodeInfo{
c.mapping.Node["rook-ceph-mon-a"] = &NodeInfo{
Name: "node0",
Address: "0.0.0.0",
}
c.mapping.Node["rook-ceph-mon2"] = &NodeInfo{
c.mapping.Node["rook-ceph-mon-b"] = &NodeInfo{
Name: "node1",
Address: "0.0.0.0",
}
c.maxMonID = 1
// add three nodes
for i := 0; i < 3; i++ {
......@@ -319,8 +319,8 @@ func TestCheckMonsValid(t *testing.T) {
_, err := c.checkMonsOnValidNodes()
assert.Nil(t, err)
assert.Equal(t, "node0", c.mapping.Node["rook-ceph-mon1"].Name)
assert.Equal(t, "node1", c.mapping.Node["rook-ceph-mon2"].Name)
assert.Equal(t, "node0", c.mapping.Node["rook-ceph-mon-a"].Name)
assert.Equal(t, "node1", c.mapping.Node["rook-ceph-mon-b"].Name)
// set node1 unschedulable and check that mon2 gets failovered to be mon3 to node2
node0, err := c.context.Clientset.CoreV1().Nodes().Get("node0", metav1.GetOptions{})
......@@ -331,7 +331,8 @@ func TestCheckMonsValid(t *testing.T) {
// add the pods so the getNodesInUse() works correctly
for i := 1; i <= 2; i++ {
po := c.makeMonPod(&monConfig{Name: fmt.Sprintf("mon%d", i)}, fmt.Sprintf("node%d", i-1))
name := fmt.Sprintf("mon%d", i)
po := c.makeMonPod(&monConfig{Name: name, DaemonName: name}, fmt.Sprintf("node%d", i-1))
_, err = clientset.CoreV1().Pods(c.Namespace).Create(po)
assert.Nil(t, err)
}
......@@ -340,11 +341,12 @@ func TestCheckMonsValid(t *testing.T) {
assert.Nil(t, err)
assert.Len(t, c.mapping.Node, 2)
assert.Nil(t, c.mapping.Node["rook-ceph-mon1"])
logger.Infof("mapping: %+v", c.mapping)
assert.Nil(t, c.mapping.Node["rook-ceph-mon-a"])
// the new mon should always be on the empty node2
// the failovered mon's name is "rook-ceph-mon0"
assert.Equal(t, "node2", c.mapping.Node["rook-ceph-mon0"].Name)
assert.Equal(t, "node1", c.mapping.Node["rook-ceph-mon2"].Name)
// the failovered mon's name is "rook-ceph-mon-a"
assert.Equal(t, "node2", c.mapping.Node["rook-ceph-mon-c"].Name)
assert.Equal(t, "node1", c.mapping.Node["rook-ceph-mon-b"].Name)
}
func TestCheckLessMonsStartNewMons(t *testing.T) {
......@@ -363,12 +365,12 @@ func TestCheckLessMonsStartNewMons(t *testing.T) {
}
c := New(context, "ns", "", "myversion", cephv1beta1.MonSpec{Count: 5, AllowMultiplePerNode: true},
rookalpha.Placement{}, false, v1.ResourceRequirements{}, metav1.OwnerReference{})
c.maxMonID = 1
c.clusterInfo = test.CreateConfigDir(1)
c.maxMonID = 0
c.clusterInfo = test.CreateConfigDir(0)
c.waitForStart = false
defer os.RemoveAll(c.context.ConfigDir)
err := c.checkHealth()
assert.Nil(t, err)
assert.Equal(t, 5, len(c.clusterInfo.Monitors))
assert.Equal(t, 5, len(c.clusterInfo.Monitors), fmt.Sprintf("mons: %v", c.clusterInfo.Monitors))
}
......@@ -21,6 +21,7 @@ import (
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"github.com/coreos/pkg/capnslog"
......@@ -71,7 +72,6 @@ type Cluster struct {
Namespace string
Keyring string
Version string
MasterHost string
Size int
AllowMultiplePerNode bool
Port int32
......@@ -91,9 +91,10 @@ type Cluster struct {
// monConfig for a single monitor
type monConfig struct {
Name string
PublicIP string
Port int32
Name string
DaemonName string
PublicIP string
Port int32
}
// Mapping mon node and port mapping
......@@ -216,18 +217,33 @@ func (c *Cluster) initMonConfig(size int) []*monConfig {
// initialize the mon pod info for mons that have been previously created
for _, monitor := range c.clusterInfo.Monitors {
mons = append(mons, &monConfig{Name: monitor.Name, Port: int32(mon.DefaultPort)})
mons = append(mons, &monConfig{Name: monitor.Name, DaemonName: daemonName(monitor.Name), Port: int32(mon.DefaultPort)})
}
// initialize mon info if we don't have enough mons (at first startup)
for i := len(c.clusterInfo.Monitors); i < size; i++ {
c.maxMonID++
mons = append(mons, &monConfig{Name: fmt.Sprintf("%s%d", appName, c.maxMonID), Port: int32(mon.DefaultPort)})
mons = append(mons, newMonConfig(c.maxMonID))
}
return mons
}
func newMonConfig(monID int) *monConfig {
daemonName := indexToName(monID)
return &monConfig{Name: fmt.Sprintf("%s-%s", appName, daemonName), DaemonName: daemonName, Port: int32(mon.DefaultPort)}
}
// Extract the daemon name from the full deployment name.
func daemonName(fullName string) string {
prefix := appName + "-"
if strings.HasPrefix(fullName, prefix) {
return fullName[len(prefix):]
}
// If the deployment name did not have the prefix, we have a legacy daemon name that was in the form rook-ceph-mon0 and should not change
return fullName
}
func (c *Cluster) initMonIPs(mons []*monConfig) error {
for _, m := range mons {
if c.HostNetwork {
......@@ -244,14 +260,14 @@ func (c *Cluster) initMonIPs(mons []*monConfig) error {
}
m.PublicIP = serviceIP
}
c.clusterInfo.Monitors[m.Name] = mon.ToCephMon(m.Name, m.PublicIP, m.Port)
c.clusterInfo.Monitors[m.DaemonName] = mon.ToCephMon(m.DaemonName, m.PublicIP, m.Port)
}
return nil
}
func (c *Cluster) createService(mon *monConfig) (string, error) {
labels := c.getLabels(mon.Name)
labels := c.getLabels(mon.DaemonName)
s := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: mon.Name,
......@@ -290,7 +306,7 @@ func (c *Cluster) createService(mon *monConfig) (string, error) {
return "", nil
}
logger.Infof("mon %s running at %s:%d", mon.Name, s.Spec.ClusterIP, mon.Port)
logger.Infof("mon %s running at %s:%d", mon.DaemonName, s.Spec.ClusterIP, mon.Port)
return s.Spec.ClusterIP, nil
}
......@@ -378,7 +394,7 @@ func (c *Cluster) waitForMonsToJoin(mons []*monConfig) error {
starting := []string{}
for _, m := range mons {
starting = append(starting, m.Name)
starting = append(starting, m.DaemonName)
}
// wait for the monitors to join quorum
......@@ -535,7 +551,7 @@ func (c *Cluster) startMon(m *monConfig, hostname string) error {
}
func waitForQuorumWithMons(context *clusterd.Context, clusterName string, mons []string) error {
logger.Infof("waiting for mon quorum")
logger.Infof("waiting for mon quorum with %v", mons)
// wait for monitors to establish quorum
retryCount := 0
......
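Editor's note: to make the Name/DaemonName split in monConfig concrete, the Kubernetes resources keep the prefixed name while Ceph only ever sees the letter. A hedged illustration with a stand-in struct; the field names and values are taken from this diff, the helper function is the editor's:

```go
package main

import "fmt"

// monConfig mirrors the two name fields added by this commit:
// Name is the Kubernetes object name, DaemonName is the Ceph mon id.
type monConfig struct {
	Name       string
	DaemonName string
	Port       int32
}

func sketchMonConfig(letter string) monConfig {
	return monConfig{
		Name:       "rook-ceph-mon-" + letter, // replicaset/pod/service name
		DaemonName: letter,                    // used for --name, monmap, keyring paths
		Port:       6790,
	}
}

func main() {
	m := sketchMonConfig("a")
	fmt.Println(m.Name, m.DaemonName) // rook-ceph-mon-a a
}
```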
......@@ -84,6 +84,14 @@ func newCluster(context *clusterd.Context, namespace string, hostNetwork bool, r
}
}
func TestExtractDaemonName(t *testing.T) {
assert.Equal(t, "a", daemonName("rook-ceph-mon-a"))
assert.Equal(t, "b", daemonName("rook-ceph-mon-b"))
assert.Equal(t, "zz", daemonName("rook-ceph-mon-zz"))
assert.Equal(t, "rook-ceph-mon0", daemonName("rook-ceph-mon0"))
assert.Equal(t, "rook-ceph-mon123", daemonName("rook-ceph-mon123"))
}
func TestStartMonPods(t *testing.T) {
namespace := "ns"
context := newTestStartCluster(namespace)
......@@ -151,7 +159,7 @@ func validateStart(t *testing.T, c *Cluster) {
assert.Equal(t, 4, len(s.StringData))
// there is only one pod created. the other two won't be created since the first one doesn't start
_, err = c.context.Clientset.Extensions().ReplicaSets(c.Namespace).Get("rook-ceph-mon0", metav1.GetOptions{})
_, err = c.context.Clientset.Extensions().ReplicaSets(c.Namespace).Get("rook-ceph-mon-a", metav1.GetOptions{})
assert.Nil(t, err)
}
......@@ -170,14 +178,14 @@ func TestSaveMonEndpoints(t *testing.T) {
cm, err := c.context.Clientset.CoreV1().ConfigMaps(c.Namespace).Get(EndpointConfigMapName, metav1.GetOptions{})
assert.Nil(t, err)
assert.Equal(t, "rook-ceph-mon1=1.2.3.1:6790", cm.Data[EndpointDataKey])
assert.Equal(t, "a=1.2.3.1:6790", cm.Data[EndpointDataKey])
assert.Equal(t, `{"node":{},"port":{}}`, cm.Data[MappingKey])
assert.Equal(t, "-1", cm.Data[MaxMonIDKey])
// update the config map
c.clusterInfo.Monitors["rook-ceph-mon1"].Endpoint = "2.3.4.5:6790"
c.clusterInfo.Monitors["a"].Endpoint = "2.3.4.5:6790"
c.maxMonID = 2
c.mapping.Node["rook-ceph-mon1"] = &NodeInfo{
c.mapping.Node["a"] = &NodeInfo{
Name: "node0",
Address: "1.1.1.1",
Hostname: "myhost",
......@@ -188,8 +196,8 @@ func TestSaveMonEndpoints(t *testing.T) {
cm, err = c.context.Clientset.CoreV1().ConfigMaps(c.Namespace).Get(EndpointConfigMapName, metav1.GetOptions{})
assert.Nil(t, err)
assert.Equal(t, "rook-ceph-mon1=2.3.4.5:6790", cm.Data[EndpointDataKey])
assert.Equal(t, `{"node":{"rook-ceph-mon1":{"Name":"node0","Hostname":"myhost","Address":"1.1.1.1"}},"port":{"node0":12345}}`, cm.Data[MappingKey])
assert.Equal(t, "a=2.3.4.5:6790", cm.Data[EndpointDataKey])
assert.Equal(t, `{"node":{"a":{"Name":"node0","Hostname":"myhost","Address":"1.1.1.1"}},"port":{"node0":12345}}`, cm.Data[MappingKey])
assert.Equal(t, "2", cm.Data[MaxMonIDKey])
}
......@@ -210,23 +218,23 @@ func TestMonInQuourm(t *testing.T) {
assert.False(t, monInQuorum(entry, quorum))
}
func TestMonID(t *testing.T) {
func TestNameToIndex(t *testing.T) {
// invalid
id, err := getMonID("m")
id, err := fullNameToIndex("m")
assert.NotNil(t, err)
assert.Equal(t, -1, id)
id, err = getMonID("mon")
id, err = fullNameToIndex("mon")
assert.NotNil(t, err)
assert.Equal(t, -1, id)
id, err = getMonID("rook-ceph-monitor0")
id, err = fullNameToIndex("rook-ceph-monitor0")
assert.NotNil(t, err)
assert.Equal(t, -1, id)
// valid
id, err = getMonID("rook-ceph-mon0")
id, err = fullNameToIndex("rook-ceph-mon-a")
assert.Nil(t, err)
assert.Equal(t, 0, id)
id, err = getMonID("rook-ceph-mon123")
id, err = fullNameToIndex("rook-ceph-mon123")
assert.Nil(t, err)
assert.Equal(t, 123, id)
}
......@@ -447,14 +455,17 @@ func TestHostNetworkPortIncrease(t *testing.T) {
mons := []*monConfig{
{
Name: "rook-ceph-mon1",
Port: cephmon.DefaultPort,
Name: "rook-ceph-mon-a",
DaemonName: "a",
Port: cephmon.DefaultPort,
},
{
Name: "rook-ceph-mon2",
Port: cephmon.DefaultPort,
Name: "rook-ceph-mon-b",
DaemonName: "b",
Port: cephmon.DefaultPort,
},
}
c.maxMonID = 1
err = c.assignMons(mons)
assert.Nil(t, err)
......@@ -462,11 +473,11 @@ func TestHostNetworkPortIncrease(t *testing.T) {
err = c.initMonIPs(mons)
assert.Nil(t, err)
assert.Equal(t, node.Name, c.mapping.Node["rook-ceph-mon1"].Name)
assert.Equal(t, node.Name, c.mapping.Node["rook-ceph-mon2"].Name)
assert.Equal(t, node.Name, c.mapping.Node["rook-ceph-mon-a"].Name)
assert.Equal(t, node.Name, c.mapping.Node["rook-ceph-mon-b"].Name)
sEndpoint := strings.Split(c.clusterInfo.Monitors["rook-ceph-mon1"].Endpoint, ":")
sEndpoint := strings.Split(c.clusterInfo.Monitors["a"].Endpoint, ":")
assert.Equal(t, strconv.Itoa(cephmon.DefaultPort), sEndpoint[1])
sEndpoint = strings.Split(c.clusterInfo.Monitors["rook-ceph-mon2"].Endpoint, ":")
sEndpoint = strings.Split(c.clusterInfo.Monitors["b"].Endpoint, ":")
assert.Equal(t, strconv.Itoa(cephmon.DefaultPort+1), sEndpoint[1])
}
......@@ -69,6 +69,7 @@ func (c *Cluster) makeReplicaSet(config *monConfig, hostname string) *extensions
ObjectMeta: metav1.ObjectMeta{
Name: config.Name,
Namespace: c.Namespace,
Labels: c.getLabels(config.DaemonName),
},
}
k8sutil.SetOwnerRef(c.context.Clientset, c.Namespace, &rs.ObjectMeta, &c.ownerRef)
......@@ -115,7 +116,7 @@ func (c *Cluster) makeMonPod(config *monConfig, hostname string) *v1.Pod {
ObjectMeta: metav1.ObjectMeta{
Name: config.Name,
Namespace: c.Namespace,
Labels: c.getLabels(config.Name),
Labels: c.getLabels(config.DaemonName),
Annotations: map[string]string{},
},
Spec: podSpec,
......@@ -136,7 +137,7 @@ func (c *Cluster) monContainer(config *monConfig, fsid string) v1.Container {
"ceph",
"mon",
fmt.Sprintf("--config-dir=%s", k8sutil.DataDir),
fmt.Sprintf("--name=%s", config.Name),
fmt.Sprintf("--name=%s", config.DaemonName),
fmt.Sprintf("--port=%d", config.Port),
fmt.Sprintf("--fsid=%s", fsid),
},
......
......@@ -48,11 +48,12 @@ func testPodSpec(t *testing.T, dataDir string) {
},
}, metav1.OwnerReference{})
c.clusterInfo = testop.CreateConfigDir(0)
config := &monConfig{Name: "rook-ceph-mon0", Port: 6790}
name := "rook-ceph-mon-a"
config := &monConfig{Name: name, DaemonName: name, Port: 6790}
pod := c.makeMonPod(config, "foo")
assert.NotNil(t, pod)
assert.Equal(t, "rook-ceph-mon0", pod.Name)
assert.Equal(t, "rook-ceph-mon-a", pod.Name)
assert.Equal(t, v1.RestartPolicyAlways, pod.Spec.RestartPolicy)
assert.Equal(t, 2, len(pod.Spec.Volumes))
assert.Equal(t, "rook-data", pod.Spec.Volumes[0].Name)
......@@ -65,7 +66,7 @@ func testPodSpec(t *testing.T, dataDir string) {
assert.Equal(t, dataDir, pod.Spec.Volumes[0].HostPath.Path)
}
assert.Equal(t, "rook-ceph-mon0", pod.ObjectMeta.Name)
assert.Equal(t, "rook-ceph-mon-a", pod.ObjectMeta.Name)
assert.Equal(t, appName, pod.ObjectMeta.Labels["app"])
assert.Equal(t, c.Namespace, pod.ObjectMeta.Labels["mon_cluster"])
......@@ -79,7 +80,7 @@ func testPodSpec(t *testing.T, dataDir string) {
assert.Equal(t, "ceph", cont.Args[0])
assert.Equal(t, "mon", cont.Args[1])
assert.Equal(t, "--config-dir=/var/lib/rook", cont.Args[2])
assert.Equal(t, "--name=rook-ceph-mon0", cont.Args[3])
assert.Equal(t, "--name="+name, cont.Args[3])
assert.Equal(t, "--port=6790", cont.Args[4])
assert.Equal(t, fmt.Sprintf("--fsid=%s", c.clusterInfo.FSID), cont.Args[5])
......
......@@ -39,6 +39,10 @@ import (
"k8s.io/client-go/kubernetes"
)
const (
maxPerChar = 26
)
// LoadClusterInfo constructs or loads a clusterinfo and returns it along with the maxMonID
func LoadClusterInfo(context *clusterd.Context, namespace string) (*mon.ClusterInfo, int, *Mapping, error) {
return CreateOrLoadClusterInfo(context, namespace, nil)
......@@ -135,7 +139,7 @@ func loadMonConfig(clientset kubernetes.Interface, namespace string) (map[string
// Make sure the max id is consistent with the current monitors
for _, m := range monEndpointMap {
id, _ := getMonID(m.Name)
id, _ := fullNameToIndex(m.Name)
if maxMonID < id {
maxMonID = id
}
......@@ -150,18 +154,6 @@ func loadMonConfig(clientset kubernetes.Interface, namespace string) (map[string
return monEndpointMap, maxMonID, monMapping, nil
}
// get the ID of a monitor from its name
func getMonID(name string) (int, error) {
if strings.Index(name, appName) != 0 || len(name) < len(appName) {
return -1, fmt.Errorf("unexpected mon name")
}
id, err := strconv.Atoi(name[len(appName):])
if err != nil {
return -1, err
}
return id, nil
}
func createClusterAccessSecret(clientset kubernetes.Interface, namespace string, clusterInfo *mon.ClusterInfo, ownerRef *metav1.OwnerReference) error {
logger.Infof("creating mon secrets for a new cluster")
var err error
......@@ -265,3 +257,56 @@ func extractKey(contents string) (string, error) {
}
return secret, nil
}
// convert an index to a mon name based on as few letters of the alphabet as possible
func indexToName(index int) string {
var result string
for {
i := index % maxPerChar
c := 'z' - maxPerChar + i + 1
result = fmt.Sprintf("%c%s", c, result)
if index < maxPerChar {
break
}
// subtract 1 since the character conversion is zero-based
index = (index / maxPerChar) - 1
}
return result
}
// convert the mon name to the numeric mon ID
func fullNameToIndex(name string) (int, error) {
prefix := appName + "-"
if strings.Index(name, prefix) != -1 && len(prefix) < len(name) {
return nameToIndex(name[len(prefix)+1:])
}
// attempt to parse the legacy mon name
legacyPrefix := appName
if strings.Index(name, legacyPrefix) == -1 || len(name) < len(appName) {
return -1, fmt.Errorf("unexpected mon name")
}
id, err := strconv.Atoi(name[len(legacyPrefix):])
if err != nil {
return -1, err
}
return id, nil
}
func nameToIndex(name string) (int, error) {
factor := 1
for i := 1; i < len(name); i++ {
factor *= maxPerChar
}
var result int
for _, c := range name {
charVal := int('z' - c + 1)
if charVal < 0 || charVal >= maxPerChar {
return -1, fmt.Errorf("invalid char '%c' in %s", c, name)
}
result += charVal * factor
factor /= maxPerChar
}
return result, nil
}
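Editor's note: the inverse mapping (letter name back to zero-based index) is shown below as a standalone sketch of bijective base-26 decoding, consistent with indexToName and the TestConvertMonID expectations that follow; it is the editor's illustration, not the upstream nameToIndex implementation:

```go
package main

import "fmt"

// lettersToIndex decodes a mon letter name back to its zero-based index:
// "a" -> 0, "z" -> 25, "aa" -> 26, "ba" -> 52, "aaa" -> 702.
func lettersToIndex(name string) int {
	idx := 0
	for _, c := range name {
		idx = idx*26 + int(c-'a'+1)
	}
	return idx - 1
}

func main() {
	for _, n := range []string{"a", "z", "aa", "az", "ba", "za", "aaa"} {
		fmt.Printf("%s -> %d\n", n, lettersToIndex(n)) // 0, 25, 26, 51, 52, 676, 702
	}
}
```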
/*
Copyright 2018 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mon
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestConvertMonID(t *testing.T) {
assert.Equal(t, "a", indexToName(0))
assert.Equal(t, "b", indexToName(1))
assert.Equal(t, "c", indexToName(2))
assert.Equal(t, "z", indexToName(25))
assert.Equal(t, "aa", indexToName(26))
assert.Equal(t, "ab", indexToName(27))
assert.Equal(t, "ac", indexToName(28))
assert.Equal(t, "az", indexToName(51))
assert.Equal(t, "ba", indexToName(52))
assert.Equal(t, "bb", indexToName(53))
assert.Equal(t, "bz", indexToName(77))
assert.Equal(t, "ca", indexToName(78))
assert.Equal(t, "za", indexToName(676))
assert.Equal(t, "aaa", indexToName(702))
assert.Equal(t, "aaz", indexToName(727))
assert.Equal(t, "aba", indexToName(728))
}
......@@ -24,7 +24,7 @@ import (
)
// CreateConfigDir creates a test cluster
func CreateConfigDir(mons int) *mon.ClusterInfo {
func CreateConfigDir(monCount int) *mon.ClusterInfo {
c := &mon.ClusterInfo{
FSID: "12345",
Name: "default",
......@@ -32,11 +32,12 @@ func CreateConfigDir(mons int) *mon.ClusterInfo {
AdminSecret: "adminsecret",
Monitors: map[string]*mon.CephMonitorConfig{},
}
for i := 1; i <= mons; i++ {
id := fmt.Sprintf("rook-ceph-mon%d", i)
mons := []string{"a", "b", "c", "d", "e"}
for i := 0; i < monCount; i++ {
id := mons[i]
c.Monitors[id] = &mon.CephMonitorConfig{
Name: id,
Endpoint: fmt.Sprintf("1.2.3.%d:6790", i),
Endpoint: fmt.Sprintf("1.2.3.%d:6790", i+1),
}
}
return c
......