Unverified Commit 78ac6613 authored by catsby

Merge branch 'master' into b-website-fix-k8s-config-link

* master: (34 commits)
  Use Shamir as KeK when migrating from auto-seal to shamir (#8172)
  changelog++
  ssh backend: support at character in role name (#8038)
  Fix typo in YAML markup (#8194)
  Fix typo (#8192)
  Fix k8s injector examples (#8179)
  update dependencies, patch nextjs config (#8184)
  Strip unnecessary payload in AD root cred rotation example (#8160)
  New Website! (#8154)
  Update CHANGELOG.md
  changelog++
  Fix panic when listener fails to startup (#8174)
  Create network layer abstraction to allow in-memory cluster traffic (#8173)
  Update test var name and tidy
  Factor out mysqlhelper so we can create mysql docker containers in other tests. (#8167)
  changelog++
  changelog++
  Pull wrapping creation to a var (#8137)
  ldap, okta: fix renewal when login policies are empty (#8072)
  Update CHANGELOG.md
  ...
parents dcbc4fbb eb5fba1c
Showing with 484 additions and 32 deletions
......@@ -35,7 +35,9 @@ jobs:
command: make ci-verify
install-ui-dependencies:
docker:
- image: node:10-buster
- environment:
JOBS: 2
image: node:10-buster
shell: /usr/bin/env bash -euo pipefail -c
working_directory: /go/src/github.com/hashicorp/vault
steps:
......@@ -98,7 +100,9 @@ jobs:
- GOTESTSUM_VERSION: 0.3.3
test-ui:
docker:
- image: node:10-buster
- environment:
JOBS: 2
image: node:10-buster
shell: /usr/bin/env bash -euo pipefail -c
working_directory: /go/src/github.com/hashicorp/vault
resource_class: medium+
......@@ -136,7 +140,9 @@ jobs:
path: ui/test-results
test-ui-browserstack:
docker:
- image: node:10-buster
- environment:
JOBS: 2
image: node:10-buster
shell: /usr/bin/env bash -euo pipefail -c
working_directory: /go/src/github.com/hashicorp/vault
resource_class: medium+
......@@ -299,6 +305,29 @@ jobs:
- GO_VERSION: 1.12.14
- GO111MODULE: 'off'
- GOTESTSUM_VERSION: 0.3.3
website-docker-image:
docker:
- image: circleci/buildpack-deps
shell: /usr/bin/env bash -euo pipefail -c
steps:
- checkout
- setup_remote_docker
- run:
command: |
echo 'export PACKAGE_LOCK_CHANGED=$(git diff --name-only $(git log --pretty=format:'%h' -n1 HEAD~1)...HEAD | grep -c website/package-lock.json)' >> $BASH_ENV
name: Diff package-lock.json
- run:
command: |
if [ "$CIRCLE_BRANCH" = "master" ] && [ $PACKAGE_LOCK_CHANGED -gt 0 ]; then
cd website/
docker build -t hashicorp/vault-website:$CIRCLE_SHA1 .
docker tag hashicorp/vault-website:$CIRCLE_SHA1 hashicorp/vault-website:latest
docker login -u $DOCKER_USER -p $DOCKER_PASS
docker push hashicorp/vault-website
else
echo "Not building a new website docker image - branch is not master and/or dependencies have not changed."
fi
name: Build Docker Image if Necessary
workflows:
ci:
jobs:
......@@ -326,6 +355,7 @@ workflows:
- test-go-race:
requires:
- build-go-dev
- website-docker-image
version: 2
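The new website-docker-image job above only pushes an image when the branch is master and website/package-lock.json changed in the last commit. A hypothetical way to dry-run the detection logic locally (run from a repo checkout; not part of this change):

    PREV=$(git log --pretty=format:'%h' -n1 HEAD~1)
    if [ "$(git diff --name-only "$PREV"...HEAD | grep -c website/package-lock.json)" -gt 0 ]; then
      echo "lockfile changed - a push to master would rebuild the image"
    else
      echo "no lockfile change - the build step would be skipped"
    fi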
# Original config.yml file:
......@@ -426,7 +456,9 @@ workflows:
# working_directory: /go/src/github.com/hashicorp/vault
# node:
# docker:
# - image: node:10-buster
# - environment:
# JOBS: 2
# image: node:10-buster
# shell: /usr/bin/env bash -euo pipefail -c
# working_directory: /go/src/github.com/hashicorp/vault
# python:
......@@ -566,6 +598,29 @@ workflows:
# export PATH=\"${PWD}\"/bin:${PATH}
# make test-ui-browserstack
# name: Run Browserstack Tests
# website-docker-image:
# docker:
# - image: circleci/buildpack-deps
# shell: /usr/bin/env bash -euo pipefail -c
# steps:
# - checkout
# - setup_remote_docker
# - run:
# command: |
# echo 'export PACKAGE_LOCK_CHANGED=$(git diff --name-only $(git log --pretty=format:'%h' -n1 HEAD~1)...HEAD | grep -c website/package-lock.json)' >> $BASH_ENV
# name: Diff package-lock.json
# - run:
# command: |
# if [ \"$CIRCLE_BRANCH\" = \"master\" ] && [ $PACKAGE_LOCK_CHANGED -gt 0 ]; then
# cd website/
# docker build -t hashicorp/vault-website:$CIRCLE_SHA1 .
# docker tag hashicorp/vault-website:$CIRCLE_SHA1 hashicorp/vault-website:latest
# docker login -u $DOCKER_USER -p $DOCKER_PASS
# docker push hashicorp/vault-website
# else
# echo \"Not building a new website docker image - branch is not master and/or dependencies have not changed.\"
# fi
# name: Build Docker Image if Necessary
# references:
# cache:
# go-sum: go-sum-v1-{{ checksum \"go.sum\" }}
......@@ -599,4 +654,5 @@ workflows:
# - build-go-dev
# - test-go-race:
# requires:
# - build-go-dev
\ No newline at end of file
# - build-go-dev
# - website-docker-image
\ No newline at end of file
......@@ -36,6 +36,8 @@ executors:
node:
docker:
- image: *NODE_IMAGE
environment:
JOBS: 2
shell: /usr/bin/env bash -euo pipefail -c
working_directory: /go/src/github.com/hashicorp/vault
python:
......
docker:
- image: circleci/buildpack-deps
shell: /usr/bin/env bash -euo pipefail -c
steps:
- checkout
- setup_remote_docker
- run:
name: Diff package-lock.json
command: |
echo 'export PACKAGE_LOCK_CHANGED=$(git diff --name-only $(git log --pretty=format:'%h' -n1 HEAD~1)...HEAD | grep -c website/package-lock.json)' >> $BASH_ENV
- run:
name: Build Docker Image if Necessary
command: |
if [ "$CIRCLE_BRANCH" = "master" ] && [ $PACKAGE_LOCK_CHANGED -gt 0 ]; then
cd website/
docker build -t hashicorp/vault-website:$CIRCLE_SHA1 .
docker tag hashicorp/vault-website:$CIRCLE_SHA1 hashicorp/vault-website:latest
docker login -u $DOCKER_USER -p $DOCKER_PASS
docker push hashicorp/vault-website
else
echo "Not building a new website docker image - branch is not master and/or dependencies have not changed."
fi
......@@ -15,12 +15,13 @@ jobs:
- install-ui-dependencies
- build-go-dev
filters:
branches:
# Forked pull requests have CIRCLE_BRANCH set to pull/XXX
ignore: /pull\/[0-9]+/
branches:
# Forked pull requests have CIRCLE_BRANCH set to pull/XXX
ignore: /pull\/[0-9]+/
- test-go:
requires:
- build-go-dev
- test-go-race:
requires:
- build-go-dev
- website-docker-image
......@@ -2,7 +2,6 @@
IMPROVEMENTS:
* auth/azure: Fix Azure compute client to use correct base URL [AZURE-27]
* auth/jwt: Additional OIDC callback parameters available for CLI logins [JWT-80 & JWT-86]
* auth/jwt: Bound claims may be optionally configured using globs [JWT-89]
* core: Separate out service discovery interface from storage interface to allow
......@@ -10,19 +9,40 @@ IMPROVEMENTS:
* cli: Incorrect TLS configuration will now correctly fail [GH-8025]
* secrets/gcp: Allow specifying the TTL for a service key [GCP-54]
* secrets/gcp: Add support for rotating root keys [GCP-53]
* secrets/nomad: Add support to specify TLS options per Nomad backend [GH-8083]
* storage/raft: Nodes in the raft cluster can all be given possible leader
addresses for them to continuously try and join one of them, thus automating
the process of join to a greater extent [GH-7856]
BUG FIXES:
* plugin: Fix issue where a plugin unwrap request potentially used an expired token [GH-8058]
* secrets/database: Fix issue where a manual static role rotation could potentially panic [GH-8098]
* secrets/database/mysql: Fix issue where special characters for a MySQL password were encoded [GH-8040]
* ui: Update headless Chrome flag to fix `yarn run test:oss` [GH-8035]
* ui: Change `.box-radio` height to min-height to prevent overflow issues [GH-8065]
## 1.3.2 (Unreleased)
IMPROVEMENTS:
* auth/aws: Add aws metadata to identity alias [GH-7975]
BUG FIXES:
* auth/azure: Fix Azure compute client to use correct base URL [AZURE-27]
* auth/ldap: Fix renewal of tokens without configured policies that are
generated by an LDAP login [GH-8072]
* auth/okta: Fix renewal of tokens without configured policies that are
generated by an Okta login [GH-8072]
* plugin: Fix issue where a plugin unwrap request potentially used an expired token [GH-8058]
* replication: Fix issue where a forwarded request from a performance/standby node could run
  into a timeout
* secrets/database: Fix issue where a manual static role rotation could potentially panic [GH-8098]
* secrets/database: Fix issue where a manual root credential rotation request is not forwarded
to the primary node [GH-8125]
* secrets/database: Fix issue where a manual static role rotation request is not forwarded
to the primary node [GH-8126]
* secrets/database/mysql: Fix issue where special characters for a MySQL password were encoded [GH-8040]
* ui: Fix deleting namespaces [GH-8132]
* ui: Fix Error handler on kv-secret edit and kv-secret view pages [GH-8133]
* ui: Fix OIDC callback to check storage [GH-7929]
* ui: Change `.box-radio` height to min-height to prevent overflow issues [GH-8065]
## 1.3.1 (December 18th, 2019)
......
......@@ -13,6 +13,7 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/sdk/helper/awsutil"
)
......@@ -40,7 +41,8 @@ func GenerateLoginData(creds *credentials.Credentials, headerValue, configuredRe
// Use the credentials we've found to construct an STS session
region, err := awsutil.GetRegion(configuredRegion)
if err != nil {
return nil, err
hclog.Default().Warn(fmt.Sprintf("defaulting region to %q due to %s", awsutil.DefaultRegion, err.Error()))
region = awsutil.DefaultRegion
}
stsSession, err := session.NewSessionWithOptions(session.Options{
Config: aws.Config{
......
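GenerateLoginData used to fail hard when no region could be resolved; it now warns and proceeds with awsutil.DefaultRegion. Annotated in isolation (a sketch; the lookup order described in the comment is awsutil.GetRegion's, from vault/sdk/helper/awsutil):

    region, err := awsutil.GetRegion(configuredRegion)
    if err != nil {
        // GetRegion tries the explicit value, then AWS_REGION /
        // AWS_DEFAULT_REGION, the shared config file, and EC2 instance
        // metadata; only when all of those fail do we land here.
        hclog.Default().Warn(fmt.Sprintf("defaulting region to %q due to %s",
            awsutil.DefaultRegion, err.Error()))
        region = awsutil.DefaultRegion
    }
    // region is now always usable for the STS session constructed next.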
......@@ -133,9 +133,10 @@ func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *f
password := req.Auth.InternalData["password"].(string)
loginPolicies, resp, groupNames, err := b.Login(ctx, req, username, password)
if len(loginPolicies) == 0 {
if err != nil || (resp != nil && resp.IsError()) {
return resp, err
}
finalPolicies := cfg.TokenPolicies
if len(loginPolicies) > 0 {
finalPolicies = append(finalPolicies, loginPolicies...)
......
......@@ -118,7 +118,7 @@ func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *f
}
loginPolicies, resp, groupNames, err := b.Login(ctx, req, username, password)
if len(loginPolicies) == 0 {
if err != nil || (resp != nil && resp.IsError()) {
return resp, err
}
......
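Both renew paths (ldap above, okta here) now share the same shape: abort only on a real login error, and treat an empty policy list as "fall back to the backend's configured token policies". Pieced together from the two hunks (cfg is the backend config loaded by the surrounding handler, not shown in the diff):

    loginPolicies, resp, groupNames, err := b.Login(ctx, req, username, password)
    if err != nil || (resp != nil && resp.IsError()) {
        return resp, err
    }
    // An empty loginPolicies is no longer fatal; renewals of tokens issued
    // without login-specific policies now succeed.
    finalPolicies := cfg.TokenPolicies
    if len(loginPolicies) > 0 {
        finalPolicies = append(finalPolicies, loginPolicies...)
    }
    _ = groupNames // group membership handling continues below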
......@@ -62,6 +62,15 @@ func (b *backend) client(ctx context.Context, s logical.Storage) (*api.Client, e
if conf.Token != "" {
nomadConf.SecretID = conf.Token
}
if conf.CACert != "" {
nomadConf.TLSConfig.CACertPEM = []byte(conf.CACert)
}
if conf.ClientCert != "" {
nomadConf.TLSConfig.ClientCertPEM = []byte(conf.ClientCert)
}
if conf.ClientKey != "" {
nomadConf.TLSConfig.ClientKeyPEM = []byte(conf.ClientKey)
}
}
client, err := api.NewClient(nomadConf)
......
......@@ -28,6 +28,21 @@ func pathConfigAccess(b *backend) *framework.Path {
Type: framework.TypeInt,
Description: "Max length for name of generated Nomad tokens",
},
"ca_cert": &framework.FieldSchema{
Type: framework.TypeString,
Description: `CA certificate to use when verifying Nomad server certificate,
must be x509 PEM encoded.`,
},
"client_cert": &framework.FieldSchema{
Type: framework.TypeString,
Description: `Client certificate used for Nomad's TLS communication,
must be x509 PEM encoded and if this is set you need to also set client_key.`,
},
"client_key": &framework.FieldSchema{
Type: framework.TypeString,
Description: `Client key used for Nomad's TLS communication,
must be x509 PEM encoded and if this is set you need to also set client_cert.`,
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
......@@ -101,6 +116,18 @@ func (b *backend) pathConfigAccessWrite(ctx context.Context, req *logical.Reques
if ok {
conf.Token = token.(string)
}
caCert, ok := data.GetOk("ca_cert")
if ok {
conf.CACert = caCert.(string)
}
clientCert, ok := data.GetOk("client_cert")
if ok {
conf.ClientCert = clientCert.(string)
}
clientKey, ok := data.GetOk("client_key")
if ok {
conf.ClientKey = clientKey.(string)
}
conf.MaxTokenNameLength = data.Get("max_token_name_length").(int)
......@@ -126,4 +153,7 @@ type accessConfig struct {
Address string `json:"address"`
Token string `json:"token"`
MaxTokenNameLength int `json:"max_token_name_length"`
CACert string `json:"ca_cert"`
ClientCert string `json:"client_cert"`
ClientKey string `json:"client_key"`
}
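With the new fields wired through pathConfigAccessWrite and accessConfig, TLS for the Nomad connection can be configured in a single write. A hypothetical sketch with the Go API client (the mount path "nomad", the address, the token variable, and the PEM variables are assumptions):

    // client is an *api.Client from github.com/hashicorp/vault/api.
    _, err := client.Logical().Write("nomad/config/access", map[string]interface{}{
        "address":     "https://nomad.example.com:4646",
        "token":       nomadMgmtToken, // hypothetical management token
        "ca_cert":     string(caPEM),
        "client_cert": string(certPEM), // must be set together with client_key
        "client_key":  string(keyPEM),
    })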
......@@ -35,6 +35,7 @@ const (
testOTPKeyType = "otp"
testDynamicKeyType = "dynamic"
testCIDRList = "127.0.0.1/32"
testAtRoleName = "test@RoleName"
testDynamicRoleName = "testDynamicRoleName"
testOTPRoleName = "testOTPRoleName"
testKeyName = "testKeyName"
......@@ -256,6 +257,7 @@ func TestSSHBackend_Lookup(t *testing.T) {
resp2 := []string{testOTPRoleName}
resp3 := []string{testDynamicRoleName, testOTPRoleName}
resp4 := []string{testDynamicRoleName}
resp5 := []string{testAtRoleName}
logicaltest.Test(t, logicaltest.TestCase{
AcceptanceTest: true,
LogicalFactory: testingFactory,
......@@ -270,6 +272,10 @@ func TestSSHBackend_Lookup(t *testing.T) {
testLookupRead(t, data, resp4),
testRoleDelete(t, testDynamicRoleName),
testLookupRead(t, data, resp1),
testRoleWrite(t, testAtRoleName, testDynamicRoleData),
testLookupRead(t, data, resp5),
testRoleDelete(t, testAtRoleName),
testLookupRead(t, data, resp1),
},
})
}
......@@ -289,12 +295,29 @@ func TestSSHBackend_RoleList(t *testing.T) {
},
},
}
resp3 := map[string]interface{}{
"keys": []string{testAtRoleName, testOTPRoleName},
"key_info": map[string]interface{}{
testOTPRoleName: map[string]interface{}{
"key_type": testOTPKeyType,
},
testAtRoleName: map[string]interface{}{
"key_type": testOTPKeyType,
},
},
}
logicaltest.Test(t, logicaltest.TestCase{
LogicalFactory: testingFactory,
Steps: []logicaltest.TestStep{
testRoleList(t, resp1),
testRoleWrite(t, testOTPRoleName, testOTPRoleData),
testRoleList(t, resp2),
testRoleWrite(t, testAtRoleName, testOTPRoleData),
testRoleList(t, resp3),
testRoleDelete(t, testAtRoleName),
testRoleList(t, resp2),
testRoleDelete(t, testOTPRoleName),
testRoleList(t, resp1),
},
})
}
......@@ -319,6 +342,8 @@ func TestSSHBackend_DynamicKeyCreate(t *testing.T) {
testNamedKeysWrite(t, testKeyName, testSharedPrivateKey),
testRoleWrite(t, testDynamicRoleName, testDynamicRoleData),
testCredsWrite(t, testDynamicRoleName, data, false),
testRoleWrite(t, testAtRoleName, testDynamicRoleData),
testCredsWrite(t, testAtRoleName, data, false),
},
})
}
......@@ -343,6 +368,10 @@ func TestSSHBackend_OTPRoleCrud(t *testing.T) {
testRoleRead(t, testOTPRoleName, respOTPRoleData),
testRoleDelete(t, testOTPRoleName),
testRoleRead(t, testOTPRoleName, nil),
testRoleWrite(t, testAtRoleName, testOTPRoleData),
testRoleRead(t, testAtRoleName, respOTPRoleData),
testRoleDelete(t, testAtRoleName),
testRoleRead(t, testAtRoleName, nil),
},
})
}
......@@ -374,6 +403,10 @@ func TestSSHBackend_DynamicRoleCrud(t *testing.T) {
testRoleRead(t, testDynamicRoleName, respDynamicRoleData),
testRoleDelete(t, testDynamicRoleName),
testRoleRead(t, testDynamicRoleName, nil),
testRoleWrite(t, testAtRoleName, testDynamicRoleData),
testRoleRead(t, testAtRoleName, respDynamicRoleData),
testRoleDelete(t, testAtRoleName),
testRoleRead(t, testAtRoleName, nil),
},
})
}
......@@ -405,6 +438,8 @@ func TestSSHBackend_OTPCreate(t *testing.T) {
Steps: []logicaltest.TestStep{
testRoleWrite(t, testOTPRoleName, testOTPRoleData),
testCredsWrite(t, testOTPRoleName, data, false),
testRoleWrite(t, testAtRoleName, testOTPRoleData),
testCredsWrite(t, testAtRoleName, data, false),
},
})
}
......@@ -1108,14 +1143,17 @@ func testRoleRead(t *testing.T, roleName string, expected map[string]interface{}
if err := mapstructure.Decode(resp.Data, &d); err != nil {
return fmt.Errorf("error decoding response:%s", err)
}
if roleName == testOTPRoleName {
switch d.KeyType {
case "otp":
if d.KeyType != expected["key_type"] || d.DefaultUser != expected["default_user"] || d.CIDRList != expected["cidr_list"] {
return fmt.Errorf("data mismatch. bad: %#v", resp)
}
} else {
case "dynamic":
if d.AdminUser != expected["admin_user"] || d.CIDRList != expected["cidr_list"] || d.KeyName != expected["key"] || d.KeyType != expected["key_type"] {
return fmt.Errorf("data mismatch. bad: %#v", resp)
}
default:
return fmt.Errorf("unknown key type. bad: %#v", resp)
}
return nil
},
......
......@@ -20,7 +20,7 @@ type sshOTP struct {
func pathCredsCreate(b *backend) *framework.Path {
return &framework.Path{
Pattern: "creds/" + framework.GenericNameRegex("role"),
Pattern: "creds/" + framework.GenericNameWithAtRegex("role"),
Fields: map[string]*framework.FieldSchema{
"role": &framework.FieldSchema{
Type: framework.TypeString,
......
......@@ -69,7 +69,7 @@ func pathListRoles(b *backend) *framework.Path {
func pathRoles(b *backend) *framework.Path {
return &framework.Path{
Pattern: "roles/" + framework.GenericNameRegex("role"),
Pattern: "roles/" + framework.GenericNameWithAtRegex("role"),
Fields: map[string]*framework.FieldSchema{
"role": &framework.FieldSchema{
Type: framework.TypeString,
......
......@@ -37,7 +37,7 @@ type creationBundle struct {
func pathSign(b *backend) *framework.Path {
return &framework.Path{
Pattern: "sign/" + framework.GenericNameRegex("role"),
Pattern: "sign/" + framework.GenericNameWithAtRegex("role"),
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: b.pathSign,
......
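All three role-scoped paths (creds/, roles/, and sign/, above) switch from GenericNameRegex to GenericNameWithAtRegex, which is what lets the new test@RoleName test steps pass. A brief sketch with the Go API client (mount path "ssh" assumed):

    // A role name containing '@' is now accepted end to end.
    _, err := client.Logical().Write("ssh/roles/dev@example.com", map[string]interface{}{
        "key_type":     "otp",
        "default_user": "ubuntu",
        "cidr_list":    "10.0.0.0/24",
    })
    // ...and credentials can be issued against the same name:
    secret, err := client.Logical().Write("ssh/creds/dev@example.com",
        map[string]interface{}{"ip": "10.0.0.5"})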
......@@ -7,19 +7,125 @@ import (
"encoding/base64"
"testing"
hclog "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/testhelpers"
"github.com/hashicorp/vault/shamir"
"github.com/hashicorp/go-hclog"
wrapping "github.com/hashicorp/go-kms-wrapping"
aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
"github.com/hashicorp/vault/api"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/sdk/physical"
physInmem "github.com/hashicorp/vault/sdk/physical/inmem"
"github.com/hashicorp/vault/shamir"
"github.com/hashicorp/vault/vault"
"github.com/hashicorp/vault/vault/seal"
)
func TestSealMigrationAutoToShamir(t *testing.T) {
logger := logging.NewVaultLogger(hclog.Trace).Named(t.Name())
phys, err := physInmem.NewInmem(nil, logger)
if err != nil {
t.Fatal(err)
}
haPhys, err := physInmem.NewInmemHA(nil, logger)
if err != nil {
t.Fatal(err)
}
autoSeal := vault.NewAutoSeal(seal.NewTestSeal(nil))
cluster := vault.NewTestCluster(t, &vault.CoreConfig{
Seal: autoSeal,
Physical: phys,
HAPhysical: haPhys.(physical.HABackend),
DisableSealWrap: true,
}, &vault.TestClusterOptions{
Logger: logger,
HandlerFunc: vaulthttp.Handler,
SkipInit: true,
NumCores: 1,
})
cluster.Start()
defer cluster.Cleanup()
client := cluster.Cores[0].Client
initResp, err := client.Sys().Init(&api.InitRequest{
RecoveryShares: 5,
RecoveryThreshold: 3,
})
if err != nil {
t.Fatal(err)
}
testhelpers.WaitForActiveNode(t, cluster)
keys := initResp.RecoveryKeysB64
rootToken := initResp.RootToken
core := cluster.Cores[0].Core
client.SetToken(rootToken)
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
shamirSeal := vault.NewDefaultSeal(&seal.Access{
Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
Logger: logger.Named("shamir"),
}),
})
shamirSeal.SetCore(core)
if err := adjustCoreForSealMigration(logger, core, shamirSeal, autoSeal); err != nil {
t.Fatal(err)
}
var resp *api.SealStatusResponse
unsealOpts := &api.UnsealOpts{}
for _, key := range keys {
unsealOpts.Key = key
unsealOpts.Migrate = false
resp, err = client.Sys().UnsealWithOptions(unsealOpts)
if err == nil {
t.Fatal("expected error due to lack of migrate parameter")
}
unsealOpts.Migrate = true
resp, err = client.Sys().UnsealWithOptions(unsealOpts)
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("expected response")
}
if !resp.Sealed {
break
}
}
if resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", *resp)
}
// Seal and unseal again to verify that things are working fine
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
unsealOpts.Migrate = false
for _, key := range keys {
unsealOpts.Key = key
resp, err = client.Sys().UnsealWithOptions(unsealOpts)
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("expected response")
}
if !resp.Sealed {
break
}
}
if resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", *resp)
}
}
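The loop above exercises the migration contract: while a seal migration is pending, plain unseal attempts must fail and each key has to be supplied with the migrate flag set. From the client's side that is just (sketch; recoveryKeyB64 stands in for one of the recovery shares returned by Init):

    resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{
        Key:     recoveryKeyB64,
        Migrate: true, // required until the migration completes
    })

The CLI equivalent is vault operator unseal -migrate.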
func TestSealMigration(t *testing.T) {
logger := logging.NewVaultLogger(hclog.Trace).Named(t.Name())
phys, err := physInmem.NewInmem(nil, logger)
......@@ -30,13 +136,13 @@ func TestSealMigration(t *testing.T) {
if err != nil {
t.Fatal(err)
}
shamirwrapper := vault.NewDefaultSeal(&seal.Access{
wrapper := vault.NewDefaultSeal(&seal.Access{
Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
Logger: logger.Named("shamir"),
}),
})
coreConfig := &vault.CoreConfig{
Seal: shamirwrapper,
Seal: wrapper,
Physical: phys,
HAPhysical: haPhys.(physical.HABackend),
DisableSealWrap: true,
......@@ -149,6 +255,41 @@ func TestSealMigration(t *testing.T) {
t.Fatalf("expected unsealed state; got %#v", *resp)
}
// Make sure the seal configs were updated correctly
b, err := autoSeal.BarrierConfig(context.Background())
if err != nil {
t.Fatal(err)
}
if b.Type != autoSeal.BarrierType() {
t.Fatalf("bad seal config: %#v", b)
}
if b.SecretShares != 1 {
t.Fatalf("bad seal config: %#v", b)
}
if b.SecretThreshold != 1 {
t.Fatalf("bad seal config: %#v", b)
}
if b.StoredShares != 1 {
t.Fatalf("bad seal config: %#v", b)
}
r, err := autoSeal.RecoveryConfig(context.Background())
if err != nil {
t.Fatal(err)
}
if r.Type != wrapping.Shamir {
t.Fatalf("bad seal config: %#v", r)
}
if r.SecretShares != 2 {
t.Fatalf("bad seal config: %#v", r)
}
if r.SecretThreshold != 2 {
t.Fatalf("bad seal config: %#v", r)
}
if r.StoredShares != 0 {
t.Fatalf("bad seal config: %#v", r)
}
cluster.Cleanup()
cluster.Cores = nil
}
......@@ -243,6 +384,41 @@ func TestSealMigration(t *testing.T) {
t.Fatalf("expected unsealed state; got %#v", *resp)
}
// Make sure the seal configs were updated correctly
b, err := altSeal.BarrierConfig(context.Background())
if err != nil {
t.Fatal(err)
}
if b.Type != altSeal.BarrierType() {
t.Fatalf("bad seal config: %#v", b)
}
if b.SecretShares != 1 {
t.Fatalf("bad seal config: %#v", b)
}
if b.SecretThreshold != 1 {
t.Fatalf("bad seal config: %#v", b)
}
if b.StoredShares != 1 {
t.Fatalf("bad seal config: %#v", b)
}
r, err := altSeal.RecoveryConfig(context.Background())
if err != nil {
t.Fatal(err)
}
if r.Type != wrapping.Shamir {
t.Fatalf("bad seal config: %#v", r)
}
if r.SecretShares != 2 {
t.Fatalf("bad seal config: %#v", r)
}
if r.SecretThreshold != 2 {
t.Fatalf("bad seal config: %#v", r)
}
if r.StoredShares != 0 {
t.Fatalf("bad seal config: %#v", r)
}
cluster.Cleanup()
cluster.Cores = nil
}
......@@ -257,7 +433,13 @@ func TestSealMigration(t *testing.T) {
core := cluster.Cores[0].Core
if err := adjustCoreForSealMigration(logger, core, shamirwrapper, altSeal); err != nil {
wrapper := vault.NewDefaultSeal(&seal.Access{
Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
Logger: logger.Named("shamir"),
}),
})
if err := adjustCoreForSealMigration(logger, core, wrapper, altSeal); err != nil {
t.Fatal(err)
}
......@@ -286,6 +468,29 @@ func TestSealMigration(t *testing.T) {
t.Fatalf("expected unsealed state; got %#v", *resp)
}
// Make sure the seal configs were updated correctly
b, err := wrapper.BarrierConfig(context.Background())
if err != nil {
t.Fatal(err)
}
if b.Type != wrapping.Shamir {
t.Fatalf("bad seal config: %#v", b)
}
if b.SecretShares != 2 {
t.Fatalf("bad seal config: %#v", b)
}
if b.SecretThreshold != 2 {
t.Fatalf("bad seal config: %#v", b)
}
if b.StoredShares != 1 {
t.Fatalf("bad seal config: %#v", b)
}
_, err = wrapper.RecoveryConfig(context.Background())
if err == nil {
t.Fatal("expected error")
}
cluster.Cleanup()
cluster.Cores = nil
}
......@@ -293,7 +498,7 @@ func TestSealMigration(t *testing.T) {
{
logger.SetLevel(hclog.Trace)
logger.Info("integ: verify autoseal is off and the expected key shares work")
coreConfig.Seal = shamirwrapper
coreConfig.Seal = wrapper
cluster := vault.NewTestCluster(t, coreConfig, clusterConfig)
cluster.Start()
defer cluster.Cleanup()
......
......@@ -1505,6 +1505,15 @@ CLUSTER_SYNTHESIS_COMPLETE:
}()
}
// When the underlying storage is raft, kick off retry join if it was specified
// in the configuration
if config.Storage.Type == "raft" {
if err := core.InitiateRetryJoin(context.Background()); err != nil {
c.UI.Error(fmt.Sprintf("Failed to initiate raft retry join, %q", err.Error()))
return 1
}
}
// Perform service discovery registrations and initialization of
// HTTP server after the verifyOnly check.
......
......@@ -3,6 +3,7 @@ package server
import (
"errors"
"fmt"
"github.com/hashicorp/vault/sdk/helper/jsonutil"
"io"
"io/ioutil"
"os"
......@@ -730,11 +731,25 @@ func ParseStorage(result *Config, list *ast.ObjectList, name string) error {
key = item.Keys[0].Token.Value().(string)
}
var m map[string]string
if err := hcl.DecodeObject(&m, item.Val); err != nil {
var config map[string]interface{}
if err := hcl.DecodeObject(&config, item.Val); err != nil {
return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key))
}
m := make(map[string]string)
for key, val := range config {
valStr, ok := val.(string)
if ok {
m[key] = valStr
continue
}
valBytes, err := jsonutil.EncodeJSON(val)
if err != nil {
return err
}
m[key] = string(valBytes)
}
// Pull out the redirect address since it's common to all backends
var redirectAddr string
if v, ok := m["redirect_addr"]; ok {
......
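ParseStorage previously forced every storage option to decode as a string, which broke list-valued options such as raft's retry_join. Non-string HCL values are now JSON-encoded into the string map. A minimal sketch of what the new loop does to such a value (jsonutil is vault/sdk/helper/jsonutil; its encoder appends a trailing newline):

    val := []map[string]interface{}{
        {"leader_api_addr": "http://127.0.0.1:8200"},
        {"leader_api_addr": "http://127.0.0.2:8200"},
    }
    valBytes, err := jsonutil.EncodeJSON(val)
    if err != nil {
        return err
    }
    m["retry_join"] = string(valBytes)
    // m["retry_join"] is now the compact JSON array plus "\n", which is
    // exactly what the retry_join test below expects.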
......@@ -37,3 +37,7 @@ func TestParseListeners(t *testing.T) {
func TestParseEntropy(t *testing.T) {
testParseEntropy(t, true)
}
func TestConfigRaftRetryJoin(t *testing.T) {
testConfigRaftRetryJoin(t)
}
......@@ -12,6 +12,38 @@ import (
"github.com/hashicorp/hcl/hcl/ast"
)
func testConfigRaftRetryJoin(t *testing.T) {
config, err := LoadConfigFile("./test-fixtures/raft_retry_join.hcl")
if err != nil {
t.Fatal(err)
}
retryJoinConfig := `[{"leader_api_addr":"http://127.0.0.1:8200"},{"leader_api_addr":"http://127.0.0.2:8200"},{"leader_api_addr":"http://127.0.0.3:8200"}]` + "\n"
expected := &Config{
Listeners: []*Listener{
{
Type: "tcp",
Config: map[string]interface{}{
"address": "127.0.0.1:8200",
},
},
},
Storage: &Storage{
Type: "raft",
Config: map[string]string{
"path": "/storage/path/raft",
"node_id": "raft1",
"retry_join": retryJoinConfig,
},
},
DisableMlock: true,
DisableMlockRaw: true,
}
if !reflect.DeepEqual(config, expected) {
t.Fatalf("\nexpected: %#v\n actual:%#v\n", config, expected)
}
}
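The referenced fixture file is not part of this diff, but from the expected Config above it must look roughly like the following HCL (a reconstruction, not the committed file):

    disable_mlock = true

    listener "tcp" {
      address = "127.0.0.1:8200"
    }

    storage "raft" {
      path    = "/storage/path/raft"
      node_id = "raft1"
      retry_join {
        leader_api_addr = "http://127.0.0.1:8200"
      }
      retry_join {
        leader_api_addr = "http://127.0.0.2:8200"
      }
      retry_join {
        leader_api_addr = "http://127.0.0.3:8200"
      }
    }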
func testLoadConfigFile_topLevel(t *testing.T, entropy *Entropy) {
config, err := LoadConfigFile("./test-fixtures/config2.hcl")
if err != nil {
......
......@@ -3,6 +3,7 @@ package seal
import (
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-hclog"
wrapping "github.com/hashicorp/go-kms-wrapping"
"github.com/hashicorp/go-kms-wrapping/wrappers/awskms"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/sdk/logical"
......@@ -10,9 +11,14 @@ import (
"github.com/hashicorp/vault/vault/seal"
)
func configureAWSKMSSeal(configSeal *server.Seal, infoKeys *[]string, info *map[string]string, logger hclog.Logger, inseal vault.Seal) (vault.Seal, error) {
var getAWSKMSFunc = func(opts *wrapping.WrapperOptions, config map[string]string) (wrapping.Wrapper, map[string]string, error) {
kms := awskms.NewWrapper(nil)
kmsInfo, err := kms.SetConfig(configSeal.Config)
kmsInfo, err := kms.SetConfig(config)
return kms, kmsInfo, err
}
func configureAWSKMSSeal(configSeal *server.Seal, infoKeys *[]string, info *map[string]string, logger hclog.Logger, inseal vault.Seal) (vault.Seal, error) {
kms, kmsInfo, err := getAWSKMSFunc(nil, configSeal.Config)
if err != nil {
// If the error is any other than logical.KeyNotFoundError, return the error
if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) {
......
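Hoisting the wrapper construction into the package-level getAWSKMSFunc variable keeps configureAWSKMSSeal's behavior identical while opening a test seam: a test in the same package could stub AWS KMS out entirely. A hypothetical sketch (the stub and its info map are illustrative, not part of this change):

    old := getAWSKMSFunc
    defer func() { getAWSKMSFunc = old }()
    getAWSKMSFunc = func(opts *wrapping.WrapperOptions, config map[string]string) (wrapping.Wrapper, map[string]string, error) {
        // Swap in an in-memory AEAD wrapper so no AWS call is made.
        return aeadwrapper.NewWrapper(opts), map[string]string{"AWS KMS KeyID": "stub"}, nil
    }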