Commit ff940f83 authored by Scott G. Miller

Merge branch 'master' of github.com:hashicorp/vault

parents fcc186e6 6700382a
Showing with 450 additions and 361 deletions
......@@ -5,6 +5,10 @@ CHANGES:
* token: Token renewals will now return token policies within `token_policies`, identity policies within `identity_policies`, and the full policy set within `policies` (example below). [[GH-8535](https://github.com/hashicorp/vault/pull/8535)]
* kv: Return the value of delete_version_after when reading kv/config, even if it is set to the default. [[GH-42](https://github.com/hashicorp/vault-plugin-secrets-kv/pull/42)]
IMPROVEMENTS:
* secrets/gcp: Support BigQuery dataset ACLs in absence of IAM endpoints [[GH-78](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/78)]
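For illustration (not part of this diff), a minimal Go sketch of reading the split policy lists from a renewal response with the Vault API client, as referenced in the token changelog entry above; it assumes VAULT_ADDR and VAULT_TOKEN point at a reachable Vault with a renewable token.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are set and the token is renewable.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	secret, err := client.Auth().Token().RenewSelf(3600)
	if err != nil {
		log.Fatal(err)
	}
	if secret == nil || secret.Auth == nil {
		log.Fatal("no auth data in renewal response")
	}
	fmt.Println("token_policies:   ", secret.Auth.TokenPolicies)
	fmt.Println("identity_policies:", secret.Auth.IdentityPolicies)
	fmt.Println("policies:         ", secret.Auth.Policies)
}
```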
## 1.4.1 (TBD)
CHANGES:
......@@ -17,6 +21,10 @@ IMPROVEMENTS:
BUG FIXES:
* config/seal: Fix segfault when seal block is removed [[GH-8517](https://github.com/hashicorp/vault/pull/8517)]
* core: Fix blocked requests if a SIGHUP is issued while a long-running request holds the state lock.
Also fixes a deadlock that can happen if `vault debug` with the config target is run during this time.
[[GH-8755](https://github.com/hashicorp/vault/pull/8755)]
* sys/wrapping: Allow unwrapping of wrapping tokens which contain nil data [[GH-8714](https://github.com/hashicorp/vault/pull/8714)]
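As a hedged illustration of the sys/wrapping fix above (not part of this diff), a minimal Go sketch that unwraps a response-wrapping token with the API client; the token value is a placeholder, and after GH-8714 the call succeeds even when the wrapped data is nil.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder for a real single-use response-wrapping token.
	wrappingToken := "s.example-wrapping-token"
	secret, err := client.Logical().Unwrap(wrappingToken)
	if err != nil {
		log.Fatal(err)
	}
	if secret == nil {
		fmt.Println("unwrapped response contained no data")
		return
	}
	fmt.Printf("unwrapped data: %#v\n", secret.Data)
}
```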
## 1.4.0 (April 7th, 2020)
......
......@@ -16,11 +16,13 @@ import (
"time"
"github.com/aws/aws-sdk-go/aws"
awsClient "github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/fullsailor/pkcs7"
"github.com/hashicorp/errwrap"
cleanhttp "github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-retryablehttp"
uuid "github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/helper/awsutil"
......@@ -35,6 +37,10 @@ const (
iamAuthType = "iam"
ec2AuthType = "ec2"
ec2EntityType = "ec2_instance"
// Retry configuration
retryWaitMin = 500 * time.Millisecond
retryWaitMax = 30 * time.Second
)
func (b *backend) pathLogin() *framework.Path {
......@@ -1198,6 +1204,7 @@ func (b *backend) pathLoginUpdateIam(ctx context.Context, req *logical.Request,
endpoint := "https://sts.amazonaws.com"
maxRetries := awsClient.DefaultRetryerMaxNumRetries
if config != nil {
if config.IAMServerIdHeaderValue != "" {
err = validateVaultHeaderValue(headers, parsedUrl, config.IAMServerIdHeaderValue)
......@@ -1208,9 +1215,12 @@ func (b *backend) pathLoginUpdateIam(ctx context.Context, req *logical.Request,
if config.STSEndpoint != "" {
endpoint = config.STSEndpoint
}
if config.MaxRetries >= 0 {
maxRetries = config.MaxRetries
}
}
callerID, err := submitCallerIdentityRequest(method, endpoint, parsedUrl, body, headers)
callerID, err := submitCallerIdentityRequest(ctx, maxRetries, method, endpoint, parsedUrl, body, headers)
if err != nil {
return logical.ErrorResponse(fmt.Sprintf("error making upstream request: %v", err)), nil
}
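A hedged usage sketch (not part of this diff): setting the retry count that feeds `config.MaxRetries` above through the AWS auth method's client config, using the Vault Go API client. The `auth/aws/config/client` path and the `max_retries` field name are assumptions based on the struct field shown here; a negative value would fall back to the AWS SDK default.

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Field names are assumed to mirror the backend config shown above.
	_, err = client.Logical().Write("auth/aws/config/client", map[string]interface{}{
		"max_retries":  5,                           // assumed name for config.MaxRetries
		"sts_endpoint": "https://sts.amazonaws.com", // optional endpoint override
	})
	if err != nil {
		log.Fatal(err)
	}
}
```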
......@@ -1555,18 +1565,31 @@ func parseGetCallerIdentityResponse(response string) (GetCallerIdentityResponse,
return result, err
}
func submitCallerIdentityRequest(method, endpoint string, parsedUrl *url.URL, body string, headers http.Header) (*GetCallerIdentityResult, error) {
func submitCallerIdentityRequest(ctx context.Context, maxRetries int, method, endpoint string, parsedUrl *url.URL, body string, headers http.Header) (*GetCallerIdentityResult, error) {
// NOTE: We need to ensure we're calling STS, instead of acting as an unintended network proxy
// The protection against this is that this method will only call the endpoint specified in the
// client config (defaulting to sts.amazonaws.com), so it would require a Vault admin to override
// the endpoint to talk to alternate web addresses
request := buildHttpRequest(method, endpoint, parsedUrl, body, headers)
retryableReq, err := retryablehttp.FromRequest(request)
if err != nil {
return nil, err
}
retryableReq = retryableReq.WithContext(ctx)
client := cleanhttp.DefaultClient()
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
retryingClient := &retryablehttp.Client{
HTTPClient: client,
RetryWaitMin: retryWaitMin,
RetryWaitMax: retryWaitMax,
RetryMax: maxRetries,
CheckRetry: retryablehttp.DefaultRetryPolicy,
Backoff: retryablehttp.DefaultBackoff,
}
response, err := client.Do(request)
response, err := retryingClient.Do(retryableReq)
if err != nil {
return nil, errwrap.Wrapf("error making request: {{err}}", err)
}
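For reference, a self-contained sketch of the same retry pattern outside Vault (an illustration, not the backend's code): a `retryablehttp.Client` wraps a `cleanhttp` client, backs off exponentially between the wait bounds introduced above, and honors a caller-supplied context.

```go
package main

import (
	"context"
	"log"
	"net/http"
	"time"

	cleanhttp "github.com/hashicorp/go-cleanhttp"
	"github.com/hashicorp/go-retryablehttp"
)

func main() {
	req, err := retryablehttp.NewRequest(http.MethodGet, "https://sts.amazonaws.com", nil)
	if err != nil {
		log.Fatal(err)
	}
	req = req.WithContext(context.Background())

	retryingClient := &retryablehttp.Client{
		HTTPClient:   cleanhttp.DefaultClient(),
		RetryWaitMin: 500 * time.Millisecond, // matches retryWaitMin above
		RetryWaitMax: 30 * time.Second,       // matches retryWaitMax above
		RetryMax:     3,                      // illustrative retry budget
		CheckRetry:   retryablehttp.DefaultRetryPolicy,
		Backoff:      retryablehttp.DefaultBackoff,
	}

	resp, err := retryingClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}
```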
......
......@@ -93,7 +93,7 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri
if b.Logger().IsDebug() {
b.Logger().Debug("error getting user bind DN", "error", err)
}
return nil, logical.ErrorResponse("ldap operation failed"), nil, nil
return nil, logical.ErrorResponse("ldap operation failed: unable to retrieve user bind DN"), nil, nil
}
if b.Logger().IsDebug() {
......@@ -110,7 +110,7 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri
if b.Logger().IsDebug() {
b.Logger().Debug("ldap bind failed", "error", err)
}
return nil, logical.ErrorResponse("ldap operation failed"), nil, nil
return nil, logical.ErrorResponse("ldap operation failed: failed to bind as user"), nil, nil
}
// We re-bind to the BindDN if it's defined because we assume
......@@ -120,7 +120,7 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri
if b.Logger().IsDebug() {
b.Logger().Debug("error while attempting to re-bind with the BindDN User", "error", err)
}
return nil, logical.ErrorResponse("ldap operation failed"), nil, nil
return nil, logical.ErrorResponse("ldap operation failed: failed to re-bind with the BindDN user"), nil, nil
}
if b.Logger().IsDebug() {
b.Logger().Debug("re-bound to original binddn")
......@@ -135,7 +135,7 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri
if cfg.AnonymousGroupSearch {
c, err = ldapClient.DialLDAP(cfg.ConfigEntry)
if err != nil {
return nil, logical.ErrorResponse("ldap operation failed"), nil, nil
return nil, logical.ErrorResponse("ldap operation failed: failed to connect to LDAP server"), nil, nil
}
defer c.Close() // Defer closing of this connection, as the deferral above closes the other defined connection
}
......
// +build !enterprise
package command
import (
"context"
"encoding/base64"
"testing"
wrapping "github.com/hashicorp/go-kms-wrapping"
aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/testhelpers"
sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal"
"github.com/hashicorp/vault/helper/testhelpers/teststorage"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/vault"
"github.com/hashicorp/vault/vault/seal"
)
func TestSealMigration_TransitToShamir(t *testing.T) {
t.Parallel()
t.Run("inmem", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToShamir(t, teststorage.InmemBackendSetup)
})
t.Run("file", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToShamir(t, teststorage.FileBackendSetup)
})
t.Run("consul", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToShamir(t, teststorage.ConsulBackendSetup)
})
t.Run("raft", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToShamir(t, teststorage.RaftBackendSetup)
})
}
func testSealMigrationTransitToShamir(t *testing.T, setup teststorage.ClusterSetupMutator) {
// Create the transit server.
tcluster := sealhelper.NewTransitSealServer(t)
defer tcluster.Cleanup()
tcluster.MakeKey(t, "key1")
var transitSeal vault.Seal
// Create a cluster that uses transit.
conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{
DisableSealWrap: true,
}, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
SkipInit: true,
NumCores: 3,
SealFunc: func() vault.Seal {
transitSeal = tcluster.MakeSeal(t, "key1")
return transitSeal
},
},
setup,
)
opts.SetupFunc = nil
cluster := vault.NewTestCluster(t, conf, opts)
cluster.Start()
defer cluster.Cleanup()
// Initialize the cluster, and fetch the recovery keys.
client := cluster.Cores[0].Client
initResp, err := client.Sys().Init(&api.InitRequest{
RecoveryShares: 5,
RecoveryThreshold: 3,
})
if err != nil {
t.Fatal(err)
}
for _, k := range initResp.RecoveryKeysB64 {
b, _ := base64.RawStdEncoding.DecodeString(k)
cluster.RecoveryKeys = append(cluster.RecoveryKeys, b)
}
testhelpers.WaitForActiveNode(t, cluster)
rootToken := initResp.RootToken
client.SetToken(rootToken)
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
// Create a Shamir seal.
logger := cluster.Logger.Named("shamir")
shamirSeal := vault.NewDefaultSeal(&seal.Access{
Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
Logger: logger,
}),
})
// Transition to Shamir seal.
if err := adjustCoreForSealMigration(logger, cluster.Cores[0].Core, shamirSeal, transitSeal); err != nil {
t.Fatal(err)
}
// Unseal and migrate to Shamir.
// Although we're unsealing using the recovery keys, this is still an
// autounseal; if we stopped the transit cluster this would fail.
var resp *api.SealStatusResponse
for _, key := range initResp.RecoveryKeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err == nil {
t.Fatal("expected error due to lack of migrate parameter")
}
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key, Migrate: true})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
testhelpers.WaitForActiveNode(t, cluster)
// Seal the cluster.
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
// Nuke the transit server; assign nil to Cores so the deferred Cleanup
// doesn't break.
tcluster.Cleanup()
tcluster.Cores = nil
// Unseal the cluster. Now the recovery keys are actually the barrier
// unseal keys.
for _, key := range initResp.RecoveryKeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
// Make sure the seal configs were updated correctly.
b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background())
if err != nil {
t.Fatal(err)
}
verifyBarrierConfig(t, b, wrapping.Shamir, 5, 3, 1)
if r != nil {
t.Fatalf("expected nil recovery config, got: %#v", r)
}
}
......@@ -6,6 +6,7 @@ import (
"testing"
wrapping "github.com/hashicorp/go-kms-wrapping"
aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/testhelpers"
sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal"
......@@ -289,29 +290,6 @@ func testSealMigrationShamirToTestSeal(t *testing.T, setup teststorage.ClusterSe
t.Fatal(err)
}
////// Seal the transit cluster; we expect the unseal of our main cluster
////// to fail as a result.
////tcluster.EnsureCoresSealed(t)
////// Verify that we cannot unseal. Now the barrier unseal keys are actually
////// the recovery keys.
////for _, key := range initResp.KeysB64 {
//// resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
//// if err != nil {
//// break
//// }
//// if resp == nil || !resp.Sealed {
//// break
//// }
////}
////if err == nil || resp != nil {
//// t.Fatalf("expected sealed state; got %#v", resp)
////}
////// Unseal the transit server; we expect the unseal to work now on our main
////// cluster.
////tcluster.UnsealCores(t)
// Verify that we can unseal.
for _, key := range initResp.KeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
......@@ -342,27 +320,31 @@ func TestSealMigration_TransitToTestSeal(t *testing.T) {
testSealMigrationTransitToTestSeal(t, teststorage.InmemBackendSetup)
})
//t.Run("file", func(t *testing.T) {
// t.Parallel()
// testSealMigrationTransitToTestSeal(t, teststorage.FileBackendSetup)
//})
t.Run("file", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToTestSeal(t, teststorage.FileBackendSetup)
})
//t.Run("consul", func(t *testing.T) {
// t.Parallel()
// testSealMigrationTransitToTestSeal(t, teststorage.ConsulBackendSetup)
//})
t.Run("consul", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToTestSeal(t, teststorage.ConsulBackendSetup)
})
//t.Run("raft", func(t *testing.T) {
// t.Parallel()
// testSealMigrationTransitToTestSeal(t, teststorage.RaftBackendSetup)
//})
t.Run("raft", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToTestSeal(t, teststorage.RaftBackendSetup)
})
}
func testSealMigrationTransitToTestSeal(t *testing.T, setup teststorage.ClusterSetupMutator) {
// Create the transit server.
tcluster := sealhelper.NewTransitSealServer(t)
defer tcluster.Cleanup()
defer func() {
if tcluster != nil {
tcluster.Cleanup()
}
}()
tcluster.MakeKey(t, "key1")
var transitSeal vault.Seal
......@@ -441,11 +423,6 @@ func testSealMigrationTransitToTestSeal(t *testing.T, setup teststorage.ClusterS
t.Fatal(err)
}
// Nuke the transit server; assign nil to Cores so the deferred Cleanup
// doesn't break.
tcluster.Cleanup()
tcluster.Cores = nil
// Unseal the cluster. Now the recovery keys are actually the barrier
// unseal keys.
for _, key := range initResp.RecoveryKeysB64 {
......@@ -460,6 +437,7 @@ func testSealMigrationTransitToTestSeal(t *testing.T, setup teststorage.ClusterS
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
testhelpers.WaitForActiveNode(t, cluster)
// Make sure the seal configs were updated correctly.
b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background())
......@@ -468,6 +446,190 @@ func testSealMigrationTransitToTestSeal(t *testing.T, setup teststorage.ClusterS
}
verifyBarrierConfig(t, b, wrapping.Test, 1, 1, 1)
verifyBarrierConfig(t, r, wrapping.Shamir, 5, 3, 0)
// Now that migration is done, we can stop the transit cluster, since we
// can seal/unseal without it.
tcluster.Cleanup()
tcluster = nil
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
for _, key := range initResp.RecoveryKeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
}
func TestSealMigration_TransitToShamir(t *testing.T) {
t.Parallel()
t.Run("inmem", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToShamir(t, teststorage.InmemBackendSetup)
})
t.Run("file", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToShamir(t, teststorage.FileBackendSetup)
})
t.Run("consul", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToShamir(t, teststorage.ConsulBackendSetup)
})
t.Run("raft", func(t *testing.T) {
t.Parallel()
testSealMigrationTransitToShamir(t, teststorage.RaftBackendSetup)
})
}
func testSealMigrationTransitToShamir(t *testing.T, setup teststorage.ClusterSetupMutator) {
// Create the transit server.
tcluster := sealhelper.NewTransitSealServer(t)
defer func() {
if tcluster != nil {
tcluster.Cleanup()
}
}()
tcluster.MakeKey(t, "key1")
var transitSeal vault.Seal
// Create a cluster that uses transit.
conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{
DisableSealWrap: true,
}, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
SkipInit: true,
NumCores: 3,
SealFunc: func() vault.Seal {
transitSeal = tcluster.MakeSeal(t, "key1")
return transitSeal
},
},
setup,
)
opts.SetupFunc = nil
cluster := vault.NewTestCluster(t, conf, opts)
cluster.Start()
defer cluster.Cleanup()
// Initialize the cluster, and fetch the recovery keys.
client := cluster.Cores[0].Client
initResp, err := client.Sys().Init(&api.InitRequest{
RecoveryShares: 5,
RecoveryThreshold: 3,
})
if err != nil {
t.Fatal(err)
}
for _, k := range initResp.RecoveryKeysB64 {
b, _ := base64.RawStdEncoding.DecodeString(k)
cluster.RecoveryKeys = append(cluster.RecoveryKeys, b)
}
testhelpers.WaitForActiveNode(t, cluster)
rootToken := initResp.RootToken
client.SetToken(rootToken)
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
// Create a Shamir seal.
logger := cluster.Logger.Named("shamir")
shamirSeal := vault.NewDefaultSeal(&vaultseal.Access{
Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
Logger: logger,
}),
})
// Transition to Shamir seal.
if err := adjustCoreForSealMigration(logger, cluster.Cores[0].Core, shamirSeal, transitSeal); err != nil {
t.Fatal(err)
}
// Unseal and migrate to Shamir.
// Although we're unsealing using the recovery keys, this is still an
// autounseal; if we stopped the transit cluster this would fail.
var resp *api.SealStatusResponse
for _, key := range initResp.RecoveryKeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err == nil {
t.Fatal("expected error due to lack of migrate parameter")
}
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key, Migrate: true})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
testhelpers.WaitForActiveNode(t, cluster)
// Seal the cluster.
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
// Unseal the cluster. Now the recovery keys are actually the barrier
// unseal keys.
for _, key := range initResp.RecoveryKeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
testhelpers.WaitForActiveNode(t, cluster)
// Make sure the seal configs were updated correctly.
b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background())
if err != nil {
t.Fatal(err)
}
verifyBarrierConfig(t, b, wrapping.Shamir, 5, 3, 1)
if r != nil {
t.Fatalf("expected nil recovery config, got: %#v", r)
}
// Now that migration is done, we can stop the transit cluster, since we
// can seal/unseal without it.
tcluster.Cleanup()
tcluster = nil
if err := client.Sys().Seal(); err != nil {
t.Fatal(err)
}
for _, key := range initResp.RecoveryKeysB64 {
resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.Sealed {
break
}
}
if resp == nil || resp.Sealed {
t.Fatalf("expected unsealed state; got %#v", resp)
}
}
/*
......
......@@ -16,7 +16,6 @@ import (
)
var (
onEnterprise = false
createSecureRandomReaderFunc = createSecureRandomReader
adjustCoreConfigForEnt = adjustCoreConfigForEntNoop
)
......@@ -62,10 +61,6 @@ func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal
return errors.New(`Recovery seal configuration not found for existing seal`)
}
if onEnterprise && barrierSeal.BarrierType() == wrapping.Shamir {
return errors.New("Migrating from autoseal to Shamir seal is not currently supported on Vault Enterprise")
}
var migrationSeal vault.Seal
var newSeal vault.Seal
......@@ -103,17 +98,17 @@ func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal
// Set the appropriate barrier and recovery configs.
switch {
case migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported():
case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported():
// Migrating from auto->auto, copy the configs over
newSeal.SetCachedBarrierConfig(existBarrierSealConfig)
newSeal.SetCachedRecoveryConfig(existRecoverySealConfig)
case migrationSeal.RecoveryKeySupported():
case migrationSeal != nil && newSeal != nil && migrationSeal.RecoveryKeySupported():
// Migrating from auto->shamir, clone auto's recovery config and set
// stored keys to 1.
newSealConfig := existRecoverySealConfig.Clone()
newSealConfig.StoredShares = 1
newSeal.SetCachedBarrierConfig(newSealConfig)
case newSeal.RecoveryKeySupported():
case newSeal != nil && newSeal.RecoveryKeySupported():
// Migrating from shamir->auto, set a new barrier config and set
// recovery config to a clone of shamir's barrier config with stored
// keys set to 0.
......
......@@ -14,7 +14,6 @@ require (
github.com/NYTimes/gziphandler v1.1.1
github.com/SAP/go-hdb v0.14.1
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af // indirect
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5
github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2
......@@ -26,7 +25,6 @@ require (
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 // indirect
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0
github.com/cockroachdb/apd v1.1.0 // indirect
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c
github.com/coreos/go-semver v0.2.0
github.com/denisenkom/go-mssqldb v0.0.0-20190412130859-3b1d194e553a
......@@ -69,34 +67,32 @@ require (
github.com/hashicorp/nomad/api v0.0.0-20191220223628-edc62acd919d
github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17
github.com/hashicorp/raft-snapshot v1.0.2-0.20190827162939-8117efcc5aab
github.com/hashicorp/vault-plugin-auth-alicloud v0.5.4-beta1
github.com/hashicorp/vault-plugin-auth-azure v0.5.4-beta1
github.com/hashicorp/vault-plugin-auth-centrify v0.5.4-beta1
github.com/hashicorp/vault-plugin-auth-cf v0.5.3-beta1
github.com/hashicorp/vault-plugin-auth-gcp v0.6.0-beta1
github.com/hashicorp/vault-plugin-auth-jwt v0.6.0-beta1
github.com/hashicorp/vault-plugin-auth-kerberos v0.1.4-beta1
github.com/hashicorp/vault-plugin-auth-kubernetes v0.6.0-beta1
github.com/hashicorp/vault-plugin-auth-oci v0.5.3-beta1
github.com/hashicorp/vault-plugin-database-elasticsearch v0.5.3-beta1
github.com/hashicorp/vault-plugin-database-mongodbatlas v0.1.0-beta1
github.com/hashicorp/vault-plugin-secrets-ad v0.6.4-beta1
github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.4-beta1
github.com/hashicorp/vault-plugin-secrets-azure v0.5.5-beta1
github.com/hashicorp/vault-plugin-secrets-gcp v0.6.0-beta1
github.com/hashicorp/vault-plugin-secrets-gcpkms v0.5.4-beta1
github.com/hashicorp/vault-plugin-secrets-kv v0.5.4-beta1
github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.1.1
github.com/hashicorp/vault-plugin-secrets-openldap v0.1.0-beta1.0.20200306174116-e7553b03b931
github.com/hashicorp/vault/api v1.0.5-0.20200215224050-f6547fa8e820
github.com/hashicorp/vault/sdk v0.1.14-0.20200305172021-03a3749f220d
github.com/hashicorp/vault-plugin-auth-alicloud v0.5.5
github.com/hashicorp/vault-plugin-auth-azure v0.5.5
github.com/hashicorp/vault-plugin-auth-centrify v0.5.5
github.com/hashicorp/vault-plugin-auth-cf v0.5.4
github.com/hashicorp/vault-plugin-auth-gcp v0.6.1
github.com/hashicorp/vault-plugin-auth-jwt v0.6.2
github.com/hashicorp/vault-plugin-auth-kerberos v0.1.5
github.com/hashicorp/vault-plugin-auth-kubernetes v0.6.1
github.com/hashicorp/vault-plugin-auth-oci v0.5.4
github.com/hashicorp/vault-plugin-database-elasticsearch v0.5.4
github.com/hashicorp/vault-plugin-database-mongodbatlas v0.1.1
github.com/hashicorp/vault-plugin-secrets-ad v0.6.5
github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.5
github.com/hashicorp/vault-plugin-secrets-azure v0.5.6
github.com/hashicorp/vault-plugin-secrets-gcp v0.6.1
github.com/hashicorp/vault-plugin-secrets-gcpkms v0.5.5
github.com/hashicorp/vault-plugin-secrets-kv v0.5.5
github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.1.2
github.com/hashicorp/vault-plugin-secrets-openldap v0.1.2
github.com/hashicorp/vault/api v1.0.5-0.20200317185738-82f498082f02
github.com/hashicorp/vault/sdk v0.1.14-0.20200317185738-82f498082f02
github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect
github.com/jackc/pgx v3.3.0+incompatible // indirect
github.com/jcmturner/gokrb5/v8 v8.0.0
github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f
github.com/jefferai/jsonx v1.0.0
github.com/joyent/triton-go v0.0.0-20190112182421-51ffac552869
github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f
github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f
github.com/kr/pretty v0.1.0
github.com/kr/text v0.1.0
......@@ -129,7 +125,6 @@ require (
github.com/sasha-s/go-deadlock v0.2.0
github.com/shirou/gopsutil v2.19.9+incompatible
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 // indirect
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect
github.com/stretchr/testify v1.4.0
github.com/tidwall/pretty v1.0.0 // indirect
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c // indirect
......
......@@ -68,12 +68,12 @@ vault secrets enable ssh
vault secrets enable totp
vault secrets enable transit
# Enterprise backends
VERSION=$(vault status -format=json | jq -r .version)
if [[ $VERSION =~ prem|ent ]]
# Enable enterprise features
if [[ ! -z "$VAULT_LICENSE" ]]
then
vault write sys/license text="$VAULT_LICENSE"
vault secrets enable kmip
vault secrets enable transform
fi
# Output OpenAPI, optionally formatted
......
......@@ -9,6 +9,6 @@ var (
CgoEnabled bool
Version = "1.4.0"
VersionPrerelease = "beta1"
VersionPrerelease = ""
VersionMetadata = ""
)
......@@ -113,6 +113,12 @@ export default Component.extend({
this.send('setAndBroadcast', path, valueToSet);
},
setAndBroadcastTtl(path, value) {
const alwaysSendValue = path === 'expiry' || path === 'safetyBuffer';
let valueToSet = value.enabled === true || alwaysSendValue ? `${value.seconds}s` : undefined;
this.send('setAndBroadcast', path, `${valueToSet}`);
},
codemirrorUpdated(path, isString, value, codemirror) {
codemirror.performLint();
const hasErrors = codemirror.state.lint.marked.length > 0;
......
......@@ -15,12 +15,15 @@
* @param time=30 {Number} - The time (in the default units) which will be adjustable by the user of the form
* @param unit="s" {String} - This is the unit key which will show by default on the form. Can be one of `s` (seconds), `m` (minutes), `h` (hours), `d` (days)
* @param recalculationTimeout=5000 {Number} - This is the time, in milliseconds, that `recalculateSeconds` will be be true after time is updated
* @param initialValue {String} - This is the value set initially (particularly from a string like '30h')
*/
import Ember from 'ember';
import Component from '@ember/component';
import { computed } from '@ember/object';
import { task, timeout } from 'ember-concurrency';
import { typeOf } from '@ember/utils';
import Duration from 'Duration.js';
import layout from '../templates/components/ttl-picker2';
const secondsMap = {
......@@ -43,8 +46,34 @@ export default Component.extend({
helperTextDisabled: 'Allow tokens to be used indefinitely',
helperTextEnabled: 'Disable the use of the token after',
time: 30,
unit: 'm',
unit: 's',
recalculationTimeout: 5000,
initialValue: null,
init() {
this._super(...arguments);
const value = this.initialValue;
// if initial value is unset use params passed in as defaults
if (!value && value !== 0) {
return;
}
let seconds = 30;
if (typeOf(value) === 'number') {
seconds = value;
} else {
try {
seconds = Duration.parse(value).seconds();
} catch (e) {
console.error(e);
// if parsing fails leave as default 30
}
}
this.setProperties({
time: seconds,
unit: 's',
});
},
unitOptions: computed(function() {
return [
{ label: 'seconds', value: 's' },
......@@ -53,16 +82,15 @@ export default Component.extend({
{ label: 'days', value: 'd' },
];
}),
TTL: computed('enableTTL', 'seconds', function() {
handleChange() {
let { time, unit, enableTTL, seconds } = this.getProperties('time', 'unit', 'enableTTL', 'seconds');
return {
const ttl = {
enabled: enableTTL,
seconds,
timeString: time + unit,
};
}),
this.onChange(ttl);
},
updateTime: task(function*(newTime) {
this.set('errorMessage', '');
let parsedTime;
......@@ -75,7 +103,7 @@ export default Component.extend({
return;
}
this.set('time', parsedTime);
this.onChange(this.TTL);
this.handleChange();
if (Ember.testing) {
return;
}
......@@ -107,11 +135,11 @@ export default Component.extend({
} else {
this.recalculateTime(newUnit);
}
this.onChange(this.TTL);
this.handleChange();
},
toggleEnabled() {
this.toggleProperty('enableTTL');
this.onChange(this.TTL);
this.handleChange();
},
},
});
......@@ -96,14 +96,13 @@
label=labelString
}}
{{else if (eq attr.options.editType "ttl")}}
{{ttl-picker
data-test-input=attr.name
initialValue=(or (get model valuePath) attr.options.defaultValue)
labelText=labelString
warning=attr.options.warning
setDefaultValue=(or (get model valuePath) attr.options.setDefault false)
onChange=(action (action "setAndBroadcast" valuePath))
}}
<TtlPicker2
@onChange={{action (action "setAndBroadcastTtl" valuePath)}}
@label={{labelString}}
@helperTextDisabled={{or attr.helpText "Vault will use the default lease duration"}}
@helperTextEnabled="Lease will expire after"
@initialValue={{or (get model valuePath) attr.options.setDefault}}
/>
{{else if (eq attr.options.editType "stringArray")}}
{{string-list
data-test-input=attr.name
......
......@@ -10,11 +10,11 @@
<span class="has-text-grey">{{helperText}}</span>
</Toggle>
{{#if enableTTL}}
<div class="ttl-show-picker">
<div class="ttl-show-picker" data-test-ttl-picker-group="{{label}}">
<div class="field is-grouped is-marginless">
<div class="control is-marginless">
<input
data-test-ttl-value
data-test-ttl-value="{{label}}"
value={{time}}
id="time-{{elementId}}"
type="text"
......@@ -26,7 +26,7 @@
</div>
<div class="control">
<Select
data-test-ttl-unit
data-test-ttl-unit="{{label}}"
@name='ttl-unit'
@options={{unitOptions}}
@onChange={{action 'updateUnit'}}
......
......@@ -31,8 +31,9 @@ module('Acceptance | settings', function(hooks) {
.next()
.path(path)
.toggleOptions()
.defaultTTLVal(100)
.enableDefaultTtl()
.defaultTTLUnit('s')
.defaultTTLVal(100)
.submit();
assert.ok(
find('[data-test-flash-message]').textContent.trim(),
......
......@@ -17,9 +17,9 @@ module('Acceptance | settings/configure/secrets/pki/crl', function(hooks) {
await enablePage.enable('pki', path);
await page.visit({ backend: path, section: 'crl' });
assert.equal(currentRouteName(), 'vault.cluster.settings.configure-secret-backend.section');
await page.form.fillInValue(3);
await page.form.enableTtl();
await page.form.fillInUnit('h');
await page.form.fillInValue(3);
await page.form.submit();
assert.equal(page.lastMessage, 'The crl config for this backend has been updated.');
});
......
......@@ -12,7 +12,7 @@ module('Acceptance | settings/mount-secret-backend', function(hooks) {
return authPage.login();
});
test('it sets the ttl corrects when mounting', async function(assert) {
test('it sets the ttl correctly when mounting', async function(assert) {
// always force the new mount to the top of the list
const path = `kv-${new Date().getTime()}`;
const defaultTTLHours = 100;
......@@ -24,17 +24,17 @@ module('Acceptance | settings/mount-secret-backend', function(hooks) {
assert.equal(currentRouteName(), 'vault.cluster.settings.mount-secret-backend');
await page.selectType('kv');
await page
.next()
.path(path)
.toggleOptions()
.defaultTTLVal(defaultTTLHours)
.enableDefaultTtl()
.defaultTTLUnit('h')
.maxTTLVal(maxTTLHours)
.defaultTTLVal(defaultTTLHours)
.enableMaxTtl()
.maxTTLUnit('h')
.maxTTLVal(maxTTLHours)
.submit();
await configPage.visit({ backend: path });
assert.equal(configPage.defaultTTL, defaultTTLSeconds, 'shows the proper TTL');
assert.equal(configPage.maxTTL, maxTTLSeconds, 'shows the proper max TTL');
......
......@@ -107,14 +107,15 @@ module('Integration | Component | form field', function(hooks) {
test('it renders: editType ttl', async function(assert) {
let [model, spy] = await setup.call(this, createAttr('foo', null, { editType: 'ttl' }));
assert.ok(component.hasTTLPicker, 'renders the ttl-picker component');
await component.fields.objectAt(0).input('3');
await component.fields.objectAt(0).toggleTtl();
await component.fields
.objectAt(0)
.select('h')
.change();
assert.equal(model.get('foo'), '3h');
assert.ok(spy.calledWith('foo', '3h'), 'onChange called with correct args');
await component.fields.objectAt(0).ttlTime('3');
const expectedSeconds = `${3 * 3600}s`;
assert.equal(model.get('foo'), expectedSeconds);
assert.ok(spy.calledWith('foo', expectedSeconds), 'onChange called with correct args');
});
test('it renders: editType stringArray', async function(assert) {
......
......@@ -115,4 +115,20 @@ module('Integration | Component | ttl-picker2', function(hooks) {
'Seconds value is recalculated based on time and unit'
);
});
test('it sets default value to seconds of parsed value when set', async function(assert) {
let changeSpy = sinon.spy();
this.set('onChange', changeSpy);
await render(hbs`
<TtlPicker2
@onChange={{onChange}}
@initialValue="2h"
@enableTTL={{true}}
@time=4
@unit="d"
/>
`);
assert.dom('[data-test-ttl-value]').hasValue('7200', 'time value is initialValue as seconds');
assert.dom('[data-test-select="ttl-unit"]').hasValue('s', 'unit is seconds');
});
});
......@@ -9,6 +9,7 @@ export default {
hasTitle: isPresent('[data-test-title]'),
hasError: isPresent('[data-test-error]'),
submit: clickable('[data-test-submit]'),
enableTtl: clickable('[data-test-toggle-input]'),
fillInValue: fillable('[data-test-ttl-value]'),
fillInUnit: fillable('[data-test-select="ttl-unit"]'),
};