Commit 429ce5cc authored by Mahmood Ali's avatar Mahmood Ali

ran 'go mod vendor'

parent b8623b37
Showing with 4003 additions and 0 deletions
// +build !consulent
package acl
// enterprisePolicyAuthorizer stub
type enterprisePolicyAuthorizer struct{}
func (authz *enterprisePolicyAuthorizer) init(*Config) {
// nothing to do
}
func (authz *enterprisePolicyAuthorizer) enforce(_ *EnterpriseRule, _ *AuthorizerContext) EnforcementDecision {
return Default
}
// NewPolicyAuthorizer merges the policies and returns an Authorizer that will enforce them
func NewPolicyAuthorizer(policies []*Policy, entConfig *Config) (Authorizer, error) {
return newPolicyAuthorizer(policies, entConfig)
}
// NewPolicyAuthorizerWithDefaults creates a ChainedAuthorizer with the
// policies compiled into one Authorizer and defaultAuthz as the fallback
func NewPolicyAuthorizerWithDefaults(defaultAuthz Authorizer, policies []*Policy, entConfig *Config) (Authorizer, error) {
authz, err := newPolicyAuthorizer(policies, entConfig)
if err != nil {
return nil, err
}
return NewChainedAuthorizer([]Authorizer{authz, defaultAuthz}), nil
}
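// Usage sketch (not part of this commit): combining the compiled policies with
// a deny-by-default fallback. The policies and entConfig values here are
// hypothetical inputs supplied by the caller.
func examplePolicyAuthorizer(policies []*Policy, entConfig *Config) (Authorizer, error) {
	// Anything the compiled policies leave undecided falls through to DenyAll().
	return NewPolicyAuthorizerWithDefaults(DenyAll(), policies, entConfig)
}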
package acl
import (
"encoding/binary"
"fmt"
"hash"
"golang.org/x/crypto/blake2b"
)
type policyRulesMergeContext struct {
aclRule string
agentRules map[string]*AgentRule
agentPrefixRules map[string]*AgentRule
eventRules map[string]*EventRule
eventPrefixRules map[string]*EventRule
keyringRule string
keyRules map[string]*KeyRule
keyPrefixRules map[string]*KeyRule
nodeRules map[string]*NodeRule
nodePrefixRules map[string]*NodeRule
operatorRule string
preparedQueryRules map[string]*PreparedQueryRule
preparedQueryPrefixRules map[string]*PreparedQueryRule
serviceRules map[string]*ServiceRule
servicePrefixRules map[string]*ServiceRule
sessionRules map[string]*SessionRule
sessionPrefixRules map[string]*SessionRule
}
func (p *policyRulesMergeContext) init() {
p.aclRule = ""
p.agentRules = make(map[string]*AgentRule)
p.agentPrefixRules = make(map[string]*AgentRule)
p.eventRules = make(map[string]*EventRule)
p.eventPrefixRules = make(map[string]*EventRule)
p.keyringRule = ""
p.keyRules = make(map[string]*KeyRule)
p.keyPrefixRules = make(map[string]*KeyRule)
p.nodeRules = make(map[string]*NodeRule)
p.nodePrefixRules = make(map[string]*NodeRule)
p.operatorRule = ""
p.preparedQueryRules = make(map[string]*PreparedQueryRule)
p.preparedQueryPrefixRules = make(map[string]*PreparedQueryRule)
p.serviceRules = make(map[string]*ServiceRule)
p.servicePrefixRules = make(map[string]*ServiceRule)
p.sessionRules = make(map[string]*SessionRule)
p.sessionPrefixRules = make(map[string]*SessionRule)
}
func (p *policyRulesMergeContext) merge(policy *PolicyRules) {
if takesPrecedenceOver(policy.ACL, p.aclRule) {
p.aclRule = policy.ACL
}
for _, ap := range policy.Agents {
update := true
if permission, found := p.agentRules[ap.Node]; found {
update = takesPrecedenceOver(ap.Policy, permission.Policy)
}
if update {
p.agentRules[ap.Node] = ap
}
}
for _, ap := range policy.AgentPrefixes {
update := true
if permission, found := p.agentPrefixRules[ap.Node]; found {
update = takesPrecedenceOver(ap.Policy, permission.Policy)
}
if update {
p.agentPrefixRules[ap.Node] = ap
}
}
for _, ep := range policy.Events {
update := true
if permission, found := p.eventRules[ep.Event]; found {
update = takesPrecedenceOver(ep.Policy, permission.Policy)
}
if update {
p.eventRules[ep.Event] = ep
}
}
for _, ep := range policy.EventPrefixes {
update := true
if permission, found := p.eventPrefixRules[ep.Event]; found {
update = takesPrecedenceOver(ep.Policy, permission.Policy)
}
if update {
p.eventPrefixRules[ep.Event] = ep
}
}
if takesPrecedenceOver(policy.Keyring, p.keyringRule) {
p.keyringRule = policy.Keyring
}
for _, kp := range policy.Keys {
update := true
if permission, found := p.keyRules[kp.Prefix]; found {
update = takesPrecedenceOver(kp.Policy, permission.Policy)
}
if update {
p.keyRules[kp.Prefix] = kp
}
}
for _, kp := range policy.KeyPrefixes {
update := true
if permission, found := p.keyPrefixRules[kp.Prefix]; found {
update = takesPrecedenceOver(kp.Policy, permission.Policy)
}
if update {
p.keyPrefixRules[kp.Prefix] = kp
}
}
for _, np := range policy.Nodes {
update := true
if permission, found := p.nodeRules[np.Name]; found {
update = takesPrecedenceOver(np.Policy, permission.Policy)
}
if update {
p.nodeRules[np.Name] = np
}
}
for _, np := range policy.NodePrefixes {
update := true
if permission, found := p.nodePrefixRules[np.Name]; found {
update = takesPrecedenceOver(np.Policy, permission.Policy)
}
if update {
p.nodePrefixRules[np.Name] = np
}
}
if takesPrecedenceOver(policy.Operator, p.operatorRule) {
p.operatorRule = policy.Operator
}
for _, qp := range policy.PreparedQueries {
update := true
if permission, found := p.preparedQueryRules[qp.Prefix]; found {
update = takesPrecedenceOver(qp.Policy, permission.Policy)
}
if update {
p.preparedQueryRules[qp.Prefix] = qp
}
}
for _, qp := range policy.PreparedQueryPrefixes {
update := true
if permission, found := p.preparedQueryPrefixRules[qp.Prefix]; found {
update = takesPrecedenceOver(qp.Policy, permission.Policy)
}
if update {
p.preparedQueryPrefixRules[qp.Prefix] = qp
}
}
for _, sp := range policy.Services {
existing, found := p.serviceRules[sp.Name]
if !found {
p.serviceRules[sp.Name] = sp
continue
}
if takesPrecedenceOver(sp.Policy, existing.Policy) {
existing.Policy = sp.Policy
existing.EnterpriseRule = sp.EnterpriseRule
}
if takesPrecedenceOver(sp.Intentions, existing.Intentions) {
existing.Intentions = sp.Intentions
}
}
for _, sp := range policy.ServicePrefixes {
existing, found := p.servicePrefixRules[sp.Name]
if !found {
p.servicePrefixRules[sp.Name] = sp
continue
}
if takesPrecedenceOver(sp.Policy, existing.Policy) {
existing.Policy = sp.Policy
existing.EnterpriseRule = sp.EnterpriseRule
}
if takesPrecedenceOver(sp.Intentions, existing.Intentions) {
existing.Intentions = sp.Intentions
}
}
for _, sp := range policy.Sessions {
update := true
if permission, found := p.sessionRules[sp.Node]; found {
update = takesPrecedenceOver(sp.Policy, permission.Policy)
}
if update {
p.sessionRules[sp.Node] = sp
}
}
for _, sp := range policy.SessionPrefixes {
update := true
if permission, found := p.sessionPrefixRules[sp.Node]; found {
update = takesPrecedenceOver(sp.Policy, permission.Policy)
}
if update {
p.sessionPrefixRules[sp.Node] = sp
}
}
}
func (p *policyRulesMergeContext) update(merged *PolicyRules) {
merged.ACL = p.aclRule
merged.Keyring = p.keyringRule
merged.Operator = p.operatorRule
// All the for loop appends are ugly but Go doesn't have a way to get
// a slice of all values within a map so this is necessary
merged.Agents = []*AgentRule{}
for _, policy := range p.agentRules {
merged.Agents = append(merged.Agents, policy)
}
merged.AgentPrefixes = []*AgentRule{}
for _, policy := range p.agentPrefixRules {
merged.AgentPrefixes = append(merged.AgentPrefixes, policy)
}
merged.Events = []*EventRule{}
for _, policy := range p.eventRules {
merged.Events = append(merged.Events, policy)
}
merged.EventPrefixes = []*EventRule{}
for _, policy := range p.eventPrefixRules {
merged.EventPrefixes = append(merged.EventPrefixes, policy)
}
merged.Keys = []*KeyRule{}
for _, policy := range p.keyRules {
merged.Keys = append(merged.Keys, policy)
}
merged.KeyPrefixes = []*KeyRule{}
for _, policy := range p.keyPrefixRules {
merged.KeyPrefixes = append(merged.KeyPrefixes, policy)
}
merged.Nodes = []*NodeRule{}
for _, policy := range p.nodeRules {
merged.Nodes = append(merged.Nodes, policy)
}
merged.NodePrefixes = []*NodeRule{}
for _, policy := range p.nodePrefixRules {
merged.NodePrefixes = append(merged.NodePrefixes, policy)
}
merged.PreparedQueries = []*PreparedQueryRule{}
for _, policy := range p.preparedQueryRules {
merged.PreparedQueries = append(merged.PreparedQueries, policy)
}
merged.PreparedQueryPrefixes = []*PreparedQueryRule{}
for _, policy := range p.preparedQueryPrefixRules {
merged.PreparedQueryPrefixes = append(merged.PreparedQueryPrefixes, policy)
}
merged.Services = []*ServiceRule{}
for _, policy := range p.serviceRules {
merged.Services = append(merged.Services, policy)
}
merged.ServicePrefixes = []*ServiceRule{}
for _, policy := range p.servicePrefixRules {
merged.ServicePrefixes = append(merged.ServicePrefixes, policy)
}
merged.Sessions = []*SessionRule{}
for _, policy := range p.sessionRules {
merged.Sessions = append(merged.Sessions, policy)
}
merged.SessionPrefixes = []*SessionRule{}
for _, policy := range p.sessionPrefixRules {
merged.SessionPrefixes = append(merged.SessionPrefixes, policy)
}
}
type PolicyMerger struct {
idHasher hash.Hash
policyRulesMergeContext
enterprisePolicyRulesMergeContext
}
func NewPolicyMerger() *PolicyMerger {
merger := &PolicyMerger{}
merger.init()
return merger
}
func (m *PolicyMerger) init() {
var err error
m.idHasher, err = blake2b.New256(nil)
if err != nil {
panic(err)
}
m.policyRulesMergeContext.init()
m.enterprisePolicyRulesMergeContext.init()
}
func (m *PolicyMerger) Merge(policy *Policy) {
// This is part of calculating the merged policy's ID
m.idHasher.Write([]byte(policy.ID))
binary.Write(m.idHasher, binary.BigEndian, policy.Revision)
m.policyRulesMergeContext.merge(&policy.PolicyRules)
m.enterprisePolicyRulesMergeContext.merge(&policy.EnterprisePolicyRules)
}
// Policy outputs the merged policy
func (m *PolicyMerger) Policy() *Policy {
merged := &Policy{
ID: fmt.Sprintf("%x", m.idHasher.Sum(nil)),
}
m.policyRulesMergeContext.update(&merged.PolicyRules)
m.enterprisePolicyRulesMergeContext.update(&merged.EnterprisePolicyRules)
return merged
}
func MergePolicies(policies []*Policy) *Policy {
var merger PolicyMerger
merger.init()
for _, p := range policies {
merger.Merge(p)
}
return merger.Policy()
}
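// Usage sketch (not part of this commit): merging two hypothetical policies.
// Per takesPrecedenceOver, the higher-precedence keyring rule wins, and the
// merged policy's ID is the hash of the input IDs and revisions.
func exampleMergePolicies() *Policy {
	a := &Policy{PolicyRules: PolicyRules{Keyring: "read"}}
	b := &Policy{PolicyRules: PolicyRules{Keyring: "write"}}
	return MergePolicies([]*Policy{a, b})
}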
// +build !consulent
package acl
type enterprisePolicyRulesMergeContext struct{}
func (ctx *enterprisePolicyRulesMergeContext) init() {
// do nothing
}
func (ctx *enterprisePolicyRulesMergeContext) merge(*EnterprisePolicyRules) {
// do nothing
}
func (ctx *enterprisePolicyRulesMergeContext) update(*EnterprisePolicyRules) {
// do nothing
}
// +build !consulent
package acl
import (
"fmt"
"github.com/hashicorp/hcl"
)
// EnterprisePolicyMeta stub
type EnterprisePolicyMeta struct{}
// EnterpriseRule stub
type EnterpriseRule struct{}
func (r *EnterpriseRule) Validate(string, *Config) error {
// nothing to validate
return nil
}
// EnterprisePolicyRules stub
type EnterprisePolicyRules struct{}
func (r *EnterprisePolicyRules) Validate(*Config) error {
// nothing to validate
return nil
}
func decodeRules(rules string, _ *Config, _ *EnterprisePolicyMeta) (*Policy, error) {
p := &Policy{}
if err := hcl.Decode(p, rules); err != nil {
return nil, fmt.Errorf("Failed to parse ACL rules: %v", err)
}
return p, nil
}
package acl
var (
// allowAll is a singleton policy which allows all
// non-management actions
allowAll Authorizer = &staticAuthorizer{
allowManage: false,
defaultAllow: true,
}
// denyAll is a singleton policy which denies all actions
denyAll Authorizer = &staticAuthorizer{
allowManage: false,
defaultAllow: false,
}
// manageAll is a singleton policy which allows all
// actions, including management
manageAll Authorizer = &staticAuthorizer{
allowManage: true,
defaultAllow: true,
}
)
// StaticAuthorizer is used to implement a base ACL policy. It either
// allows or denies all requests. This can be used as a parent
// ACL to act in a blacklist or whitelist mode.
type staticAuthorizer struct {
allowManage bool
defaultAllow bool
}
func (s *staticAuthorizer) ACLRead(*AuthorizerContext) EnforcementDecision {
if s.allowManage {
return Allow
}
return Deny
}
func (s *staticAuthorizer) ACLWrite(*AuthorizerContext) EnforcementDecision {
if s.allowManage {
return Allow
}
return Deny
}
func (s *staticAuthorizer) AgentRead(string, *AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) AgentWrite(string, *AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) EventRead(string, *AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) EventWrite(string, *AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) IntentionDefaultAllow(*AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) IntentionRead(string, *AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) IntentionWrite(string, *AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) KeyRead(string, *AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) KeyList(string, *AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) KeyWrite(string, *AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) KeyWritePrefix(string, *AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) KeyringRead(*AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) KeyringWrite(*AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) NodeRead(string, *AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) NodeWrite(string, *AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) OperatorRead(*AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) OperatorWrite(*AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) PreparedQueryRead(string, *AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) PreparedQueryWrite(string, *AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) ServiceRead(string, *AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) ServiceWrite(string, *AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) SessionRead(string, *AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) SessionWrite(string, *AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) Snapshot(_ *AuthorizerContext) EnforcementDecision {
if s.allowManage {
return Allow
}
return Deny
}
// AllowAll returns an Authorizer that allows all operations
func AllowAll() Authorizer {
return allowAll
}
// DenyAll returns an Authorizer that denies all operations
func DenyAll() Authorizer {
return denyAll
}
// ManageAll returns an Authorizer that can manage all resources
func ManageAll() Authorizer {
return manageAll
}
// RootAuthorizer returns a possible Authorizer if the ID matches a root policy
func RootAuthorizer(id string) Authorizer {
switch id {
case "allow":
return allowAll
case "deny":
return denyAll
case "manage":
return manageAll
default:
return nil
}
}
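// Usage sketch (not part of this commit): resolving a built-in root policy by
// ID and asking it for a decision; "foo" is an arbitrary key name.
func exampleRootAuthorizer() EnforcementDecision {
	authz := RootAuthorizer("deny")
	if authz == nil {
		// only "allow", "deny" and "manage" map to root authorizers
		return Deny
	}
	return authz.KeyRead("foo", nil) // Deny, since this is the denyAll singleton
}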
package cache
import (
"container/heap"
"time"
)
// cacheEntry stores a single cache entry.
//
// Note that this isn't a very optimized structure currently. There are
// a lot of improvements that can be made here in the long term.
type cacheEntry struct {
// Fields pertaining to the actual value
Value interface{}
// State can be used to store info needed by the cache type but that should
// not be part of the result the client gets. For example the Connect Leaf
// type needs to store additional data about when it last attempted a renewal
// that is not part of the actual IssuedCert struct it returns. It's opaque to
// the Cache but allows types to store additional data that is coupled to the
// cache entry's lifetime and will be aged out by TTL etc.
State interface{}
Error error
Index uint64
// Metadata that is used for internal accounting
Valid bool // True if the Value is set
Fetching bool // True if a fetch is already active
Waiter chan struct{} // Closed when this entry is invalidated
// Expiry contains information about the expiration of this
// entry. This is a pointer as its shared as a value in the
// expiryHeap as well.
Expiry *cacheEntryExpiry
// FetchedAt stores the time the cache entry was retrieved for determining
// its age later.
FetchedAt time.Time
// RefreshLostContact stores the time background refresh failed. It gets reset
// to zero after a background fetch has returned successfully, or after a
// background request has been blocking for at least 5 seconds, whichever
// happens first.
RefreshLostContact time.Time
}
// cacheEntryExpiry contains the expiration information for a cache
// entry. Any modifications to this struct should be done only while
// the Cache entriesLock is held.
type cacheEntryExpiry struct {
Key string // Key in the cache map
Expires time.Time // Time when entry expires (monotonic clock)
TTL time.Duration // TTL for this entry to extend when resetting
HeapIndex int // Index in the heap
}
// Reset resets the expiration to be the ttl duration from now.
func (e *cacheEntryExpiry) Reset() {
e.Expires = time.Now().Add(e.TTL)
}
// expiryHeap is a heap implementation that stores information about
// when entries expire. Implements container/heap.Interface.
//
// All operations on the heap and read/write of the heap contents require
// the proper entriesLock to be held on Cache.
type expiryHeap struct {
Entries []*cacheEntryExpiry
// NotifyCh is sent a value whenever the 0 index value of the heap
// changes. This can be used to detect when the earliest value
// changes.
//
// There is a single edge case where the heap will not automatically
// send a notification: if heap.Fix is called manually and the index
// changed is 0 and the change doesn't result in any moves (stays at index
// 0), then we won't detect the change. To work around this, please
// always call the expiryHeap.Fix method instead.
NotifyCh chan struct{}
}
// Identical to heap.Fix for this heap instance but will properly handle
// the edge case where idx == 0 and no heap modification is necessary,
// and still notify the NotifyCh.
//
// This is important for cache expiry since the expiry time may have been
// extended and if we don't send a message to the NotifyCh then we'll never
// reset the timer and the entry will be evicted early.
func (h *expiryHeap) Fix(entry *cacheEntryExpiry) {
idx := entry.HeapIndex
heap.Fix(h, idx)
// This is the edge case we handle: if the prev (idx) and current (HeapIndex)
// are both zero, it means the head-of-line didn't change while the value
// changed. Notify to reset our expiry worker.
if idx == 0 && entry.HeapIndex == 0 {
h.notify()
}
}
func (h *expiryHeap) Len() int { return len(h.Entries) }
func (h *expiryHeap) Swap(i, j int) {
h.Entries[i], h.Entries[j] = h.Entries[j], h.Entries[i]
h.Entries[i].HeapIndex = i
h.Entries[j].HeapIndex = j
// If we're moving the 0 index, update the channel since we need
// to re-update the timer we're waiting on for the soonest expiring
// value.
if i == 0 || j == 0 {
h.notify()
}
}
func (h *expiryHeap) Less(i, j int) bool {
// The usage of Before here is important (despite being obvious):
// this function uses the monotonic time that should be available
// on the time.Time value so the heap is immune to wall clock changes.
return h.Entries[i].Expires.Before(h.Entries[j].Expires)
}
// heap.Interface, this isn't expected to be called directly.
func (h *expiryHeap) Push(x interface{}) {
entry := x.(*cacheEntryExpiry)
// Set initial heap index, if we're going to the end then Swap
// won't be called so we need to initialize
entry.HeapIndex = len(h.Entries)
// For the first entry, we need to trigger a channel send because
// Swap won't be called; nothing to swap! We can call it right away
// because all heap operations are within a lock.
if len(h.Entries) == 0 {
h.notify()
}
h.Entries = append(h.Entries, entry)
}
// heap.Interface, this isn't expected to be called directly.
func (h *expiryHeap) Pop() interface{} {
old := h.Entries
n := len(old)
x := old[n-1]
h.Entries = old[0 : n-1]
return x
}
func (h *expiryHeap) notify() {
select {
case h.NotifyCh <- struct{}{}:
// Good
default:
// If the send would've blocked, we just ignore it. The reason this
// is safe is because NotifyCh should always be a buffered channel.
// If this blocks, it means that there is a pending message anyways
// so the receiver will restart regardless.
}
}
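// Usage sketch (not part of this commit): how the cache is expected to build
// and feed this heap. NotifyCh is buffered with capacity 1 so notify() never
// drops the "earliest expiry changed" signal; the key and TTL are illustrative.
func exampleExpiryHeap() *expiryHeap {
	h := &expiryHeap{NotifyCh: make(chan struct{}, 1)}
	entry := &cacheEntryExpiry{Key: "my-key", TTL: 30 * time.Second}
	entry.Reset() // Expires = now + TTL
	heap.Push(h, entry)
	// When the TTL is later extended, call h.Fix(entry) rather than heap.Fix so
	// the idx == 0 edge case described above still notifies the expiry loop.
	return h
}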
// Code generated by mockery v1.0.0
package cache
import mock "github.com/stretchr/testify/mock"
// MockRequest is an autogenerated mock type for the Request type
type MockRequest struct {
mock.Mock
}
// CacheInfo provides a mock function with given fields:
func (_m *MockRequest) CacheInfo() RequestInfo {
ret := _m.Called()
var r0 RequestInfo
if rf, ok := ret.Get(0).(func() RequestInfo); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(RequestInfo)
}
return r0
}
// Code generated by mockery v1.0.0. DO NOT EDIT.
package cache
import mock "github.com/stretchr/testify/mock"
// MockType is an autogenerated mock type for the Type type
type MockType struct {
mock.Mock
}
// Fetch provides a mock function with given fields: _a0, _a1
func (_m *MockType) Fetch(_a0 FetchOptions, _a1 Request) (FetchResult, error) {
ret := _m.Called(_a0, _a1)
var r0 FetchResult
if rf, ok := ret.Get(0).(func(FetchOptions, Request) FetchResult); ok {
r0 = rf(_a0, _a1)
} else {
r0 = ret.Get(0).(FetchResult)
}
var r1 error
if rf, ok := ret.Get(1).(func(FetchOptions, Request) error); ok {
r1 = rf(_a0, _a1)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// SupportsBlocking provides a mock function with given fields:
func (_m *MockType) SupportsBlocking() bool {
ret := _m.Called()
var r0 bool
if rf, ok := ret.Get(0).(func() bool); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(bool)
}
return r0
}
package cache
import (
"time"
)
// Request is a cacheable request.
//
// This interface is typically implemented by request structures in
// the agent/structs package.
type Request interface {
// CacheInfo returns information used for caching this request.
CacheInfo() RequestInfo
}
// RequestInfo represents cache information for a request. The caching
// framework uses this to control the behavior of caching and to determine
// cacheability.
type RequestInfo struct {
// Key is a unique cache key for this request. This key should
// be globally unique to identify this request, since any conflicting
// cache keys could result in invalid data being returned from the cache.
// The Key does not need to include ACL or DC information, since the
// cache already partitions by these values prior to using this key.
Key string
// Token is the ACL token associated with this request.
//
// Datacenter is the datacenter that the request is targeting.
//
// Both of these values are used to partition the cache. The cache framework
// today partitions data on these values to simplify behavior: by
// partitioning ACL tokens, the cache doesn't need to be smart about
// filtering results. By filtering datacenter results, the cache can
// service the multi-DC nature of Consul. This comes at the expense of
// working set size, but in general the effect is minimal.
Token string
Datacenter string
// MinIndex is the minimum index being queried. This is used to
// determine if we already have data satisfying the query or if we need
// to block until new data is available. If no index is available, the
// default value (zero) is acceptable.
MinIndex uint64
// Timeout is the timeout for waiting on a blocking query. When the
// timeout is reached, the last known value is returned (or maybe nil
// if there was no prior value). This "last known value" behavior matches
// normal Consul blocking queries.
Timeout time.Duration
// MaxAge if set limits how stale a cache entry can be. If it is non-zero and
// there is an entry in cache that is older than specified, it is treated as a
// cache miss and re-fetched. It is ignored for cachetypes with Refresh =
// true.
MaxAge time.Duration
// MustRevalidate forces a new lookup of the cache even if there is an
// existing one that has not expired. It is implied by HTTP requests with
// `Cache-Control: max-age=0` but we can't distinguish that case from the
// unset case for MaxAge. Later we may support revalidating the index without
// a full re-fetch but for now the only option is to refetch. It is ignored
// for cachetypes with Refresh = true.
MustRevalidate bool
}
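// Illustrative sketch (not part of this commit): a minimal, hypothetical
// request type satisfying the Request interface. Real implementations live in
// agent/structs; the field names and the 10 minute timeout are assumptions.
type exampleServiceRequest struct {
	Datacenter string
	Token      string
	Service    string
	MinIndex   uint64
}

func (r *exampleServiceRequest) CacheInfo() RequestInfo {
	return RequestInfo{
		// Key only needs to be unique within the cache type; token and
		// datacenter are handled by the cache's own partitioning.
		Key:        r.Service,
		Token:      r.Token,
		Datacenter: r.Datacenter,
		MinIndex:   r.MinIndex,
		Timeout:    10 * time.Minute,
	}
}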
package cache
import (
"reflect"
"time"
"github.com/mitchellh/go-testing-interface"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
// TestCache returns a Cache instance configured for testing.
func TestCache(t testing.T) *Cache {
// Simple but lets us do some fine-tuning later if we want to.
return New(nil)
}
// TestCacheGetCh returns a channel that returns the result of the Get call.
// This is useful for testing timing and concurrency with Get calls. Any
// error will be logged, so the result value should always be asserted.
func TestCacheGetCh(t testing.T, c *Cache, typ string, r Request) <-chan interface{} {
resultCh := make(chan interface{})
go func() {
result, _, err := c.Get(typ, r)
if err != nil {
t.Logf("Error: %s", err)
close(resultCh)
return
}
resultCh <- result
}()
return resultCh
}
// TestCacheGetChResult tests that the result from TestCacheGetCh matches
// within a reasonable period of time (it expects it to be "immediate" but
// waits some milliseconds).
func TestCacheGetChResult(t testing.T, ch <-chan interface{}, expected interface{}) {
t.Helper()
select {
case result := <-ch:
if !reflect.DeepEqual(result, expected) {
t.Fatalf("Result doesn't match!\n\n%#v\n\n%#v", result, expected)
}
case <-time.After(50 * time.Millisecond):
t.Fatalf("Result not sent on channel")
}
}
// TestCacheNotifyChResult tests that the expected update was delivered on a
// Notify() chan within a reasonable period of time (it expects it to be
// "immediate" but waits some milliseconds). Expected may be given multiple
// times and if so these are all waited for and asserted to match but IN ANY
// ORDER to ensure we aren't timing dependent.
func TestCacheNotifyChResult(t testing.T, ch <-chan UpdateEvent, expected ...UpdateEvent) {
t.Helper()
expectLen := len(expected)
if expectLen < 1 {
panic("asserting nothing")
}
got := make([]UpdateEvent, 0, expectLen)
timeoutCh := time.After(50 * time.Millisecond)
OUT:
for {
select {
case result := <-ch:
// Ignore age as it's non-deterministic
result.Meta.Age = 0
got = append(got, result)
if len(got) == expectLen {
break OUT
}
case <-timeoutCh:
t.Fatalf("got %d results on chan in 50ms, want %d", len(got), expectLen)
}
}
// We already asserted len since you can only get here if we appended enough.
// Just check all the results we got are in the expected slice
require.ElementsMatch(t, expected, got)
}
// TestRequest returns a Request that returns the given cache key and index.
// The Reset method can be called to reset it for custom usage.
func TestRequest(t testing.T, info RequestInfo) *MockRequest {
req := &MockRequest{}
req.On("CacheInfo").Return(info)
return req
}
// TestType returns a MockType that can be used to setup expectations
// on data fetching.
func TestType(t testing.T) *MockType {
return testTypeInternal(t, true)
}
// TestTypeNonBlocking returns a MockType that returns false to SupportsBlocking.
func TestTypeNonBlocking(t testing.T) *MockType {
return testTypeInternal(t, false)
}
func testTypeInternal(t testing.T, enableBlocking bool) *MockType {
typ := &MockType{}
typ.On("SupportsBlocking").Return(enableBlocking).Maybe()
return typ
}
// A bit weird, but we add methods to the auto-generated structs here so that
// they don't get clobbered. The helper methods are conveniences.
// Static sets a static value to return for a call to Fetch.
func (m *MockType) Static(r FetchResult, err error) *mock.Call {
return m.Mock.On("Fetch", mock.Anything, mock.Anything).Return(r, err)
}
func (m *MockRequest) Reset() {
m.Mock = mock.Mock{}
}
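// Usage sketch (not part of this commit): wiring the mock helpers together in
// a test. The value "hello", index 42 and key "my-key" are arbitrary; t is the
// go-testing-interface testing.T used by the helpers above.
func exampleMockUsage(t testing.T) (FetchResult, error) {
	typ := TestType(t)
	typ.Static(FetchResult{Value: "hello", Index: 42}, nil)
	req := TestRequest(t, RequestInfo{Key: "my-key"})
	// In a real test the mock type would be registered with a Cache and
	// exercised through Get/Notify; calling Fetch directly shows the stub.
	return typ.Fetch(FetchOptions{}, req)
}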
package cache
import (
"time"
)
// Type implements the logic to fetch certain types of data.
type Type interface {
// Fetch fetches a single unique item.
//
// The FetchOptions contain the index and timeouts for blocking queries. The
// MinIndex value on the Request itself should NOT be used as the blocking
// index since a request may be reused multiple times as part of Refresh
// behavior.
//
// The return value is a FetchResult which contains information about the
// fetch. If an error is given, the FetchResult is ignored. The cache does not
// support backends that return partial values. Optional State can be added to
// the FetchResult which will be stored with the cache entry and provided to
// the next Fetch call but will not be returned to clients. This allows types
// to add additional bookkeeping data per cache entry that will still be aged
// out along with the entry's TTL.
//
// On timeout, FetchResult can behave one of two ways. First, it can return
// the last known value. This is the default behavior of blocking RPC calls in
// Consul so this allows cache types to be implemented with no extra logic.
// Second, FetchResult can return an unset value and index. In this case, the
// cache will reuse the last value automatically. If an unset Value is
// returned, the State field will still be updated which allows maintaining
// metadata even when there is no result.
Fetch(FetchOptions, Request) (FetchResult, error)
// SupportsBlocking should return true if the type supports blocking queries.
// Types that do not support blocking queries will not be able to use
// background refresh nor will the cache attempt blocking fetches if the
// client requests them with MinIndex.
SupportsBlocking() bool
}
// FetchOptions are various settable options when a Fetch is called.
type FetchOptions struct {
// MinIndex is the minimum index to be used for blocking queries.
// If blocking queries aren't supported for data being returned,
// this value can be ignored.
MinIndex uint64
// Timeout is the maximum time for the query. This must be implemented
// in the Fetch itself.
Timeout time.Duration
// LastResult is the result from the last successful Fetch and represents the
// value currently stored in the cache at the time Fetch is invoked. It will
// be nil on first call where there is no current cache value. There may have
// been other Fetch attempts that resulted in an error in the meantime. These
// are not explicitly represented currently; we could add that if needed, but
// this was simpler for now.
//
// The FetchResult is read-only! It is constructed per Fetch call so modifying
// the struct directly (e.g. changing its Index or Value field) will have no
// effect, however the Value and State fields may be pointers to the actual
// values stored in the cache entry. It is thread-unsafe to modify the Value
// or State via pointers since readers may be concurrently inspecting those
// values under the entry lock (although we guarantee only one Fetch call per
// entry) and modifying them even if the index doesn't change or the Fetch
// eventually errors will likely break logical invariants in the cache too!
LastResult *FetchResult
}
// FetchResult is the result of a Type Fetch operation and contains the
// data along with metadata gathered from that operation.
type FetchResult struct {
// Value is the result of the fetch.
Value interface{}
// State is opaque data stored in the cache but not returned to clients. It
// can be used by Types to maintain any bookkeeping they need between fetches
// (using FetchOptions.LastResult) in a way that gets automatically cleaned up
// by TTL expiry etc.
State interface{}
// Index is the corresponding index value for this data.
Index uint64
}
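// Illustrative sketch (not part of this commit): a minimal, hypothetical
// non-blocking Type. It ignores FetchOptions because it never blocks, and it
// returns a fixed index; real types return the backend's modify index.
type exampleStaticType struct{}

func (exampleStaticType) Fetch(_ FetchOptions, req Request) (FetchResult, error) {
	info := req.CacheInfo()
	return FetchResult{
		Value: "static value for " + info.Key,
		Index: 1,
	}, nil
}

func (exampleStaticType) SupportsBlocking() bool { return false }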
package cache
import (
"context"
"fmt"
"reflect"
"time"
"github.com/hashicorp/consul/lib"
)
// UpdateEvent is a struct summarizing an update to a cache entry
type UpdateEvent struct {
// CorrelationID is used by the Notify API to allow correlation of updates
// with specific requests. We could return the full request object and
// cachetype for consumers to match against the calls they made but in
// practice it's cleaner for them to choose the minimal necessary unique
// identifier given the set of things they are watching. They might even
// choose to assign random IDs for example.
CorrelationID string
Result interface{}
Meta ResultMeta
Err error
}
// Notify registers a desire to be updated about changes to a cache result.
//
// It is a helper that abstracts callers from performing their own "blocking" query
// logic against a cache key to watch for changes and to maintain the key in
// cache actively. It will continue to perform blocking Get requests until the
// context is canceled.
//
// The passed context must be canceled or timeout in order to free resources
// and stop maintaining the value in cache. Typically request-scoped resources
// do this but if a long-lived context like context.Background is used, then the
// caller must arrange for it to be canceled when the watch is no longer
// needed.
//
// The passed chan may be buffered or unbuffered; if the caller doesn't consume
// fast enough it will block the notification loop. When the chan is later
// drained, watching resumes correctly. If the pause is longer than the
// cachetype's TTL, the result might be removed from the local cache. Even in
// this case though when the chan is drained again, the new Get will re-fetch
// the entry from servers and resume notification behavior transparently.
//
// The chan is passed in to allow multiple cached results to be watched by a
// single consumer without juggling extra goroutines per watch. The
// correlationID is opaque and will be returned in all UpdateEvents generated as
// a result of watching the specified request, so the caller can set this to any
// value that allows them to disambiguate between events in the returned chan
// when sharing a chan between multiple cache entries. If the chan is closed,
// the notify loop will terminate.
func (c *Cache) Notify(ctx context.Context, t string, r Request,
correlationID string, ch chan<- UpdateEvent) error {
// Get the type that we're fetching
c.typesLock.RLock()
tEntry, ok := c.types[t]
c.typesLock.RUnlock()
if !ok {
return fmt.Errorf("unknown type in cache: %s", t)
}
if tEntry.Type.SupportsBlocking() {
go c.notifyBlockingQuery(ctx, t, r, correlationID, ch)
} else {
info := r.CacheInfo()
if info.MaxAge == 0 {
return fmt.Errorf("Cannot use Notify for polling cache types without specifying the MaxAge")
}
go c.notifyPollingQuery(ctx, t, r, correlationID, ch, info.MaxAge)
}
return nil
}
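// Usage sketch (not part of this commit): consuming Notify updates. The type
// name "my-type" is hypothetical and must match a type registered with the
// cache; a small channel buffer keeps slow consumption from immediately
// blocking the notify loop.
func exampleWatch(ctx context.Context, c *Cache, req Request) error {
	ch := make(chan UpdateEvent, 8)
	if err := c.Notify(ctx, "my-type", req, "my-watch", ch); err != nil {
		return err
	}
	for {
		select {
		case u := <-ch:
			if u.Err != nil {
				continue // errors arrive as events too; the watch keeps retrying
			}
			_ = u.Result // act on the new value here
		case <-ctx.Done():
			return nil
		}
	}
}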
func (c *Cache) notifyBlockingQuery(ctx context.Context, t string, r Request, correlationID string, ch chan<- UpdateEvent) {
// Always start at 0 index to deliver the initial (possibly currently cached)
// value.
index := uint64(0)
failures := uint(0)
for {
// Check context hasn't been canceled
if ctx.Err() != nil {
return
}
// Blocking request
res, meta, err := c.getWithIndex(t, r, index)
// Check context hasn't been canceled
if ctx.Err() != nil {
return
}
// Check the index of the value returned in the cache entry to be sure it
// changed
if index == 0 || index < meta.Index {
u := UpdateEvent{correlationID, res, meta, err}
select {
case ch <- u:
case <-ctx.Done():
return
}
// Update index for next request
index = meta.Index
}
// Handle errors with backoff. Badly behaved blocking calls that returned
// a zero index are considered as failures since we need to not get stuck
// in a busy loop.
wait := 0 * time.Second
if err == nil && meta.Index > 0 {
failures = 0
} else {
failures++
wait = backOffWait(failures)
}
if wait > 0 {
select {
case <-time.After(wait):
case <-ctx.Done():
return
}
}
// Sanity check we always request blocking on second pass
if err == nil && index < 1 {
index = 1
}
}
}
func (c *Cache) notifyPollingQuery(ctx context.Context, t string, r Request, correlationID string, ch chan<- UpdateEvent, maxAge time.Duration) {
index := uint64(0)
failures := uint(0)
var lastValue interface{} = nil
for {
// Check context hasn't been canceled
if ctx.Err() != nil {
return
}
// Make the request
res, meta, err := c.getWithIndex(t, r, index)
// Check context hasn't been canceled
if ctx.Err() != nil {
return
}
// Check for a change in the value or an index change
if index < meta.Index || !reflect.DeepEqual(lastValue, res) {
u := UpdateEvent{correlationID, res, meta, err}
select {
case ch <- u:
case <-ctx.Done():
return
}
// Update index and lastValue
lastValue = res
index = meta.Index
}
// Reset or increment failure counter
if err == nil {
failures = 0
} else {
failures++
}
// Determining how long to wait before the next poll is complicated.
// First off, the happy path and the error path waits are handled distinctly
//
// Once fetching the data through the cache returns an error (and until a
// non-error value is returned) the wait time between each round of the loop
// gets controlled by the backOffWait function. Because we would have waited
// at least until the age of the cached data was too old, the error path should
// immediately retry the fetch and back off as needed for persistent
// failures, which potentially will wait much longer than the MaxAge of the request
//
// When on the happy path we just need to fetch from the cache often enough to ensure
// that the data is not older than the MaxAge. Therefore after fetching the data from
// the cache we can sleep until the age of that data would exceed the MaxAge. Sometimes
// this will be for the MaxAge duration (like when only a single notify was executed so
// only 1 go routine is keeping the cache updated). Other times this will be some smaller
// duration than MaxAge (when multiple notify calls were executed and this go routine just
// got data back from the cache that was a cache hit after the other go routine fetched it
// without a hit). We cannot just set MustRevalidate on the request and always sleep for MaxAge
// as this would eliminate the single-flighting of these requests in the cache and
// the efficiencies gained by it.
if failures > 0 {
errWait := backOffWait(failures)
select {
case <-time.After(errWait):
case <-ctx.Done():
return
}
} else {
// Default to immediately re-poll. This will only happen if the data
// we just got out of the cache is already too stale
pollWait := 0 * time.Second
// Calculate when the cached data's Age will get too stale and
// need to be re-queried. When the data's Age already exceeds the
// maxAge the pollWait value is left at 0 to immediately re-poll
if meta.Age <= maxAge {
pollWait = maxAge - meta.Age
}
// Add a small amount of random jitter to the polling time. One
// purpose of the jitter is to ensure that the next time
// we fetch from the cache the data will be stale (unless another
// notify go routine has updated it while this one is sleeping).
// Without this it would be possible to wake up, fetch the data
// again where the age of the data is strictly equal to the MaxAge
// and then immediately have to re-fetch again. That wouldn't
// be terrible but it would expend a bunch more cpu cycles when
// we can definitely avoid it.
pollWait += lib.RandomStagger(maxAge / 16)
select {
case <-time.After(pollWait):
case <-ctx.Done():
return
}
}
}
}
package connect
import (
"crypto/rand"
"encoding/binary"
"fmt"
"regexp"
"strconv"
"strings"
)
var invalidDNSNameChars = regexp.MustCompile(`[^a-z0-9]`)
const (
// 64 = max length of a certificate common name
// 21 = 7 bytes for ".consul", 9 bytes for .<trust domain> and 5 bytes for ".svc."
// This ends up being 43 bytes
maxServiceAndNamespaceLen = 64 - 21
minServiceNameLen = 15
minNamespaceNameLen = 15
)
// truncateServiceAndNamespace will take a service name and namespace name and truncate
// them appropriately so that they would fit within the space allotted for them in the
// Common Name field of the x509 certificate. That field is capped at 64 characters
// in length and there is other data that must be a part of the name too. This function
// takes all of that into account.
func truncateServiceAndNamespace(serviceName, namespace string) (string, string) {
svcLen := len(serviceName)
nsLen := len(namespace)
totalLen := svcLen + nsLen
// quick exit when the entirety of both can fit
if totalLen <= maxServiceAndNamespaceLen {
return serviceName, namespace
}
toRemove := totalLen - maxServiceAndNamespaceLen
// now we must figure out how to truncate each one; we need to ensure we don't remove all of either one.
if svcLen <= minServiceNameLen {
// only remove bytes from the namespace
return serviceName, truncateTo(namespace, nsLen-toRemove)
} else if nsLen <= minNamespaceNameLen {
// only remove bytes from the service name
return truncateTo(serviceName, svcLen-toRemove), namespace
} else {
// we can remove an "equal" amount from each; if the number of bytes to remove is odd the extra byte comes out of the service name
svcTruncate := svcLen - (toRemove / 2) - (toRemove % 2)
nsTruncate := nsLen - (toRemove / 2)
// checks to ensure we don't reduce one side too much when they are not roughly balanced in length.
if svcTruncate <= minServiceNameLen {
svcTruncate = minServiceNameLen
nsTruncate = maxServiceAndNamespaceLen - minServiceNameLen
} else if nsTruncate <= minNamespaceNameLen {
svcTruncate = maxServiceAndNamespaceLen - minNamespaceNameLen
nsTruncate = minNamespaceNameLen
}
return truncateTo(serviceName, svcTruncate), truncateTo(namespace, nsTruncate)
}
}
// ServiceCN returns the common name for a service's certificate. We can't use
// SPIFFE URIs because some CAs require valid FQDN format. We can't use SNI
// values because they are often longer than the 64 bytes allowed by
// CommonNames. We could attempt to encode more information into this to make
// identifying which instance/node it was issued to in a management tool easier
// but that just introduces more complications around length. It's also strange
// that the Common Name would encode more information than the actual
// identifying URI we use to assert anything does, and may lead to bad assumptions
// that the common name is in some way "secure" or verified - there is nothing
// inherently provable here except that the requestor had ACLs for that service
// name in that DC.
//
// Format is:
// <sanitized_service_name>.svc.<namespace>.<trust_domain_first_8>.consul
//
// service name is sanitized by removing any chars that are not legal in a DNS
// name and lower casing. It is truncated to the first X chars to keep the
// total at 64.
//
// trust domain is truncated to keep the whole name short
func ServiceCN(serviceName, namespace, trustDomain string) string {
svc := invalidDNSNameChars.ReplaceAllString(strings.ToLower(serviceName), "")
svc, namespace = truncateServiceAndNamespace(svc, namespace)
return fmt.Sprintf("%s.svc.%s.%s.consul",
svc, namespace, truncateTo(trustDomain, 8))
}
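// Illustrative sketch (not part of this commit) with hypothetical inputs:
// "Web_App" sanitizes to "webapp", the namespace fits without truncation, and
// the trust domain is cut to its first 8 characters, so this returns
// "webapp.svc.default.1c6ff859.consul".
func exampleServiceCN() string {
	return ServiceCN("Web_App", "default", "1c6ff859-e9d7-4d39-8e3e-000000000000")
}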
// AgentCN returns the common name for an agent certificate. See ServiceCN for
// more details on rationale.
//
// Format is:
// <sanitized_node_name>.agnt.<trust_domain_first_8>.consul
//
// node name is sanitized by removing any chars that are not legal in a DNS
// name and lower casing. It is truncated to the first X chars to keep the
// total at 64.
//
// trust domain is truncated to keep the whole name short
func AgentCN(node, trustDomain string) string {
nodeSan := invalidDNSNameChars.ReplaceAllString(strings.ToLower(node), "")
// 21 = 7 bytes for ".consul", 8 bytes for trust domain, 6 bytes for ".agnt."
return fmt.Sprintf("%s.agnt.%s.consul",
truncateTo(nodeSan, 64-21), truncateTo(trustDomain, 8))
}
// CompactUID returns a crypto-random unique identifier string consisting of 8
// characters of base36-encoded random value. This has roughly 41 bits of
// entropy so it is suitable for infrequently occurring events with low probability
// of collision. It is not suitable for UUIDs for very frequent events. Its
// main purpose is to assign unique values to CA certificate Common Names which
// need to be unique in some providers - see CACN - but without using up large
// amounts of the limited 64 character Common Name. It also makes the values
// more easily digestible by humans considering there are likely to be few of
// them ever in use.
func CompactUID() (string, error) {
// 48 bits (6 bytes) is enough to fill 8 characters of base36 but it's simpler to
// read a whole uint64 to convert from.
var raw [8]byte
_, err := rand.Read(raw[:])
if err != nil {
return "", err
}
i := binary.LittleEndian.Uint64(raw[:])
return truncateTo(strconv.FormatInt(int64(i), 36), 8), nil
}
// CACN returns the common name for a CA certificate. See ServiceCN for more
// details on rationale. A uniqueID is required because some providers (e.g.
// Vault) cache by subject and so produce incorrect results - for example they
// won't cross-sign an older CA certificate with the same common name since they
// think they already have a valid cert for that CN and just return the current
// root.
//
// This can be generated by any means but will be truncated to 8 chars and
// sanitised to DNS-safe chars. CompactUID generates suitable UIDs for this
// specific purpose.
//
// Format is:
// {provider}-{uniqueID_first8}.{pri|sec}.ca.<trust_domain_first_8>.consul
//
// trust domain is truncated to keep the whole name short
func CACN(provider, uniqueID, trustDomain string, primaryDC bool) string {
providerSan := invalidDNSNameChars.ReplaceAllString(strings.ToLower(provider), "")
typ := "pri"
if !primaryDC {
typ = "sec"
}
// 32 = 7 bytes for ".consul", 8 bytes for trust domain, 8 bytes for
// ".pri.ca.", 9 bytes for "-{uniqueID-8-b36}"
uidSAN := invalidDNSNameChars.ReplaceAllString(strings.ToLower(uniqueID), "")
return fmt.Sprintf("%s-%s.%s.ca.%s.consul", truncateTo(providerSan, 64-32),
truncateTo(uidSAN, 8), typ, truncateTo(trustDomain, 8))
}
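// Usage sketch (not part of this commit): pairing CompactUID with CACN. The
// provider "vault" and the trust domain value are illustrative; for the
// primary DC this returns something like "vault-<uid8>.pri.ca.1c6ff859.consul".
func exampleCACN() (string, error) {
	uid, err := CompactUID()
	if err != nil {
		return "", err
	}
	return CACN("vault", uid, "1c6ff859-e9d7-4d39-8e3e-000000000000", true), nil
}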
func truncateTo(s string, n int) string {
if len(s) > n {
return s[:n]
}
return s
}
// CNForCertURI returns the correct common name for a given cert URI type. It
// doesn't work for CA Signing IDs since more context is needed and CA Providers
// always know their CN from their own context.
func CNForCertURI(uri CertURI) (string, error) {
// Even though leafs should be from our own CSRs which should have the same CN
// logic as here, override anyway to account for older version clients that
// didn't include the Common Name in the CSR.
switch id := uri.(type) {
case *SpiffeIDService:
return ServiceCN(id.Service, id.Namespace, id.Host), nil
case *SpiffeIDAgent:
return AgentCN(id.Agent, id.Host), nil
case *SpiffeIDSigning:
return "", fmt.Errorf("CertURI is a SpiffeIDSigning, not enough context to generate Common Name")
default:
return "", fmt.Errorf("CertURI type not recognized")
}
}
package connect
import (
"bytes"
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/pem"
"net"
"net/url"
)
// SigAlgoForKey returns the preferred x509.SignatureAlgorithm for a given key
// based on its type. If the key type is not supported we return
// ECDSAWithSHA256 on the basis that it will fail anyway and we've already type
// checked keys by the time we call this in general.
func SigAlgoForKey(key crypto.Signer) x509.SignatureAlgorithm {
if _, ok := key.(*rsa.PrivateKey); ok {
return x509.SHA256WithRSA
}
// We default to ECDSA but don't bother detecting invalid key types as we do
// that in lots of other places and it will fail anyway if we try to sign with
// an incompatible type.
return x509.ECDSAWithSHA256
}
// SigAlgoForKeyType returns the preferred x509.SignatureAlgorithm for a given
// key type string from configuration or an existing cert. If the key type is
// not supported we return ECDSAWithSHA256 on the basis that it will fail anyway
// and we've already type checked config by the time we call this in general.
func SigAlgoForKeyType(keyType string) x509.SignatureAlgorithm {
switch keyType {
case "rsa":
return x509.SHA256WithRSA
case "ec":
fallthrough
default:
return x509.ECDSAWithSHA256
}
}
// CreateCSR returns a PEM-encoded CSR for the given URI and Common Name, with
// optional DNS and IP SAN entries, signed by the provided private key.
func CreateCSR(uri CertURI, commonName string, privateKey crypto.Signer,
dnsNames []string, ipAddresses []net.IP, extensions ...pkix.Extension) (string, error) {
template := &x509.CertificateRequest{
URIs: []*url.URL{uri.URI()},
SignatureAlgorithm: SigAlgoForKey(privateKey),
ExtraExtensions: extensions,
Subject: pkix.Name{CommonName: commonName},
DNSNames: dnsNames,
IPAddresses: ipAddresses,
}
// Create the CSR itself
var csrBuf bytes.Buffer
bs, err := x509.CreateCertificateRequest(rand.Reader, template, privateKey)
if err != nil {
return "", err
}
err = pem.Encode(&csrBuf, &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: bs})
if err != nil {
return "", err
}
return csrBuf.String(), nil
}
// CreateCACSR returns a PEM-encoded CSR with the CA basic-constraints
// extension set, for the given URI and Common Name, signed by the provided private key.
func CreateCACSR(uri CertURI, commonName string, privateKey crypto.Signer) (string, error) {
ext, err := CreateCAExtension()
if err != nil {
return "", err
}
return CreateCSR(uri, commonName, privateKey, nil, nil, ext)
}
// CreateCAExtension creates a pkix.Extension for the x509 Basic Constraints
// IsCA field.
func CreateCAExtension() (pkix.Extension, error) {
type basicConstraints struct {
IsCA bool `asn1:"optional"`
MaxPathLen int `asn1:"optional"`
}
basicCon := basicConstraints{IsCA: true, MaxPathLen: 0}
bitstr, err := asn1.Marshal(basicCon)
if err != nil {
return pkix.Extension{}, err
}
return pkix.Extension{
Id: []int{2, 5, 29, 19}, // from x509 package
Critical: true,
Value: bitstr,
}, nil
}
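// Usage sketch (not part of this commit): generate a default key and build a
// leaf CSR for a caller-supplied CertURI and Common Name. No SANs or extra
// extensions are added; GeneratePrivateKey is defined in generate.go below.
func exampleLeafCSR(uri CertURI, commonName string) (string, string, error) {
	signer, keyPEM, err := GeneratePrivateKey()
	if err != nil {
		return "", "", err
	}
	csrPEM, err := CreateCSR(uri, commonName, signer, nil, nil)
	if err != nil {
		return "", "", err
	}
	return csrPEM, keyPEM, nil
}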
package connect
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"fmt"
"strings"
"time"
)
const (
DefaultPrivateKeyType = "ec"
DefaultPrivateKeyBits = 256
DefaultIntermediateCertTTL = 24 * 365 * time.Hour
)
func pemEncodeKey(key []byte, blockType string) (string, error) {
var buf bytes.Buffer
if err := pem.Encode(&buf, &pem.Block{Type: blockType, Bytes: key}); err != nil {
return "", fmt.Errorf("error encoding private key: %s", err)
}
return buf.String(), nil
}
func generateRSAKey(keyBits int) (crypto.Signer, string, error) {
var pk *rsa.PrivateKey
pk, err := rsa.GenerateKey(rand.Reader, keyBits)
if err != nil {
return nil, "", fmt.Errorf("error generating RSA private key: %s", err)
}
bs := x509.MarshalPKCS1PrivateKey(pk)
pemBlock, err := pemEncodeKey(bs, "RSA PRIVATE KEY")
if err != nil {
return nil, "", err
}
return pk, pemBlock, nil
}
func generateECDSAKey(keyBits int) (crypto.Signer, string, error) {
var pk *ecdsa.PrivateKey
var curve elliptic.Curve
switch keyBits {
case 224:
curve = elliptic.P224()
case 256:
curve = elliptic.P256()
case 384:
curve = elliptic.P384()
case 521:
curve = elliptic.P521()
default:
return nil, "", fmt.Errorf("error generating ECDSA private key: unknown curve length %d", keyBits)
}
pk, err := ecdsa.GenerateKey(curve, rand.Reader)
if err != nil {
return nil, "", fmt.Errorf("error generating ECDSA private key: %s", err)
}
bs, err := x509.MarshalECPrivateKey(pk)
if err != nil {
return nil, "", fmt.Errorf("error marshaling ECDSA private key: %s", err)
}
pemBlock, err := pemEncodeKey(bs, "EC PRIVATE KEY")
if err != nil {
return nil, "", err
}
return pk, pemBlock, nil
}
// GeneratePrivateKeyWithConfig generates a new private key of the given type and bit length
func GeneratePrivateKeyWithConfig(keyType string, keyBits int) (crypto.Signer, string, error) {
switch strings.ToLower(keyType) {
case "rsa":
return generateRSAKey(keyBits)
case "ec":
return generateECDSAKey(keyBits)
default:
return nil, "", fmt.Errorf("unknown private key type requested: %s", keyType)
}
}
func GeneratePrivateKey() (crypto.Signer, string, error) {
// TODO: find any calls to this func, replace with calls to GeneratePrivateKeyWithConfig()
// using prefs `private_key_type` and `private_key_bits`
return GeneratePrivateKeyWithConfig(DefaultPrivateKeyType, DefaultPrivateKeyBits)
}
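// Usage sketch (not part of this commit): callers needing something other than
// the default P-256 EC key can ask for it explicitly; "rsa"/2048 is an
// arbitrary illustration.
func exampleRSAKey() (crypto.Signer, string, error) {
	return GeneratePrivateKeyWithConfig("rsa", 2048)
}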
package connect
import (
"fmt"
"github.com/hashicorp/consul/agent/structs"
)
func UpstreamSNI(u *structs.Upstream, subset string, dc string, trustDomain string) string {
if u.Datacenter != "" {
dc = u.Datacenter
}
if u.DestinationType == structs.UpstreamDestTypePreparedQuery {
return QuerySNI(u.DestinationName, dc, trustDomain)
}
return ServiceSNI(u.DestinationName, subset, u.DestinationNamespace, dc, trustDomain)
}
func DatacenterSNI(dc string, trustDomain string) string {
return fmt.Sprintf("%s.internal.%s", dc, trustDomain)
}
func ServiceSNI(service string, subset string, namespace string, datacenter string, trustDomain string) string {
if namespace == "" {
namespace = "default"
}
if subset == "" {
return fmt.Sprintf("%s.%s.%s.internal.%s", service, namespace, datacenter, trustDomain)
} else {
return fmt.Sprintf("%s.%s.%s.%s.internal.%s", subset, service, namespace, datacenter, trustDomain)
}
}
func QuerySNI(service string, datacenter string, trustDomain string) string {
return fmt.Sprintf("%s.default.%s.query.%s", service, datacenter, trustDomain)
}
func TargetSNI(target *structs.DiscoveryTarget, trustDomain string) string {
return ServiceSNI(target.Service, target.ServiceSubset, target.Namespace, target.Datacenter, trustDomain)
}
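// Illustrative sketch (not part of this commit): the SNI names produced for a
// hypothetical service "web" and its "canary" subset in dc1 under an already
// truncated trust domain.
func exampleSNI() (plain, subset string) {
	const trustDomain = "1c6ff859.consul"
	plain = ServiceSNI("web", "", "", "dc1", trustDomain)        // web.default.dc1.internal.1c6ff859.consul
	subset = ServiceSNI("web", "canary", "", "dc1", trustDomain) // canary.web.default.dc1.internal.1c6ff859.consul
	return plain, subset
}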