Commit 294fd9aa authored by Alex Dadgar

Template hook

parent 915c1176
package interfaces
import "github.com/hashicorp/nomad/nomad/structs"
type EventEmitter interface {
SetState(state string, event *structs.TaskEvent)
EmitEvent(source, message string)
}
package interfaces
import "os"
// XXX These should probably all return an error and we should have predefined
// error types for the task not currently running
type TaskLifecycle interface {
Restart(source, reason string, failure bool)
Signal(source, reason string, s os.Signal) error
Kill(source, reason string, fail bool)
}
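A minimal no-op stub satisfying this interface (a hypothetical sketch, e.g. for tests; not part of this commit) might look like:
// noopLifecycle is a hypothetical stub implementation of TaskLifecycle,
// useful for exercising consumers such as the template manager in tests.
type noopLifecycle struct{}
func (noopLifecycle) Restart(source, reason string, failure bool)     {}
func (noopLifecycle) Signal(source, reason string, s os.Signal) error { return nil }
func (noopLifecycle) Kill(source, reason string, fail bool)           {}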
@@ -2,14 +2,6 @@ package taskrunner
import "os"
// XXX These should probably all return an error and we should have predefined
// error types for the task not currently running
type TaskLifecycle interface {
Restart(source, reason string, failure bool)
Signal(source, reason string, s os.Signal) error
Kill(source, reason string, fail bool)
}
func (tr *TaskRunner) Restart(source, reason string, failure bool) {
// TODO
}
@@ -498,6 +498,11 @@ func (tr *TaskRunner) SetState(state string, event *structs.TaskEvent) {
//}
}
func (tr *TaskRunner) EmitEvent(source, message string) {
event := structs.NewTaskEvent(source).SetMessage(message)
tr.SetState("", event)
}
// WaitCh is closed when TaskRunner.Run exits.
func (tr *TaskRunner) WaitCh() <-chan struct{} {
return tr.waitCh
@@ -9,16 +9,13 @@ import (
"github.com/hashicorp/nomad/client/allocrunner/getter"
"github.com/hashicorp/nomad/client/allocrunnerv2/interfaces"
ti "github.com/hashicorp/nomad/client/allocrunnerv2/taskrunner/interfaces"
"github.com/hashicorp/nomad/client/allocrunnerv2/taskrunner/state"
cconfig "github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver"
"github.com/hashicorp/nomad/nomad/structs"
)
type EventEmitter interface {
SetState(state string, event *structs.TaskEvent)
}
// initHooks initializes the task's hooks.
func (tr *TaskRunner) initHooks() {
hookLogger := tr.logger.Named("task_hook")
@@ -43,6 +40,18 @@ func (tr *TaskRunner) initHooks() {
task: tr.taskName,
}))
}
// If the task has templates, add the template hook
if task := tr.Task(); len(task.Templates) != 0 {
tr.runnerHooks = append(tr.runnerHooks, newTemplateHook(&templateHookConfig{
logger: hookLogger,
lifecycle: tr,
events: tr,
templates: task.Templates,
clientConfig: tr.clientConfig,
envBuilder: tr.envBuilder,
}))
}
}
// prerun is used to run the runners prerun hooks.
@@ -304,11 +313,11 @@ func (h *taskDirHook) Prerun(ctx context.Context, req *interfaces.TaskPrerunRequ
// artifactHook downloads artifacts for a task.
type artifactHook struct {
eventEmitter EventEmitter
eventEmitter ti.EventEmitter
logger log.Logger
}
func newArtifactHook(e EventEmitter, logger log.Logger) *artifactHook {
func newArtifactHook(e ti.EventEmitter, logger log.Logger) *artifactHook {
h := &artifactHook{
eventEmitter: e,
}
package template
import (
"fmt"
"math/rand"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
ctconf "github.com/hashicorp/consul-template/config"
"github.com/hashicorp/consul-template/manager"
"github.com/hashicorp/consul-template/signals"
envparse "github.com/hashicorp/go-envparse"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/nomad/client/allocrunnerv2/taskrunner/interfaces"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver/env"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/structs"
)
const (
// consulTemplateSourceName is the source name when using the TaskHooks.
consulTemplateSourceName = "Template"
// hostSrcOption is the Client option that determines whether the template
// source may be from the host
hostSrcOption = "template.allow_host_source"
// missingDepEventLimit is the number of missing dependencies that will be
// logged before we switch to showing just the number of missing
// dependencies.
missingDepEventLimit = 3
// DefaultMaxTemplateEventRate is the default maximum rate at which a
// template event should be fired.
DefaultMaxTemplateEventRate = 3 * time.Second
)
// TaskTemplateManager is used to run a set of templates for a given task
type TaskTemplateManager struct {
// config holds the template manager's configuration
config *TaskTemplateManagerConfig
// lookup allows looking up the set of Nomad templates by their consul-template ID
lookup map[string][]*structs.Template
// runner is the consul-template runner
runner *manager.Runner
// signals is a lookup map from the string representation of a signal to its
// actual signal
signals map[string]os.Signal
// shutdownCh is used to signal any started goroutines to shut down
shutdownCh chan struct{}
// shutdown marks whether the manager has been shutdown
shutdown bool
shutdownLock sync.Mutex
}
// TaskTemplateManagerConfig is used to configure an instance of the
// TaskTemplateManager
type TaskTemplateManagerConfig struct {
// UnblockCh is closed when the template has been rendered
UnblockCh chan struct{}
// Lifecycle is used to interact with the task the template manager is being
// run for
Lifecycle interfaces.TaskLifecycle
// Events is used to emit events for the task
Events interfaces.EventEmitter
// Templates is the set of templates we are managing
Templates []*structs.Template
// ClientConfig is the Nomad Client configuration
ClientConfig *config.Config
// VaultToken is the Vault token for the task.
VaultToken string
// TaskDir is the task's directory
TaskDir string
// EnvBuilder is the environment variable builder for the task.
EnvBuilder *env.Builder
// MaxTemplateEventRate is the maximum rate at which we should emit events.
MaxTemplateEventRate time.Duration
// retryRate is only used for testing and is used to increase the retry rate
retryRate time.Duration
}
// Validate validates the configuration.
func (c *TaskTemplateManagerConfig) Validate() error {
if c == nil {
return fmt.Errorf("Nil config passed")
} else if c.UnblockCh == nil {
return fmt.Errorf("Invalid unblock channel given")
} else if c.Lifecycle == nil {
return fmt.Errorf("Invalid lifecycle hooks given")
} else if c.Events == nil {
return fmt.Errorf("Invalid event hook given")
} else if c.ClientConfig == nil {
return fmt.Errorf("Invalid client config given")
} else if c.TaskDir == "" {
return fmt.Errorf("Invalid task directory given: %q", c.TaskDir)
} else if c.EnvBuilder == nil {
return fmt.Errorf("Invalid task environment given")
} else if c.MaxTemplateEventRate == 0 {
return fmt.Errorf("Invalid max template event rate given")
}
return nil
}
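As a sketch of the intended call pattern (a hypothetical helper, not part of this commit; the config values are assumed to come from the surrounding task runner):
// startTemplateManager illustrates the call pattern: NewTaskTemplateManager
// starts rendering in a background goroutine, and the caller blocks on
// UnblockCh until the first full render completes. The template hook later
// in this commit does the same, additionally selecting on its context so a
// shutdown can interrupt the wait.
func startTemplateManager(cfg *TaskTemplateManagerConfig) (*TaskTemplateManager, error) {
	mgr, err := NewTaskTemplateManager(cfg)
	if err != nil {
		return nil, err
	}
	// Closed by the run loop once every template has rendered.
	<-cfg.UnblockCh
	return mgr, nil
}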
func NewTaskTemplateManager(config *TaskTemplateManagerConfig) (*TaskTemplateManager, error) {
// Check pre-conditions
if err := config.Validate(); err != nil {
return nil, err
}
tm := &TaskTemplateManager{
config: config,
shutdownCh: make(chan struct{}),
}
// Parse the signals that we need
for _, tmpl := range config.Templates {
if tmpl.ChangeSignal == "" {
continue
}
sig, err := signals.Parse(tmpl.ChangeSignal)
if err != nil {
return nil, fmt.Errorf("Failed to parse signal %q", tmpl.ChangeSignal)
}
if tm.signals == nil {
tm.signals = make(map[string]os.Signal)
}
tm.signals[tmpl.ChangeSignal] = sig
}
// Build the consul-template runner
runner, lookup, err := templateRunner(config)
if err != nil {
return nil, err
}
tm.runner = runner
tm.lookup = lookup
go tm.run()
return tm, nil
}
// Stop is used to stop the consul-template runner
func (tm *TaskTemplateManager) Stop() {
tm.shutdownLock.Lock()
defer tm.shutdownLock.Unlock()
if tm.shutdown {
return
}
close(tm.shutdownCh)
tm.shutdown = true
// Stop the consul-template runner
if tm.runner != nil {
tm.runner.Stop()
}
}
// run is the long-lived loop that handles errors and template render events
func (tm *TaskTemplateManager) run() {
// Runner is nil if there are no templates
if tm.runner == nil {
// Unblock the start if there is nothing to do
close(tm.config.UnblockCh)
return
}
// Start the runner
go tm.runner.Start()
// Block till all the templates have been rendered
tm.handleFirstRender()
// Detect if there was a shutdown.
select {
case <-tm.shutdownCh:
return
default:
}
// Read environment variables from env templates before we unblock
envMap, err := loadTemplateEnv(tm.config.Templates, tm.config.TaskDir)
if err != nil {
tm.config.Lifecycle.Kill(consulTemplateSourceName, err.Error(), true)
return
}
tm.config.EnvBuilder.SetTemplateEnv(envMap)
// Unblock the task
close(tm.config.UnblockCh)
// If all our templates are change mode no-op, then we can exit here
if tm.allTemplatesNoop() {
return
}
// Handle all subsequent render events.
tm.handleTemplateRerenders(time.Now())
}
// handleFirstRender blocks till all templates have been rendered
func (tm *TaskTemplateManager) handleFirstRender() {
// missingDependencies is the set of missing dependencies.
var missingDependencies map[string]struct{}
// eventTimer is used to trigger the firing of an event showing the missing
// dependencies.
eventTimer := time.NewTimer(tm.config.MaxTemplateEventRate)
if !eventTimer.Stop() {
<-eventTimer.C
}
// outstandingEvent tracks whether there is an outstanding event that should
// be fired.
outstandingEvent := false
// Wait till all the templates have been rendered
WAIT:
for {
select {
case <-tm.shutdownCh:
return
case err, ok := <-tm.runner.ErrCh:
if !ok {
continue
}
tm.config.Lifecycle.Kill(consulTemplateSourceName, err.Error(), true)
case <-tm.runner.TemplateRenderedCh():
// A template has been rendered, figure out what to do
events := tm.runner.RenderEvents()
// Not all templates have been rendered yet
if len(events) < len(tm.lookup) {
continue
}
for _, event := range events {
// This template hasn't been rendered
if event.LastWouldRender.IsZero() {
continue WAIT
}
}
break WAIT
case <-tm.runner.RenderEventCh():
events := tm.runner.RenderEvents()
joinedSet := make(map[string]struct{})
for _, event := range events {
missing := event.MissingDeps
if missing == nil {
continue
}
for _, dep := range missing.List() {
joinedSet[dep.String()] = struct{}{}
}
}
// Check to see if the new joined set is the same as the old
different := len(joinedSet) != len(missingDependencies)
if !different {
for k := range joinedSet {
if _, ok := missingDependencies[k]; !ok {
different = true
break
}
}
}
// Nothing to do
if !different {
continue
}
// Update the missing set
missingDependencies = joinedSet
// Update the event timer channel
if !outstandingEvent {
// We got new data so reset
outstandingEvent = true
eventTimer.Reset(tm.config.MaxTemplateEventRate)
}
case <-eventTimer.C:
if missingDependencies == nil {
continue
}
// Clear the outstanding event
outstandingEvent = false
// Build the missing set
missingSlice := make([]string, 0, len(missingDependencies))
for k := range missingDependencies {
missingSlice = append(missingSlice, k)
}
sort.Strings(missingSlice)
if l := len(missingSlice); l > missingDepEventLimit {
missingSlice[missingDepEventLimit] = fmt.Sprintf("and %d more", l-missingDepEventLimit)
missingSlice = missingSlice[:missingDepEventLimit+1]
}
missingStr := strings.Join(missingSlice, ", ")
tm.config.Events.EmitEvent(consulTemplateSourceName, fmt.Sprintf("Missing: %s", missingStr))
}
}
}
// handleTemplateRerenders is used to handle template render events after they
// have all rendered. It takes action based on which set of templates re-render.
// The passed allRenderedTime is the time at which all templates have rendered.
// This is used to avoid signaling the task for any render event beforehand.
func (tm *TaskTemplateManager) handleTemplateRerenders(allRenderedTime time.Time) {
// A lookup for the last time the template was handled
handledRenders := make(map[string]time.Time, len(tm.config.Templates))
for {
select {
case <-tm.shutdownCh:
return
case err, ok := <-tm.runner.ErrCh:
if !ok {
continue
}
tm.config.Lifecycle.Kill(consulTemplateSourceName, err.Error(), true)
case <-tm.runner.TemplateRenderedCh():
// A template has been rendered, figure out what to do
var handling []string
signals := make(map[string]struct{})
restart := false
var splay time.Duration
events := tm.runner.RenderEvents()
for id, event := range events {
// First time through
if allRenderedTime.After(event.LastDidRender) || allRenderedTime.Equal(event.LastDidRender) {
handledRenders[id] = allRenderedTime
continue
}
// We have already handled this one
if htime := handledRenders[id]; htime.After(event.LastDidRender) || htime.Equal(event.LastDidRender) {
continue
}
// Lookup the template and determine what to do
tmpls, ok := tm.lookup[id]
if !ok {
tm.config.Lifecycle.Kill(consulTemplateSourceName, fmt.Sprintf("template runner returned unknown template id %q", id), true)
return
}
// Read environment variables from templates
envMap, err := loadTemplateEnv(tm.config.Templates, tm.config.TaskDir)
if err != nil {
tm.config.Lifecycle.Kill(consulTemplateSourceName, err.Error(), true)
return
}
tm.config.EnvBuilder.SetTemplateEnv(envMap)
for _, tmpl := range tmpls {
switch tmpl.ChangeMode {
case structs.TemplateChangeModeSignal:
signals[tmpl.ChangeSignal] = struct{}{}
case structs.TemplateChangeModeRestart:
restart = true
case structs.TemplateChangeModeNoop:
continue
}
if tmpl.Splay > splay {
splay = tmpl.Splay
}
}
handling = append(handling, id)
}
if restart || len(signals) != 0 {
if splay != 0 {
ns := splay.Nanoseconds()
offset := rand.Int63n(ns)
t := time.Duration(offset)
select {
case <-time.After(t):
case <-tm.shutdownCh:
return
}
}
// Update handle time
for _, id := range handling {
handledRenders[id] = events[id].LastDidRender
}
if restart {
const failure = false
tm.config.Lifecycle.Restart(consulTemplateSourceName, "template with change_mode restart re-rendered", failure)
} else if len(signals) != 0 {
var mErr multierror.Error
for signal := range signals {
err := tm.config.Lifecycle.Signal(consulTemplateSourceName, "template re-rendered", tm.signals[signal])
if err != nil {
multierror.Append(&mErr, err)
}
}
if err := mErr.ErrorOrNil(); err != nil {
flat := make([]os.Signal, 0, len(signals))
for signal := range signals {
flat = append(flat, tm.signals[signal])
}
tm.config.Lifecycle.Kill(consulTemplateSourceName, fmt.Sprintf("Sending signals %v failed: %v", flat, err), true)
}
}
}
}
}
}
// allTemplatesNoop returns whether all the managed templates have change mode noop.
func (tm *TaskTemplateManager) allTemplatesNoop() bool {
for _, tmpl := range tm.config.Templates {
if tmpl.ChangeMode != structs.TemplateChangeModeNoop {
return false
}
}
return true
}
// templateRunner returns a consul-template runner for the given templates and a
// lookup from consul-template IDs to the Nomad templates they represent. If the
// config has no templates, a nil runner and lookup are returned.
func templateRunner(config *TaskTemplateManagerConfig) (
*manager.Runner, map[string][]*structs.Template, error) {
if len(config.Templates) == 0 {
return nil, nil, nil
}
// Parse the templates
ctmplMapping, err := parseTemplateConfigs(config)
if err != nil {
return nil, nil, err
}
// Create the runner configuration.
runnerConfig, err := newRunnerConfig(config, ctmplMapping)
if err != nil {
return nil, nil, err
}
runner, err := manager.NewRunner(runnerConfig, false, false)
if err != nil {
return nil, nil, err
}
// Set Nomad's environment variables
runner.Env = config.EnvBuilder.Build().All()
// Build the lookup
idMap := runner.TemplateConfigMapping()
lookup := make(map[string][]*structs.Template, len(idMap))
for id, ctmpls := range idMap {
for _, ctmpl := range ctmpls {
templates := lookup[id]
templates = append(templates, ctmplMapping[ctmpl])
lookup[id] = templates
}
}
return runner, lookup, nil
}
// parseTemplateConfigs converts the task's templates in the config into
// consul-template template configurations.
func parseTemplateConfigs(config *TaskTemplateManagerConfig) (map[ctconf.TemplateConfig]*structs.Template, error) {
allowAbs := config.ClientConfig.ReadBoolDefault(hostSrcOption, true)
taskEnv := config.EnvBuilder.Build()
ctmpls := make(map[ctconf.TemplateConfig]*structs.Template, len(config.Templates))
for _, tmpl := range config.Templates {
var src, dest string
if tmpl.SourcePath != "" {
if filepath.IsAbs(tmpl.SourcePath) {
if !allowAbs {
return nil, fmt.Errorf("Specifying absolute template paths disallowed by client config: %q", tmpl.SourcePath)
}
src = tmpl.SourcePath
} else {
src = filepath.Join(config.TaskDir, taskEnv.ReplaceEnv(tmpl.SourcePath))
}
}
if tmpl.DestPath != "" {
dest = filepath.Join(config.TaskDir, taskEnv.ReplaceEnv(tmpl.DestPath))
}
ct := ctconf.DefaultTemplateConfig()
ct.Source = &src
ct.Destination = &dest
ct.Contents = &tmpl.EmbeddedTmpl
ct.LeftDelim = &tmpl.LeftDelim
ct.RightDelim = &tmpl.RightDelim
// Set the permissions
if tmpl.Perms != "" {
v, err := strconv.ParseUint(tmpl.Perms, 8, 12)
if err != nil {
return nil, fmt.Errorf("Failed to parse %q as octal: %v", tmpl.Perms, err)
}
m := os.FileMode(v)
ct.Perms = &m
}
ct.Finalize()
ctmpls[*ct] = tmpl
}
return ctmpls, nil
}
// newRunnerConfig returns a consul-template runner configuration, setting the
// Vault and Consul configurations based on the client's config.
func newRunnerConfig(config *TaskTemplateManagerConfig,
templateMapping map[ctconf.TemplateConfig]*structs.Template) (*ctconf.Config, error) {
cc := config.ClientConfig
conf := ctconf.DefaultConfig()
// Gather the consul-template templates
flat := ctconf.TemplateConfigs(make([]*ctconf.TemplateConfig, 0, len(templateMapping)))
for ctmpl := range templateMapping {
local := ctmpl
flat = append(flat, &local)
}
conf.Templates = &flat
// Go through the templates and determine the minimum Vault grace
vaultGrace := time.Duration(-1)
for _, tmpl := range templateMapping {
// Initial condition
if vaultGrace < 0 {
vaultGrace = tmpl.VaultGrace
} else if tmpl.VaultGrace < vaultGrace {
vaultGrace = tmpl.VaultGrace
}
}
// Force faster retries
if config.retryRate != 0 {
rate := config.retryRate
conf.Consul.Retry.Backoff = &rate
}
// Setup the Consul config
if cc.ConsulConfig != nil {
conf.Consul.Address = &cc.ConsulConfig.Addr
conf.Consul.Token = &cc.ConsulConfig.Token
if cc.ConsulConfig.EnableSSL != nil && *cc.ConsulConfig.EnableSSL {
verify := cc.ConsulConfig.VerifySSL != nil && *cc.ConsulConfig.VerifySSL
conf.Consul.SSL = &ctconf.SSLConfig{
Enabled: helper.BoolToPtr(true),
Verify: &verify,
Cert: &cc.ConsulConfig.CertFile,
Key: &cc.ConsulConfig.KeyFile,
CaCert: &cc.ConsulConfig.CAFile,
}
}
if cc.ConsulConfig.Auth != "" {
parts := strings.SplitN(cc.ConsulConfig.Auth, ":", 2)
if len(parts) != 2 {
return nil, fmt.Errorf("Failed to parse Consul Auth config")
}
conf.Consul.Auth = &ctconf.AuthConfig{
Enabled: helper.BoolToPtr(true),
Username: &parts[0],
Password: &parts[1],
}
}
}
// Setup the Vault config
// Always set these to ensure nothing is picked up from the environment
emptyStr := ""
conf.Vault.RenewToken = helper.BoolToPtr(false)
conf.Vault.Token = &emptyStr
if cc.VaultConfig != nil && cc.VaultConfig.IsEnabled() {
conf.Vault.Address = &cc.VaultConfig.Addr
conf.Vault.Token = &config.VaultToken
conf.Vault.Grace = helper.TimeToPtr(vaultGrace)
if strings.HasPrefix(cc.VaultConfig.Addr, "https") || cc.VaultConfig.TLSCertFile != "" {
skipVerify := cc.VaultConfig.TLSSkipVerify != nil && *cc.VaultConfig.TLSSkipVerify
verify := !skipVerify
conf.Vault.SSL = &ctconf.SSLConfig{
Enabled: helper.BoolToPtr(true),
Verify: &verify,
Cert: &cc.VaultConfig.TLSCertFile,
Key: &cc.VaultConfig.TLSKeyFile,
CaCert: &cc.VaultConfig.TLSCaFile,
CaPath: &cc.VaultConfig.TLSCaPath,
ServerName: &cc.VaultConfig.TLSServerName,
}
} else {
conf.Vault.SSL = &ctconf.SSLConfig{
Enabled: helper.BoolToPtr(false),
Verify: helper.BoolToPtr(false),
Cert: &emptyStr,
Key: &emptyStr,
CaCert: &emptyStr,
CaPath: &emptyStr,
ServerName: &emptyStr,
}
}
}
conf.Finalize()
return conf, nil
}
// loadTemplateEnv loads task environment variables from all templates.
func loadTemplateEnv(tmpls []*structs.Template, taskDir string) (map[string]string, error) {
all := make(map[string]string, 50)
for _, t := range tmpls {
if !t.Envvars {
continue
}
f, err := os.Open(filepath.Join(taskDir, t.DestPath))
if err != nil {
return nil, fmt.Errorf("error opening env template: %v", err)
}
defer f.Close()
// Parse the environment file
vars, err := envparse.Parse(f)
if err != nil {
return nil, fmt.Errorf("error parsing env template %q: %v", t.DestPath, err)
}
for k, v := range vars {
all[k] = v
}
}
return all, nil
}
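For illustration, a hypothetical wrapper over loadTemplateEnv, assuming a template marked Envvars whose rendered destination file contains plain KEY=value lines:
// readTaskEnv is a hypothetical example. Given a rendered file env.txt in
// the task directory containing:
//   DB_HOST=10.0.0.5
//   DB_PORT=5432
// it returns map[string]string{"DB_HOST": "10.0.0.5", "DB_PORT": "5432"}.
func readTaskEnv(taskDir string) (map[string]string, error) {
	tmpls := []*structs.Template{{DestPath: "env.txt", Envvars: true}}
	return loadTemplateEnv(tmpls, taskDir)
}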
This diff is collapsed.
package taskrunner
import (
"context"
"fmt"
"sync"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/nomad/client/allocrunnerv2/interfaces"
ti "github.com/hashicorp/nomad/client/allocrunnerv2/taskrunner/interfaces"
"github.com/hashicorp/nomad/client/allocrunnerv2/taskrunner/template"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver/env"
"github.com/hashicorp/nomad/nomad/structs"
)
type templateHookConfig struct {
// logger is used to log
logger log.Logger
// lifecycle is used to interact with the task's lifecycle
lifecycle ti.TaskLifecycle
// events is used to emit events
events ti.EventEmitter
// templates is the set of templates we are managing
templates []*structs.Template
// clientConfig is the Nomad Client configuration
clientConfig *config.Config
// envBuilder is the environment variable builder for the task.
envBuilder *env.Builder
}
type templateHook struct {
config *templateHookConfig
// logger is used to log
logger log.Logger
// templateManager is used to manage any consul-templates this task may have
templateManager *template.TaskTemplateManager
managerLock sync.Mutex
// vaultToken is the current Vault token
vaultToken string
// taskDir is the task directory
taskDir string
}
func newTemplateHook(config *templateHookConfig) *templateHook {
h := &templateHook{
config: config,
}
h.logger = config.logger.Named(h.Name())
return h
}
func (*templateHook) Name() string {
return "template"
}
func (h *templateHook) Prerun(ctx context.Context, req *interfaces.TaskPrerunRequest, resp *interfaces.TaskPrerunResponse) error {
h.managerLock.Lock()
defer h.managerLock.Unlock()
// If we have already run prerun, exit early.
if h.templateManager != nil {
return nil
}
// Store the current Vault token and the task directory
h.taskDir = req.TaskDir
h.vaultToken = req.VaultToken
unblockCh, err := h.newManager()
if err != nil {
return err
}
// Wait for the template to render
select {
case <-ctx.Done():
case <-unblockCh:
}
return nil
}
func (h *templateHook) newManager() (unblock chan struct{}, err error) {
unblock = make(chan struct{})
m, err := template.NewTaskTemplateManager(&template.TaskTemplateManagerConfig{
UnblockCh: unblock,
Lifecycle: h.config.lifecycle,
Events: h.config.events,
Templates: h.config.templates,
ClientConfig: h.config.clientConfig,
VaultToken: h.vaultToken,
TaskDir: h.taskDir,
EnvBuilder: h.config.envBuilder,
MaxTemplateEventRate: template.DefaultMaxTemplateEventRate,
})
if err != nil {
h.logger.Error("failed to create template manager", "error", err)
return nil, err
}
h.templateManager = m
return unblock, nil
}
func (h *templateHook) Poststop(ctx context.Context, req *interfaces.TaskPoststopRequest, resp *interfaces.TaskPoststopResponse) error {
h.managerLock.Lock()
defer h.managerLock.Unlock()
// Shut down any created template manager
if h.templateManager != nil {
h.templateManager.Stop()
}
return nil
}
// Handle new Vault token
func (h *templateHook) Update(ctx context.Context, req *interfaces.TaskUpdateRequest, resp *interfaces.TaskUpdateResponse) error {
h.managerLock.Lock()
defer h.managerLock.Unlock()
// Nothing to do
if h.templateManager == nil {
return nil
}
// Check if the Vault token has changed
if req.VaultToken == h.vaultToken {
return nil
} else {
h.vaultToken = req.VaultToken
}
// Shut down the old template manager
h.templateManager.Stop()
h.templateManager = nil
// Create the new template
if _, err := h.newManager(); err != nil {
err := fmt.Errorf("failed to build template manager: %v", err)
h.logger.Error("failed to build template manager", "error", err)
// XXX I think we can skip this
// r.setState(structs.TaskStateDead, structs.NewTaskEvent(structs.TaskSetupFailure).SetSetupError(err).SetFailsTask(), false)
h.config.lifecycle.Kill(h.Name(), err.Error(), true)
}
return nil
}
@@ -14,6 +14,7 @@ import (
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/allocrunnerv2/interfaces"
ti "github.com/hashicorp/nomad/client/allocrunnerv2/taskrunner/interfaces"
"github.com/hashicorp/nomad/client/vaultclient"
"github.com/hashicorp/nomad/nomad/structs"
)
@@ -50,8 +51,8 @@ func (tr *TaskRunner) updatedVaultToken(token string) {
type vaultHookConfig struct {
vaultStanza *structs.Vault
client vaultclient.VaultClient
events EventEmitter
lifecycle TaskLifecycle
events ti.EventEmitter
lifecycle ti.TaskLifecycle
updater vaultTokenUpdateHandler
logger log.Logger
alloc *structs.Allocation
@@ -63,10 +64,10 @@ type vaultHook struct {
vaultStanza *structs.Vault
// eventEmitter is used to emit events to the task
eventEmitter EventEmitter
eventEmitter ti.EventEmitter
// lifecycle is used to signal, restart and kill a task
lifecycle TaskLifecycle
lifecycle ti.TaskLifecycle
// updater is used to update the Vault token
updater vaultTokenUpdateHandler