Commit bc1894fb authored by Michael Schurter

Merge branch 'master' into f-cpu_hard_limit

parents bc0256da 5fad2288
Showing with 619 additions and 252 deletions
......@@ -6,24 +6,31 @@ __BACKWARDS INCOMPATIBILITIES:__
in HTTP check paths will now fail to validate. [[GH-3685](https://github.com/hashicorp/nomad/issues/3685)]
IMPROVEMENTS:
* core: Allow upgrading/downgrading TLS via SIGHUP on both servers and clients [[GH-3492](https://github.com/hashicorp/nomad/issues/3492)]
* core: A set of features (Autopilot) has been added to allow for automatic operator-friendly management of Nomad servers. For more information about Autopilot, see the [Autopilot Guide](https://www.nomadproject.io/guides/cluster/autopilot.html). [[GH-3670](https://github.com/hashicorp/nomad/pull/3670)]
* cli: Use ISO_8601 time format for cli output
[[GH-3814](https://github.com/hashicorp/nomad/pull/3814)]
* client: Allow '.' in environment variable names [[GH-3760](https://github.com/hashicorp/nomad/issues/3760)]
* client: Refactor client fingerprint methods to a request/response format
[[GH-3781](https://github.com/hashicorp/nomad/issues/3781)]
* discovery: Allow `check_restart` to be specified in the `service` stanza.
[[GH-3718](https://github.com/hashicorp/nomad/issues/3718)]
* driver/docker: Support advertising IPv6 addresses [[GH-3790](https://github.com/hashicorp/nomad/issues/3790)]
* driver/docker: Support overriding image entrypoint [[GH-3788](https://github.com/hashicorp/nomad/issues/3788)]
* driver/docker: Support adding or dropping capabilities [[GH-3754](https://github.com/hashicorp/nomad/issues/3754)]
* driver/docker: Support mounting root filesystem as read-only [[GH-3802](https://github.com/hashicorp/nomad/issues/3802)]
* driver/lxc: Add volumes config to LXC driver [[GH-3687](https://github.com/hashicorp/nomad/issues/3687)]
* telemetry: Support DataDog tags [[GH-3839](https://github.com/hashicorp/nomad/issues/3839)]
BUG FIXES:
* core: Fix search endpoint forwarding for multi-region clusters [[GH-3680](https://github.com/hashicorp/nomad/issues/3680)]
* core: Allow upgrading/downgrading TLS via SIGHUP on both servers and clients [[GH-3492](https://github.com/hashicorp/nomad/issues/3492)]
* core: Fix an issue in which batch jobs with queued placements and lost
allocations could result in improper placement counts [[GH-3717](https://github.com/hashicorp/nomad/issues/3717)]
* client: Migrated ephemeral_disk's maintain directory permissions [[GH-3723](https://github.com/hashicorp/nomad/issues/3723)]
* client: Always advertise driver IP when in driver address mode [[GH-3682](https://github.com/hashicorp/nomad/issues/3682)]
* client/vault: Recognize renewing non-renewable Vault lease as fatal [[GH-3727](https://github.com/hashicorp/nomad/issues/3727)]
* config: Revert minimum CPU limit back to 20 from 100.
* driver/lxc: Cleanup LXC containers after errors on container startup. [[GH-3773](https://github.com/hashicorp/nomad/issues/3773)]
* ui: Fix ui on non-leaders when ACLs are enabled [[GH-3722](https://github.com/hashicorp/nomad/issues/3722)]
* ui: Fix requests using client-side certificates in Firefox. [[GH-3728](https://github.com/hashicorp/nomad/pull/3728)]
......@@ -163,8 +170,7 @@ BUG FIXES:
change [[GH-3214](https://github.com/hashicorp/nomad/issues/3214)]
* api: Fix search handling of jobs with more than four hyphens and case were
length could cause lookup error [[GH-3203](https://github.com/hashicorp/nomad/issues/3203)]
* client: Improve the speed at which clients detect garbage collection events
[GH_-3452]
* client: Improve the speed at which clients detect garbage collection events [[GH-3452](https://github.com/hashicorp/nomad/issues/3452)]
* client: Fix lock contention that could cause a node to miss a heartbeat and
be marked as down [[GH-3195](https://github.com/hashicorp/nomad/issues/3195)]
* client: Fix data race that could lead to concurrent map read/writes during
......
......@@ -148,7 +148,7 @@ deps: ## Install build and development dependencies
@echo "==> Updating build dependencies..."
go get -u github.com/kardianos/govendor
go get -u github.com/ugorji/go/codec/codecgen
go get -u github.com/jteeuwen/go-bindata/...
go get -u github.com/hashicorp/go-bindata/...
go get -u github.com/elazarl/go-bindata-assetfs/...
go get -u github.com/a8m/tree/cmd/tree
go get -u github.com/magiconair/vendorfmt/cmd/vendorfmt
......
......@@ -151,6 +151,7 @@ type HostDiskStats struct {
// NodeListStub is a subset of information returned during
// node list operations.
type NodeListStub struct {
Address string
ID string
Datacenter string
Name string
......
......@@ -2,6 +2,7 @@ package api
import (
"bytes"
"encoding/json"
"fmt"
"io"
"strconv"
......@@ -19,7 +20,7 @@ type AutopilotConfiguration struct {
// LastContactThreshold is the limit on the amount of time a server can go
// without leader contact before being considered unhealthy.
LastContactThreshold *ReadableDuration
LastContactThreshold time.Duration
// MaxTrailingLogs is the amount of entries in the Raft Log that a server can
// be behind before being considered unhealthy.
......@@ -28,20 +29,19 @@ type AutopilotConfiguration struct {
// ServerStabilizationTime is the minimum amount of time a server must be
// in a stable, healthy state before it can be added to the cluster. Only
// applicable with Raft protocol version 3 or higher.
ServerStabilizationTime *ReadableDuration
ServerStabilizationTime time.Duration
// (Enterprise-only) RedundancyZoneTag is the node tag to use for separating
// servers into zones for redundancy. If left blank, this feature will be disabled.
RedundancyZoneTag string
// (Enterprise-only) EnableRedundancyZones specifies whether to enable redundancy zones.
EnableRedundancyZones bool
// (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration
// strategy of waiting until enough newer-versioned servers have been added to the
// cluster before promoting them to voters.
DisableUpgradeMigration bool
// (Enterprise-only) UpgradeVersionTag is the node tag to use for version info when
// performing upgrade migrations. If left blank, the Nomad version will be used.
UpgradeVersionTag string
// (Enterprise-only) EnableCustomUpgrades specifies whether to enable using custom
// upgrade versions when performing migrations.
EnableCustomUpgrades bool
// CreateIndex holds the index corresponding the creation of this configuration.
// This is a read-only field.
......@@ -54,6 +54,45 @@ type AutopilotConfiguration struct {
ModifyIndex uint64
}
func (u *AutopilotConfiguration) MarshalJSON() ([]byte, error) {
type Alias AutopilotConfiguration
return json.Marshal(&struct {
LastContactThreshold string
ServerStabilizationTime string
*Alias
}{
LastContactThreshold: u.LastContactThreshold.String(),
ServerStabilizationTime: u.ServerStabilizationTime.String(),
Alias: (*Alias)(u),
})
}
func (u *AutopilotConfiguration) UnmarshalJSON(data []byte) error {
type Alias AutopilotConfiguration
aux := &struct {
LastContactThreshold string
ServerStabilizationTime string
*Alias
}{
Alias: (*Alias)(u),
}
if err := json.Unmarshal(data, &aux); err != nil {
return err
}
var err error
if aux.LastContactThreshold != "" {
if u.LastContactThreshold, err = time.ParseDuration(aux.LastContactThreshold); err != nil {
return err
}
}
if aux.ServerStabilizationTime != "" {
if u.ServerStabilizationTime, err = time.ParseDuration(aux.ServerStabilizationTime); err != nil {
return err
}
}
return nil
}
// ServerHealth is the health (from the leader's point of view) of a server.
type ServerHealth struct {
// ID is the raft ID of the server.
......@@ -75,7 +114,7 @@ type ServerHealth struct {
Leader bool
// LastContact is the time since this node's last contact with the leader.
LastContact *ReadableDuration
LastContact time.Duration
// LastTerm is the highest leader term this server has a record of in its Raft log.
LastTerm uint64
......@@ -94,6 +133,37 @@ type ServerHealth struct {
StableSince time.Time
}
func (u *ServerHealth) MarshalJSON() ([]byte, error) {
type Alias ServerHealth
return json.Marshal(&struct {
LastContact string
*Alias
}{
LastContact: u.LastContact.String(),
Alias: (*Alias)(u),
})
}
func (u *ServerHealth) UnmarshalJSON(data []byte) error {
type Alias ServerHealth
aux := &struct {
LastContact string
*Alias
}{
Alias: (*Alias)(u),
}
if err := json.Unmarshal(data, &aux); err != nil {
return err
}
var err error
if aux.LastContact != "" {
if u.LastContact, err = time.ParseDuration(aux.LastContact); err != nil {
return err
}
}
return nil
}
// OperatorHealthReply is a representation of the overall health of the cluster
type OperatorHealthReply struct {
// Healthy is true if all the servers in the cluster are healthy.
......@@ -107,46 +177,6 @@ type OperatorHealthReply struct {
Servers []ServerHealth
}
// ReadableDuration is a duration type that is serialized to JSON in human readable format.
type ReadableDuration time.Duration
func NewReadableDuration(dur time.Duration) *ReadableDuration {
d := ReadableDuration(dur)
return &d
}
func (d *ReadableDuration) String() string {
return d.Duration().String()
}
func (d *ReadableDuration) Duration() time.Duration {
if d == nil {
return time.Duration(0)
}
return time.Duration(*d)
}
func (d *ReadableDuration) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil
}
func (d *ReadableDuration) UnmarshalJSON(raw []byte) error {
if d == nil {
return fmt.Errorf("cannot unmarshal to nil pointer")
}
str := string(raw)
if len(str) < 2 || str[0] != '"' || str[len(str)-1] != '"' {
return fmt.Errorf("must be enclosed with quotes: %s", str)
}
dur, err := time.ParseDuration(str[1 : len(str)-1])
if err != nil {
return err
}
*d = ReadableDuration(dur)
return nil
}
// AutopilotGetConfiguration is used to query the current Autopilot configuration.
func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, error) {
r, err := op.c.newRequest("GET", "/v1/operator/autopilot/configuration")
......
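
The operator_autopilot.go hunks above drop the api package's ReadableDuration wrapper in favor of plain time.Duration fields plus MarshalJSON/UnmarshalJSON methods that serialize the durations as human-readable strings. Below is a minimal, self-contained sketch of the same alias-struct pattern; the Settings type and its field are hypothetical stand-ins, not part of the Nomad API.

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Settings is a hypothetical stand-in for AutopilotConfiguration: it keeps a
// plain time.Duration in Go but serializes it as a string such as "200ms".
type Settings struct {
	LastContactThreshold time.Duration
}

// MarshalJSON shadows the duration field with its string form via a local
// Alias type, mirroring the pattern added in the diff.
func (s *Settings) MarshalJSON() ([]byte, error) {
	type Alias Settings
	return json.Marshal(&struct {
		LastContactThreshold string
		*Alias
	}{
		LastContactThreshold: s.LastContactThreshold.String(),
		Alias:                (*Alias)(s),
	})
}

// UnmarshalJSON parses the string form back into a time.Duration.
func (s *Settings) UnmarshalJSON(data []byte) error {
	type Alias Settings
	aux := &struct {
		LastContactThreshold string
		*Alias
	}{Alias: (*Alias)(s)}
	if err := json.Unmarshal(data, aux); err != nil {
		return err
	}
	if aux.LastContactThreshold != "" {
		d, err := time.ParseDuration(aux.LastContactThreshold)
		if err != nil {
			return err
		}
		s.LastContactThreshold = d
	}
	return nil
}

func main() {
	in := &Settings{LastContactThreshold: 200 * time.Millisecond}
	b, _ := json.Marshal(in)
	fmt.Println(string(b)) // {"LastContactThreshold":"200ms"}

	var out Settings
	_ = json.Unmarshal(b, &out)
	fmt.Println(out.LastContactThreshold) // 200ms
}
```

HTTP API consumers keep the readable string form while Go callers work with time.Duration directly.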
......@@ -17,13 +17,17 @@ func TestAPI_OperatorAutopilotGetSetConfiguration(t *testing.T) {
defer s.Stop()
operator := c.Operator()
config, err := operator.AutopilotGetConfiguration(nil)
assert.Nil(err)
var config *AutopilotConfiguration
retry.Run(t, func(r *retry.R) {
var err error
config, err = operator.AutopilotGetConfiguration(nil)
r.Check(err)
})
assert.True(config.CleanupDeadServers)
// Change a config setting
newConf := &AutopilotConfiguration{CleanupDeadServers: false}
err = operator.AutopilotSetConfiguration(newConf, nil)
err := operator.AutopilotSetConfiguration(newConf, nil)
assert.Nil(err)
config, err = operator.AutopilotGetConfiguration(nil)
......
......@@ -23,6 +23,7 @@ import (
"github.com/hashicorp/nomad/client/driver"
"github.com/hashicorp/nomad/client/fingerprint"
"github.com/hashicorp/nomad/client/stats"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/client/vaultclient"
"github.com/hashicorp/nomad/command/agent/consul"
"github.com/hashicorp/nomad/helper"
......@@ -931,33 +932,42 @@ func (c *Client) fingerprint() error {
c.logger.Printf("[DEBUG] client: built-in fingerprints: %v", fingerprint.BuiltinFingerprints())
var applied []string
var skipped []string
var detectedFingerprints []string
var skippedFingerprints []string
for _, name := range fingerprint.BuiltinFingerprints() {
// Skip modules that are not in the whitelist if it is enabled.
if _, ok := whitelist[name]; whitelistEnabled && !ok {
skipped = append(skipped, name)
skippedFingerprints = append(skippedFingerprints, name)
continue
}
// Skip modules that are in the blacklist
if _, ok := blacklist[name]; ok {
skipped = append(skipped, name)
skippedFingerprints = append(skippedFingerprints, name)
continue
}
f, err := fingerprint.NewFingerprint(name, c.logger)
if err != nil {
return err
}
c.configLock.Lock()
applies, err := f.Fingerprint(c.config, c.config.Node)
request := &cstructs.FingerprintRequest{Config: c.config, Node: c.config.Node}
var response cstructs.FingerprintResponse
err = f.Fingerprint(request, &response)
c.configLock.Unlock()
if err != nil {
return err
}
if applies {
applied = append(applied, name)
// log the fingerprinters which have been applied
if response.Detected {
detectedFingerprints = append(detectedFingerprints, name)
}
// add the diff found from each fingerprinter
c.updateNodeFromFingerprint(&response)
p, period := f.Periodic()
if p {
// TODO: If more periodic fingerprinters are added, then
......@@ -966,9 +976,10 @@ func (c *Client) fingerprint() error {
go c.fingerprintPeriodic(name, f, period)
}
}
c.logger.Printf("[DEBUG] client: applied fingerprints %v", applied)
if len(skipped) != 0 {
c.logger.Printf("[DEBUG] client: fingerprint modules skipped due to white/blacklist: %v", skipped)
c.logger.Printf("[DEBUG] client: detected fingerprints %v", detectedFingerprints)
if len(skippedFingerprints) != 0 {
c.logger.Printf("[DEBUG] client: fingerprint modules skipped due to white/blacklist: %v", skippedFingerprints)
}
return nil
}
......@@ -980,10 +991,17 @@ func (c *Client) fingerprintPeriodic(name string, f fingerprint.Fingerprint, d t
select {
case <-time.After(d):
c.configLock.Lock()
if _, err := f.Fingerprint(c.config, c.config.Node); err != nil {
request := &cstructs.FingerprintRequest{Config: c.config, Node: c.config.Node}
var response cstructs.FingerprintResponse
err := f.Fingerprint(request, &response)
c.configLock.Unlock()
if err != nil {
c.logger.Printf("[DEBUG] client: periodic fingerprinting for %v failed: %v", name, err)
} else {
c.updateNodeFromFingerprint(&response)
}
c.configLock.Unlock()
case <-c.shutdownCh:
return
}
......@@ -997,19 +1015,19 @@ func (c *Client) setupDrivers() error {
whitelistEnabled := len(whitelist) > 0
blacklist := c.config.ReadStringListToMap("driver.blacklist")
var avail []string
var skipped []string
var detectedDrivers []string
var skippedDrivers []string
driverCtx := driver.NewDriverContext("", "", c.config, c.config.Node, c.logger, nil)
for name := range driver.BuiltinDrivers {
// Skip fingerprinting drivers that are not in the whitelist if it is
// enabled.
if _, ok := whitelist[name]; whitelistEnabled && !ok {
skipped = append(skipped, name)
skippedDrivers = append(skippedDrivers, name)
continue
}
// Skip fingerprinting drivers that are in the blacklist
if _, ok := blacklist[name]; ok {
skipped = append(skipped, name)
skippedDrivers = append(skippedDrivers, name)
continue
}
......@@ -1017,16 +1035,23 @@ func (c *Client) setupDrivers() error {
if err != nil {
return err
}
c.configLock.Lock()
applies, err := d.Fingerprint(c.config, c.config.Node)
request := &cstructs.FingerprintRequest{Config: c.config, Node: c.config.Node}
var response cstructs.FingerprintResponse
err = d.Fingerprint(request, &response)
c.configLock.Unlock()
if err != nil {
return err
}
if applies {
avail = append(avail, name)
// log the fingerprinters which have been applied
if response.Detected {
detectedDrivers = append(detectedDrivers, name)
}
c.updateNodeFromFingerprint(&response)
p, period := d.Periodic()
if p {
go c.fingerprintPeriodic(name, d, period)
......@@ -1034,15 +1059,42 @@ func (c *Client) setupDrivers() error {
}
c.logger.Printf("[DEBUG] client: available drivers %v", avail)
if len(skipped) != 0 {
c.logger.Printf("[DEBUG] client: drivers skipped due to white/blacklist: %v", skipped)
c.logger.Printf("[DEBUG] client: detected drivers %v", detectedDrivers)
if len(skippedDrivers) > 0 {
c.logger.Printf("[DEBUG] client: drivers skipped due to white/blacklist: %v", skippedDrivers)
}
return nil
}
// updateNodeFromFingerprint updates the node with the result of
// fingerprinting the node from the diff that was created
func (c *Client) updateNodeFromFingerprint(response *cstructs.FingerprintResponse) {
c.configLock.Lock()
defer c.configLock.Unlock()
for name, val := range response.Attributes {
if val == "" {
delete(c.config.Node.Attributes, name)
} else {
c.config.Node.Attributes[name] = val
}
}
// update node links and resources from the diff created from
// fingerprinting
for name, val := range response.Links {
if val == "" {
delete(c.config.Node.Links, name)
} else {
c.config.Node.Links[name] = val
}
}
if response.Resources != nil {
c.config.Node.Resources.Merge(response.Resources)
}
}
// retryIntv calculates a retry interval value given the base
func (c *Client) retryIntv(base time.Duration) time.Duration {
if c.config.DevMode {
......
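
The client.go changes above move fingerprinting to a request/response model: each fingerprinter fills a cstructs.FingerprintResponse, and updateNodeFromFingerprint merges it into the node, treating an empty attribute value as a deletion. The FingerprintRequest/FingerprintResponse definitions live in client/structs and are not part of this diff; the sketch below only approximates the shape implied by the AddAttribute/RemoveAttribute/Detected calls used here, so the exact fields and helpers are assumptions.

```go
package main

import "fmt"

// FingerprintResponse approximates the response type the fingerprinters and
// drivers above fill in. The real type (in client/structs) also carries Links
// and Resources; only the parts exercised in this diff are sketched here.
type FingerprintResponse struct {
	Attributes map[string]string
	Detected   bool // whether the fingerprinter recognized its target at all
}

// AddAttribute records a key/value pair to merge into the node.
func (f *FingerprintResponse) AddAttribute(name, value string) {
	if f.Attributes == nil {
		f.Attributes = make(map[string]string)
	}
	f.Attributes[name] = value
}

// RemoveAttribute records an empty value; updateNodeFromFingerprint treats an
// empty value as "delete this attribute from the node".
func (f *FingerprintResponse) RemoveAttribute(name string) {
	if f.Attributes == nil {
		f.Attributes = make(map[string]string)
	}
	f.Attributes[name] = ""
}

func main() {
	var resp FingerprintResponse
	resp.AddAttribute("driver.docker", "1")
	resp.Detected = true
	fmt.Println(resp.Attributes, resp.Detected) // map[driver.docker:1] true

	resp.RemoveAttribute("driver.docker")
	fmt.Println(resp.Attributes) // map[driver.docker:] -> attribute cleared on merge
}
```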
......@@ -14,6 +14,7 @@ import (
"github.com/hashicorp/consul/lib/freeport"
memdb "github.com/hashicorp/go-memdb"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver"
"github.com/hashicorp/nomad/client/fingerprint"
"github.com/hashicorp/nomad/command/agent/consul"
"github.com/hashicorp/nomad/helper"
......@@ -252,6 +253,48 @@ func TestClient_HasNodeChanged(t *testing.T) {
}
}
func TestClient_Fingerprint_Periodic(t *testing.T) {
if _, ok := driver.BuiltinDrivers["mock_driver"]; !ok {
t.Skip(`test requires mock_driver; run with "-tags nomad_test"`)
}
t.Parallel()
// these constants are only defined when nomad_test is enabled, so these fail
// our linter without explicit disabling.
c1 := testClient(t, func(c *config.Config) {
c.Options = map[string]string{
driver.ShutdownPeriodicAfter: "true", // nolint: varcheck
driver.ShutdownPeriodicDuration: "3", // nolint: varcheck
}
})
defer c1.Shutdown()
node := c1.config.Node
mockDriverName := "driver.mock_driver"
// Ensure the mock driver is registered on the client
testutil.WaitForResult(func() (bool, error) {
mockDriverStatus := node.Attributes[mockDriverName]
if mockDriverStatus == "" {
return false, fmt.Errorf("mock driver attribute should be set on the client")
}
return true, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
// Ensure that the client fingerprinter eventually removes this attribute
testutil.WaitForResult(func() (bool, error) {
mockDriverStatus := node.Attributes[mockDriverName]
if mockDriverStatus != "" {
return false, fmt.Errorf("mock driver attribute should not be set on the client")
}
return true, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
}
func TestClient_Fingerprint_InWhitelist(t *testing.T) {
t.Parallel()
c := testClient(t, func(c *config.Config) {
......
......@@ -26,7 +26,6 @@ import (
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-plugin"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver/env"
"github.com/hashicorp/nomad/client/driver/executor"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
......@@ -180,51 +179,52 @@ type DockerVolumeDriverConfig struct {
// DockerDriverConfig defines the user specified config block in a jobspec
type DockerDriverConfig struct {
ImageName string `mapstructure:"image"` // Container's Image Name
LoadImage string `mapstructure:"load"` // LoadImage is a path to an image archive file
Command string `mapstructure:"command"` // The Command to run when the container starts up
Args []string `mapstructure:"args"` // The arguments to the Command
Entrypoint []string `mapstructure:"entrypoint"` // Override the containers entrypoint
IpcMode string `mapstructure:"ipc_mode"` // The IPC mode of the container - host and none
NetworkMode string `mapstructure:"network_mode"` // The network mode of the container - host, nat and none
NetworkAliases []string `mapstructure:"network_aliases"` // The network-scoped alias for the container
IPv4Address string `mapstructure:"ipv4_address"` // The container ipv4 address
IPv6Address string `mapstructure:"ipv6_address"` // the container ipv6 address
PidMode string `mapstructure:"pid_mode"` // The PID mode of the container - host and none
UTSMode string `mapstructure:"uts_mode"` // The UTS mode of the container - host and none
UsernsMode string `mapstructure:"userns_mode"` // The User namespace mode of the container - host and none
PortMapRaw []map[string]string `mapstructure:"port_map"` //
PortMap map[string]int `mapstructure:"-"` // A map of host port labels and the ports exposed on the container
Privileged bool `mapstructure:"privileged"` // Flag to run the container in privileged mode
SysctlRaw []map[string]string `mapstructure:"sysctl"` //
Sysctl map[string]string `mapstructure:"-"` // The sysctl custom configurations
UlimitRaw []map[string]string `mapstructure:"ulimit"` //
Ulimit []docker.ULimit `mapstructure:"-"` // The ulimit custom configurations
DNSServers []string `mapstructure:"dns_servers"` // DNS Server for containers
DNSSearchDomains []string `mapstructure:"dns_search_domains"` // DNS Search domains for containers
DNSOptions []string `mapstructure:"dns_options"` // DNS Options
ExtraHosts []string `mapstructure:"extra_hosts"` // Add host to /etc/hosts (host:IP)
Hostname string `mapstructure:"hostname"` // Hostname for containers
LabelsRaw []map[string]string `mapstructure:"labels"` //
Labels map[string]string `mapstructure:"-"` // Labels to set when the container starts up
Auth []DockerDriverAuth `mapstructure:"auth"` // Authentication credentials for a private Docker registry
AuthSoftFail bool `mapstructure:"auth_soft_fail"` // Soft-fail if auth creds are provided but fail
TTY bool `mapstructure:"tty"` // Allocate a Pseudo-TTY
Interactive bool `mapstructure:"interactive"` // Keep STDIN open even if not attached
ShmSize int64 `mapstructure:"shm_size"` // Size of /dev/shm of the container in bytes
WorkDir string `mapstructure:"work_dir"` // Working directory inside the container
Logging []DockerLoggingOpts `mapstructure:"logging"` // Logging options for syslog server
Volumes []string `mapstructure:"volumes"` // Host-Volumes to mount in, syntax: /path/to/host/directory:/destination/path/in/container
Mounts []DockerMount `mapstructure:"mounts"` // Docker volumes to mount
VolumeDriver string `mapstructure:"volume_driver"` // Docker volume driver used for the container's volumes
ForcePull bool `mapstructure:"force_pull"` // Always force pull before running image, useful if your tags are mutable
MacAddress string `mapstructure:"mac_address"` // Pin mac address to container
SecurityOpt []string `mapstructure:"security_opt"` // Flags to pass directly to security-opt
Devices []DockerDevice `mapstructure:"devices"` // To allow mounting USB or other serial control devices
CapAdd []string `mapstructure:"cap_add"` // Flags to pass directly to cap-add
CapDrop []string `mapstructure:"cap_drop"` // Flags to pass directly to cap-drop
ReadonlyRootfs bool `mapstructure:"readonly_rootfs"` // Mount the container’s root filesystem as read only
CPUHardLimit bool `mapstructure:"cpu_hard_limit"` // Enforce CPU hard limit.
ImageName string `mapstructure:"image"` // Container's Image Name
LoadImage string `mapstructure:"load"` // LoadImage is a path to an image archive file
Command string `mapstructure:"command"` // The Command to run when the container starts up
Args []string `mapstructure:"args"` // The arguments to the Command
Entrypoint []string `mapstructure:"entrypoint"` // Override the containers entrypoint
IpcMode string `mapstructure:"ipc_mode"` // The IPC mode of the container - host and none
NetworkMode string `mapstructure:"network_mode"` // The network mode of the container - host, nat and none
NetworkAliases []string `mapstructure:"network_aliases"` // The network-scoped alias for the container
IPv4Address string `mapstructure:"ipv4_address"` // The container ipv4 address
IPv6Address string `mapstructure:"ipv6_address"` // the container ipv6 address
PidMode string `mapstructure:"pid_mode"` // The PID mode of the container - host and none
UTSMode string `mapstructure:"uts_mode"` // The UTS mode of the container - host and none
UsernsMode string `mapstructure:"userns_mode"` // The User namespace mode of the container - host and none
PortMapRaw []map[string]string `mapstructure:"port_map"` //
PortMap map[string]int `mapstructure:"-"` // A map of host port labels and the ports exposed on the container
Privileged bool `mapstructure:"privileged"` // Flag to run the container in privileged mode
SysctlRaw []map[string]string `mapstructure:"sysctl"` //
Sysctl map[string]string `mapstructure:"-"` // The sysctl custom configurations
UlimitRaw []map[string]string `mapstructure:"ulimit"` //
Ulimit []docker.ULimit `mapstructure:"-"` // The ulimit custom configurations
DNSServers []string `mapstructure:"dns_servers"` // DNS Server for containers
DNSSearchDomains []string `mapstructure:"dns_search_domains"` // DNS Search domains for containers
DNSOptions []string `mapstructure:"dns_options"` // DNS Options
ExtraHosts []string `mapstructure:"extra_hosts"` // Add host to /etc/hosts (host:IP)
Hostname string `mapstructure:"hostname"` // Hostname for containers
LabelsRaw []map[string]string `mapstructure:"labels"` //
Labels map[string]string `mapstructure:"-"` // Labels to set when the container starts up
Auth []DockerDriverAuth `mapstructure:"auth"` // Authentication credentials for a private Docker registry
AuthSoftFail bool `mapstructure:"auth_soft_fail"` // Soft-fail if auth creds are provided but fail
TTY bool `mapstructure:"tty"` // Allocate a Pseudo-TTY
Interactive bool `mapstructure:"interactive"` // Keep STDIN open even if not attached
ShmSize int64 `mapstructure:"shm_size"` // Size of /dev/shm of the container in bytes
WorkDir string `mapstructure:"work_dir"` // Working directory inside the container
Logging []DockerLoggingOpts `mapstructure:"logging"` // Logging options for syslog server
Volumes []string `mapstructure:"volumes"` // Host-Volumes to mount in, syntax: /path/to/host/directory:/destination/path/in/container
Mounts []DockerMount `mapstructure:"mounts"` // Docker volumes to mount
VolumeDriver string `mapstructure:"volume_driver"` // Docker volume driver used for the container's volumes
ForcePull bool `mapstructure:"force_pull"` // Always force pull before running image, useful if your tags are mutable
MacAddress string `mapstructure:"mac_address"` // Pin mac address to container
SecurityOpt []string `mapstructure:"security_opt"` // Flags to pass directly to security-opt
Devices []DockerDevice `mapstructure:"devices"` // To allow mounting USB or other serial control devices
CapAdd []string `mapstructure:"cap_add"` // Flags to pass directly to cap-add
CapDrop []string `mapstructure:"cap_drop"` // Flags to pass directly to cap-drop
ReadonlyRootfs bool `mapstructure:"readonly_rootfs"` // Mount the container’s root filesystem as read only
AdvertiseIPv6Address bool `mapstructure:"advertise_ipv6_address"` // Flag to use the GlobalIPv6Address from the container as the detected IP
CPUHardLimit bool `mapstructure:"cpu_hard_limit"` // Enforce CPU hard limit.
}
func sliceMergeUlimit(ulimitsRaw map[string]string) ([]docker.ULimit, error) {
......@@ -485,16 +485,15 @@ func NewDockerDriver(ctx *DriverContext) Driver {
return &DockerDriver{DriverContext: *ctx}
}
func (d *DockerDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
// Initialize docker API clients
func (d *DockerDriver) Fingerprint(req *cstructs.FingerprintRequest, resp *cstructs.FingerprintResponse) error {
client, _, err := d.dockerClients()
if err != nil {
if d.fingerprintSuccess == nil || *d.fingerprintSuccess {
d.logger.Printf("[INFO] driver.docker: failed to initialize client: %s", err)
}
delete(node.Attributes, dockerDriverAttr)
d.fingerprintSuccess = helper.BoolToPtr(false)
return false, nil
resp.RemoveAttribute(dockerDriverAttr)
return nil
}
// This is the first operation taken on the client so we'll try to
......@@ -502,25 +501,26 @@ func (d *DockerDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool
// Docker isn't available so we'll simply disable the docker driver.
env, err := client.Version()
if err != nil {
delete(node.Attributes, dockerDriverAttr)
if d.fingerprintSuccess == nil || *d.fingerprintSuccess {
d.logger.Printf("[DEBUG] driver.docker: could not connect to docker daemon at %s: %s", client.Endpoint(), err)
}
d.fingerprintSuccess = helper.BoolToPtr(false)
return false, nil
resp.RemoveAttribute(dockerDriverAttr)
return nil
}
node.Attributes[dockerDriverAttr] = "1"
node.Attributes["driver.docker.version"] = env.Get("Version")
resp.AddAttribute(dockerDriverAttr, "1")
resp.AddAttribute("driver.docker.version", env.Get("Version"))
resp.Detected = true
privileged := d.config.ReadBoolDefault(dockerPrivilegedConfigOption, false)
if privileged {
node.Attributes[dockerPrivilegedConfigOption] = "1"
resp.AddAttribute(dockerPrivilegedConfigOption, "1")
}
// Advertise if this node supports Docker volumes
if d.config.ReadBoolDefault(dockerVolumesConfigOption, dockerVolumesConfigDefault) {
node.Attributes["driver."+dockerVolumesConfigOption] = "1"
resp.AddAttribute("driver."+dockerVolumesConfigOption, "1")
}
// Detect bridge IP address - #2785
......@@ -538,7 +538,7 @@ func (d *DockerDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool
}
if n.IPAM.Config[0].Gateway != "" {
node.Attributes["driver.docker.bridge_ip"] = n.IPAM.Config[0].Gateway
resp.AddAttribute("driver.docker.bridge_ip", n.IPAM.Config[0].Gateway)
} else if d.fingerprintSuccess == nil {
// Docker 17.09.0-ce dropped the Gateway IP from the bridge network
// See https://github.com/moby/moby/issues/32648
......@@ -549,7 +549,7 @@ func (d *DockerDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool
}
d.fingerprintSuccess = helper.BoolToPtr(true)
return true, nil
return nil
}
// Validate is used to validate the driver configuration
......@@ -682,6 +682,9 @@ func (d *DockerDriver) Validate(config map[string]interface{}) error {
"readonly_rootfs": {
Type: fields.TypeBool,
},
"advertise_ipv6_address": {
Type: fields.TypeBool,
},
"cpu_hard_limit": {
Type: fields.TypeBool,
},
......@@ -895,6 +898,10 @@ func (d *DockerDriver) detectIP(c *docker.Container) (string, bool) {
}
ip = net.IPAddress
if d.driverConfig.AdvertiseIPv6Address {
ip = net.GlobalIPv6Address
auto = true
}
ipName = name
// Don't auto-advertise IPs for default networks (bridge on
......
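
In addition to the request/response conversion, the Docker driver gains an advertise_ipv6_address option: when set, detectIP advertises the container's GlobalIPv6Address and forces auto-advertise on. A small illustrative sketch of that selection rule follows; containerNet and pickAdvertiseIP are hypothetical helpers, not the driver's API.

```go
package main

import "fmt"

// containerNet is a simplified stand-in for the per-network settings the
// Docker API returns (the driver reads net.IPAddress and net.GlobalIPv6Address).
type containerNet struct {
	IPAddress         string
	GlobalIPv6Address string
}

// pickAdvertiseIP mirrors the detectIP change above: the IPv4 address is used
// by default, but with advertise_ipv6_address the container's global IPv6
// address is advertised and auto-advertise is forced on.
func pickAdvertiseIP(n containerNet, advertiseIPv6 bool) (ip string, auto bool) {
	ip = n.IPAddress
	if advertiseIPv6 {
		ip = n.GlobalIPv6Address
		auto = true
	}
	return ip, auto
}

func main() {
	n := containerNet{IPAddress: "172.17.0.2", GlobalIPv6Address: "2001:db8:1::242:ac11:2"}
	fmt.Println(pickAdvertiseIP(n, false)) // 172.17.0.2 false
	fmt.Println(pickAdvertiseIP(n, true))  // 2001:db8:1::242:ac11:2 true
}
```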
......@@ -171,17 +171,31 @@ func TestDockerDriver_Fingerprint(t *testing.T) {
node := &structs.Node{
Attributes: make(map[string]string),
}
apply, err := d.Fingerprint(&config.Config{}, node)
request := &cstructs.FingerprintRequest{Config: &config.Config{}, Node: node}
var response cstructs.FingerprintResponse
err := d.Fingerprint(request, &response)
if err != nil {
t.Fatalf("err: %v", err)
}
if apply != testutil.DockerIsConnected(t) {
attributes := response.Attributes
if testutil.DockerIsConnected(t) && attributes["driver.docker"] == "" {
t.Fatalf("Fingerprinter should detect when docker is available")
}
if node.Attributes["driver.docker"] != "1" {
if attributes["driver.docker"] != "1" {
t.Log("Docker daemon not available. The remainder of the docker tests will be skipped.")
} else {
// if docker is available, make sure that the response is tagged as
// applicable
if !response.Detected {
t.Fatalf("expected response to be applicable")
}
}
t.Logf("Found docker version %s", node.Attributes["driver.docker.version"])
t.Logf("Found docker version %s", attributes["driver.docker.version"])
}
// TestDockerDriver_Fingerprint_Bridge asserts that if Docker is running we set
......@@ -210,18 +224,31 @@ func TestDockerDriver_Fingerprint_Bridge(t *testing.T) {
conf := testConfig(t)
conf.Node = mock.Node()
dd := NewDockerDriver(NewDriverContext("", "", conf, conf.Node, testLogger(), nil))
ok, err := dd.Fingerprint(conf, conf.Node)
request := &cstructs.FingerprintRequest{Config: conf, Node: conf.Node}
var response cstructs.FingerprintResponse
err = dd.Fingerprint(request, &response)
if err != nil {
t.Fatalf("error fingerprinting docker: %v", err)
}
if !ok {
if !response.Detected {
t.Fatalf("expected response to be applicable")
}
attributes := response.Attributes
if attributes == nil {
t.Fatalf("expected attributes to be set")
}
if attributes["driver.docker"] == "" {
t.Fatalf("expected Docker to be enabled but false was returned")
}
if found := conf.Node.Attributes["driver.docker.bridge_ip"]; found != expectedAddr {
if found := attributes["driver.docker.bridge_ip"]; found != expectedAddr {
t.Fatalf("expected bridge ip %q but found: %q", expectedAddr, found)
}
t.Logf("docker bridge ip: %q", conf.Node.Attributes["driver.docker.bridge_ip"])
t.Logf("docker bridge ip: %q", attributes["driver.docker.bridge_ip"])
}
func TestDockerDriver_StartOpen_Wait(t *testing.T) {
......@@ -2269,3 +2296,85 @@ func TestDockerDriver_ReadonlyRootfs(t *testing.T) {
assert.True(t, container.HostConfig.ReadonlyRootfs, "ReadonlyRootfs option not set")
}
func TestDockerDriver_AdvertiseIPv6Address(t *testing.T) {
if !tu.IsTravis() {
t.Parallel()
}
if !testutil.DockerIsConnected(t) {
t.Skip("Docker not connected")
}
expectedPrefix := "2001:db8:1::242:ac11"
expectedAdvertise := true
task := &structs.Task{
Name: "nc-demo",
Driver: "docker",
Config: map[string]interface{}{
"image": "busybox",
"load": "busybox.tar",
"command": "/bin/nc",
"args": []string{"-l", "127.0.0.1", "-p", "0"},
"advertise_ipv6_address": expectedAdvertise,
},
Resources: &structs.Resources{
MemoryMB: 256,
CPU: 512,
},
LogConfig: &structs.LogConfig{
MaxFiles: 10,
MaxFileSizeMB: 10,
},
}
client := newTestDockerClient(t)
// Make sure IPv6 is enabled
net, err := client.NetworkInfo("bridge")
if err != nil {
t.Skip("error retrieving bridge network information, skipping")
}
if net == nil || !net.EnableIPv6 {
t.Skip("IPv6 not enabled on bridge network, skipping")
}
tctx := testDockerDriverContexts(t, task)
driver := NewDockerDriver(tctx.DriverCtx)
copyImage(t, tctx.ExecCtx.TaskDir, "busybox.tar")
defer tctx.AllocDir.Destroy()
presp, err := driver.Prestart(tctx.ExecCtx, task)
defer driver.Cleanup(tctx.ExecCtx, presp.CreatedResources)
if err != nil {
t.Fatalf("Error in prestart: %v", err)
}
sresp, err := driver.Start(tctx.ExecCtx, task)
if err != nil {
t.Fatalf("Error in start: %v", err)
}
if sresp.Handle == nil {
t.Fatalf("handle is nil\nStack\n%s", debug.Stack())
}
assert.Equal(t, expectedAdvertise, sresp.Network.AutoAdvertise, "Wrong autoadvertise. Expect: %v, got: %v", expectedAdvertise, sresp.Network.AutoAdvertise)
if !strings.HasPrefix(sresp.Network.IP, expectedPrefix) {
t.Fatalf("Got IP address %q want ip address with prefix %q", sresp.Network.IP, expectedPrefix)
}
defer sresp.Handle.Kill()
handle := sresp.Handle.(*DockerHandle)
waitForExist(t, client, handle)
container, err := client.InspectContainer(handle.ContainerID())
if err != nil {
t.Fatalf("Error inspecting container: %v", err)
}
if !strings.HasPrefix(container.NetworkSettings.GlobalIPv6Address, expectedPrefix) {
t.Fatalf("Got GlobalIPv6address %s want GlobalIPv6address with prefix %s", expectedPrefix, container.NetworkSettings.GlobalIPv6Address)
}
}
......@@ -3,12 +3,12 @@
package driver
import (
"github.com/hashicorp/nomad/client/config"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/structs"
)
func (d *ExecDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
func (d *ExecDriver) Fingerprint(req *cstructs.FingerprintRequest, resp *cstructs.FingerprintResponse) error {
d.fingerprintSuccess = helper.BoolToPtr(false)
return false, nil
resp.Detected = true
return nil
}
package driver
import (
"github.com/hashicorp/nomad/client/config"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/structs"
"golang.org/x/sys/unix"
)
......@@ -13,28 +12,31 @@ const (
execDriverAttr = "driver.exec"
)
func (d *ExecDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
func (d *ExecDriver) Fingerprint(req *cstructs.FingerprintRequest, resp *cstructs.FingerprintResponse) error {
// The exec driver will be detected in every case
resp.Detected = true
// Only enable if cgroups are available and we are root
if !cgroupsMounted(node) {
if !cgroupsMounted(req.Node) {
if d.fingerprintSuccess == nil || *d.fingerprintSuccess {
d.logger.Printf("[DEBUG] driver.exec: cgroups unavailable, disabling")
d.logger.Printf("[INFO] driver.exec: cgroups unavailable, disabling")
}
d.fingerprintSuccess = helper.BoolToPtr(false)
delete(node.Attributes, execDriverAttr)
return false, nil
resp.RemoveAttribute(execDriverAttr)
return nil
} else if unix.Geteuid() != 0 {
if d.fingerprintSuccess == nil || *d.fingerprintSuccess {
d.logger.Printf("[DEBUG] driver.exec: must run as root user, disabling")
}
delete(node.Attributes, execDriverAttr)
d.fingerprintSuccess = helper.BoolToPtr(false)
return false, nil
resp.RemoveAttribute(execDriverAttr)
return nil
}
if d.fingerprintSuccess == nil || !*d.fingerprintSuccess {
d.logger.Printf("[DEBUG] driver.exec: exec driver is enabled")
}
node.Attributes[execDriverAttr] = "1"
resp.AddAttribute(execDriverAttr, "1")
d.fingerprintSuccess = helper.BoolToPtr(true)
return true, nil
return nil
}
......@@ -13,6 +13,7 @@ import (
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver/env"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/testutil"
......@@ -37,14 +38,19 @@ func TestExecDriver_Fingerprint(t *testing.T) {
"unique.cgroup.mountpoint": "/sys/fs/cgroup",
},
}
apply, err := d.Fingerprint(&config.Config{}, node)
request := &cstructs.FingerprintRequest{Config: &config.Config{}, Node: node}
var response cstructs.FingerprintResponse
err := d.Fingerprint(request, &response)
if err != nil {
t.Fatalf("err: %v", err)
}
if !apply {
t.Fatalf("should apply")
if !response.Detected {
t.Fatalf("expected response to be applicable")
}
if node.Attributes["driver.exec"] == "" {
if response.Attributes == nil || response.Attributes["driver.exec"] == "" {
t.Fatalf("missing driver")
}
}
......
......@@ -18,7 +18,6 @@ import (
"github.com/hashicorp/go-plugin"
"github.com/mitchellh/mapstructure"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver/env"
"github.com/hashicorp/nomad/client/driver/executor"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
......@@ -112,15 +111,16 @@ func (d *JavaDriver) Abilities() DriverAbilities {
}
}
func (d *JavaDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
func (d *JavaDriver) Fingerprint(req *cstructs.FingerprintRequest, resp *cstructs.FingerprintResponse) error {
// Only enable if we are root and cgroups are mounted when running on linux systems.
if runtime.GOOS == "linux" && (syscall.Geteuid() != 0 || !cgroupsMounted(node)) {
if runtime.GOOS == "linux" && (syscall.Geteuid() != 0 || !cgroupsMounted(req.Node)) {
if d.fingerprintSuccess == nil || *d.fingerprintSuccess {
d.logger.Printf("[DEBUG] driver.java: root privileges and mounted cgroups required on linux, disabling")
d.logger.Printf("[INFO] driver.java: root privileges and mounted cgroups required on linux, disabling")
}
delete(node.Attributes, "driver.java")
d.fingerprintSuccess = helper.BoolToPtr(false)
return false, nil
resp.RemoveAttribute(javaDriverAttr)
resp.Detected = true
return nil
}
// Find java version
......@@ -132,9 +132,9 @@ func (d *JavaDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool,
err := cmd.Run()
if err != nil {
// assume Java wasn't found
delete(node.Attributes, javaDriverAttr)
d.fingerprintSuccess = helper.BoolToPtr(false)
return false, nil
resp.RemoveAttribute(javaDriverAttr)
return nil
}
// 'java -version' returns output on Stderr typically.
......@@ -152,9 +152,9 @@ func (d *JavaDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool,
if d.fingerprintSuccess == nil || *d.fingerprintSuccess {
d.logger.Println("[WARN] driver.java: error parsing Java version information, aborting")
}
delete(node.Attributes, javaDriverAttr)
d.fingerprintSuccess = helper.BoolToPtr(false)
return false, nil
resp.RemoveAttribute(javaDriverAttr)
return nil
}
// Assume 'java -version' returns 3 lines:
......@@ -166,13 +166,14 @@ func (d *JavaDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool,
versionString := info[0]
versionString = strings.TrimPrefix(versionString, "java version ")
versionString = strings.Trim(versionString, "\"")
node.Attributes[javaDriverAttr] = "1"
node.Attributes["driver.java.version"] = versionString
node.Attributes["driver.java.runtime"] = info[1]
node.Attributes["driver.java.vm"] = info[2]
resp.AddAttribute(javaDriverAttr, "1")
resp.AddAttribute("driver.java.version", versionString)
resp.AddAttribute("driver.java.runtime", info[1])
resp.AddAttribute("driver.java.vm", info[2])
d.fingerprintSuccess = helper.BoolToPtr(true)
resp.Detected = true
return true, nil
return nil
}
func (d *JavaDriver) Prestart(*ExecContext, *structs.Task) (*PrestartResponse, error) {
......
......@@ -11,6 +11,7 @@ import (
"time"
"github.com/hashicorp/nomad/client/config"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/testutil"
"github.com/stretchr/testify/assert"
......@@ -49,14 +50,19 @@ func TestJavaDriver_Fingerprint(t *testing.T) {
"unique.cgroup.mountpoint": "/sys/fs/cgroups",
},
}
apply, err := d.Fingerprint(&config.Config{}, node)
request := &cstructs.FingerprintRequest{Config: &config.Config{}, Node: node}
var response cstructs.FingerprintResponse
err := d.Fingerprint(request, &response)
if err != nil {
t.Fatalf("err: %v", err)
}
if apply != javaLocated() {
t.Fatalf("Fingerprinter should detect Java when it is installed")
if !response.Detected {
t.Fatalf("expected response to be applicable")
}
if node.Attributes["driver.java"] != "1" {
if response.Attributes["driver.java"] != "1" && javaLocated() {
if v, ok := osJavaDriverSupport[runtime.GOOS]; v && ok {
t.Fatalf("missing java driver")
} else {
......@@ -64,7 +70,7 @@ func TestJavaDriver_Fingerprint(t *testing.T) {
}
}
for _, key := range []string{"driver.java.version", "driver.java.runtime", "driver.java.vm"} {
if node.Attributes[key] == "" {
if response.Attributes[key] == "" {
t.Fatalf("missing driver key (%s)", key)
}
}
......
......@@ -14,7 +14,6 @@ import (
"syscall"
"time"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/fingerprint"
"github.com/hashicorp/nomad/client/stats"
"github.com/hashicorp/nomad/helper/fields"
......@@ -184,24 +183,27 @@ func (d *LxcDriver) FSIsolation() cstructs.FSIsolation {
}
// Fingerprint fingerprints the lxc driver configuration
func (d *LxcDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
func (d *LxcDriver) Fingerprint(req *cstructs.FingerprintRequest, resp *cstructs.FingerprintResponse) error {
cfg := req.Config
enabled := cfg.ReadBoolDefault(lxcConfigOption, true)
if !enabled && !cfg.DevMode {
return false, nil
return nil
}
version := lxc.Version()
if version == "" {
return false, nil
return nil
}
node.Attributes["driver.lxc.version"] = version
node.Attributes["driver.lxc"] = "1"
resp.AddAttribute("driver.lxc.version", version)
resp.AddAttribute("driver.lxc", "1")
resp.Detected = true
// Advertise if this node supports lxc volumes
if d.config.ReadBoolDefault(lxcVolumesConfigOption, lxcVolumesConfigDefault) {
node.Attributes["driver."+lxcVolumesConfigOption] = "1"
resp.AddAttribute("driver."+lxcVolumesConfigOption, "1")
}
return true, nil
return nil
}
func (d *LxcDriver) Prestart(*ExecContext, *structs.Task) (*PrestartResponse, error) {
......@@ -210,9 +212,20 @@ func (d *LxcDriver) Prestart(*ExecContext, *structs.Task) (*PrestartResponse, er
// Start starts the LXC Driver
func (d *LxcDriver) Start(ctx *ExecContext, task *structs.Task) (*StartResponse, error) {
sresp, err, errCleanup := d.startWithCleanup(ctx, task)
if err != nil {
if cleanupErr := errCleanup(); cleanupErr != nil {
d.logger.Printf("[ERR] error occurred while cleaning up from error in Start: %v", cleanupErr)
}
}
return sresp, err
}
func (d *LxcDriver) startWithCleanup(ctx *ExecContext, task *structs.Task) (*StartResponse, error, func() error) {
noCleanup := func() error { return nil }
var driverConfig LxcDriverConfig
if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
return nil, err
return nil, err, noCleanup
}
lxcPath := lxc.DefaultConfigPath()
if path := d.config.Read("driver.lxc.path"); path != "" {
......@@ -222,7 +235,7 @@ func (d *LxcDriver) Start(ctx *ExecContext, task *structs.Task) (*StartResponse,
containerName := fmt.Sprintf("%s-%s", task.Name, d.DriverContext.allocID)
c, err := lxc.NewContainer(containerName, lxcPath)
if err != nil {
return nil, fmt.Errorf("unable to initialize container: %v", err)
return nil, fmt.Errorf("unable to initialize container: %v", err), noCleanup
}
var verbosity lxc.Verbosity
......@@ -232,7 +245,7 @@ func (d *LxcDriver) Start(ctx *ExecContext, task *structs.Task) (*StartResponse,
case "", "quiet":
verbosity = lxc.Quiet
default:
return nil, fmt.Errorf("lxc driver config 'verbosity' can only be either quiet or verbose")
return nil, fmt.Errorf("lxc driver config 'verbosity' can only be either quiet or verbose"), noCleanup
}
c.SetVerbosity(verbosity)
......@@ -249,7 +262,7 @@ func (d *LxcDriver) Start(ctx *ExecContext, task *structs.Task) (*StartResponse,
case "", "error":
logLevel = lxc.ERROR
default:
return nil, fmt.Errorf("lxc driver config 'log_level' can only be trace, debug, info, warn or error")
return nil, fmt.Errorf("lxc driver config 'log_level' can only be trace, debug, info, warn or error"), noCleanup
}
c.SetLogLevel(logLevel)
......@@ -267,12 +280,12 @@ func (d *LxcDriver) Start(ctx *ExecContext, task *structs.Task) (*StartResponse,
}
if err := c.Create(options); err != nil {
return nil, fmt.Errorf("unable to create container: %v", err)
return nil, fmt.Errorf("unable to create container: %v", err), noCleanup
}
// Set the network type to none
if err := c.SetConfigItem("lxc.network.type", "none"); err != nil {
return nil, fmt.Errorf("error setting network type configuration: %v", err)
return nil, fmt.Errorf("error setting network type configuration: %v", err), c.Destroy
}
// Bind mount the shared alloc dir and task local dir in the container
......@@ -290,7 +303,7 @@ func (d *LxcDriver) Start(ctx *ExecContext, task *structs.Task) (*StartResponse,
if filepath.IsAbs(paths[0]) {
if !volumesEnabled {
return nil, fmt.Errorf("absolute bind-mount volume in config but '%v' is false", lxcVolumesConfigOption)
return nil, fmt.Errorf("absolute bind-mount volume in config but '%v' is false", lxcVolumesConfigOption), c.Destroy
}
} else {
// Relative source paths are treated as relative to alloc dir
......@@ -302,21 +315,28 @@ func (d *LxcDriver) Start(ctx *ExecContext, task *structs.Task) (*StartResponse,
for _, mnt := range mounts {
if err := c.SetConfigItem("lxc.mount.entry", mnt); err != nil {
return nil, fmt.Errorf("error setting bind mount %q error: %v", mnt, err)
return nil, fmt.Errorf("error setting bind mount %q error: %v", mnt, err), c.Destroy
}
}
// Start the container
if err := c.Start(); err != nil {
return nil, fmt.Errorf("unable to start container: %v", err)
return nil, fmt.Errorf("unable to start container: %v", err), c.Destroy
}
stopAndDestroyCleanup := func() error {
if err := c.Stop(); err != nil {
return err
}
return c.Destroy()
}
// Set the resource limits
if err := c.SetMemoryLimit(lxc.ByteSize(task.Resources.MemoryMB) * lxc.MB); err != nil {
return nil, fmt.Errorf("unable to set memory limits: %v", err)
return nil, fmt.Errorf("unable to set memory limits: %v", err), stopAndDestroyCleanup
}
if err := c.SetCgroupItem("cpu.shares", strconv.Itoa(task.Resources.CPU)); err != nil {
return nil, fmt.Errorf("unable to set cpu shares: %v", err)
return nil, fmt.Errorf("unable to set cpu shares: %v", err), stopAndDestroyCleanup
}
h := lxcDriverHandle{
......@@ -335,7 +355,7 @@ func (d *LxcDriver) Start(ctx *ExecContext, task *structs.Task) (*StartResponse,
go h.run()
return &StartResponse{Handle: &h}, nil
return &StartResponse{Handle: &h}, nil, noCleanup
}
func (d *LxcDriver) Cleanup(*ExecContext, *CreatedResources) error { return nil }
......
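
The lxc.go changes split Start into startWithCleanup, which returns a cleanup function alongside the result and error so the caller can tear down partially created containers: nothing before the container exists, Destroy once it is created, and Stop followed by Destroy once it is running. A minimal, self-contained sketch of the same pattern; resource and its methods are hypothetical stand-ins, not the go-lxc API.

```go
package main

import (
	"errors"
	"fmt"
)

// resource is a hypothetical stand-in for the LXC container used above.
type resource struct{ created, started bool }

func newResource() *resource         { return &resource{} }
func (r *resource) create() error    { r.created = true; return nil }
func (r *resource) configure() error { return nil }
func (r *resource) start() error     { r.started = true; return nil }
func (r *resource) setLimits() error { return errors.New("unable to set memory limits") } // simulated failure
func (r *resource) stop() error      { r.started = false; return nil }
func (r *resource) destroy() error   { r.created = false; return nil }

// startWithCleanup mirrors the pattern in the diff: every error return is
// paired with the cleanup appropriate for how far startup got.
func startWithCleanup() (*resource, error, func() error) {
	noCleanup := func() error { return nil }

	r := newResource()
	if err := r.create(); err != nil {
		return nil, err, noCleanup // nothing to undo yet
	}
	if err := r.configure(); err != nil {
		return nil, err, r.destroy // created but not running: destroy only
	}
	if err := r.start(); err != nil {
		return nil, err, r.destroy
	}
	stopAndDestroy := func() error {
		if err := r.stop(); err != nil {
			return err
		}
		return r.destroy()
	}
	if err := r.setLimits(); err != nil {
		return nil, err, stopAndDestroy // running: stop first, then destroy
	}
	return r, nil, noCleanup
}

// Start shows the caller side: run the returned cleanup only when startup
// failed, and report if the cleanup itself fails.
func Start() (*resource, error) {
	r, err, cleanup := startWithCleanup()
	if err != nil {
		if cleanupErr := cleanup(); cleanupErr != nil {
			fmt.Println("cleanup after failed start also failed:", cleanupErr)
		}
	}
	return r, err
}

func main() {
	if _, err := Start(); err != nil {
		fmt.Println("start failed:", err)
	}
}
```

The TestLxcDriver_Start_NoVolumes change further down checks this behavior by asserting that a container created before the bind-mount error is no longer listed by lxc-ls.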
......@@ -13,6 +13,7 @@ import (
"time"
"github.com/hashicorp/nomad/client/config"
cstructs "github.com/hashicorp/nomad/client/structs"
ctestutil "github.com/hashicorp/nomad/client/testutil"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/testutil"
......@@ -38,23 +39,34 @@ func TestLxcDriver_Fingerprint(t *testing.T) {
node := &structs.Node{
Attributes: map[string]string{},
}
apply, err := d.Fingerprint(&config.Config{}, node)
if err != nil {
t.Fatalf("err: %v", err)
}
if !apply {
t.Fatalf("should apply by default")
}
apply, err = d.Fingerprint(&config.Config{Options: map[string]string{lxcConfigOption: "0"}}, node)
if err != nil {
t.Fatalf("err: %v", err)
}
if apply {
t.Fatalf("should not apply with config")
// test with an empty config
{
request := &cstructs.FingerprintRequest{Config: &config.Config{}, Node: node}
var response cstructs.FingerprintResponse
err := d.Fingerprint(request, &response)
if err != nil {
t.Fatalf("err: %v", err)
}
}
if node.Attributes["driver.lxc"] == "" {
t.Fatalf("missing driver")
// test when lxc is enabled in the config
{
conf := &config.Config{Options: map[string]string{lxcConfigOption: "1"}}
request := &cstructs.FingerprintRequest{Config: conf, Node: node}
var response cstructs.FingerprintResponse
err := d.Fingerprint(request, &response)
if err != nil {
t.Fatalf("err: %v", err)
}
if !response.Detected {
t.Fatalf("expected response to be applicable")
}
if response.Attributes["driver.lxc"] == "" {
t.Fatalf("missing driver")
}
}
}
......@@ -310,6 +322,7 @@ func TestLxcDriver_Start_NoVolumes(t *testing.T) {
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
// set lxcVolumesConfigOption to false to disallow absolute paths as the source for the bind mount
ctx.DriverCtx.config.Options = map[string]string{lxcVolumesConfigOption: "false"}
d := NewLxcDriver(ctx.DriverCtx)
......@@ -317,8 +330,19 @@ func TestLxcDriver_Start_NoVolumes(t *testing.T) {
if _, err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("prestart err: %v", err)
}
// expect the "absolute bind-mount volume in config.. " error
_, err := d.Start(ctx.ExecCtx, task)
if err == nil {
t.Fatalf("expected error in start, got nil.")
}
// Because the container was created but not started before
// the expected error, we can test that the destroy-only
// cleanup is done here.
containerName := fmt.Sprintf("%s-%s", task.Name, ctx.DriverCtx.allocID)
if err := exec.Command("bash", "-c", fmt.Sprintf("lxc-ls -1 | grep -q %s", containerName)).Run(); err == nil {
t.Fatalf("error, container '%s' is still around", containerName)
}
}
......@@ -15,13 +15,23 @@ import (
"github.com/mitchellh/mapstructure"
"github.com/hashicorp/nomad/client/config"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
"github.com/hashicorp/nomad/client/fingerprint"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/nomad/structs"
)
const (
// ShutdownPeriodicAfter is a config key that can be used during tests to
// "stop" a previously-functioning driver, allowing for testing of periodic
// drivers and fingerprinters
ShutdownPeriodicAfter = "test.shutdown_periodic_after"
// ShutdownPeriodicDuration is a config option that can be used during tests
// to "stop" a previously functioning driver after the specified duration
// (specified in seconds) for testing of periodic drivers and fingerprinters.
ShutdownPeriodicDuration = "test.shutdown_periodic_duration"
)
// Add the mock driver to the list of builtin drivers
func init() {
BuiltinDrivers["mock_driver"] = NewMockDriver
......@@ -78,14 +88,29 @@ type MockDriverConfig struct {
// MockDriver is a driver which is used for testing purposes
type MockDriver struct {
DriverContext
fingerprint.StaticFingerprinter
cleanupFailNum int
// shutdownFingerprintTime is the time up to which the driver will be up
shutdownFingerprintTime time.Time
}
// NewMockDriver is a factory method which returns a new Mock Driver
func NewMockDriver(ctx *DriverContext) Driver {
return &MockDriver{DriverContext: *ctx}
md := &MockDriver{DriverContext: *ctx}
// if the shutdown configuration options are set, start the timer here.
// This config option defaults to false
if ctx.config != nil && ctx.config.ReadBoolDefault(ShutdownPeriodicAfter, false) {
duration, err := ctx.config.ReadInt(ShutdownPeriodicDuration)
if err != nil {
errMsg := fmt.Sprintf("unable to read config option for shutdown_periodic_duration %d, got err %s", duration, err.Error())
panic(errMsg)
}
md.shutdownFingerprintTime = time.Now().Add(time.Second * time.Duration(duration))
}
return md
}
func (d *MockDriver) Abilities() DriverAbilities {
......@@ -194,9 +219,18 @@ func (m *MockDriver) Validate(map[string]interface{}) error {
}
// Fingerprint fingerprints a node and returns if MockDriver is enabled
func (m *MockDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
node.Attributes["driver.mock_driver"] = "1"
return true, nil
func (m *MockDriver) Fingerprint(req *cstructs.FingerprintRequest, resp *cstructs.FingerprintResponse) error {
switch {
// If the driver is configured to shut down after a period of time, and the
// current time is after the time which the node should shut down, simulate
// driver failure
case !m.shutdownFingerprintTime.IsZero() && time.Now().After(m.shutdownFingerprintTime):
resp.RemoveAttribute("driver.mock_driver")
default:
resp.AddAttribute("driver.mock_driver", "1")
resp.Detected = true
}
return nil
}
// MockDriverHandle is a driver handler which supervises a mock task
......@@ -339,3 +373,8 @@ func (h *mockDriverHandle) run() {
}
}
}
// When testing, poll for updates
func (m *MockDriver) Periodic() (bool, time.Duration) {
return true, 500 * time.Millisecond
}
......@@ -17,7 +17,6 @@ import (
"github.com/coreos/go-semver/semver"
plugin "github.com/hashicorp/go-plugin"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver/executor"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
"github.com/hashicorp/nomad/client/fingerprint"
......@@ -155,7 +154,7 @@ func (d *QemuDriver) FSIsolation() cstructs.FSIsolation {
return cstructs.FSIsolationImage
}
func (d *QemuDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
func (d *QemuDriver) Fingerprint(req *cstructs.FingerprintRequest, resp *cstructs.FingerprintResponse) error {
bin := "qemu-system-x86_64"
if runtime.GOOS == "windows" {
// On windows, the "qemu-system-x86_64" command does not respond to the
......@@ -164,22 +163,24 @@ func (d *QemuDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool,
}
outBytes, err := exec.Command(bin, "--version").Output()
if err != nil {
delete(node.Attributes, qemuDriverAttr)
return false, nil
// return no error, as it isn't an error to not find qemu, it just means we
// can't use it.
return nil
}
out := strings.TrimSpace(string(outBytes))
matches := reQemuVersion.FindStringSubmatch(out)
if len(matches) != 2 {
delete(node.Attributes, qemuDriverAttr)
return false, fmt.Errorf("Unable to parse Qemu version string: %#v", matches)
resp.RemoveAttribute(qemuDriverAttr)
return fmt.Errorf("Unable to parse Qemu version string: %#v", matches)
}
currentQemuVersion := matches[1]
node.Attributes[qemuDriverAttr] = "1"
node.Attributes[qemuDriverVersionAttr] = currentQemuVersion
resp.AddAttribute(qemuDriverAttr, "1")
resp.AddAttribute(qemuDriverVersionAttr, currentQemuVersion)
resp.Detected = true
return true, nil
return nil
}
func (d *QemuDriver) Prestart(_ *ExecContext, task *structs.Task) (*PrestartResponse, error) {
......@@ -246,7 +247,7 @@ func (d *QemuDriver) Start(ctx *ExecContext, task *structs.Task) (*StartResponse
}
// This socket will be used to manage the virtual machine (for example,
// to perform graceful shutdowns)
monitorPath, err := d.getMonitorPath(ctx.TaskDir.Dir)
monitorPath, err = d.getMonitorPath(ctx.TaskDir.Dir)
if err != nil {
d.logger.Printf("[ERR] driver.qemu: could not get qemu monitor path: %s", err)
return nil, err
......@@ -464,6 +465,7 @@ func (h *qemuHandle) Kill() error {
// If Nomad did not send a graceful shutdown signal, issue an interrupt to
// the qemu process as a last resort
if gracefulShutdownSent == false {
h.logger.Printf("[DEBUG] driver.qemu: graceful shutdown is not enabled, sending an interrupt signal to pid: %d", h.userPid)
if err := h.executor.ShutDown(); err != nil {
if h.pluginClient.Exited() {
return nil
......
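
The Start hunk above also changes monitorPath, err := ... to monitorPath, err = ..., so the call assigns to the variables already declared earlier in the function rather than declaring new ones that shadow them inside the enclosing block (with :=, the outer monitorPath would stay empty for the code that uses it later). The surrounding declarations are not shown in this hunk, so the sketch below only illustrates the general pitfall; lookup is a hypothetical stand-in for getMonitorPath.

```go
package main

import (
	"errors"
	"fmt"
)

// lookup stands in for a call like getMonitorPath: it returns a path and an error.
func lookup(ok bool) (string, error) {
	if !ok {
		return "", errors.New("not found")
	}
	return "/tmp/monitor.sock", nil
}

func main() {
	var path string
	var err error

	{
		// := declares new variables inside this block, shadowing the outer
		// ones; the outer path is still "" afterwards.
		path, err := lookup(true)
		_, _ = path, err
	}
	fmt.Printf("with := (shadowed): %q\n", path)

	// = assigns to the existing variables, which is what the hunk above
	// changes the monitorPath assignment to.
	path, err = lookup(true)
	fmt.Printf("with = (assigned): %q err=%v\n", path, err)
}
```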
......@@ -10,6 +10,7 @@ import (
"time"
"github.com/hashicorp/nomad/client/config"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/testutil"
......@@ -34,17 +35,28 @@ func TestQemuDriver_Fingerprint(t *testing.T) {
node := &structs.Node{
Attributes: make(map[string]string),
}
apply, err := d.Fingerprint(&config.Config{}, node)
request := &cstructs.FingerprintRequest{Config: &config.Config{}, Node: node}
var response cstructs.FingerprintResponse
err := d.Fingerprint(request, &response)
if err != nil {
t.Fatalf("err: %v", err)
}
if !apply {
t.Fatalf("should apply")
if !response.Detected {
t.Fatalf("expected response to be applicable")
}
attributes := response.Attributes
if attributes == nil {
t.Fatalf("attributes should not be nil")
}
if node.Attributes[qemuDriverAttr] == "" {
if attributes[qemuDriverAttr] == "" {
t.Fatalf("Missing Qemu driver")
}
if node.Attributes[qemuDriverVersionAttr] == "" {
if attributes[qemuDriverVersionAttr] == "" {
t.Fatalf("Missing Qemu driver version")
}
}
......@@ -164,12 +176,15 @@ func TestQemuDriver_GracefulShutdown(t *testing.T) {
defer ctx.AllocDir.Destroy()
d := NewQemuDriver(ctx.DriverCtx)
apply, err := d.Fingerprint(&config.Config{}, ctx.DriverCtx.node)
request := &cstructs.FingerprintRequest{Config: &config.Config{}, Node: ctx.DriverCtx.node}
var response cstructs.FingerprintResponse
err := d.Fingerprint(request, &response)
if err != nil {
t.Fatalf("err: %v", err)
}
if !apply {
t.Fatalf("should apply")
for name, value := range response.Attributes {
ctx.DriverCtx.node.Attributes[name] = value
}
dst := ctx.ExecCtx.TaskDir.Dir
......
......@@ -11,7 +11,6 @@ import (
"github.com/hashicorp/go-plugin"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver/env"
"github.com/hashicorp/nomad/client/driver/executor"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
......@@ -92,18 +91,19 @@ func (d *RawExecDriver) FSIsolation() cstructs.FSIsolation {
return cstructs.FSIsolationNone
}
func (d *RawExecDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
func (d *RawExecDriver) Fingerprint(req *cstructs.FingerprintRequest, resp *cstructs.FingerprintResponse) error {
// Check that the user has explicitly enabled this executor.
enabled := cfg.ReadBoolDefault(rawExecConfigOption, false)
enabled := req.Config.ReadBoolDefault(rawExecConfigOption, false)
if enabled || cfg.DevMode {
if enabled || req.Config.DevMode {
d.logger.Printf("[WARN] driver.raw_exec: raw exec is enabled. Only enable if needed")
node.Attributes[rawExecDriverAttr] = "1"
return true, nil
resp.AddAttribute(rawExecDriverAttr, "1")
resp.Detected = true
return nil
}
delete(node.Attributes, rawExecDriverAttr)
return false, nil
resp.RemoveAttribute(rawExecDriverAttr)
return nil
}
func (d *RawExecDriver) Prestart(*ExecContext, *structs.Task) (*PrestartResponse, error) {
......