Commit 0ad63293 authored by Seth Hoenig's avatar Seth Hoenig

ci: purge consul/sdk in favor of nomad/sdk

This PR replaces consul/sdk with nomad/sdk.

The major difference is in the implementation of freeport, where the
new version is based on ephemeral ports instead of pre-allocated
port blocks.
parent 334c2583
Showing with 509 additions and 652 deletions
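As a rough sketch of the ephemeral-port approach mentioned above (not the actual nomad/sdk implementation; the package and function names here are invented for illustration, and only the standard library net package is assumed):

package portsketch

import "net"

// getEphemeralPort asks the kernel for an unused port by binding to
// 127.0.0.1:0 and reading back the address that was assigned.
func getEphemeralPort() (int, error) {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return 0, err
	}
	defer ln.Close()
	// The port is free once the listener closes, although another
	// process could in principle grab it before the caller binds it.
	return ln.Addr().(*net.TCPAddr).Port, nil
}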
......@@ -64,6 +64,23 @@ jobs:
run: |
make bootstrap
make dev
tests-sdk:
runs-on: ubuntu-20.04
timeout-minutes: 10
steps:
- uses: actions/checkout@v2
- uses: magnetikonline/action-golang-cache@v1
with:
go-version: ${{env.GO_VERSION}}
cache-key-suffix: -sdk
- name: Run SDK tests
env:
GOTEST_MOD: sdk
run: |
make bootstrap
make generate-all
hc-install consul ${{env.CONSUL_VERSION}}
make test-nomad-module
tests-api:
runs-on: ubuntu-20.04
timeout-minutes: 30
......
......@@ -177,7 +177,7 @@ check: ## Lint the source code
@if (git status -s | grep -q -e '\.hcl$$' -e '\.nomad$$' -e '\.tf$$'); then echo the following HCL files are out of sync; git status -s | grep -e '\.hcl$$' -e '\.nomad$$' -e '\.tf$$'; exit 1; fi
@echo "==> Check API package is isolated from rest"
@cd ./api && if go list --test -f '{{ join .Deps "\n" }}' . | grep github.com/hashicorp/nomad/ | grep -v -e /nomad/api/ -e nomad/api.test; then echo " /api package depends the ^^ above internal nomad packages. Remove such dependency"; exit 1; fi
@cd ./api && if go list --test -f '{{ join .Deps "\n" }}' . | grep github.com/hashicorp/nomad/ | grep -v -e /nomad/api -e /nomad/sdk -e nomad/api.test; then echo " /api package depends on the ^^ above internal nomad packages. Remove such dependency"; exit 1; fi
@echo "==> Checking Go mod.."
@GO111MODULE=on $(MAKE) tidy
......
......@@ -2,24 +2,38 @@ module github.com/hashicorp/nomad/api
go 1.17
replace github.com/hashicorp/nomad/sdk => ../sdk
require (
github.com/docker/go-units v0.3.3
github.com/gorilla/websocket v1.4.2
github.com/hashicorp/cronexpr v1.1.1
github.com/hashicorp/go-cleanhttp v0.5.2
github.com/hashicorp/go-rootcerts v1.0.2
github.com/hashicorp/nomad/sdk v0.0.0-00010101000000-000000000000
github.com/kr/pretty v0.3.0
github.com/mitchellh/go-testing-interface v1.14.1
github.com/mitchellh/mapstructure v1.4.3
github.com/stretchr/testify v1.7.1
)
require (
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/fatih/color v1.9.0 // indirect
github.com/gojuno/minimock/v3 v3.0.6 // indirect
github.com/hashicorp/consul/api v1.12.0 // indirect
github.com/hashicorp/go-hclog v0.12.0 // indirect
github.com/hashicorp/go-immutable-radix v1.0.0 // indirect
github.com/hashicorp/golang-lru v0.5.0 // indirect
github.com/hashicorp/serf v0.9.6 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/mattn/go-colorable v0.1.6 // indirect
github.com/mattn/go-isatty v0.0.12 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/pkg/errors v0.8.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.6.1 // indirect
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44 // indirect
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
oss.indeed.com/go/libtime v1.6.0 // indirect
)
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gojuno/minimock/v3 v3.0.4/go.mod h1:HqeqnwV8mAABn3pO5hqF+RE7gjA0jsN8cbbSogoGrzI=
github.com/gojuno/minimock/v3 v3.0.6 h1:YqHcVR10x2ZvswPK8Ix5yk+hMpspdQ3ckSpkOzyF85I=
github.com/gojuno/minimock/v3 v3.0.6/go.mod h1:v61ZjAKHr+WnEkND63nQPCZ/DTfQgJdvbCi3IuoMblY=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/consul/api v1.12.0 h1:k3y1FYv6nuKyNTqj6w9gXOx5r5CfLj/k/euUeBXj1OY=
github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0=
github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU=
github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c=
github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM=
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
github.com/hashicorp/memberlist v0.3.0 h1:8+567mCcFDnS5ADl7lrpxPMWiFCElyUEeW0gtj34fMA=
github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/serf v0.9.6 h1:uuEX1kLR6aoda1TBttmJQKDLZE1Ob7KN0NPdE7EtCDc=
github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
github.com/hexdigest/gowrap v1.1.7/go.mod h1:Z+nBFUDLa01iaNM+/jzoOA1JJ7sm51rnYFauKFUB5fs=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs=
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1 h1:4qWs8cYYH6PoEFy4dfhDFgoMGkwAcETd+MmPdCPMzUc=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44 h1:Bli41pIlzTzf3KEY06n+xnzK/BESIg2ze4Pgfh/aI8c=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
oss.indeed.com/go/libtime v1.6.0 h1:XQyczJihse/wQGo59OfPF3f4f+Sywv4R8vdGB3S9BfU=
oss.indeed.com/go/libtime v1.6.0/go.mod h1:B2sdEcuzB0zhTKkAuHy4JInKRc7Al3tME4qWam6R7mA=
package discover
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
)
// NomadExecutable checks the current executable, then $PATH, then
// $GOPATH/bin, and finally the CWD and CWD/bin, in that order. If the
// nomad binary can't be found, an error is returned.
func NomadExecutable() (string, error) {
nomadExe := "nomad"
if runtime.GOOS == "windows" {
nomadExe = "nomad.exe"
}
// Check the current executable.
bin, err := os.Executable()
if err != nil {
return "", fmt.Errorf("Failed to determine the nomad executable: %v", err)
}
if _, err := os.Stat(bin); err == nil && isNomad(bin, nomadExe) {
return bin, nil
}
// Check the $PATH
if bin, err := exec.LookPath(nomadExe); err == nil {
return bin, nil
}
// Check the $GOPATH.
bin = filepath.Join(os.Getenv("GOPATH"), "bin", nomadExe)
if _, err := os.Stat(bin); err == nil {
return bin, nil
}
// Check the CWD.
pwd, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("Could not find Nomad executable (%v): %v", nomadExe, err)
}
bin = filepath.Join(pwd, nomadExe)
if _, err := os.Stat(bin); err == nil {
return bin, nil
}
// Check CWD/bin
bin = filepath.Join(pwd, "bin", nomadExe)
if _, err := os.Stat(bin); err == nil {
return bin, nil
}
return "", fmt.Errorf("Could not find Nomad executable (%v)", nomadExe)
}
// isNomad reports whether path looks like a real nomad binary rather than
// a compiled test binary; the nomadExe argument is currently unused.
func isNomad(path, nomadExe string) bool {
if strings.HasSuffix(path, ".test") || strings.HasSuffix(path, ".test.exe") {
return false
}
return true
}
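A typical use of this helper looked like the following. This is a hypothetical test helper (nomadBinaryOrSkip is an invented name) that assumes the standard testing package and this discover package are imported; NewTestServer below used the same skip-on-error pattern:

// nomadBinaryOrSkip locates the nomad binary or skips the test when it
// cannot be found.
func nomadBinaryOrSkip(t *testing.T) string {
	path, err := discover.NomadExecutable()
	if err != nil {
		t.Skipf("nomad not found, skipping: %v", err)
	}
	return path
}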
// Package freeport provides a helper for allocating free ports across multiple
// processes on the same machine.
package freeport
import (
"fmt"
"math/rand"
"net"
"sync"
"time"
"github.com/mitchellh/go-testing-interface"
)
const (
// blockSize is the size of the allocated port block. Ports are given out
// consecutively from that block, with roll-over, for the lifetime of the
// application/test run.
blockSize = 100
// maxBlocks is the number of available port blocks.
// lowPort + maxBlocks*blockSize must not exceed 65535 (with these
// defaults: 8000 + 10*100 = 9000).
maxBlocks = 10
// lowPort is the lowest port number that should be used.
lowPort = 8000
// attempts is how many times we try to allocate a port block
// before giving up.
attempts = 10
)
var (
// firstPort is the first port of the allocated block.
firstPort int
// lockLn is the system-wide mutex for the port block.
lockLn net.Listener
// mu guards port
mu sync.Mutex
// once is used to do the initialization on the first call to retrieve free
// ports
once sync.Once
// port is the last allocated port.
port int
)
// initialize is used to initialize freeport.
func initialize() {
if lowPort+maxBlocks*blockSize > 65535 {
panic("freeport: block size too big or too many blocks requested")
}
rand.Seed(time.Now().UnixNano())
firstPort, lockLn = alloc()
}
// alloc reserves a port block for exclusive use for the lifetime of the
// application. lockLn serves as a system-wide mutex for the port block and is
// implemented as a TCP listener which is bound to the firstPort and which will
// be automatically released when the application terminates.
func alloc() (int, net.Listener) {
for i := 0; i < attempts; i++ {
block := int(rand.Int31n(int32(maxBlocks)))
firstPort := lowPort + block*blockSize
ln, err := net.ListenTCP("tcp", tcpAddr("127.0.0.1", firstPort))
if err != nil {
continue
}
// log.Printf("[DEBUG] freeport: allocated port block %d (%d-%d)", block, firstPort, firstPort+blockSize-1)
return firstPort, ln
}
panic("freeport: cannot allocate port block")
}
func tcpAddr(ip string, port int) *net.TCPAddr {
return &net.TCPAddr{IP: net.ParseIP(ip), Port: port}
}
// Get wraps the Free function and panics on any failure retrieving ports.
func Get(n int) (ports []int) {
ports, err := Free(n)
if err != nil {
panic(err)
}
return ports
}
// GetT is suitable for use when retrieving unused ports in tests. If there is
// an error retrieving free ports, the test will be failed.
func GetT(t testing.T, n int) (ports []int) {
ports, err := Free(n)
if err != nil {
t.Fatalf("Failed retrieving free port: %v", err)
}
return ports
}
// Free returns a list of free ports from the allocated port block. It is safe
// to call this method concurrently. Ports have been tested to be available on
// 127.0.0.1 TCP but there is no guarantee that they will remain free in the
// future.
func Free(n int) (ports []int, err error) {
mu.Lock()
defer mu.Unlock()
if n > blockSize-1 {
return nil, fmt.Errorf("freeport: requested %d ports, but block size %d allows at most %d", n, blockSize, blockSize-1)
}
// Reserve a port block
once.Do(initialize)
for len(ports) < n {
port++
// roll-over the port
if port < firstPort+1 || port >= firstPort+blockSize {
port = firstPort + 1
}
// if the port is in use then skip it
ln, err := net.ListenTCP("tcp", tcpAddr("127.0.0.1", port))
if err != nil {
// log.Println("[DEBUG] freeport: port already in use: ", port)
continue
}
ln.Close()
ports = append(ports, port)
}
// log.Println("[DEBUG] freeport: free ports:", ports)
return ports, nil
}
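For reference, a test against the old block-based API looked roughly like this (sketch only; the test name is invented, and imports of fmt, net, testing, and this freeport package are assumed):

// Reserve two ports from the shared block and bind the first one to
// prove it is usable.
func TestSomethingWithPorts(t *testing.T) {
	ports := freeport.GetT(t, 2)
	ln, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", ports[0]))
	if err != nil {
		t.Fatalf("port %d was not actually free: %v", ports[0], err)
	}
	defer ln.Close()
	_ = ports[1] // e.g. handed to a second listener under test
}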
......@@ -20,12 +20,12 @@ import (
"net/http"
"os"
"os/exec"
"testing"
"time"
cleanhttp "github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/nomad/api/internal/testutil/discover"
"github.com/hashicorp/nomad/api/internal/testutil/freeport"
testing "github.com/mitchellh/go-testing-interface"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/nomad/sdk"
"github.com/hashicorp/nomad/sdk/portfree"
)
// TestServerConfig is the main server configuration struct.
......@@ -102,9 +102,9 @@ type ServerConfigCallback func(c *TestServerConfig)
// defaultServerConfig returns a new TestServerConfig struct
// with the HTTP, RPC, and Serf listen ports set to free ports.
func defaultServerConfig(t testing.T) *TestServerConfig {
ports := freeport.GetT(t, 3)
func defaultServerConfig(t *testing.T) *TestServerConfig {
ports := portfree.New(t).Get(3)
logLevel := "DEBUG"
if envLogLevel := os.Getenv("NOMAD_TEST_LOG_LEVEL"); envLogLevel != "" {
logLevel = envLogLevel
......@@ -137,9 +137,9 @@ func defaultServerConfig(t testing.T) *TestServerConfig {
// TestServer is the main server wrapper struct.
type TestServer struct {
t *testing.T
cmd *exec.Cmd
Config *TestServerConfig
t testing.T
HTTPAddr string
SerfAddr string
......@@ -148,11 +148,8 @@ type TestServer struct {
// NewTestServer creates a new TestServer, and makes a call to
// an optional callback function to modify the configuration.
func NewTestServer(t testing.T, cb ServerConfigCallback) *TestServer {
path, err := discover.NomadExecutable()
if err != nil {
t.Skipf("nomad not found, skipping: %v", err)
}
func NewTestServer(t *testing.T, cb ServerConfigCallback) *TestServer {
path := sdk.Find(t, "nomad")
// Check that we are actually running nomad
vcmd := exec.Command(path, "-version")
......
......@@ -7,8 +7,6 @@ import (
"path/filepath"
"testing"
consulapi "github.com/hashicorp/consul/api"
consultest "github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/allocrunner/interfaces"
......@@ -21,20 +19,10 @@ import (
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/nomad/structs/config"
"github.com/hashicorp/nomad/sdk"
"github.com/stretchr/testify/require"
)
func getTestConsul(t *testing.T) *consultest.TestServer {
testConsul, err := consultest.NewTestServerConfigT(t, func(c *consultest.TestServerConfig) {
if !testing.Verbose() { // disable consul logging if -v not set
c.Stdout = ioutil.Discard
c.Stderr = ioutil.Discard
}
})
require.NoError(t, err, "failed to start test consul server")
return testConsul
}
func TestConnectNativeHook_Name(t *testing.T) {
ci.Parallel(t)
name := new(connectNativeHook).Name()
......@@ -311,8 +299,10 @@ func TestTaskRunner_ConnectNativeHook_Ok(t *testing.T) {
ci.Parallel(t)
testutil.RequireConsul(t)
testConsul := getTestConsul(t)
defer testConsul.Stop()
consul, ready, stop := sdk.NewConsul(t, func(c *sdk.ConsulConfig) {
// c.LogLevel = os.Getenv("NOMAD_TEST_LOG_LEVEL") todo
})
t.Cleanup(stop)
alloc := mock.Alloc()
alloc.AllocatedResources.Shared.Networks = []*structs.NetworkResource{{Mode: "host", IP: "1.1.1.1"}}
......@@ -329,23 +319,25 @@ func TestTaskRunner_ConnectNativeHook_Ok(t *testing.T) {
logger := testlog.HCLogger(t)
allocDir, cleanup := allocdir.TestAllocDir(t, logger, "ConnectNative", alloc.ID)
defer cleanup()
t.Cleanup(cleanup)
// wait for consul agent
ready()
// register group services
consulConfig := consulapi.DefaultConfig()
consulConfig.Address = testConsul.HTTPAddr
consulAPIClient, err := consulapi.NewClient(consulConfig)
require.NoError(t, err)
namespacesClient := agentconsul.NewNamespacesClient(consulAPIClient.Namespaces(), consulAPIClient.Agent())
consulClient := consul.Client(t)
namespacesClient := agentconsul.NewNamespacesClient(consulClient.Namespaces(), consulClient.Agent())
consulClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), namespacesClient, logger, true)
go consulClient.Run()
defer consulClient.Shutdown()
require.NoError(t, consulClient.RegisterWorkload(agentconsul.BuildAllocServices(mock.Node(), alloc, agentconsul.NoopRestarter())))
serviceClient := agentconsul.NewServiceClient(consulClient.Agent(), namespacesClient, logger, true)
go serviceClient.Run()
t.Cleanup(func() {
_ = serviceClient.Shutdown()
})
require.NoError(t, serviceClient.RegisterWorkload(agentconsul.BuildAllocServices(mock.Node(), alloc, agentconsul.NoopRestarter())))
// Run Connect Native hook
h := newConnectNativeHook(newConnectNativeHookConfig(alloc, &config.ConsulConfig{
Addr: consulConfig.Address,
Addr: consul.HTTP(),
}, logger))
request := &interfaces.TaskPrestartRequest{
Task: tg.Tasks[0],
......@@ -363,7 +355,7 @@ func TestTaskRunner_ConnectNativeHook_Ok(t *testing.T) {
require.True(t, response.Done)
// Assert only CONSUL_HTTP_ADDR env variable is set
require.Equal(t, map[string]string{"CONSUL_HTTP_ADDR": testConsul.HTTPAddr}, response.Env)
require.Equal(t, map[string]string{"CONSUL_HTTP_ADDR": consul.HTTP()}, response.Env)
// Assert no secrets were written
checkFilesInDir(t, request.TaskDir.SecretsDir,
......@@ -376,8 +368,8 @@ func TestTaskRunner_ConnectNativeHook_with_SI_token(t *testing.T) {
ci.Parallel(t)
testutil.RequireConsul(t)
testConsul := getTestConsul(t)
defer testConsul.Stop()
consul, ready, stop := sdk.NewConsul(t, nil)
t.Cleanup(stop)
alloc := mock.Alloc()
alloc.AllocatedResources.Shared.Networks = []*structs.NetworkResource{{Mode: "host", IP: "1.1.1.1"}}
......@@ -394,23 +386,25 @@ func TestTaskRunner_ConnectNativeHook_with_SI_token(t *testing.T) {
logger := testlog.HCLogger(t)
allocDir, cleanup := allocdir.TestAllocDir(t, logger, "ConnectNative", alloc.ID)
defer cleanup()
t.Cleanup(cleanup)
// wait for consul agent
ready()
// register group services
consulConfig := consulapi.DefaultConfig()
consulConfig.Address = testConsul.HTTPAddr
consulAPIClient, err := consulapi.NewClient(consulConfig)
require.NoError(t, err)
namespacesClient := agentconsul.NewNamespacesClient(consulAPIClient.Namespaces(), consulAPIClient.Agent())
consulClient := consul.Client(t)
namespacesClient := agentconsul.NewNamespacesClient(consulClient.Namespaces(), consulClient.Agent())
consulClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), namespacesClient, logger, true)
go consulClient.Run()
defer consulClient.Shutdown()
require.NoError(t, consulClient.RegisterWorkload(agentconsul.BuildAllocServices(mock.Node(), alloc, agentconsul.NoopRestarter())))
serviceClient := agentconsul.NewServiceClient(consulClient.Agent(), namespacesClient, logger, true)
go serviceClient.Run()
t.Cleanup(func() {
_ = serviceClient.Shutdown()
})
require.NoError(t, serviceClient.RegisterWorkload(agentconsul.BuildAllocServices(mock.Node(), alloc, agentconsul.NoopRestarter())))
// Run Connect Native hook
h := newConnectNativeHook(newConnectNativeHookConfig(alloc, &config.ConsulConfig{
Addr: consulConfig.Address,
Addr: consul.HTTP(),
}, logger))
request := &interfaces.TaskPrestartRequest{
Task: tg.Tasks[0],
......@@ -422,7 +416,7 @@ func TestTaskRunner_ConnectNativeHook_with_SI_token(t *testing.T) {
// Insert service identity token in the secrets directory
token := uuid.Generate()
siTokenFile := filepath.Join(request.TaskDir.SecretsDir, sidsTokenFile)
err = ioutil.WriteFile(siTokenFile, []byte(token), 0440)
err := ioutil.WriteFile(siTokenFile, []byte(token), 0440)
require.NoError(t, err)
response := new(interfaces.TaskPrestartResponse)
......@@ -453,8 +447,8 @@ func TestTaskRunner_ConnectNativeHook_shareTLS(t *testing.T) {
fakeCert, fakeCertDir := setupCertDirs(t)
defer cleanupCertDirs(t, fakeCert, fakeCertDir)
testConsul := getTestConsul(t)
defer testConsul.Stop()
consul, ready, stop := sdk.NewConsul(t, nil)
t.Cleanup(stop)
alloc := mock.Alloc()
alloc.AllocatedResources.Shared.Networks = []*structs.NetworkResource{{Mode: "host", IP: "1.1.1.1"}}
......@@ -471,23 +465,25 @@ func TestTaskRunner_ConnectNativeHook_shareTLS(t *testing.T) {
logger := testlog.HCLogger(t)
allocDir, cleanup := allocdir.TestAllocDir(t, logger, "ConnectNative", alloc.ID)
defer cleanup()
t.Cleanup(cleanup)
// wait for consul agent
ready()
// register group services
consulConfig := consulapi.DefaultConfig()
consulConfig.Address = testConsul.HTTPAddr
consulAPIClient, err := consulapi.NewClient(consulConfig)
require.NoError(t, err)
namespacesClient := agentconsul.NewNamespacesClient(consulAPIClient.Namespaces(), consulAPIClient.Agent())
consulClient := consul.Client(t)
namespacesClient := agentconsul.NewNamespacesClient(consulClient.Namespaces(), consulClient.Agent())
consulClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), namespacesClient, logger, true)
go consulClient.Run()
defer consulClient.Shutdown()
require.NoError(t, consulClient.RegisterWorkload(agentconsul.BuildAllocServices(mock.Node(), alloc, agentconsul.NoopRestarter())))
serviceClient := agentconsul.NewServiceClient(consulClient.Agent(), namespacesClient, logger, true)
go serviceClient.Run()
t.Cleanup(func() {
_ = serviceClient.Shutdown()
})
require.NoError(t, serviceClient.RegisterWorkload(agentconsul.BuildAllocServices(mock.Node(), alloc, agentconsul.NoopRestarter())))
// Run Connect Native hook
h := newConnectNativeHook(newConnectNativeHookConfig(alloc, &config.ConsulConfig{
Addr: consulConfig.Address,
Addr: consul.HTTP(),
// TLS config consumed by native application
ShareSSL: shareSSL,
......@@ -570,12 +566,12 @@ func TestTaskRunner_ConnectNativeHook_shareTLS_override(t *testing.T) {
ci.Parallel(t)
testutil.RequireConsul(t)
consul, ready, stop := sdk.NewConsul(t, nil)
t.Cleanup(stop)
fakeCert, fakeCertDir := setupCertDirs(t)
defer cleanupCertDirs(t, fakeCert, fakeCertDir)
testConsul := getTestConsul(t)
defer testConsul.Stop()
alloc := mock.Alloc()
alloc.AllocatedResources.Shared.Networks = []*structs.NetworkResource{{Mode: "host", IP: "1.1.1.1"}}
tg := alloc.Job.TaskGroups[0]
......@@ -591,23 +587,25 @@ func TestTaskRunner_ConnectNativeHook_shareTLS_override(t *testing.T) {
logger := testlog.HCLogger(t)
allocDir, cleanup := allocdir.TestAllocDir(t, logger, "ConnectNative", alloc.ID)
defer cleanup()
t.Cleanup(cleanup)
// wait for consul agent
ready()
// register group services
consulConfig := consulapi.DefaultConfig()
consulConfig.Address = testConsul.HTTPAddr
consulAPIClient, err := consulapi.NewClient(consulConfig)
require.NoError(t, err)
namespacesClient := agentconsul.NewNamespacesClient(consulAPIClient.Namespaces(), consulAPIClient.Agent())
consulClient := consul.Client(t)
namespacesClient := agentconsul.NewNamespacesClient(consulClient.Namespaces(), consulClient.Agent())
consulClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), namespacesClient, logger, true)
go consulClient.Run()
defer consulClient.Shutdown()
require.NoError(t, consulClient.RegisterWorkload(agentconsul.BuildAllocServices(mock.Node(), alloc, agentconsul.NoopRestarter())))
serviceClient := agentconsul.NewServiceClient(consulClient.Agent(), namespacesClient, logger, true)
go serviceClient.Run()
t.Cleanup(func() {
_ = serviceClient.Shutdown()
})
require.NoError(t, serviceClient.RegisterWorkload(agentconsul.BuildAllocServices(mock.Node(), alloc, agentconsul.NoopRestarter())))
// Run Connect Native hook
h := newConnectNativeHook(newConnectNativeHookConfig(alloc, &config.ConsulConfig{
Addr: consulConfig.Address,
Addr: consul.HTTP(),
// TLS config consumed by native application
ShareSSL: helper.BoolToPtr(true),
......
//go:build !windows
// +build !windows
// todo(shoenig): Once Connect is supported on Windows, we'll need to make this
// set of tests work there too.
package taskrunner
......@@ -30,6 +26,7 @@ import (
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/nomad/structs/config"
"github.com/hashicorp/nomad/sdk"
"github.com/stretchr/testify/require"
"golang.org/x/sys/unix"
)
......@@ -295,8 +292,8 @@ func TestEnvoyBootstrapHook_with_SI_token(t *testing.T) {
ci.Parallel(t)
testutil.RequireConsul(t)
testConsul := getTestConsul(t)
defer testConsul.Stop()
consul, ready, stop := sdk.NewConsul(t, nil)
t.Cleanup(stop)
alloc := mock.ConnectAlloc()
alloc.AllocatedResources.Shared.Networks = []*structs.NetworkResource{
......@@ -331,23 +328,25 @@ func TestEnvoyBootstrapHook_with_SI_token(t *testing.T) {
logger := testlog.HCLogger(t)
allocDir, cleanup := allocdir.TestAllocDir(t, logger, "EnvoyBootstrap", alloc.ID)
defer cleanup()
t.Cleanup(cleanup)
// wait for consul agent
ready()
// Register Group Services
consulConfig := consulapi.DefaultConfig()
consulConfig.Address = testConsul.HTTPAddr
consulAPIClient, err := consulapi.NewClient(consulConfig)
require.NoError(t, err)
namespacesClient := agentconsul.NewNamespacesClient(consulAPIClient.Namespaces(), consulAPIClient.Agent())
consulClient := consul.Client(t)
namespacesClient := agentconsul.NewNamespacesClient(consulClient.Namespaces(), consulClient.Agent())
consulClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), namespacesClient, logger, true)
go consulClient.Run()
defer consulClient.Shutdown()
require.NoError(t, consulClient.RegisterWorkload(agentconsul.BuildAllocServices(mock.Node(), alloc, agentconsul.NoopRestarter())))
serviceClient := agentconsul.NewServiceClient(consulClient.Agent(), namespacesClient, logger, true)
go serviceClient.Run()
t.Cleanup(func() {
_ = serviceClient.Shutdown()
})
require.NoError(t, serviceClient.RegisterWorkload(agentconsul.BuildAllocServices(mock.Node(), alloc, agentconsul.NoopRestarter())))
// Run Connect bootstrap Hook
h := newEnvoyBootstrapHook(newEnvoyBootstrapHookConfig(alloc, &config.ConsulConfig{
Addr: consulConfig.Address,
Addr: consul.HTTP(),
}, consulNamespace, logger))
req := &interfaces.TaskPrestartRequest{
Task: sidecarTask,
......@@ -359,7 +358,7 @@ func TestEnvoyBootstrapHook_with_SI_token(t *testing.T) {
// Insert service identity token in the secrets directory
token := uuid.Generate()
siTokenFile := filepath.Join(req.TaskDir.SecretsDir, sidsTokenFile)
err = ioutil.WriteFile(siTokenFile, []byte(token), 0440)
err := ioutil.WriteFile(siTokenFile, []byte(token), 0440)
require.NoError(t, err)
resp := &interfaces.TaskPrestartResponse{}
......@@ -376,7 +375,9 @@ func TestEnvoyBootstrapHook_with_SI_token(t *testing.T) {
}
f, err := os.Open(args.ReplaceEnv(structs.EnvoyBootstrapPath, env))
require.NoError(t, err)
defer f.Close()
t.Cleanup(func() {
_ = f.Close()
})
// Assert bootstrap configuration is valid json
var out envoyConfig
......@@ -396,8 +397,8 @@ func TestTaskRunner_EnvoyBootstrapHook_sidecar_ok(t *testing.T) {
ci.Parallel(t)
testutil.RequireConsul(t)
testConsul := getTestConsul(t)
defer testConsul.Stop()
consul, ready, stop := sdk.NewConsul(t, nil)
t.Cleanup(stop)
alloc := mock.ConnectAlloc()
alloc.AllocatedResources.Shared.Networks = []*structs.NetworkResource{
......@@ -432,23 +433,24 @@ func TestTaskRunner_EnvoyBootstrapHook_sidecar_ok(t *testing.T) {
logger := testlog.HCLogger(t)
allocDir, cleanup := allocdir.TestAllocDir(t, logger, "EnvoyBootstrap", alloc.ID)
defer cleanup()
t.Cleanup(cleanup)
// wait for consul agent
ready()
// Register Group Services
consulConfig := consulapi.DefaultConfig()
consulConfig.Address = testConsul.HTTPAddr
consulAPIClient, err := consulapi.NewClient(consulConfig)
require.NoError(t, err)
consulAPIClient := consul.Client(t)
namespacesClient := agentconsul.NewNamespacesClient(consulAPIClient.Namespaces(), consulAPIClient.Agent())
consulClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), namespacesClient, logger, true)
go consulClient.Run()
defer consulClient.Shutdown()
require.NoError(t, consulClient.RegisterWorkload(agentconsul.BuildAllocServices(mock.Node(), alloc, agentconsul.NoopRestarter())))
serviceClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), namespacesClient, logger, true)
go serviceClient.Run()
t.Cleanup(func() {
_ = serviceClient.Shutdown()
})
require.NoError(t, serviceClient.RegisterWorkload(agentconsul.BuildAllocServices(mock.Node(), alloc, agentconsul.NoopRestarter())))
// Run Connect bootstrap Hook
h := newEnvoyBootstrapHook(newEnvoyBootstrapHookConfig(alloc, &config.ConsulConfig{
Addr: consulConfig.Address,
Addr: consul.HTTP(),
}, consulNamespace, logger))
req := &interfaces.TaskPrestartRequest{
Task: sidecarTask,
......@@ -474,7 +476,9 @@ func TestTaskRunner_EnvoyBootstrapHook_sidecar_ok(t *testing.T) {
}
f, err := os.Open(args.ReplaceEnv(structs.EnvoyBootstrapPath, env))
require.NoError(t, err)
defer f.Close()
t.Cleanup(func() {
_ = f.Close()
})
// Assert bootstrap configuration is valid json
var out envoyConfig
......@@ -491,29 +495,31 @@ func TestTaskRunner_EnvoyBootstrapHook_gateway_ok(t *testing.T) {
ci.Parallel(t)
logger := testlog.HCLogger(t)
testConsul := getTestConsul(t)
defer testConsul.Stop()
consul, ready, stop := sdk.NewConsul(t, nil)
t.Cleanup(stop)
// Setup an Allocation
alloc := mock.ConnectIngressGatewayAlloc("bridge")
allocDir, cleanupDir := allocdir.TestAllocDir(t, logger, "EnvoyBootstrapIngressGateway", alloc.ID)
defer cleanupDir()
// wait for consul agent
ready()
// Get a Consul client
consulConfig := consulapi.DefaultConfig()
consulConfig.Address = testConsul.HTTPAddr
consulAPIClient, err := consulapi.NewClient(consulConfig)
require.NoError(t, err)
namespacesClient := agentconsul.NewNamespacesClient(consulAPIClient.Namespaces(), consulAPIClient.Agent())
consulClient := consul.Client(t)
namespacesClient := agentconsul.NewNamespacesClient(consulClient.Namespaces(), consulClient.Agent())
// Register Group Services
serviceClient := agentconsul.NewServiceClient(consulAPIClient.Agent(), namespacesClient, logger, true)
serviceClient := agentconsul.NewServiceClient(consulClient.Agent(), namespacesClient, logger, true)
go serviceClient.Run()
defer serviceClient.Shutdown()
t.Cleanup(func() {
_ = serviceClient.Shutdown()
})
require.NoError(t, serviceClient.RegisterWorkload(agentconsul.BuildAllocServices(mock.Node(), alloc, agentconsul.NoopRestarter())))
// Register Configuration Entry
ceClient := consulAPIClient.ConfigEntries()
ceClient := consulClient.ConfigEntries()
set, _, err := ceClient.Set(&consulapi.IngressGatewayConfigEntry{
Kind: consulapi.IngressGateway,
Name: "gateway-service", // matches job
......@@ -530,7 +536,7 @@ func TestTaskRunner_EnvoyBootstrapHook_gateway_ok(t *testing.T) {
// Run Connect bootstrap hook
h := newEnvoyBootstrapHook(newEnvoyBootstrapHookConfig(alloc, &config.ConsulConfig{
Addr: consulConfig.Address,
Addr: consul.HTTP(),
}, consulNamespace, logger))
req := &interfaces.TaskPrestartRequest{
......@@ -555,7 +561,9 @@ func TestTaskRunner_EnvoyBootstrapHook_gateway_ok(t *testing.T) {
}
f, err := os.Open(args.ReplaceEnv(structs.EnvoyBootstrapPath, env))
require.NoError(t, err)
defer f.Close()
t.Cleanup(func() {
_ = f.Close()
})
var out envoyConfig
require.NoError(t, json.NewDecoder(f).Decode(&out))
......@@ -611,8 +619,8 @@ func TestTaskRunner_EnvoyBootstrapHook_RecoverableError(t *testing.T) {
ci.Parallel(t)
testutil.RequireConsul(t)
testConsul := getTestConsul(t)
defer testConsul.Stop()
consul, ready, stop := sdk.NewConsul(t, nil)
t.Cleanup(stop)
alloc := mock.ConnectAlloc()
alloc.AllocatedResources.Shared.Networks = []*structs.NetworkResource{
......@@ -647,7 +655,10 @@ func TestTaskRunner_EnvoyBootstrapHook_RecoverableError(t *testing.T) {
logger := testlog.HCLogger(t)
allocDir, cleanup := allocdir.TestAllocDir(t, logger, "EnvoyBootstrap", alloc.ID)
defer cleanup()
t.Cleanup(cleanup)
// wait for consul agent
ready()
// Unlike the successful test above, do NOT register the group services
// yet. This should cause a recoverable error similar to if Consul was
......@@ -655,7 +666,7 @@ func TestTaskRunner_EnvoyBootstrapHook_RecoverableError(t *testing.T) {
// Run Connect bootstrap Hook
h := newEnvoyBootstrapHook(newEnvoyBootstrapHookConfig(alloc, &config.ConsulConfig{
Addr: testConsul.HTTPAddr,
Addr: consul.HTTP(),
}, consulNamespace, logger))
// Lower the allowable wait time for testing
......@@ -689,8 +700,8 @@ func TestTaskRunner_EnvoyBootstrapHook_retryTimeout(t *testing.T) {
ci.Parallel(t)
logger := testlog.HCLogger(t)
testConsul := getTestConsul(t)
defer testConsul.Stop()
consul, ready, stop := sdk.NewConsul(t, nil)
t.Cleanup(stop)
begin := time.Now()
......@@ -724,12 +735,15 @@ func TestTaskRunner_EnvoyBootstrapHook_retryTimeout(t *testing.T) {
Kind: structs.NewTaskKind(structs.ConnectProxyPrefix, "foo"),
}
tg.Tasks = append(tg.Tasks, sidecarTask)
allocDir, cleanupAlloc := allocdir.TestAllocDir(t, logger, "EnvoyBootstrapRetryTimeout", alloc.ID)
defer cleanupAlloc()
allocDir, cleanup := allocdir.TestAllocDir(t, logger, "EnvoyBootstrapRetryTimeout", alloc.ID)
t.Cleanup(cleanup)
// wait for consul agent
ready()
// Get a Consul client
consulConfig := consulapi.DefaultConfig()
consulConfig.Address = testConsul.HTTPAddr
consulConfig.Address = consul.HTTP()
// Do NOT register group services, causing the hook to retry until timeout
......@@ -805,9 +819,7 @@ func TestTaskRunner_EnvoyBootstrapHook_extractNameAndKind(t *testing.T) {
})
t.Run("normal task", func(t *testing.T) {
_, _, err := (*envoyBootstrapHook)(nil).extractNameAndKind(
structs.TaskKind(""),
)
_, _, err := (*envoyBootstrapHook)(nil).extractNameAndKind("")
require.EqualError(t, err, "envoy must be used as connect sidecar or gateway")
})
}
......
......@@ -20,7 +20,7 @@ import (
"time"
templateconfig "github.com/hashicorp/consul-template/config"
ctestutil "github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/config"
......@@ -31,6 +31,7 @@ import (
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
sconfig "github.com/hashicorp/nomad/nomad/structs/config"
"github.com/hashicorp/nomad/sdk"
"github.com/hashicorp/nomad/testutil"
"github.com/kr/pretty"
"github.com/stretchr/testify/assert"
......@@ -133,14 +134,14 @@ type testHarness struct {
vaultToken string
taskDir string
vault *testutil.TestVault
consul *ctestutil.TestServer
consul *sdk.Consul
emitRate time.Duration
nomadNamespace string
}
// newTestHarness returns a harness that optionally starts dev Consul and
// Vault servers, builds the appropriate config, and creates a
// TaskTemplateManager
func newTestHarness(t *testing.T, templates []*structs.Template, consul, vault bool) *testHarness {
func newTestHarness(t *testing.T, templates []*structs.Template, useConsul, useVault bool) *testHarness {
region := "global"
mockNode := mock.Node()
......@@ -165,28 +166,20 @@ func newTestHarness(t *testing.T, templates []*structs.Template, consul, vault b
task.Name = TestTaskName
harness.envBuilder = taskenv.NewBuilder(harness.node, a, task, region)
harness.nomadNamespace = a.Namespace
// Make a tempdir
d, err := ioutil.TempDir("", "ct_test")
if err != nil {
t.Fatalf("Failed to make tmpdir: %v", err)
}
harness.taskDir = d
harness.taskDir = t.TempDir()
harness.envBuilder.SetClientTaskRoot(harness.taskDir)
if consul {
harness.consul, err = ctestutil.NewTestServerConfigT(t, func(c *ctestutil.TestServerConfig) {
// defaults
})
if err != nil {
t.Fatalf("error starting test Consul server: %v", err)
}
if useConsul {
consul, ready, stop := sdk.NewConsul(t, nil)
t.Cleanup(stop)
harness.consul = consul
harness.config.ConsulConfig = &sconfig.ConsulConfig{
Addr: harness.consul.HTTPAddr,
Addr: consul.HTTP(),
}
ready()
}
if vault {
if useVault {
harness.vault = testutil.NewTestVault(t)
harness.config.VaultConfig = harness.vault.Config
harness.vaultToken = harness.vault.RootToken
......@@ -195,6 +188,16 @@ func newTestHarness(t *testing.T, templates []*structs.Template, consul, vault b
return harness
}
func (h *testHarness) setKV(t *testing.T, key, value string) {
require.NotNil(t, h.consul)
client := h.consul.Client(t)
_, err := client.KV().Put(&api.KVPair{
Key: key,
Value: []byte(value),
}, nil)
require.NoError(t, err)
}
func (h *testHarness) start(t *testing.T) {
if err := h.startWithErr(); err != nil {
t.Fatalf("failed to build task template manager: %v", err)
......@@ -222,20 +225,14 @@ func (h *testHarness) setEmitRate(d time.Duration) {
h.emitRate = d
}
// stop is used to stop any running Vault or Consul server plus the task manager
// stop is used to stop the Vault server and the task manager; the Consul
// helper is stopped via t.Cleanup
func (h *testHarness) stop() {
if h.vault != nil {
h.vault.Stop()
}
if h.consul != nil {
h.consul.Stop()
}
if h.manager != nil {
h.manager.Stop()
}
if h.taskDir != "" {
os.RemoveAll(h.taskDir)
}
}
func TestTaskTemplateManager_InvalidConfig(t *testing.T) {
......@@ -379,6 +376,15 @@ func TestTaskTemplateManager_InvalidConfig(t *testing.T) {
}
}
func setKV(t *testing.T, consul *sdk.Consul, key, value string) {
client := consul.Client(t)
_, err := client.KV().Put(&api.KVPair{
Key: key,
Value: []byte(value),
}, nil)
require.NoError(t, err)
}
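Putting the new pieces together, the lifecycle of the sdk Consul helper as used throughout these tests is roughly the following (a sketch based on the call sites in this diff; the exact sdk signatures are assumed, and the test name is invented):

// Start a Consul agent, register cleanup, wait for it, then use it.
func TestConsulHelperSketch(t *testing.T) {
	consul, ready, stop := sdk.NewConsul(t, nil)
	t.Cleanup(stop)

	// Block until the Consul agent is accepting requests.
	ready()

	// Write a KV pair through the setKV helper defined above.
	setKV(t, consul, "example/key", "example-value")

	// Hand the agent's HTTP address to the component under test.
	_ = consul.HTTP()
}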
func TestTaskTemplateManager_HostPath(t *testing.T) {
ci.Parallel(t)
// Make a template that will render immediately and write it to a tmp file
......@@ -641,7 +647,7 @@ func TestTaskTemplateManager_Unblock_Consul(t *testing.T) {
}
// Write the key to Consul
harness.consul.SetKV(t, key, []byte(content))
harness.setKV(t, key, content)
// Wait for the unblock
select {
......@@ -757,7 +763,7 @@ func TestTaskTemplateManager_Unblock_Multi_Template(t *testing.T) {
}
// Write the key to Consul
harness.consul.SetKV(t, consulKey, []byte(consulContent))
harness.setKV(t, consulKey, consulContent)
// Wait for the unblock
select {
......@@ -903,7 +909,7 @@ func TestTaskTemplateManager_Rerender_Noop(t *testing.T) {
}
// Write the key to Consul
harness.consul.SetKV(t, key, []byte(content1))
harness.setKV(t, key, content1)
// Wait for the unblock
select {
......@@ -924,7 +930,7 @@ func TestTaskTemplateManager_Rerender_Noop(t *testing.T) {
}
// Update the key in Consul
harness.consul.SetKV(t, key, []byte(content2))
harness.setKV(t, key, content2)
select {
case <-harness.mockHooks.RestartCh:
......@@ -986,8 +992,8 @@ func TestTaskTemplateManager_Rerender_Signal(t *testing.T) {
}
// Write the key to Consul
harness.consul.SetKV(t, key1, []byte(content1_1))
harness.consul.SetKV(t, key2, []byte(content2_1))
harness.setKV(t, key1, content1_1)
harness.setKV(t, key2, content2_1)
// Wait for the unblock
select {
......@@ -1001,8 +1007,8 @@ func TestTaskTemplateManager_Rerender_Signal(t *testing.T) {
}
// Update the keys in Consul
harness.consul.SetKV(t, key1, []byte(content1_2))
harness.consul.SetKV(t, key2, []byte(content2_2))
harness.setKV(t, key1, content1_2)
harness.setKV(t, key2, content2_2)
// Wait for signals
timeout := time.After(time.Duration(1*testutil.TestMultiplier()) * time.Second)
......@@ -1072,7 +1078,7 @@ func TestTaskTemplateManager_Rerender_Restart(t *testing.T) {
}
// Write the key to Consul
harness.consul.SetKV(t, key1, []byte(content1_1))
harness.setKV(t, key1, content1_1)
// Wait for the unblock
select {
......@@ -1082,7 +1088,7 @@ func TestTaskTemplateManager_Rerender_Restart(t *testing.T) {
}
// Update the keys in Consul
harness.consul.SetKV(t, key1, []byte(content1_2))
harness.setKV(t, key1, content1_2)
// Wait for restart
timeout := time.After(time.Duration(1*testutil.TestMultiplier()) * time.Second)
......@@ -1169,7 +1175,7 @@ func TestTaskTemplateManager_Signal_Error(t *testing.T) {
harness.mockHooks.SignalError = fmt.Errorf("test error")
// Write the key to Consul
harness.consul.SetKV(t, key1, []byte(content1))
harness.setKV(t, key1, content1)
// Wait a little
select {
......@@ -1179,7 +1185,7 @@ func TestTaskTemplateManager_Signal_Error(t *testing.T) {
}
// Write the key to Consul
harness.consul.SetKV(t, key1, []byte(content2))
harness.setKV(t, key1, content2)
// Wait for kill channel
select {
......@@ -1442,8 +1448,8 @@ BAR={{key "bar"}}
}
// Write the key to Consul
harness.consul.SetKV(t, key1, []byte(content1_1))
harness.consul.SetKV(t, key2, []byte(content1_1))
harness.setKV(t, key1, content1_1)
harness.setKV(t, key2, content1_1)
// Wait for the unblock
select {
......@@ -1461,7 +1467,7 @@ BAR={{key "bar"}}
}
// Update the keys in Consul
harness.consul.SetKV(t, key1, []byte(content1_2))
harness.setKV(t, key1, content1_2)
// Wait for restart
timeout := time.After(time.Duration(1*testutil.TestMultiplier()) * time.Second)
......@@ -1893,7 +1899,7 @@ func TestTaskTemplateManager_BlockedEvents(t *testing.T) {
// Write 0-2 keys to Consul
for i := 0; i < 3; i++ {
harness.consul.SetKV(t, fmt.Sprintf("%d", i), []byte{0xa})
harness.setKV(t, strconv.Itoa(i), ":)")
}
// Ensure that we get a blocked event
......
......@@ -17,9 +17,9 @@ import (
client "github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/testutil"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/freeport"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/nomad/structs/config"
"github.com/hashicorp/nomad/sdk/portfree"
"github.com/stretchr/testify/require"
)
......@@ -652,8 +652,7 @@ func TestConfig_Listener(t *testing.T) {
}
// Works with valid inputs
ports := freeport.MustTake(2)
defer freeport.Return(ports)
ports := portfree.New(t).Get(2)
ln, err := config.Listener("tcp", "127.0.0.1", ports[0])
if err != nil {
......
package consul
import (
"io/ioutil"
"testing"
"time"
consulapi "github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/client/serviceregistration"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/sdk"
"github.com/stretchr/testify/require"
)
func TestConsul_Connect(t *testing.T) {
ci.Parallel(t)
// Create an embedded Consul server
testconsul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) {
// If -v wasn't specified squelch consul logging
if !testing.Verbose() {
c.Stdout = ioutil.Discard
c.Stderr = ioutil.Discard
}
})
if err != nil {
t.Fatalf("error starting test consul server: %v", err)
}
defer testconsul.Stop()
consul, ready, stop := sdk.NewConsul(t, nil)
t.Cleanup(stop)
consulConfig := consulapi.DefaultConfig()
consulConfig.Address = testconsul.HTTPAddr
consulConfig.Address = consul.HTTP()
consulClient, err := consulapi.NewClient(consulConfig)
require.NoError(t, err)
namespacesClient := NewNamespacesClient(consulClient.Namespaces(), consulClient.Agent())
serviceClient := NewServiceClient(consulClient.Agent(), namespacesClient, testlog.HCLogger(t), true)
......@@ -43,11 +33,13 @@ func TestConsul_Connect(t *testing.T) {
const interval = 50 * time.Millisecond
serviceClient.periodicInterval = interval
// Disable deregistration probation to test syncing
// Disable de-registration probation to test syncing
serviceClient.deregisterProbationExpiry = time.Time{}
go serviceClient.Run()
defer serviceClient.Shutdown()
t.Cleanup(func() {
_ = serviceClient.Shutdown()
})
alloc := mock.Alloc()
alloc.AllocatedResources.Shared.Networks = []*structs.NetworkResource{
......@@ -81,6 +73,9 @@ func TestConsul_Connect(t *testing.T) {
},
}
// wait for consul agent
ready()
require.NoError(t, serviceClient.RegisterWorkload(BuildAllocServices(mock.Node(), alloc, NoopRestarter())))
require.Eventually(t, func() bool {
......@@ -89,7 +84,7 @@ func TestConsul_Connect(t *testing.T) {
return len(services) == 2
}, 3*time.Second, 100*time.Millisecond)
// Test a few times to ensure Nomad doesn't improperly deregister
// Test a few times to ensure Nomad doesn't improperly de-register
// Connect services.
for i := 10; i > 0; i-- {
services, err := consulClient.Agent().Services()
......
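The setup pattern introduced here recurs across the rest of this commit: sdk.NewConsul returns a handle plus ready/stop callbacks, the handle's HTTP() address feeds an ordinary Consul API client, and ready() is called before the first request. Condensed into one test, using only calls that appear in this diff (the type of the nil second argument is not shown here, so it is left unspecified):

package example

import (
	"testing"

	consulapi "github.com/hashicorp/consul/api"
	"github.com/hashicorp/nomad/sdk"
	"github.com/stretchr/testify/require"
)

func TestEmbeddedConsulPattern(t *testing.T) {
	// Start the SDK-managed Consul agent and stop it when the test finishes.
	consul, ready, stop := sdk.NewConsul(t, nil)
	t.Cleanup(stop)

	// Point a regular Consul API client at the test agent.
	cfg := consulapi.DefaultConfig()
	cfg.Address = consul.HTTP()
	client, err := consulapi.NewClient(cfg)
	require.NoError(t, err)

	// Block until the agent is serving before issuing requests.
	ready()

	services, _, err := client.Catalog().Services(nil)
	require.NoError(t, err)
	require.Contains(t, services, "consul")
}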
......@@ -2,13 +2,11 @@ package consul_test
import (
"context"
"io/ioutil"
"os"
"testing"
"time"
consulapi "github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/client/allocdir"
......@@ -20,10 +18,11 @@ import (
"github.com/hashicorp/nomad/client/serviceregistration/wrapper"
"github.com/hashicorp/nomad/client/state"
"github.com/hashicorp/nomad/client/vaultclient"
"github.com/hashicorp/nomad/command/agent/consul"
agentconsul "github.com/hashicorp/nomad/command/agent/consul"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/sdk"
"github.com/stretchr/testify/require"
)
......@@ -40,42 +39,20 @@ func (m *mockUpdater) TaskStateUpdated() {
func TestConsul_Integration(t *testing.T) {
ci.Parallel(t)
if testing.Short() {
t.Skip("-short set; skipping")
}
r := require.New(t)
// Create an embedded Consul server
testconsul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) {
// If -v wasn't specified squelch consul logging
if !testing.Verbose() {
c.Stdout = ioutil.Discard
c.Stderr = ioutil.Discard
}
})
if err != nil {
t.Fatalf("error starting test consul server: %v", err)
}
defer testconsul.Stop()
consul, ready, stop := sdk.NewConsul(t, nil)
t.Cleanup(stop)
conf := config.DefaultConfig()
conf.Node = mock.Node()
conf.ConsulConfig.Addr = testconsul.HTTPAddr
conf.ConsulConfig.Addr = consul.HTTP()
consulConfig, err := conf.ConsulConfig.ApiConfig()
if err != nil {
t.Fatalf("error generating consul config: %v", err)
}
require.NoError(t, err)
conf.StateDir, err = ioutil.TempDir("", "nomadtest-consulstate")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(conf.StateDir)
conf.AllocDir, err = ioutil.TempDir("", "nomdtest-consulalloc")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(conf.AllocDir)
conf.StateDir = t.TempDir()
// todo(shoenig): the task runner does not unmount secrets, and the testing
// cleanup hook fails trying to remove it
// conf.AllocDir = t.TempDir()
conf.AllocDir = os.TempDir()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
......@@ -139,11 +116,17 @@ func TestConsul_Integration(t *testing.T) {
taskDir := allocDir.NewTaskDir(task.Name)
vclient := vaultclient.NewMockVaultClient()
consulClient, err := consulapi.NewClient(consulConfig)
r.Nil(err)
require.NoError(t, err)
// wait for consul
ready()
namespacesClient := agentconsul.NewNamespacesClient(consulClient.Namespaces(), consulClient.Agent())
serviceClient := agentconsul.NewServiceClient(consulClient.Agent(), namespacesClient, testlog.HCLogger(t), true)
t.Cleanup(func() {
_ = serviceClient.Shutdown()
})
namespacesClient := consul.NewNamespacesClient(consulClient.Namespaces(), consulClient.Agent())
serviceClient := consul.NewServiceClient(consulClient.Agent(), namespacesClient, testlog.HCLogger(t), true)
defer serviceClient.Shutdown() // just-in-case cleanup
consulRan := make(chan struct{})
go func() {
serviceClient.Run()
......@@ -155,8 +138,8 @@ func TestConsul_Integration(t *testing.T) {
closedCh := make(chan struct{})
close(closedCh)
// Build the config
config := &taskrunner.Config{
// Build the task runner config
runnerConfig := &taskrunner.Config{
Alloc: alloc,
ClientConfig: conf,
Consul: serviceClient,
......@@ -172,8 +155,8 @@ func TestConsul_Integration(t *testing.T) {
ServiceRegWrapper: wrapper.NewHandlerWrapper(logger, serviceClient, regMock.NewServiceRegistrationHandler(logger)),
}
tr, err := taskrunner.NewTaskRunner(config)
r.NoError(err)
tr, err := taskrunner.NewTaskRunner(runnerConfig)
require.NoError(t, err)
go tr.Run()
defer func() {
// Make sure we always shutdown task runner when the test exits
......@@ -181,14 +164,14 @@ func TestConsul_Integration(t *testing.T) {
case <-tr.WaitCh():
// Exited cleanly, no need to kill
default:
tr.Kill(context.Background(), &structs.TaskEvent{}) // just in case
_ = tr.Kill(context.Background(), &structs.TaskEvent{}) // just in case
}
}()
// Block waiting for the service to appear
catalog := consulClient.Catalog()
res, meta, err := catalog.Service("httpd2", "test", nil)
r.Nil(err)
require.NoError(t, err)
for i := 0; len(res) == 0 && i < 10; i++ {
//Expected initial request to fail, do a blocking query
......@@ -197,7 +180,7 @@ func TestConsul_Integration(t *testing.T) {
t.Fatalf("error querying for service: %v", err)
}
}
r.Len(res, 1)
require.Len(t, res, 1)
// Truncate results
res = res[:]
......@@ -205,16 +188,16 @@ func TestConsul_Integration(t *testing.T) {
// Assert the service with the checks exists
for i := 0; len(res) == 0 && i < 10; i++ {
res, meta, err = catalog.Service("httpd", "http", &consulapi.QueryOptions{WaitIndex: meta.LastIndex + 1, WaitTime: 3 * time.Second})
r.Nil(err)
require.NoError(t, err)
}
r.Len(res, 1)
require.Len(t, res, 1)
// Assert the script check passes (mock_driver script checks always
// pass) after having time to run once
time.Sleep(2 * time.Second)
checks, _, err := consulClient.Health().Checks("httpd", nil)
r.Nil(err)
r.Len(checks, 2)
require.Nil(t, err)
require.Len(t, checks, 2)
for _, check := range checks {
if expected := "httpd"; check.ServiceName != expected {
......@@ -254,7 +237,7 @@ func TestConsul_Integration(t *testing.T) {
logger.Debug("killing task")
// Kill the task
tr.Kill(context.Background(), &structs.TaskEvent{})
_ = tr.Kill(context.Background(), &structs.TaskEvent{})
select {
case <-tr.WaitCh():
......@@ -269,7 +252,7 @@ func TestConsul_Integration(t *testing.T) {
// Ensure Consul is clean
services, _, err := catalog.Services(nil)
r.Nil(err)
r.Len(services, 1)
r.Contains(services, "consul")
require.Nil(t, err)
require.Len(t, services, 1)
require.Contains(t, services, "consul")
}
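This file also swaps defer serviceClient.Shutdown() for t.Cleanup and explicitly discards returned errors. A small self-contained illustration of the difference between the two (stopper is a placeholder type, not anything from the codebase):

package example

import "testing"

// stopper is a placeholder for anything with a Shutdown method, like the
// service client in the test above.
type stopper struct{}

func (s *stopper) Shutdown() error { return nil }

func newStopper(t *testing.T) *stopper {
	t.Helper()
	s := &stopper{}
	// t.Cleanup runs after the test and all of its subtests complete, even on
	// t.Fatal, and it can be registered from a setup helper like this one,
	// where a defer would fire as soon as the helper returns.
	t.Cleanup(func() { _ = s.Shutdown() })
	return s
}

func TestCleanupPattern(t *testing.T) {
	_ = newStopper(t)
}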
......@@ -35,41 +35,10 @@ import (
// makeHTTPServer returns a test server whose logs will be written to
// the passed writer. If the writer is nil, the logs are written to stderr.
func makeHTTPServer(t testing.TB, cb func(c *Config)) *TestAgent {
func makeHTTPServer(t *testing.T, cb func(c *Config)) *TestAgent {
return NewTestAgent(t, t.Name(), cb)
}
func BenchmarkHTTPRequests(b *testing.B) {
s := makeHTTPServer(b, func(c *Config) {
c.Client.Enabled = false
})
defer s.Shutdown()
job := mock.Job()
var allocs []*structs.Allocation
count := 1000
for i := 0; i < count; i++ {
alloc := mock.Alloc()
alloc.Job = job
alloc.JobID = job.ID
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
allocs = append(allocs, alloc)
}
handler := func(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
return allocs[:count], nil
}
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
resp := httptest.NewRecorder()
req, _ := http.NewRequest("GET", "/v1/kv/key", nil)
s.Server.wrap(handler)(resp, req)
}
})
}
func TestMultipleInterfaces(t *testing.T) {
ci.Parallel(t)
......@@ -81,7 +50,7 @@ func TestMultipleInterfaces(t *testing.T) {
})
defer s.Shutdown()
httpPort := s.ports[0]
httpPort := s.Config.Ports.HTTP
for _, ip := range httpIps {
resp, err := http.Get(fmt.Sprintf("http://%s:%d/", ip, httpPort))
......@@ -1445,14 +1414,14 @@ func benchmarkJsonEncoding(b *testing.B, handle *codec.JsonHandle) {
}
}
func httpTest(t testing.TB, cb func(c *Config), f func(srv *TestAgent)) {
func httpTest(t *testing.T, cb func(c *Config), f func(srv *TestAgent)) {
s := makeHTTPServer(t, cb)
defer s.Shutdown()
testutil.WaitForLeader(t, s.Agent.RPC)
f(s)
}
func httpACLTest(t testing.TB, cb func(c *Config), f func(srv *TestAgent)) {
func httpACLTest(t *testing.T, cb func(c *Config), f func(srv *TestAgent)) {
s := makeHTTPServer(t, func(c *Config) {
c.ACL.Enabled = true
if cb != nil {
......
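makeHTTPServer, httpTest, and httpACLTest narrow their parameter from testing.TB to *testing.T, which is consistent with dropping BenchmarkHTTPRequests above: a *testing.B satisfies testing.TB but cannot be passed where a *testing.T is required. A minimal illustration:

package example

import "testing"

// acceptsTB works for both tests and benchmarks: testing.TB is the interface
// implemented by *testing.T and *testing.B.
func acceptsTB(tb testing.TB) { tb.Helper() }

// acceptsT works only for tests.
func acceptsT(t *testing.T) { t.Helper() }

func TestBoth(t *testing.T) {
	acceptsTB(t)
	acceptsT(t)
}

func BenchmarkOnlyTB(b *testing.B) {
	acceptsTB(b)
	// acceptsT(b) // would not compile: *testing.B is not a *testing.T
}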
......@@ -16,11 +16,11 @@ import (
"testing"
"time"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/sdk/retry"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
......
......@@ -17,12 +17,12 @@ import (
"github.com/hashicorp/nomad/api"
client "github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/fingerprint"
"github.com/hashicorp/nomad/helper/freeport"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/hashicorp/nomad/nomad"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
sconfig "github.com/hashicorp/nomad/nomad/structs/config"
"github.com/hashicorp/nomad/sdk/portfree"
"github.com/hashicorp/nomad/testutil"
)
......@@ -38,7 +38,7 @@ var TempDir = os.TempDir()
// is removed after shutdown.
type TestAgent struct {
// T is the testing object
T testing.TB
T *testing.T
// Name is an optional name of the agent.
Name string
......@@ -80,10 +80,6 @@ type TestAgent struct {
// RootToken is auto-bootstrapped if ACLs are enabled
RootToken *structs.ACLToken
// ports that are reserved through freeport that must be returned at
// the end of a test, done when Shutdown() is called.
ports []int
// Enterprise specifies if the agent is enterprise or not
Enterprise bool
......@@ -94,7 +90,7 @@ type TestAgent struct {
// NewTestAgent returns a started agent with the given name and
// configuration. The caller should call Shutdown() to stop the agent and
// remove temporary directories.
func NewTestAgent(t testing.TB, name string, configCallback func(*Config)) *TestAgent {
func NewTestAgent(t *testing.T, name string, configCallback func(*Config)) *TestAgent {
a := &TestAgent{
T: t,
Name: name,
......@@ -110,7 +106,7 @@ func NewTestAgent(t testing.TB, name string, configCallback func(*Config)) *Test
// Start starts a test agent.
func (a *TestAgent) Start() *TestAgent {
if a.Agent != nil {
a.T.Fatalf("TestAgent already started")
a.T.Fatal("TestAgent already started")
}
if a.Config == nil {
a.Config = a.config()
......@@ -267,8 +263,6 @@ func (a *TestAgent) Shutdown() error {
}
a.shutdown = true
defer freeport.Return(a.ports)
defer func() {
if a.DataDir != "" {
os.RemoveAll(a.DataDir)
......@@ -324,8 +318,7 @@ func (a *TestAgent) Client() *api.Client {
// Instead of relying on one set of ports to be sufficient we retry
// starting the agent with different ports on port conflict.
func (a *TestAgent) pickRandomPorts(c *Config) {
ports := freeport.MustTake(3)
a.ports = append(a.ports, ports...)
ports := portfree.New(a.T).Get(3)
c.Ports.HTTP = ports[0]
c.Ports.RPC = ports[1]
......
......@@ -2,7 +2,6 @@ package command
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
......@@ -13,13 +12,13 @@ import (
"time"
consulapi "github.com/hashicorp/consul/api"
consultest "github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/ci"
clienttest "github.com/hashicorp/nomad/client/testutil"
"github.com/hashicorp/nomad/command/agent"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/state"
"github.com/hashicorp/nomad/sdk"
"github.com/hashicorp/nomad/testutil"
"github.com/mitchellh/cli"
"github.com/stretchr/testify/assert"
......@@ -687,29 +686,17 @@ func TestDebug_WriteBytes_PathEscapesSandbox(t *testing.T) {
func TestDebug_CollectConsul(t *testing.T) {
ci.Parallel(t)
if testing.Short() {
t.Skip("-short set; skipping")
}
ci.SkipSlow(t, "is a slow test")
// Skip test if Consul binary cannot be found
clienttest.RequireConsul(t)
// Create an embedded Consul server
testconsul, err := consultest.NewTestServerConfigT(t, func(c *consultest.TestServerConfig) {
// If -v wasn't specified squelch consul logging
if !testing.Verbose() {
c.Stdout = ioutil.Discard
c.Stderr = ioutil.Discard
}
})
require.NoError(t, err)
if err != nil {
t.Fatalf("error starting test consul server: %v", err)
}
defer testconsul.Stop()
consul, ready, stop := sdk.NewConsul(t, nil)
t.Cleanup(stop)
consulConfig := consulapi.DefaultConfig()
consulConfig.Address = testconsul.HTTPAddr
consulConfig.Address = consul.HTTP()
// Setup mock UI
ui := cli.NewMockUi()
......@@ -727,9 +714,11 @@ func TestDebug_CollectConsul(t *testing.T) {
// Setup capture directory
testDir := t.TempDir()
defer os.Remove(testDir)
c.collectDir = testDir
// Wait for consul agent
ready()
// Collect data from Consul into folder "test"
c.collectConsul("test")
......
......@@ -14,7 +14,6 @@ import (
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/client/testutil"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/freeport"
tu "github.com/hashicorp/nomad/testutil"
"github.com/stretchr/testify/require"
)
......@@ -58,8 +57,7 @@ func TestDockerDriver_PluginConfig_PidsLimit(t *testing.T) {
driver := dh.Impl().(*Driver)
driver.config.PidsLimit = 5
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
cfg.PidsLimit = 7
......@@ -80,8 +78,7 @@ func TestDockerDriver_PidsLimit(t *testing.T) {
testutil.DockerCompatible(t)
require := require.New(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.PidsLimit = 1
cfg.Command = "/bin/sh"
cfg.Args = []string{"-c", "sleep 5 & sleep 5 & sleep 5"}
......
......@@ -20,7 +20,6 @@ import (
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/client/taskenv"
"github.com/hashicorp/nomad/client/testutil"
"github.com/hashicorp/nomad/helper/freeport"
"github.com/hashicorp/nomad/helper/pluginutils/hclspecutils"
"github.com/hashicorp/nomad/helper/pluginutils/hclutils"
"github.com/hashicorp/nomad/helper/pluginutils/loader"
......@@ -30,6 +29,7 @@ import (
"github.com/hashicorp/nomad/plugins/base"
"github.com/hashicorp/nomad/plugins/drivers"
dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils"
"github.com/hashicorp/nomad/sdk/portfree"
tu "github.com/hashicorp/nomad/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
......@@ -72,10 +72,9 @@ var (
busyboxLongRunningCmd = []string{"nc", "-l", "-p", "3000", "127.0.0.1"}
)
// Returns a task with a reserved and dynamic port. The ports are returned
// respectively, and should be reclaimed with freeport.Return at the end of a test.
// Returns a task with reserved and dynamic ports.
func dockerTask(t *testing.T) (*drivers.TaskConfig, *TaskConfig, []int) {
ports := freeport.MustTake(2)
ports := portfree.New(t).Get(2)
dockerReserved := ports[0]
dockerDynamic := ports[1]
......@@ -113,7 +112,6 @@ func dockerTask(t *testing.T) (*drivers.TaskConfig, *TaskConfig, []int) {
}
require.NoError(t, task.EncodeConcreteDriverConfig(&cfg))
return task, &cfg, ports
}
......@@ -639,14 +637,9 @@ func TestDockerDriver_StartN(t *testing.T) {
testutil.DockerCompatible(t)
require := require.New(t)
task1, _, ports1 := dockerTask(t)
defer freeport.Return(ports1)
task2, _, ports2 := dockerTask(t)
defer freeport.Return(ports2)
task3, _, ports3 := dockerTask(t)
defer freeport.Return(ports3)
task1, _, _ := dockerTask(t)
task2, _, _ := dockerTask(t)
task3, _, _ := dockerTask(t)
taskList := []*drivers.TaskConfig{task1, task2, task3}
......@@ -693,22 +686,19 @@ func TestDockerDriver_StartNVersions(t *testing.T) {
testutil.DockerCompatible(t)
require := require.New(t)
task1, cfg1, ports1 := dockerTask(t)
defer freeport.Return(ports1)
task1, cfg1, _ := dockerTask(t)
tcfg1 := newTaskConfig("", []string{"echo", "hello"})
cfg1.Image = tcfg1.Image
cfg1.LoadImage = tcfg1.LoadImage
require.NoError(task1.EncodeConcreteDriverConfig(cfg1))
task2, cfg2, ports2 := dockerTask(t)
defer freeport.Return(ports2)
task2, cfg2, _ := dockerTask(t)
tcfg2 := newTaskConfig("musl", []string{"echo", "hello"})
cfg2.Image = tcfg2.Image
cfg2.LoadImage = tcfg2.LoadImage
require.NoError(task2.EncodeConcreteDriverConfig(cfg2))
task3, cfg3, ports3 := dockerTask(t)
defer freeport.Return(ports3)
task3, cfg3, _ := dockerTask(t)
tcfg3 := newTaskConfig("glibc", []string{"echo", "hello"})
cfg3.Image = tcfg3.Image
cfg3.LoadImage = tcfg3.LoadImage
......@@ -758,9 +748,7 @@ func TestDockerDriver_Labels(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Labels = map[string]string{
"label1": "value1",
"label2": "value2",
......@@ -787,8 +775,7 @@ func TestDockerDriver_ExtraLabels(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -822,8 +809,7 @@ func TestDockerDriver_LoggingConfiguration(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -849,8 +835,7 @@ func TestDockerDriver_ForcePull(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.ForcePull = true
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -873,8 +858,7 @@ func TestDockerDriver_ForcePull_RepoDigest(t *testing.T) {
}
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.LoadImage = ""
cfg.Image = "library/busybox@sha256:58ac43b2cc92c687a32c8be6278e50a063579655fe3090125dcb2af0ff9e1a64"
localDigest := "sha256:8ac48589692a53a9b8c2d1ceaa6b402665aa7fe667ba51ccc03002300856d8c7"
......@@ -899,8 +883,7 @@ func TestDockerDriver_SecurityOptUnconfined(t *testing.T) {
}
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.SecurityOpt = []string{"seccomp=unconfined"}
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -923,8 +906,7 @@ func TestDockerDriver_SecurityOptFromFile(t *testing.T) {
}
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.SecurityOpt = []string{"seccomp=./test-resources/docker/seccomp.json"}
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -942,8 +924,7 @@ func TestDockerDriver_Runtime(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Runtime = "runc"
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -962,8 +943,7 @@ func TestDockerDriver_Runtime(t *testing.T) {
func TestDockerDriver_CreateContainerConfig(t *testing.T) {
ci.Parallel(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
opt := map[string]string{"size": "120G"}
cfg.StorageOpt = opt
......@@ -986,8 +966,7 @@ func TestDockerDriver_CreateContainerConfig(t *testing.T) {
func TestDockerDriver_CreateContainerConfig_RuntimeConflict(t *testing.T) {
ci.Parallel(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
task.DeviceEnv["NVIDIA_VISIBLE_DEVICES"] = "GPU_UUID_1"
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -1025,8 +1004,7 @@ func TestDockerDriver_CreateContainerConfig_ChecksAllowRuntimes(t *testing.T) {
"custom",
}
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
for _, runtime := range allowRuntime {
......@@ -1050,8 +1028,7 @@ func TestDockerDriver_CreateContainerConfig_ChecksAllowRuntimes(t *testing.T) {
func TestDockerDriver_CreateContainerConfig_User(t *testing.T) {
ci.Parallel(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
task.User = "random-user-1"
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -1068,8 +1045,7 @@ func TestDockerDriver_CreateContainerConfig_User(t *testing.T) {
func TestDockerDriver_CreateContainerConfig_Labels(t *testing.T) {
ci.Parallel(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
task.AllocID = uuid.Generate()
task.JobName = "redis-demo-job"
......@@ -1159,8 +1135,7 @@ func TestDockerDriver_CreateContainerConfig_Logging(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Logging = c.loggingConfig
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -1181,8 +1156,7 @@ func TestDockerDriver_CreateContainerConfig_Logging(t *testing.T) {
func TestDockerDriver_CreateContainerConfig_Mounts(t *testing.T) {
ci.Parallel(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Mounts = []DockerMount{
{
......@@ -1296,8 +1270,7 @@ func TestDockerDriver_CreateContainerConfigWithRuntimes(t *testing.T) {
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
dh := dockerDriverHarness(t, map[string]interface{}{
"allow_runtimes": []string{"runc", "nvidia", "nvidia-runtime-modified-name"},
......@@ -1387,8 +1360,7 @@ func TestDockerDriver_Capabilities(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
client := newTestDockerClient(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
if len(tc.CapAdd) > 0 {
cfg.CapAdd = tc.CapAdd
......@@ -1465,8 +1437,8 @@ func TestDockerDriver_DNS(t *testing.T) {
}
for _, c := range cases {
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
task.DNS = c.cfg
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -1488,8 +1460,7 @@ func TestDockerDriver_Init(t *testing.T) {
t.Skip("Windows does not support init.")
}
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Init = true
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -1529,8 +1500,7 @@ func TestDockerDriver_CPUSetCPUs(t *testing.T) {
for _, testCase := range testCases {
t.Run(testCase.Name, func(t *testing.T) {
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.CPUSetCPUs = testCase.CPUSetCPUs
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -1554,8 +1524,7 @@ func TestDockerDriver_MemoryHardLimit(t *testing.T) {
t.Skip("Windows does not support MemoryReservation")
}
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.MemoryHardLimit = 300
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -1578,8 +1547,8 @@ func TestDockerDriver_MACAddress(t *testing.T) {
t.Skip("Windows docker does not support setting MacAddress")
}
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.MacAddress = "00:16:3e:00:00:00"
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -1597,8 +1566,8 @@ func TestDockerWorkDir(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.WorkDir = "/some/path"
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -1625,7 +1594,7 @@ func TestDockerDriver_PortsNoMap(t *testing.T) {
testutil.DockerCompatible(t)
task, _, ports := dockerTask(t)
defer freeport.Return(ports)
res := ports[0]
dyn := ports[1]
......@@ -1667,7 +1636,6 @@ func TestDockerDriver_PortsMapping(t *testing.T) {
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
res := ports[0]
dyn := ports[1]
cfg.PortMap = map[string]int{
......@@ -1716,7 +1684,7 @@ func TestDockerDriver_CreateContainerConfig_Ports(t *testing.T) {
ci.Parallel(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
hostIP := "127.0.0.1"
if runtime.GOOS == "windows" {
hostIP = ""
......@@ -1759,7 +1727,6 @@ func TestDockerDriver_CreateContainerConfig_PortsMapping(t *testing.T) {
ci.Parallel(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
res := ports[0]
dyn := ports[1]
cfg.PortMap = map[string]int{
......@@ -1795,8 +1762,7 @@ func TestDockerDriver_CleanupContainer(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Command = "echo"
cfg.Args = []string{"hello"}
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -1833,8 +1799,7 @@ func TestDockerDriver_EnableImageGC(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Command = "echo"
cfg.Args = []string{"hello"}
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -1900,8 +1865,7 @@ func TestDockerDriver_DisableImageGC(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Command = "echo"
cfg.Args = []string{"hello"}
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -1963,8 +1927,7 @@ func TestDockerDriver_MissingContainer_Cleanup(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Command = "echo"
cfg.Args = []string{"hello"}
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -2030,8 +1993,7 @@ func TestDockerDriver_Stats(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Command = "sleep"
cfg.Args = []string{"1000"}
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -2256,8 +2218,7 @@ func TestDockerDriver_Mounts(t *testing.T) {
driver.config.Volumes.Enabled = true
// Build the task
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Command = "sleep"
cfg.Args = []string{"10000"}
cfg.Mounts = c.Mounts
......@@ -2455,7 +2416,7 @@ func TestDockerDriver_Devices_IsInvalidConfig(t *testing.T) {
}
for _, tc := range testCases {
task, cfg, ports := dockerTask(t)
task, cfg, _ := dockerTask(t)
cfg.Devices = tc.deviceConfig
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
d := dockerDriverHarness(t, nil)
......@@ -2466,7 +2427,6 @@ func TestDockerDriver_Devices_IsInvalidConfig(t *testing.T) {
_, _, err := d.StartTask(task)
require.Error(t, err)
require.Contains(t, err.Error(), tc.err.Error())
freeport.Return(ports)
}
}
......@@ -2492,8 +2452,7 @@ func TestDockerDriver_Device_Success(t *testing.T) {
ContainerPath: containerPath,
}
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Devices = []DockerDevice{config}
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -2513,8 +2472,7 @@ func TestDockerDriver_Entrypoint(t *testing.T) {
testutil.DockerCompatible(t)
entrypoint := []string{"sh", "-c"}
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Entrypoint = entrypoint
cfg.Command = strings.Join(busyboxLongRunningCmd, " ")
cfg.Args = []string{}
......@@ -2541,8 +2499,7 @@ func TestDockerDriver_ReadonlyRootfs(t *testing.T) {
t.Skip("Windows Docker does not support root filesystem in read-only mode")
}
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.ReadonlyRootfs = true
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -2579,8 +2536,7 @@ func TestDockerDriver_VolumeError(t *testing.T) {
ci.Parallel(t)
// setup
_, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
_, cfg, _ := dockerTask(t)
driver := dockerDriverHarness(t, nil)
// assert volume error is recoverable
......@@ -2594,8 +2550,7 @@ func TestDockerDriver_AdvertiseIPv6Address(t *testing.T) {
expectedPrefix := "2001:db8:1::242:ac11"
expectedAdvertise := true
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.AdvertiseIPv6Addr = expectedAdvertise
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -2701,8 +2656,7 @@ func TestDockerDriver_CreationIdempotent(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
client := newTestDockerClient(t)
......@@ -2769,8 +2723,7 @@ func TestDockerDriver_CreationIdempotent(t *testing.T) {
func TestDockerDriver_CreateContainerConfig_CPUHardLimit(t *testing.T) {
ci.Parallel(t)
task, _, ports := dockerTask(t)
defer freeport.Return(ports)
task, _, _ := dockerTask(t)
dh := dockerDriverHarness(t, nil)
driver := dh.Impl().(*Driver)
......
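Throughout this file the call sites change from capturing ports plus defer freeport.Return(ports) to discarding the third return value with _. That follows from port acquisition now taking the *testing.T: the helper can fail the test and handle its own bookkeeping, so callers that never read the ports have nothing left to manage. A hedged sketch of that shape (getPorts is illustrative, not the real sdk/portfree API):

package example

import (
	"net"
	"testing"
)

// getPorts hands out ephemeral ports and reports failure through t, so the
// caller carries no return-the-ports obligation.
func getPorts(t *testing.T, n int) []int {
	t.Helper()
	out := make([]int, 0, n)
	for i := 0; i < n; i++ {
		l, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			t.Fatalf("failed to reserve ephemeral port: %v", err)
		}
		out = append(out, l.Addr().(*net.TCPAddr).Port)
		// Close right away so the test itself can bind the port later.
		_ = l.Close()
	}
	return out
}

func TestCallerShape(t *testing.T) {
	// Callers that need the ports keep them; callers that do not simply
	// discard them, as in `task, cfg, _ := dockerTask(t)` above.
	ports := getPorts(t, 2)
	_ = ports
}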
......@@ -18,7 +18,6 @@ import (
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/testutil"
"github.com/hashicorp/nomad/helper/freeport"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/plugins/drivers"
dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils"
......@@ -31,8 +30,7 @@ func TestDockerDriver_User(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
task.User = "alice"
cfg.Command = "/bin/sleep"
cfg.Args = []string{"10000"}
......@@ -148,8 +146,7 @@ func TestDockerDriver_CPUCFSPeriod(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.CPUHardLimit = true
cfg.CPUCFSPeriod = 1000000
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -169,8 +166,7 @@ func TestDockerDriver_Sysctl_Ulimit(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
expectedUlimits := map[string]string{
"nproc": "4242",
"nofile": "2048:4096",
......@@ -240,7 +236,7 @@ func TestDockerDriver_Sysctl_Ulimit_Errors(t *testing.T) {
}
for _, tc := range testCases {
task, cfg, ports := dockerTask(t)
task, cfg, _ := dockerTask(t)
cfg.Ulimit = tc.ulimitConfig
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
......@@ -252,7 +248,6 @@ func TestDockerDriver_Sysctl_Ulimit_Errors(t *testing.T) {
_, _, err := d.StartTask(task)
require.NotNil(t, err, "Expected non nil error")
require.Contains(t, err.Error(), tc.err.Error())
freeport.Return(ports)
}
}
......@@ -339,8 +334,7 @@ func TestDockerDriver_BindMountsHonorVolumesEnabledFlag(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.VolumeDriver = c.volumeDriver
cfg.Volumes = c.volumes
......@@ -366,8 +360,7 @@ func TestDockerDriver_BindMountsHonorVolumesEnabledFlag(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.VolumeDriver = c.volumeDriver
cfg.Volumes = c.volumes
......@@ -515,8 +508,7 @@ func TestDockerDriver_MountsSerialization(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Mounts = c.passedMounts
task.AllocDir = allocDir
......@@ -538,8 +530,7 @@ func TestDockerDriver_MountsSerialization(t *testing.T) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
cfg.Mounts = c.passedMounts
task.AllocDir = allocDir
......@@ -567,8 +558,7 @@ func TestDockerDriver_CreateContainerConfig_MountsCombined(t *testing.T) {
ci.Parallel(t)
testutil.DockerCompatible(t)
task, cfg, ports := dockerTask(t)
defer freeport.Return(ports)
task, cfg, _ := dockerTask(t)
task.Devices = []*drivers.DeviceConfig{
{
......