Compare commits
585 Commits
SHA1 | Author | Date | |
---|---|---|---|
c9769965b8 | |||
52cee1f57f | |||
056f4b6c00 | |||
3919d666c1 | |||
8c8d978cd8 | |||
dea4210da1 | |||
a6344f7561 | |||
c490e5c8a1 | |||
84052ff0b6 | |||
9ca374a88d | |||
648aa7422d | |||
41aefd131b | |||
2e90d3fe76 | |||
4f33c6cfe6 | |||
f4e6fdc193 | |||
9d069d54d6 | |||
fb0ee9d84a | |||
016b7893c6 | |||
1724772b20 | |||
a6a5d0e068 | |||
d548cb6ac2 | |||
d9641771ed | |||
aaa3f1c491 | |||
5889f7af0e | |||
5579cddbdb | |||
2b6866484e | |||
34a27b0127 | |||
948d1d61ff | |||
c96a9bfdfd | |||
4e80ac1cb3 | |||
550bda951e | |||
6b27508c93 | |||
6684766c5f | |||
5fd43b7cf0 | |||
336e2b8c84 | |||
ee69ac857e | |||
6caf5b0ac3 | |||
0f461282c8 | |||
ab7c110880 | |||
5046466dae | |||
0cc581b2da | |||
7dde23e60b | |||
e4a48cf53b | |||
a3fe1e78df | |||
5f2bb3319b | |||
429b08a408 | |||
ec0317e0e4 | |||
613e3b65ac | |||
dfb9063b3f | |||
284354c0da | |||
82ee60fe8b | |||
73a8c24089 | |||
d313be4420 | |||
83750d14e3 | |||
123532d2a4 | |||
1a05b5980f | |||
a3a772be7b | |||
42a5055d3c | |||
a93639650f | |||
71a230a4fa | |||
0643ed968f | |||
1572aaf6ca | |||
5803de1ac5 | |||
13874f4610 | |||
341ea5a6ea | |||
93be5afb60 | |||
5ed3916f82 | |||
7760f75ae0 | |||
390764c2b4 | |||
9926395e5b | |||
422428908a | |||
76c43f62e2 | |||
b69d5f6e6e | |||
0db441b28f | |||
e3ebabc3b0 | |||
d0867c8d03 | |||
b46458a18f | |||
3ae29d763e | |||
125cb0aa64 | |||
783871a253 | |||
8294a9f1db | |||
ef43b21597 | |||
6fdcaa1a63 | |||
d47a2d03b4 | |||
739cf59953 | |||
2e386dfbdc | |||
ccbb2ee3ae | |||
eb78ce4c4e | |||
6084e05a6b | |||
da8a604c4c | |||
df2b2d7417 | |||
d87b0344b5 | |||
2606e8e1c8 | |||
b62de1dcb1 | |||
37057ba969 | |||
b58512bbda | |||
c90045c506 | |||
8b91a43576 | |||
602dba45ea | |||
d240073f65 | |||
69f09e0f18 | |||
cca26ae3d7 | |||
1c1894cdd3 | |||
26a0406669 | |||
a746d63177 | |||
0fc5e70c18 | |||
b74c2f89f0 | |||
7ac7fc33a7 | |||
9339903a85 | |||
33c8d0a1a7 | |||
5488571108 | |||
28fbfbbbe7 | |||
18cdab3671 | |||
311baeed5d | |||
f4d4d490af | |||
256a4e1f29 | |||
c50c6672f3 | |||
1345dd07f7 | |||
e83010b739 | |||
0dbde9e923 | |||
d4193bbd22 | |||
b92404fd0a | |||
9f01331595 | |||
82076f90a3 | |||
e165bb19a0 | |||
8168689caa | |||
c71b078c8e | |||
caa8efbf86 | |||
bcec5553c5 | |||
9ac744779c | |||
4e76bced53 | |||
60f263b629 | |||
ea57ce7514 | |||
439a2e2678 | |||
346eca5748 | |||
643b28f9d3 | |||
1938c96239 | |||
5dc8f5229f | |||
f35e5e864f | |||
47b4242613 | |||
92c4428cfd | |||
d97673c13f | |||
f61071312a | |||
234608433e | |||
36b6ae9a3c | |||
977f82c32c | |||
1f6dce61ba | |||
6f07da9f41 | |||
ac7cc4b7d1 | |||
d591b59205 | |||
c6f2102073 | |||
612266f3e5 | |||
5fbfa1481e | |||
430a87d887 | |||
0c953101ff | |||
07c144d8a6 | |||
298ab8e89e | |||
8812be1e14 | |||
4268996680 | |||
34232a170a | |||
0fa90ec9e8 | |||
f073ee91ea | |||
cf502735e9 | |||
252a30aee8 | |||
677c4c4cb6 | |||
6a457720a4 | |||
f2de250b10 | |||
6cb9bd2619 | |||
e727bd52f1 | |||
d2c57142d3 | |||
9be099466d | |||
acae5d4286 | |||
637eabccce | |||
e6cfbe42db | |||
15aec7cd87 | |||
b5d3f9b2fe | |||
e38258381f | |||
e8a1c7a53f | |||
5bf9b5345e | |||
2af71f31b4 | |||
4662b41de6 | |||
ff5a48c9f9 | |||
4fb4ac120b | |||
c7fef6cb76 | |||
6a7308d5c7 | |||
4419662fa0 | |||
b91f8630a3 | |||
5668e5f767 | |||
aa0d7ea5d0 | |||
c52c5f5056 | |||
90fc407420 | |||
9fb391fed5 | |||
fbc55da2bf | |||
1b1f5f22d4 | |||
66da43bbbc | |||
731d32afda | |||
b4688701ea | |||
af4c41f32e | |||
d0a1e15ef3 | |||
a4da0e4ee2 | |||
7d816aecf1 | |||
a63b05efbc | |||
7f212ca9cb | |||
296eccd238 | |||
f94eb0b997 | |||
a70c3b661e | |||
0f246bfba4 | |||
8141b72d5e | |||
277c5d74cc | |||
7a86b6c73e | |||
52a85d5757 | |||
a76e5dbb11 | |||
c3e5aac18e | |||
10b38ab9ff | |||
32cd6e99b2 | |||
a2540e3318 | |||
0b874e8db2 | |||
192136df20 | |||
ab8fdba484 | |||
342e6d6823 | |||
dfe7bfd127 | |||
51f55f3748 | |||
a709cd9aa1 | |||
d4dfdf68a6 | |||
a5c21ab2e8 | |||
c1690c91c2 | |||
e8195b65e4 | |||
c9cff5c845 | |||
da20d9eda4 | |||
a2bdcabc33 | |||
1e8ee99d1a | |||
a07260959d | |||
5fdea4b947 | |||
83da5d7657 | |||
1761f9f891 | |||
b3282cd0bb | |||
65ece3bc1d | |||
e2d6b92370 | |||
bcd912e854 | |||
8251781efb | |||
3b7eaf66b6 | |||
1d148e9755 | |||
d84ed1b4b3 | |||
baf80b7d7e | |||
9777b3c177 | |||
d2151500b6 | |||
e101b72a72 | |||
b847a43c61 | |||
19f5093034 | |||
585102ee20 | |||
ee7ac22f0d | |||
0b67c23d42 | |||
f1ba247844 | |||
2fa7ee0cf9 | |||
40fbb3691d | |||
d9b1435621 | |||
72ab34f210 | |||
67ca186dd1 | |||
85fa3efc06 | |||
8531ec9186 | |||
8c3f5f0831 | |||
c4beee38f6 | |||
247a1a6e6e | |||
a4396cfca0 | |||
53b72920a5 | |||
536454b079 | |||
95bb8075f5 | |||
708d2fbd61 | |||
103f09b470 | |||
c4c312c2e6 | |||
d7babeba2e | |||
9e59c74c24 | |||
d94253ff6a | |||
dc90c594c6 | |||
094c2c75f3 | |||
9ddace0566 | |||
47061a31e2 | |||
33d897bcb6 | |||
bf94d6f45e | |||
1556d1c63e | |||
c2093b128d | |||
153b82a803 | |||
587c8f4701 | |||
922c6897d1 | |||
eb6025a184 | |||
c43f9bc705 | |||
cd2847c1b9 | |||
309d6a49b6 | |||
8281b98e19 | |||
0e99cbb0ad | |||
7c7adc7198 | |||
c1ebd84bf0 | |||
26aeebb372 | |||
c7de2a524b | |||
e924504928 | |||
63908108b2 | |||
9bc5da9780 | |||
4a7d8c6fea | |||
722aacb633 | |||
ab0581e114 | |||
68808534b3 | |||
0500f27db8 | |||
cb92b30c25 | |||
67147cf435 | |||
96a2439c38 | |||
e8f97aa437 | |||
87757d4fcf | |||
33de89b69f | |||
9e86f1672b | |||
28aade3e06 | |||
35276de37e | |||
492218a3e1 | |||
a740e521d2 | |||
bdc183114a | |||
7de87d958e | |||
ffce277c0c | |||
c226b4e5cb | |||
094f4d02b8 | |||
ba615ff94e | |||
5240465f39 | |||
ef6a59bbd3 | |||
cd123f7f24 | |||
0984b23f0e | |||
d9dca20d7f | |||
d8bebcd201 | |||
f576d70b3c | |||
ae5ff890d4 | |||
24ee97d558 | |||
f949bfd46c | |||
242e96d251 | |||
66d9a6ebbc | |||
4e28f1de4e | |||
9b8a757526 | |||
a894a8c7bc | |||
962155e463 | |||
c90c981bb2 | |||
04fe83daa0 | |||
50d0ab2944 | |||
608e7dfab2 | |||
c6e3a8dbbd | |||
1884d89d3b | |||
ed95f9ab81 | |||
9f8466a186 | |||
8c869a2e3e | |||
743ad0eb5c | |||
5253b3ec13 | |||
ebf8231c9a | |||
adceaf60e1 | |||
96c63cc0b6 | |||
5f2fa6d76f | |||
bd064e8094 | |||
8f4e879ca7 | |||
a19ab91b1b | |||
4f627baf71 | |||
3914d51a7e | |||
bd6c12d686 | |||
9135c134ec | |||
59d71740b1 | |||
078d27c0f1 | |||
180f2d1fde | |||
391b155a98 | |||
47982ea21c | |||
d0e31f9457 | |||
7803ae6acb | |||
97de82bbcc | |||
bd1d49fabd | |||
928bbeaf0f | |||
343a26434d | |||
107da007b1 | |||
fb980e4542 | |||
f12ad6a56f | |||
5691086ba2 | |||
831a54e9b7 | |||
fd64f4d2a0 | |||
3cd89bed45 | |||
5b2568adf1 | |||
48a85ce8f8 | |||
936927a54f | |||
8418daa544 | |||
5c22133492 | |||
e69b9f6dcb | |||
b03093be73 | |||
bc44d5deb3 | |||
8ab86ac49d | |||
850b7466cd | |||
652cbedee5 | |||
bf96b92def | |||
ab21f4d169 | |||
64a39fdb86 | |||
2192bcccbd | |||
7237a925eb | |||
34ed6e1a08 | |||
8cbdf73eba | |||
624a964cda | |||
a14dfe74e1 | |||
f2e822810a | |||
a192111e6a | |||
4271dd6645 | |||
457ed11b49 | |||
9f8da6c225 | |||
ed9a521d6d | |||
68fafd030d | |||
f49926413a | |||
e8aec5f4f0 | |||
c51ed4bbb7 | |||
ba4ad51c26 | |||
785b84fd43 | |||
15ce66b2f5 | |||
9949c2b34e | |||
7e6d7caf4b | |||
48c64a1f72 | |||
6297e5ea93 | |||
0c315e0ff4 | |||
b7fcabea7b | |||
999141f0fd | |||
f5f6e44369 | |||
0c2183c10a | |||
cd38ecc378 | |||
1771f18437 | |||
72807965a8 | |||
611c7744a1 | |||
9baf9e569b | |||
ede3aad2ab | |||
143a75ccde | |||
62218c1497 | |||
8a238cda3d | |||
706d8c7968 | |||
cb3cc6f523 | |||
87fd8415da | |||
edcd5bf67f | |||
9528caa1d7 | |||
3f32e5973f | |||
a17e466a29 | |||
ff03c82151 | |||
152c409022 | |||
a46d4efba6 | |||
fca384e24c | |||
ec64eda2bc | |||
20adb604cc | |||
57a1ce28c4 | |||
39caf94790 | |||
ba4c89a12e | |||
b013b125bc | |||
e786010584 | |||
01397678df | |||
fae77970ac | |||
e737ed8105 | |||
b2dd01a0b0 | |||
323ff78206 | |||
8659693c76 | |||
c3a8f379e8 | |||
ad18f229c5 | |||
2feac2956a | |||
60d6195a9e | |||
c0cf506fb4 | |||
a649aa8b7e | |||
7fef64dacd | |||
91fca69aa0 | |||
3fef552978 | |||
50364ab571 | |||
a4e32c748a | |||
c48bc34a34 | |||
451ee18c4a | |||
4ee3699933 | |||
caa2555b1d | |||
09851621de | |||
05c8a29688 | |||
793d665db4 | |||
50da691d45 | |||
6f1fe0cda2 | |||
ab007e4ab8 | |||
03dd43e97d | |||
4f92417a5d | |||
3016ab79cb | |||
b2d6626363 | |||
98e2d6957a | |||
779299de15 | |||
bf5582b01f | |||
7e94d31c8b | |||
896f59267a | |||
21b0a3649d | |||
3bb6066558 | |||
64be24dd20 | |||
f8ffe53709 | |||
4d3f6c6533 | |||
6163fe166e | |||
6eff3f0fce | |||
6358cf788f | |||
6915278f65 | |||
b33713da4a | |||
83c1bd516d | |||
5d24cabc83 | |||
7127e6de54 | |||
cea8f1d381 | |||
bedcca922c | |||
faf50ea698 | |||
a323335d36 | |||
f15dda0248 | |||
8d71d56809 | |||
cf472a6b4c | |||
fd6ac61afc | |||
16a1926f94 | |||
839974bad0 | |||
4566d60e6f | |||
49a7278563 | |||
8676f8761f | |||
b9781fa7c2 | |||
08052f60da | |||
44230a4e86 | |||
90ffb8489a | |||
238f6e8a0b | |||
ef7cf3bf11 | |||
e7d5b7af67 | |||
359e55f6e4 | |||
dd29c8064f | |||
c7bd2a2a1e | |||
87fa167efa | |||
baaa6efc2b | |||
cece179bd4 | |||
56b92812fa | |||
2cbbcee351 | |||
f5508b1794 | |||
8f7d552401 | |||
bcd6ecb7fb | |||
65666fc28a | |||
b4734c280a | |||
dd61f685b8 | |||
641ce3358a | |||
4984b57aa2 | |||
87d8d87c6e | |||
283c4169ac | |||
d5f11b2442 | |||
5edc81c627 | |||
391413f7e7 | |||
c05c60a5d2 | |||
87b42e34e0 | |||
be0bec9eab | |||
cb59559835 | |||
078b67c50f | |||
e95c4739f5 | |||
32877bdc7b | |||
5e3af86c26 | |||
ec1073def8 | |||
28e530e005 | |||
9e9aba4e3a | |||
de038530ef | |||
337977e868 | |||
1c2bdbacb1 | |||
9715962356 | |||
5afbe181ce | |||
a5094f2a6a | |||
9156d1ecfd | |||
fe5ec398bf | |||
babf42f03a | |||
859f6322a0 | |||
815c5fa43c | |||
10b2466d82 | |||
f68d8f3757 | |||
9b083b62cf | |||
59614fc60d | |||
b54af6b42f | |||
7cab7e5fef | |||
4c5735cef8 | |||
58e1db6aae | |||
63ae6ba5b5 | |||
f58b4d3dd6 | |||
d3a8584212 | |||
51f1ae1e9e | |||
4271126bae | |||
049f5015c1 | |||
6ab671c88b | |||
d73ac90acf | |||
adf6e2f7b1 | |||
fb0803cf4c | |||
806834a6e9 | |||
8415634016 | |||
319f687ced | |||
8127e8f8e8 | |||
dd46cc64a4 | |||
2d5862a94d | |||
3d45a81006 | |||
51a0996087 | |||
80ac2ec6fc | |||
5d61b5e813 | |||
b769636435 |
.gitignore (vendored, new file, 5 lines)
@@ -0,0 +1,5 @@
.vagrant
*.retry
inventory/vagrant_ansible_inventory
temp
.idea
.gitmodules (vendored, deleted file, 49 lines)
@@ -1,49 +0,0 @@
[submodule "roles/apps/k8s-kube-ui"]
	path = roles/apps/k8s-kube-ui
	url = https://github.com/ansibl8s/k8s-kube-ui.git
	branch = v1.0
[submodule "roles/apps/k8s-kubedns"]
	path = roles/apps/k8s-kubedns
	url = https://github.com/ansibl8s/k8s-kubedns.git
	branch = v1.0
[submodule "roles/apps/k8s-common"]
	path = roles/apps/k8s-common
	url = https://github.com/ansibl8s/k8s-common.git
	branch = v1.0
[submodule "roles/apps/k8s-redis"]
	path = roles/apps/k8s-redis
	url = https://github.com/ansibl8s/k8s-redis.git
	branch = v1.0
[submodule "roles/apps/k8s-elasticsearch"]
	path = roles/apps/k8s-elasticsearch
	url = https://github.com/ansibl8s/k8s-elasticsearch.git
[submodule "roles/apps/k8s-fabric8"]
	path = roles/apps/k8s-fabric8
	url = https://github.com/ansibl8s/k8s-fabric8.git
	branch = v1.0
[submodule "roles/apps/k8s-memcached"]
	path = roles/apps/k8s-memcached
	url = https://github.com/ansibl8s/k8s-memcached.git
	branch = v1.0
[submodule "roles/apps/k8s-postgres"]
	path = roles/apps/k8s-postgres
	url = https://github.com/ansibl8s/k8s-postgres.git
	branch = v1.0
[submodule "roles/apps/k8s-kubedash"]
	path = roles/apps/k8s-kubedash
	url = https://github.com/ansibl8s/k8s-kubedash.git
[submodule "roles/apps/k8s-heapster"]
	path = roles/apps/k8s-heapster
	url = https://github.com/ansibl8s/k8s-heapster.git
[submodule "roles/apps/k8s-influxdb"]
	path = roles/apps/k8s-influxdb
	url = https://github.com/ansibl8s/k8s-influxdb.git
[submodule "roles/apps/k8s-kube-logstash"]
	path = roles/apps/k8s-kube-logstash
	url = https://github.com/ansibl8s/k8s-kube-logstash.git
[submodule "roles/apps/k8s-etcd"]
	path = roles/apps/k8s-etcd
	url = https://github.com/ansibl8s/k8s-etcd.git
[submodule "roles/apps/k8s-rabbitmq"]
	path = roles/apps/k8s-rabbitmq
	url = https://github.com/ansibl8s/k8s-rabbitmq.git
.travis.yml (162 lines changed)
@@ -1,41 +1,149 @@
sudo: required
dist: trusty
language: python
python: "2.7"
sudo: false

addons:
  hosts:
    - node1
git:
  depth: 5

env:
  - SITE=cluster.yml
  global:
    GCE_USER=travis
    SSH_USER=$GCE_USER
    TEST_ID=$TRAVIS_JOB_NUMBER
    CONTAINER_ENGINE=docker
    PRIVATE_KEY=$GCE_PRIVATE_KEY
    ANSIBLE_KEEP_REMOTE_FILES=1
  matrix:
    # Debian Jessie
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=debian-8-kubespray
      CLOUD_REGION=europe-west1-b
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=debian-8-kubespray
      CLOUD_REGION=us-central1-c
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=debian-8-kubespray
      CLOUD_REGION=us-east1-d

    # Centos 7
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=centos-7-sudo
      CLOUD_REGION=asia-east1-c

    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=centos-7-sudo
      CLOUD_REGION=europe-west1-b

    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=centos-7-sudo
      CLOUD_REGION=us-central1-c

    # Redhat 7
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=rhel-7-sudo
      CLOUD_REGION=us-east1-d

    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=rhel-7-sudo
      CLOUD_REGION=asia-east1-c

    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=rhel-7-sudo
      CLOUD_REGION=europe-west1-b

    # Ubuntu 16.04
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=ubuntu-1604-xenial
      CLOUD_REGION=us-central1-c
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=ubuntu-1604-xenial
      CLOUD_REGION=us-east1-d
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=ubuntu-1604-xenial
      CLOUD_REGION=asia-east1-c

    # Ubuntu 15.10
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=ubuntu-1510-wily
      CLOUD_REGION=europe-west1-b
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=ubuntu-1510-wily
      CLOUD_REGION=us-central1-a
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=ubuntu-1510-wily
      CLOUD_REGION=us-east1-d


before_install:
  - sudo apt-get update -qq

install:
  # Install Ansible.
  - sudo -H pip install ansible
  - sudo -H pip install netaddr
  - pip install --user boto -U
  - pip install --user ansible
  - pip install --user netaddr
  - pip install --user apache-libcloud

cache:
  directories:
    - $HOME/releases
  - directories:
    - $HOME/.cache/pip
    - $HOME/.local

before_script:
  - export PATH=$PATH:/usr/local/bin
  - echo "RUN $TRAVIS_JOB_NUMBER $KUBE_NETWORK_PLUGIN $CONTAINER_ENGINE "
  - mkdir -p $HOME/.ssh
  - echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
  - echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
  - chmod 400 $HOME/.ssh/id_rsa
  - chmod 755 $HOME/.local/bin/ansible-playbook
  - $HOME/.local/bin/ansible-playbook --version
  - cp tests/ansible.cfg .
  # - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"
  ## Configure ansible deployment logs to be collected as an artifact. Enable when GCS configured, see https://docs.travis-ci.com/user/deployment/gcs
  # - $HOME/.local/bin/ansible-playbook -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scritps/configure-logs.yaml

script:
  # Check the role/playbook's syntax.
  - "sudo -H ansible-playbook -i inventory/local-tests.cfg $SITE --syntax-check"

  # Run the role/playbook with ansible-playbook.
  - "sudo -H ansible-playbook -i inventory/local-tests.cfg $SITE --connection=local"

  # Run the role/playbook again, checking to make sure it's idempotent.
  - >
    sudo -H ansible-playbook -i inventory/local-tests.cfg $SITE --connection=local
    | tee /dev/stderr | grep -q 'changed=0.*failed=0'
    && (echo 'Idempotence test: pass' && exit 0)
    || (echo 'Idempotence test: fail' && exit 1)
    $HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts -c local $LOG_LEVEL
    -e test_id=${TEST_ID}
    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
    -e gce_project_id=${GCE_PROJECT_ID}
    -e gce_service_account_email=${GCE_ACCOUNT}
    -e gce_pem_file=${HOME}/.ssh/gce
    -e cloud_image=${CLOUD_IMAGE}
    -e inventory_path=${PWD}/inventory/inventory.ini
    -e cloud_region=${CLOUD_REGION}

  # Create cluster
  - "$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} cluster.yml"
  # Tests Cases
  ## Test Master API
  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/testcases/010_check-apiserver.yml $LOG_LEVEL
  ## Create a POD
  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/020_check-create-pod.yml $LOG_LEVEL
  ## Ping between 2 pods
  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
  ## Collect env info, enable it once GCS configured, see https://docs.travis-ci.com/user/deployment/gcs
  # - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scritps/collect-info.yaml

after_script:
  - >
    $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
    -e test_id=${TEST_ID}
    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
    -e gce_project_id=${GCE_PROJECT_ID}
    -e gce_service_account_email=${GCE_ACCOUNT}
    -e gce_pem_file=${HOME}/.ssh/gce
    -e cloud_image=${CLOUD_IMAGE}
    -e inventory_path=${PWD}/inventory/inventory.ini
    -e cloud_region=${CLOUD_REGION}
CONTRIBUTING.md (new file, 10 lines)
@@ -0,0 +1,10 @@
# Contributing guidelines

## How to become a contributor and submit your own code

### Contributing A Patch

1. Submit an issue describing your proposed change to the repo in question.
2. The [repo owners](OWNERS) will respond to your issue promptly.
3. Fork the desired repo, develop and test your code changes.
4. Submit a pull request.
LICENSE (new file, 201 lines)
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright 2016 Kubespray

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
OWNERS (new file, 6 lines)
@@ -0,0 +1,6 @@
# See the OWNERS file documentation:
# https://github.com/kubernetes/kubernetes/blob/master/docs/devel/owners.md

owners:
  - Smana
  - ant31
README.md (312 lines changed)
@@ -1,279 +1,87 @@
[](https://travis-ci.org/ansibl8s/setup-kubernetes)
kubernetes-ansible
========

Install and configure a Multi-Master/HA kubernetes cluster including network plugin.
## Deploy a production ready kubernetes cluster

### Requirements
Tested on **Debian Wheezy/Jessie** and **Ubuntu** (14.10, 15.04, 15.10).
Should work on **RedHat/Fedora/Centos** platforms (to be tested)
* The target servers must have access to the Internet in order to pull docker images.
* The firewalls are not managed, you'll need to implement your own rules the way you used to.
* Ansible v1.9.x and python-netaddr
If you have questions, you can [invite yourself](https://slack.kubespray.io/) to **chat** with us on Slack! [](https://kubespray.slack.com)

### Components
* [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.1.3
* [etcd](https://github.com/coreos/etcd/releases) v2.2.2
* [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.13.0
* [flanneld](https://github.com/coreos/flannel/releases) v0.5.5
* [docker](https://www.docker.com/) v1.9.1
- Can be deployed on **AWS, GCE, OpenStack or Baremetal**
- **Highly available** cluster
- **Composable** (Choice of the network plugin for instance)
- Support most popular **Linux distributions**
- **Continuous integration tests**

Quickstart
-------------------------
The following steps will quickly set up a kubernetes cluster with the default configuration.
These defaults are good for test purposes.

Edit the inventory according to the number of servers:
```
[downloader]
localhost ansible_connection=local ansible_python_interpreter=python2
To deploy the cluster you can use:

[kube-master]
10.115.99.31
[**kargo-cli**](https://github.com/kubespray/kargo-cli) (deprecated, a newer [go](https://github.com/Smana/kargo-cli/tree/kargogo) version soon)<br>
**Ansible** usual commands <br>
**vagrant** by simply running `vagrant up` (for test purposes) <br>

[etcd]
10.115.99.31
10.115.99.32
10.115.99.33

[kube-node]
10.115.99.32
10.115.99.33
* [Requirements](#requirements)
* [Getting started](docs/getting-started.md)
* [Vagrant install](docs/vagrant.md)
* [CoreOS bootstrap](docs/coreos.md)
* [Ansible variables](docs/ansible.md)
* [Cloud providers](docs/cloud.md)
* [OpenStack](docs/openstack.md)
* [AWS](docs/aws.md)
* [Network plugins](#network-plugins)
* [Roadmap](docs/roadmap.md)

[k8s-cluster:children]
kube-node
kube-master
```
Supported Linux distributions
===============

Run the playbook
```
ansible-playbook -i inventory/inventory.cfg cluster.yml -u root
```
* **CoreOS**
* **Debian** Wheezy, Jessie
* **Ubuntu** 14.10, 15.04, 15.10, 16.04
* **Fedora** 23
* **CentOS/RHEL** 7

You can jump directly to "*Available apps, installation procedure*"
Versions
--------------

[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.4.0 <br>
[etcd](https://github.com/coreos/etcd/releases) v3.0.1 <br>
[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.20.0 <br>
[flanneld](https://github.com/coreos/flannel/releases) v0.5.5 <br>
[weave](http://weave.works/) v1.6.1 <br>
[docker](https://www.docker.com/) v1.10.3 <br>

Ansible
-------------------------
### Variables
The main variables to change are located in the file ```inventory/group_vars/all.yml```.

### Inventory
Below is an example of an inventory.
Note: The bgp vars local_as and peers are not mandatory if the var **'peer_with_router'** is set to false.
By default this variable is set to false and therefore all the nodes are configured in **'node-mesh'** mode.
In node-mesh mode each node peers with all the other nodes in order to exchange routes.
Requirements
--------------

```
* The target servers must have **access to the Internet** in order to pull docker images.
* The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
In order to avoid any issue during deployment you should disable your firewall.
* **Copy your ssh keys** to all the servers that are part of your inventory.
* **Ansible v2.x and python-netaddr**

[downloader]
localhost ansible_connection=local ansible_python_interpreter=python2

[kube-master]
node1 ansible_ssh_host=10.99.0.26
node2 ansible_ssh_host=10.99.0.27
## Network plugins
You can choose between 3 network plugins. (default: `flannel` with vxlan backend)

[etcd]
node1 ansible_ssh_host=10.99.0.26
node2 ansible_ssh_host=10.99.0.27
node3 ansible_ssh_host=10.99.0.4
* [**flannel**](docs/flannel.md): gre/vxlan (layer 2) networking.

[kube-node]
node2 ansible_ssh_host=10.99.0.27
node3 ansible_ssh_host=10.99.0.4
node4 ansible_ssh_host=10.99.0.5
node5 ansible_ssh_host=10.99.0.36
node6 ansible_ssh_host=10.99.0.37
* [**calico**](docs/calico.md): bgp (layer 3) networking.

[paris]
node1 ansible_ssh_host=10.99.0.26
node3 ansible_ssh_host=10.99.0.4 local_as=xxxxxxxx
node4 ansible_ssh_host=10.99.0.5 local_as=xxxxxxxx
* **weave**: Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. <br>
(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html))

[new-york]
node2 ansible_ssh_host=10.99.0.27
node5 ansible_ssh_host=10.99.0.36 local_as=xxxxxxxx
node6 ansible_ssh_host=10.99.0.37 local_as=xxxxxxxx
The choice is defined with the variable `kube_network_plugin`

[k8s-cluster:children]
kube-node
kube-master
```

### Playbook
```
---
- hosts: downloader
  sudo: no
  roles:
    - { role: download, tags: download }
## CI Tests

- hosts: k8s-cluster
  roles:
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: docker, tags: docker }
    - { role: kubernetes/node, tags: node }
    - { role: etcd, tags: etcd }
    - { role: dnsmasq, tags: dnsmasq }
    - { role: network_plugin, tags: ['calico', 'flannel', 'network'] }
[](https://travis-ci.org/kubespray/kargo) </br>

- hosts: kube-master
  roles:
    - { role: kubernetes/master, tags: master }
### Google Compute Engine

```
 | Calico | Flannel | Weave |
---|---|---|---|
Ubuntu Xenial |[](https://ci.kubespray.io/job/kargo-gce-xenial-calico/)|[](https://ci.kubespray.io/job/kargo-gce-xenial-flannel/)|[](https://ci.kubespray.io/job/kargo-gce-xenial-weave)|
CentOS 7 |[](https://ci.kubespray.io/job/kargo-gce-centos7-calico/)|[](https://ci.kubespray.io/job/kargo-gce-centos7-flannel/)|[](https://ci.kubespray.io/job/kargo-gce-centos7-weave/)|
CoreOS (stable) |[](https://ci.kubespray.io/job/kargo-gce-coreos-calico/)|[](https://ci.kubespray.io/job/kargo-gce-coreos-flannel/)|[](https://ci.kubespray.io/job/kargo-gce-coreos-weave/)|

### Run
It is possible to define variables for different environments.
For instance, in order to deploy the cluster on the 'dev' environment run the following command.
```
ansible-playbook -i inventory/dev/inventory.cfg cluster.yml -u root
```

Kubernetes
-------------------------
### Multi master notes
* You can choose where to install the master components. If you want your master node to act both as master (api, scheduler, controller) and node (e.g. accept workloads, create pods ...),
the server address has to be present in both groups 'kube-master' and 'kube-node'.

* Almost all kubernetes components run in pods, except *kubelet*. These pods are managed by the kubelet, which ensures they're always running.

* For safety reasons, you should have at least two master nodes and 3 etcd servers.

* Kube-proxy doesn't support multiple apiservers on startup ([Issue 18174](https://github.com/kubernetes/kubernetes/issues/18174)). An external loadbalancer needs to be configured.
In order to do so, two variables have to be used: '**loadbalancer_apiserver**' and '**apiserver_loadbalancer_domain_name**'; a sketch is shown below.

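As an illustration only, a minimal sketch of how these two variables might be set in ```inventory/group_vars/all.yml```; the address, port, and domain name here are placeholders, not defaults shipped with the project:
```
## Hypothetical example: an external loadbalancer reachable at 10.99.0.44
apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
loadbalancer_apiserver:
  address: 10.99.0.44   # address of the external LB (placeholder)
  port: 8383            # port the LB listens on (placeholder)
```
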
### Network Overlay
You can choose between 2 network plugins. Only one must be chosen.

* **flannel**: gre/vxlan (layer 2) networking. ([official docs](https://github.com/coreos/flannel))

* **calico**: bgp (layer 3) networking. ([official docs](http://docs.projectcalico.org/en/0.13/))

The choice is defined with the variable '**kube_network_plugin**', as in the sketch below.

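A minimal sketch of that setting, e.g. in ```inventory/group_vars/all.yml``` (the value must be one of the plugins listed above; `flannel` is the default):
```
# Chosen network plugin: 'flannel' or 'calico'
kube_network_plugin: flannel
```
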
### Expose a service
There are several loadbalancing solutions.
The ones I found suitable for kubernetes are [Vulcand](http://vulcand.io/) and [Haproxy](http://www.haproxy.org/)

My cluster is working with haproxy, and kubernetes services are configured with the loadbalancing type '**nodePort**',
e.g. each node opens the same tcp port and forwards the traffic to the target pod wherever it is located.

Then Haproxy can be configured to query the kubernetes API in order to loadbalance on the proper tcp port on the nodes.

Please refer to the proper kubernetes documentation on [Services](https://github.com/kubernetes/kubernetes/blob/release-1.0/docs/user-guide/services.md); a sketch of such a service follows.

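For illustration, a minimal Service manifest of type `NodePort` could look like this; the name, label, and port numbers are placeholders, not values shipped with this repository:
```
apiVersion: v1
kind: Service
metadata:
  name: myapp          # placeholder name
spec:
  type: NodePort
  selector:
    app: myapp         # matches the pods to load-balance
  ports:
    - port: 80         # cluster-internal service port
      targetPort: 8080 # container port on the pod
      nodePort: 30080  # same tcp port opened on every node
```
With this in place, an external haproxy can forward to port 30080 on any node and reach the pod wherever it runs.
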
### Check cluster status

#### Kubernetes components

* Check the status of the processes
```
systemctl status kubelet
```

* Check the logs
```
journalctl -ae -u kubelet
```

* Check the NAT rules
```
iptables -nLv -t nat
```

For the master nodes you'll have to check the docker logs of the apiserver
```
docker logs [apiserver docker id]
```

### Available apps, installation procedure

There are two ways of installing new apps.

#### Ansible galaxy

Additional apps can be installed with ```ansible-galaxy```.

You'll need to edit the file '*requirements.yml*' in order to choose the needed apps.
The list of available apps is available [there](https://github.com/ansibl8s)

For instance it is **strongly recommended** to install a dns server which resolves kubernetes service names.
In order to use this role you'll need the following entries in the file '*requirements.yml*'.
Please refer to the [k8s-kubedns readme](https://github.com/ansibl8s/k8s-kubedns) for additional info.
```
- src: https://github.com/ansibl8s/k8s-common.git
  path: roles/apps
  # version: v1.0

- src: https://github.com/ansibl8s/k8s-kubedns.git
  path: roles/apps
  # version: v1.0
```
**Note**: the role common is required by all the apps and provides the tasks and libraries needed.

Then empty the apps directory
```
rm -rf roles/apps/*
```

and download the roles with ansible-galaxy
```
ansible-galaxy install -r requirements.yml
```

#### Git submodules
Alternatively the roles can be installed as git submodules.
That way is easier if you want to make some changes and commit them.

You can list the available submodules with the following command:
```
grep path .gitmodules | sed 's/.*= //'
```

In order to install the dns addon you'll need to follow these steps
```
git submodule init roles/apps/k8s-common roles/apps/k8s-kubedns
git submodule update
```

Finally update the playbook ```apps.yml``` with the chosen roles, and run it
```
...
- hosts: kube-master
  roles:
    - { role: apps/k8s-kubedns, tags: ['kubedns', 'apps'] }
...
```

```
ansible-playbook -i inventory/inventory.cfg apps.yml -u root
```

#### Calico networking
Check if the calico-node container is running
```
docker ps | grep calico
```

The **calicoctl** command lets you check the status of the network workloads.
* Check the status of Calico nodes
```
calicoctl status
```

* Show the configured network subnet for containers
```
calicoctl pool show
```

* Show the workloads (ip addresses of containers and where they are located)
```
calicoctl endpoint show --detail
```
#### Flannel networking

Congrats! Now you can walk through [kubernetes basics](http://kubernetes.io/v1.1/basicstutorials.html)
CI tests sponsored by Google (GCE), and [teuto.net](https://teuto.net/) for OpenStack.

RELEASE.md (new file, 9 lines)
@@ -0,0 +1,9 @@
# Release Process

The Kargo Project is released on an as-needed basis. The process is as follows:

1. An issue proposes a new release with a changelog since the last release
2. At least one of the [OWNERS](OWNERS) must LGTM this release
3. An OWNER runs `git tag -s $VERSION`, inserts the changelog, and pushes the tag with `git push $VERSION`
4. The release issue is closed
5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kargo $VERSION is released`
Vagrantfile (vendored, new file, 115 lines)
@@ -0,0 +1,115 @@
# -*- mode: ruby -*-
# # vi: set ft=ruby :

require 'fileutils'

Vagrant.require_version ">= 1.8.0"

CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")

# Defaults for config options defined in CONFIG
$num_instances = 3
$instance_name_prefix = "k8s"
$vm_gui = false
$vm_memory = 1536
$vm_cpus = 1
$shared_folders = {}
$forwarded_ports = {}
$subnet = "172.17.8"
$box = "bento/ubuntu-14.04"

host_vars = {}

if File.exist?(CONFIG)
  require CONFIG
end

# if $inventory is not set, try to use example
$inventory = File.join(File.dirname(__FILE__), "inventory") if ! $inventory

# if $inventory has a hosts file use it, otherwise copy over vars etc
# to where vagrant expects dynamic inventory to be.
if ! File.exist?(File.join(File.dirname($inventory), "hosts"))
  $vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant",
                               "provisioners", "ansible")
  FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
  if ! File.exist?(File.join($vagrant_ansible, "inventory"))
    FileUtils.ln_s($inventory, $vagrant_ansible)
  end
end

Vagrant.configure("2") do |config|
  # always use Vagrants insecure key
  config.ssh.insert_key = false
  config.vm.box = $box

  # plugin conflict
  if Vagrant.has_plugin?("vagrant-vbguest") then
    config.vbguest.auto_update = false
  end

  (1..$num_instances).each do |i|
    config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config|
      config.vm.hostname = vm_name

      if $expose_docker_tcp
        config.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
      end

      $forwarded_ports.each do |guest, host|
        config.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
      end

      ["vmware_fusion", "vmware_workstation"].each do |vmware|
        config.vm.provider vmware do |v|
          v.vmx['memsize'] = $vm_memory
          v.vmx['numvcpus'] = $vm_cpus
        end
      end

      config.vm.provider :virtualbox do |vb|
        vb.gui = $vm_gui
        vb.memory = $vm_memory
        vb.cpus = $vm_cpus
      end

      ip = "#{$subnet}.#{i+100}"
      host_vars[vm_name] = {
        "ip" => ip,
        #"access_ip" => ip,
        "flannel_interface" => ip,
        "flannel_backend_type" => "host-gw",
        "local_release_dir" => "/vagrant/temp",
        "download_run_once" => "True"
      }
      config.vm.network :private_network, ip: ip

      # Only execute once the Ansible provisioner,
      # when all the machines are up and ready.
      if i == $num_instances
        config.vm.provision "ansible" do |ansible|
          ansible.playbook = "cluster.yml"
          if File.exist?(File.join(File.dirname($inventory), "hosts"))
            ansible.inventory_path = $inventory
          end
          ansible.sudo = true
          ansible.limit = "all"
          ansible.host_key_checking = false
          ansible.raw_arguments = ["--forks=#{$num_instances}"]
          ansible.host_vars = host_vars
          #ansible.tags = ['download']
          ansible.groups = {
            # The first three nodes should be etcd servers
            "etcd" => ["#{$instance_name_prefix}-0[1:3]"],
            # The first two nodes should be masters
            "kube-master" => ["#{$instance_name_prefix}-0[1:2]"],
            # all nodes should be kube nodes
            "kube-node" => ["#{$instance_name_prefix}-0[1:#{$num_instances}]"],
            "k8s-cluster:children" => ["kube-master", "kube-node"],
          }
        end
      end

    end
  end
end
ansible.cfg (new file, 4 lines)
@@ -0,0 +1,4 @@
[ssh_connection]
pipelining=True
[defaults]
host_key_checking=False
apps.yml (deleted file, 29 lines)
@@ -1,29 +0,0 @@
---
- hosts: kube-master
  roles:
    # System
    - { role: apps/k8s-kubedns, tags: ['kubedns', 'kube-system'] }

    # Databases
    - { role: apps/k8s-postgres, tags: 'postgres' }
    - { role: apps/k8s-elasticsearch, tags: 'elasticsearch' }
    - { role: apps/k8s-memcached, tags: 'memcached' }
    - { role: apps/k8s-redis, tags: 'redis' }

    # Msg Broker
    - { role: apps/k8s-rabbitmq, tags: 'rabbitmq' }

    # Monitoring
    - { role: apps/k8s-influxdb, tags: ['influxdb', 'kube-system']}
    - { role: apps/k8s-heapster, tags: ['heapster', 'kube-system']}
    - { role: apps/k8s-kubedash, tags: ['kubedash', 'kube-system']}

    # logging
    - { role: apps/k8s-kube-logstash, tags: 'kube-logstash'}

    # Console
    - { role: apps/k8s-fabric8, tags: 'fabric8' }
    - { role: apps/k8s-kube-ui, tags: ['kube-ui', 'kube-system']}

    # ETCD
    - { role: apps/k8s-etcd, tags: 'etcd'}
cluster.yml (32 lines changed)
@@ -1,18 +1,36 @@
---
- hosts: downloader
  sudo: no
- hosts: all
  gather_facts: false
  roles:
    - { role: download, tags: download }
    - bootstrap-os
  tags:
    - bootstrap-os


- hosts: all
  gather_facts: true

- hosts: etcd:!k8s-cluster
  roles:
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: etcd, tags: etcd }

- hosts: k8s-cluster
  roles:
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: docker, tags: docker }
    - { role: kubernetes/node, tags: node }
    - { role: etcd, tags: etcd }
    - { role: dnsmasq, tags: dnsmasq }
    - { role: network_plugin, tags: ['calico', 'flannel', 'network'] }
    - { role: kubernetes/node, tags: node }
    - { role: network_plugin, tags: network }

- hosts: kube-master
  roles:
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: kubernetes/master, tags: master }

- hosts: k8s-cluster
  roles:
    - { role: dnsmasq, tags: dnsmasq }

- hosts: kube-master[0]
  roles:
    - { role: kubernetes-apps, tags: apps }
code-of-conduct.md (new file, 59 lines)
@@ -0,0 +1,59 @@
## Kubernetes Community Code of Conduct

### Contributor Code of Conduct

As contributors and maintainers of this project, and in the interest of fostering
an open and welcoming community, we pledge to respect all people who contribute
through reporting issues, posting feature requests, updating documentation,
submitting pull requests or patches, and other activities.

We are committed to making participation in this project a harassment-free experience for
everyone, regardless of level of experience, gender, gender identity and expression,
sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
religion, or nationality.

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing others' private information, such as physical or electronic addresses,
without explicit permission
* Other unethical or unprofessional conduct.

Project maintainers have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are not
aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers
commit themselves to fairly and consistently applying these principles to every aspect
of managing this project. Project maintainers who do not follow or enforce the Code of
Conduct may be permanently removed from the project team.

This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
opening an issue or contacting one or more of the project maintainers.

This Code of Conduct is adapted from the Contributor Covenant
(http://contributor-covenant.org), version 1.2.0, available at
http://contributor-covenant.org/version/1/2/0/

### Kubernetes Events Code of Conduct

Kubernetes events are working conferences intended for professional networking and collaboration in the
Kubernetes community. Attendees are expected to behave according to professional standards and in accordance
with their employer's policies on appropriate workplace behavior.

While at Kubernetes events or related social networking opportunities, attendees should not engage in
discriminatory or offensive speech or actions regarding gender, sexuality, race, or religion. Speakers should
be especially aware of these concerns.

The Kubernetes team does not condone any statements by speakers contrary to these standards. The Kubernetes
team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to
be engaging in discriminatory or offensive speech or actions.

Please bring any concerns to the immediate attention of Kubernetes event staff
contrib/terraform/aws/.gitignore (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
*.tfstate*
inventory
contrib/terraform/aws/00-create-infrastructure.tf (new executable file, 261 lines)
@ -0,0 +1,261 @@
|
||||
variable "deploymentName" {
|
||||
type = "string"
|
||||
description = "The desired name of your deployment."
|
||||
}
|
||||
|
||||
variable "numControllers"{
|
||||
type = "string"
|
||||
description = "Desired # of controllers."
|
||||
}
|
||||
|
||||
variable "numEtcd" {
|
||||
type = "string"
|
||||
description = "Desired # of etcd nodes. Should be an odd number."
|
||||
}
|
||||
|
||||
variable "numNodes" {
|
||||
type = "string"
|
||||
description = "Desired # of nodes."
|
||||
}
|
||||
|
||||
variable "volSizeController" {
|
||||
type = "string"
|
||||
description = "Volume size for the controllers (GB)."
|
||||
}
|
||||
|
||||
variable "volSizeEtcd" {
|
||||
type = "string"
|
||||
description = "Volume size for etcd (GB)."
|
||||
}
|
||||
|
||||
variable "volSizeNodes" {
|
||||
type = "string"
|
||||
description = "Volume size for nodes (GB)."
|
||||
}
|
||||
|
||||
variable "subnet" {
|
||||
type = "string"
|
||||
description = "The subnet in which to put your cluster."
|
||||
}
|
||||
|
||||
variable "securityGroups" {
|
||||
type = "string"
|
||||
description = "The sec. groups in which to put your cluster."
|
||||
}
|
||||
|
||||
variable "ami"{
|
||||
type = "string"
|
||||
description = "AMI to use for all VMs in cluster."
|
||||
}
|
||||
|
||||
variable "SSHKey" {
|
||||
type = "string"
|
||||
description = "SSH key to use for VMs."
|
||||
}
|
||||
|
||||
variable "master_instance_type" {
|
||||
type = "string"
|
||||
description = "Size of VM to use for masters."
|
||||
}
|
||||
|
||||
variable "etcd_instance_type" {
|
||||
type = "string"
|
||||
description = "Size of VM to use for etcd."
|
||||
}
|
||||
|
||||
variable "node_instance_type" {
|
||||
type = "string"
|
||||
description = "Size of VM to use for nodes."
|
||||
}
|
||||
|
||||
variable "terminate_protect" {
|
||||
type = "string"
|
||||
default = "false"
|
||||
}
|
||||
|
||||
variable "awsRegion" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
provider "aws" {
|
||||
region = "${var.awsRegion}"
|
||||
}
|
||||
|
||||
variable "iam_prefix" {
|
||||
type = "string"
|
||||
description = "Prefix name for IAM profiles"
|
||||
}
|
||||
|
||||
resource "aws_iam_instance_profile" "kubernetes_master_profile" {
|
||||
name = "${var.iam_prefix}_kubernetes_master_profile"
|
||||
roles = ["${aws_iam_role.kubernetes_master_role.name}"]
|
||||
}
|
||||
|
||||
resource "aws_iam_role" "kubernetes_master_role" {
|
||||
name = "${var.iam_prefix}_kubernetes_master_role"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Principal": { "Service": "ec2.amazonaws.com"},
|
||||
"Action": "sts:AssumeRole"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy" "kubernetes_master_policy" {
|
||||
name = "${var.iam_prefix}_kubernetes_master_policy"
|
||||
role = "${aws_iam_role.kubernetes_master_role.id}"
|
||||
policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["ec2:*"],
|
||||
"Resource": ["*"]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["elasticloadbalancing:*"],
|
||||
"Resource": ["*"]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "s3:*",
|
||||
"Resource": "*"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_instance_profile" "kubernetes_node_profile" {
|
||||
name = "${var.iam_prefix}_kubernetes_node_profile"
|
||||
roles = ["${aws_iam_role.kubernetes_node_role.name}"]
|
||||
}
|
||||
|
||||
resource "aws_iam_role" "kubernetes_node_role" {
|
||||
name = "${var.iam_prefix}_kubernetes_node_role"
|
||||
assume_role_policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Principal": { "Service": "ec2.amazonaws.com"},
|
||||
"Action": "sts:AssumeRole"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy" "kubernetes_node_policy" {
|
||||
name = "${var.iam_prefix}_kubernetes_node_policy"
|
||||
role = "${aws_iam_role.kubernetes_node_role.id}"
|
||||
policy = <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "s3:*",
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "ec2:Describe*",
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "ec2:AttachVolume",
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "ec2:DetachVolume",
|
||||
"Resource": "*"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
resource "aws_instance" "master" {
|
||||
count = "${var.numControllers}"
|
||||
ami = "${var.ami}"
|
||||
instance_type = "${var.master_instance_type}"
|
||||
subnet_id = "${var.subnet}"
|
||||
vpc_security_group_ids = ["${var.securityGroups}"]
|
||||
key_name = "${var.SSHKey}"
|
||||
disable_api_termination = "${var.terminate_protect}"
|
||||
iam_instance_profile = "${aws_iam_instance_profile.kubernetes_master_profile.id}"
|
||||
root_block_device {
|
||||
volume_size = "${var.volSizeController}"
|
||||
}
|
||||
tags {
|
||||
Name = "${var.deploymentName}-master-${count.index + 1}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_instance" "etcd" {
|
||||
count = "${var.numEtcd}"
|
||||
ami = "${var.ami}"
|
||||
instance_type = "${var.etcd_instance_type}"
|
||||
subnet_id = "${var.subnet}"
|
||||
vpc_security_group_ids = ["${var.securityGroups}"]
|
||||
key_name = "${var.SSHKey}"
|
||||
disable_api_termination = "${var.terminate_protect}"
|
||||
root_block_device {
|
||||
volume_size = "${var.volSizeEtcd}"
|
||||
}
|
||||
tags {
|
||||
Name = "${var.deploymentName}-etcd-${count.index + 1}"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
resource "aws_instance" "minion" {
|
||||
count = "${var.numNodes}"
|
||||
ami = "${var.ami}"
|
||||
instance_type = "${var.node_instance_type}"
|
||||
subnet_id = "${var.subnet}"
|
||||
vpc_security_group_ids = ["${var.securityGroups}"]
|
||||
key_name = "${var.SSHKey}"
|
||||
disable_api_termination = "${var.terminate_protect}"
|
||||
iam_instance_profile = "${aws_iam_instance_profile.kubernetes_node_profile.id}"
|
||||
root_block_device {
|
||||
volume_size = "${var.volSizeNodes}"
|
||||
}
|
||||
tags {
|
||||
Name = "${var.deploymentName}-minion-${count.index + 1}"
|
||||
}
|
||||
}
|
||||
|
||||
output "kubernetes_master_profile" {
|
||||
value = "${aws_iam_instance_profile.kubernetes_master_profile.id}"
|
||||
}
|
||||
|
||||
output "kubernetes_node_profile" {
|
||||
value = "${aws_iam_instance_profile.kubernetes_node_profile.id}"
|
||||
}
|
||||
|
||||
output "master-ip" {
|
||||
value = "${join(", ", aws_instance.master.*.private_ip)}"
|
||||
}
|
||||
|
||||
output "etcd-ip" {
|
||||
value = "${join(", ", aws_instance.etcd.*.private_ip)}"
|
||||
}
|
||||
|
||||
output "minion-ip" {
|
||||
value = "${join(", ", aws_instance.minion.*.private_ip)}"
|
||||
}
|
||||
|
||||
|
37 contrib/terraform/aws/01-create-inventory.tf Executable file
@@ -0,0 +1,37 @@
variable "SSHUser" {
|
||||
type = "string"
|
||||
description = "SSH User for VMs."
|
||||
}
|
||||
|
||||
resource "null_resource" "ansible-provision" {
|
||||
|
||||
depends_on = ["aws_instance.master","aws_instance.etcd","aws_instance.minion"]
|
||||
|
||||
##Create Master Inventory
|
||||
provisioner "local-exec" {
|
||||
command = "echo \"[kube-master]\" > inventory"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.master.*.private_ip, var.SSHUser))}\" >> inventory"
|
||||
}
|
||||
|
||||
##Create ETCD Inventory
|
||||
provisioner "local-exec" {
|
||||
command = "echo \"\n[etcd]\" >> inventory"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.etcd.*.private_ip, var.SSHUser))}\" >> inventory"
|
||||
}
|
||||
|
||||
##Create Nodes Inventory
|
||||
provisioner "local-exec" {
|
||||
command = "echo \"\n[kube-node]\" >> inventory"
|
||||
}
|
||||
provisioner "local-exec" {
|
||||
command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.minion.*.private_ip, var.SSHUser))}\" >> inventory"
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "echo \"\n[k8s-cluster:children]\nkube-node\nkube-master\netcd\" >> inventory"
|
||||
}
|
||||
}
|
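For reference, the provisioners above write a plain Ansible INI inventory. With the sample terraform.tfvars later in this change (2 masters, 3 etcd nodes, 2 nodes, `SSHUser="core"`), the generated `inventory` file would look roughly like the sketch below; the IP addresses are illustrative:

```
[kube-master]
10.0.0.10 ansible_ssh_user=core
10.0.0.11 ansible_ssh_user=core

[etcd]
10.0.0.20 ansible_ssh_user=core
10.0.0.21 ansible_ssh_user=core
10.0.0.22 ansible_ssh_user=core

[kube-node]
10.0.0.30 ansible_ssh_user=core
10.0.0.31 ansible_ssh_user=core

[k8s-cluster:children]
kube-node
kube-master
etcd
```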
28 contrib/terraform/aws/README.md Normal file
@@ -0,0 +1,28 @@
## Kubernetes on AWS with Terraform

**Overview:**

- This will create nodes in a VPC inside of AWS

- A dynamic number of masters, etcd, and nodes can be created

- These scripts currently expect private IP connectivity with the nodes that are created. This means that you may need a tunnel to your VPC, or to run these scripts from a VM inside the VPC. A workaround for this is planned.

**How to Use:**

- Export the variables for your Amazon credentials:

```
export AWS_ACCESS_KEY_ID="xxx"
export AWS_SECRET_ACCESS_KEY="yyy"
```

- Update contrib/terraform/aws/terraform.tfvars with your data

- Run with `terraform apply`

- Once the infrastructure is created, you can run the kubespray playbooks and supply contrib/terraform/aws/inventory with the `-i` flag (see the example below).
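A minimal sketch of that playbook run, assuming it is issued from the repository root (which contains `cluster.yml`):

```
ansible-playbook -i contrib/terraform/aws/inventory cluster.yml
```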
**Future Work:**

- Update the inventory creation file to be something a little more reasonable. It's just a local-exec from Terraform right now; using terraform.py or a similar dynamic inventory script may make sense in the future.
22 contrib/terraform/aws/terraform.tfvars Normal file
@@ -0,0 +1,22 @@
deploymentName="test-kube-deploy"

numControllers="2"
numEtcd="3"
numNodes="2"

volSizeController="20"
volSizeEtcd="20"
volSizeNodes="20"

awsRegion="us-west-2"
subnet="subnet-xxxxx"
ami="ami-32a85152"
securityGroups="sg-xxxxx"
SSHUser="core"
SSHKey="my-key"

master_instance_type="m3.xlarge"
etcd_instance_type="m3.xlarge"
node_instance_type="m3.xlarge"

terminate_protect="false"
137 contrib/terraform/openstack/README.md Normal file
@@ -0,0 +1,137 @@
# Kubernetes on Openstack with Terraform

Provision a Kubernetes cluster with [Terraform](https://www.terraform.io) on
Openstack.

## Status

This will install a Kubernetes cluster on an OpenStack cloud. It is tested on an
OpenStack cloud provided by [BlueBox](https://www.blueboxcloud.com/) and
should work on most modern installs of OpenStack that support the basic
services.

There are some assumptions made to try and ensure it will work on your OpenStack cluster:

* floating-ips are used for access
* you already have a suitable OS image in glance
* you already have both an internal network and a floating-ip pool created
* you have security-groups enabled

## Requirements

- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)

## Terraform

Terraform will be used to provision all of the OpenStack resources required to
run Kubernetes. It is also used to deploy and provision the software
requirements.

### Prep

#### OpenStack

Ensure your OpenStack credentials are loaded in environment variables. This is
how I do it:

```
$ source ~/.stackrc
```

You will need two networks before installing, an internal network and
an external (floating IP Pool) network. The internal network can be shared, as
we use security groups to provide network segregation. Due to the many
differences between OpenStack installs, this Terraform configuration does not attempt to create
these for you.

By default Terraform will expect that your networks are called `internal` and
`external`. You can change this by altering the Terraform variables `network_name` and `floatingip_pool`, as in the example below.

A full list of variables you can change can be found at [variables.tf](variables.tf).
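For example, both variables can be overridden on the command line; the network names here are illustrative:

```
terraform apply -var 'network_name=my-internal-net' \
                -var 'floatingip_pool=public-floating' \
                contrib/terraform/openstack
```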
All OpenStack resources will use the Terraform variable `cluster_name`
(default `example`) in their name to make it easier to track. For example, the
first compute resource will be named `example-kubernetes-1`.

#### Terraform

Ensure your local ssh-agent is running and your ssh key has been added. This
step is required by the terraform provisioner:

```
$ eval $(ssh-agent -s)
$ ssh-add ~/.ssh/id_rsa
```

Ensure that you have your Openstack credentials loaded into Terraform
environment variables. Likely via a command similar to:

```
$ echo Setting up Terraform creds && \
  export TF_VAR_username=${OS_USERNAME} && \
  export TF_VAR_password=${OS_PASSWORD} && \
  export TF_VAR_tenant=${OS_TENANT_NAME} && \
  export TF_VAR_auth_url=${OS_AUTH_URL}
```

# Provision a Kubernetes Cluster on OpenStack

```
terraform apply -state=contrib/terraform/openstack/terraform.tfstate contrib/terraform/openstack
openstack_compute_secgroup_v2.k8s_master: Creating...
  description: "" => "example - Kubernetes Master"
  name:        "" => "example-k8s-master"
  rule.#:      "" => "<computed>"
...
...
Apply complete! Resources: 9 added, 0 changed, 0 destroyed.

The state of your infrastructure has been saved to the path
below. This state is required to modify and destroy your
infrastructure, so keep it safe. To inspect the complete state
use the `terraform show` command.

State path: contrib/terraform/openstack/terraform.tfstate
```

Make sure you can connect to the hosts:

```
$ ansible -i contrib/terraform/openstack/hosts -m ping all
example-k8s_node-1 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
example-etcd-1 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
example-k8s-master-1 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}
```

If it fails, try to connect manually via SSH; it could be something as simple as a stale host key.

Deploy kubernetes:

```
$ ansible-playbook --become -i contrib/terraform/openstack/hosts cluster.yml
```

# Clean up:

```
$ terraform destroy
Do you really want to destroy?
  Terraform will delete all your managed infrastructure.
  There is no undo. Only 'yes' will be accepted to confirm.

  Enter a value: yes
...
...
Apply complete! Resources: 0 added, 0 changed, 12 destroyed.
```
136 contrib/terraform/openstack/group_vars/all.yml Normal file
@@ -0,0 +1,136 @@
# Directory where the binaries will be installed
bin_dir: /usr/local/bin

# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
local_release_dir: "/tmp/releases"

# Uncomment this line for CoreOS only.
# Directory where python binary is installed
# ansible_python_interpreter: "/opt/bin/python"

# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert

# Cluster Loglevel configuration
kube_log_level: 2

# Users to create for basic auth in Kubernetes API via HTTP
kube_api_pwd: "changeme"
kube_users:
  kube:
    pass: "{{kube_api_pwd}}"
    role: admin
  root:
    pass: "changeme"
    role: admin

# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local

# For some environments, each node has a publicly accessible
# address and an address it should bind services to. These are
# really inventory level variables, but described here for consistency.
#
# When advertising access, the access_ip will be used, but will defer to
# ip and then the default ansible ip when unspecified.
#
# When binding to restrict access, the ip variable will be used, but will
# defer to the default ansible ip when unspecified.
#
# The ip variable is used for specific address binding, e.g. listen address
# for etcd. This is used to help with environments like Vagrant or multi-nic
# systems where one address should be preferred over another.
# ip: 10.2.2.2
#
# The access_ip variable is used to define how other nodes should access
# the node. This is used in flannel to allow other flannel nodes to see
# this node, for example. The access_ip is really useful in AWS and Google
# environments where the nodes are accessed remotely by the "public" ip,
# but don't know about that address themselves.
# access_ip: 1.1.1.1

# Choose network plugin (calico, weave or flannel)
kube_network_plugin: flannel

# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18

# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18

# internal network total size (optional). This is the prefix of the
# entire network. Must be unused in your environment.
# kube_network_prefix: 18

# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 4096 nodes with 254 pods per node.
kube_network_node_prefix: 24

# With calico it is possible to distribute routes with border routers of the datacenter.
peer_with_router: false
# Warning: enabling router peering will disable calico's default behavior ('node mesh').
# The subnet of each node will be distributed by the datacenter router.

# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 443 # (https)
kube_apiserver_insecure_port: 8080 # (http)

# Internal DNS configuration.
# Kubernetes can create and maintain its own DNS server to resolve service names
# into appropriate IP addresses. It's highly advisable to run such a DNS server,
# as it greatly simplifies configuration of your applications - you can use
# service names instead of magic environment variables.
# You still must manually configure all your containers to use this DNS server;
# Kubernetes won't do this for you (yet).

# Upstream dns servers used by dnsmasq
upstream_dns_servers:
  - 8.8.8.8
  - 8.8.4.4
#
# # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
dns_setup: true
dns_domain: "{{ cluster_name }}"
#
# # Ip address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"

# There are some changes specific to the cloud providers;
# for instance we need to encapsulate packets with some network plugins.
# If set, the possible values are either 'gce', 'aws' or 'openstack'.
# When openstack is used, make sure to source in the openstack credentials
# like you would do when using nova-client before starting the playbook.
# cloud_provider:

# For multi masters architecture:
# kube-proxy doesn't support multiple apiservers for the time being so you'll need to configure your own loadbalancer
# This domain name will be inserted into the /etc/hosts file of all servers
# configuration example with haproxy:
# listen kubernetes-apiserver-https
#   bind 10.99.0.21:8383
#   option ssl-hello-chk
#   mode tcp
#   timeout client 3h
#   timeout server 3h
#   server master1 10.99.0.26:443
#   server master2 10.99.0.27:443
#   balance roundrobin
# apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"

## Set these proxy values in order to update docker daemon to use proxies
# http_proxy: ""
# https_proxy: ""
# no_proxy: ""

## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self hosted registries like so:
docker_options: "--insecure-registry={{ kube_service_addresses }}"
1 contrib/terraform/openstack/hosts Symbolic link
@@ -0,0 +1 @@
../terraform.py
94 contrib/terraform/openstack/kubespray.tf Normal file
@@ -0,0 +1,94 @@
resource "openstack_networking_floatingip_v2" "k8s_master" {
|
||||
count = "${var.number_of_k8s_masters}"
|
||||
pool = "${var.floatingip_pool}"
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "k8s_node" {
|
||||
count = "${var.number_of_k8s_nodes}"
|
||||
pool = "${var.floatingip_pool}"
|
||||
}
|
||||
|
||||
|
||||
resource "openstack_compute_keypair_v2" "k8s" {
|
||||
name = "kubernetes-${var.cluster_name}"
|
||||
public_key = "${file(var.public_key_path)}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_secgroup_v2" "k8s_master" {
|
||||
name = "${var.cluster_name}-k8s-master"
|
||||
description = "${var.cluster_name} - Kubernetes Master"
|
||||
}
|
||||
|
||||
resource "openstack_compute_secgroup_v2" "k8s" {
|
||||
name = "${var.cluster_name}-k8s"
|
||||
description = "${var.cluster_name} - Kubernetes"
|
||||
rule {
|
||||
ip_protocol = "tcp"
|
||||
from_port = "22"
|
||||
to_port = "22"
|
||||
cidr = "0.0.0.0/0"
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "icmp"
|
||||
from_port = "-1"
|
||||
to_port = "-1"
|
||||
cidr = "0.0.0.0/0"
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "tcp"
|
||||
from_port = "1"
|
||||
to_port = "65535"
|
||||
self = true
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "udp"
|
||||
from_port = "1"
|
||||
to_port = "65535"
|
||||
self = true
|
||||
}
|
||||
rule {
|
||||
ip_protocol = "icmp"
|
||||
from_port = "-1"
|
||||
to_port = "-1"
|
||||
self = true
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master" {
|
||||
name = "${var.cluster_name}-k8s-master-${count.index+1}"
|
||||
count = "${var.number_of_k8s_masters}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_master.*.address, count.index)}"
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
name = "${var.cluster_name}-k8s-node-${count.index+1}"
|
||||
count = "${var.number_of_k8s_nodes}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
|
||||
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_node.*.address, count.index)}"
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-node,k8s-cluster"
|
||||
}
|
||||
}
|
||||
|
||||
#output "msg" {
|
||||
# value = "Your hosts are ready to go!\nYour ssh hosts are: ${join(", ", openstack_networking_floatingip_v2.k8s_master.*.address )}"
|
||||
#}
|
238 contrib/terraform/openstack/terraform.tfstate Normal file
@@ -0,0 +1,238 @@
{
  "version": 1,
  "serial": 17,
  "modules": [
    {
      "path": [
        "root"
      ],
      "outputs": {},
      "resources": {
        "openstack_compute_instance_v2.k8s_master.0": {
          "type": "openstack_compute_instance_v2",
          "depends_on": [
            "openstack_compute_keypair_v2.k8s",
            "openstack_compute_secgroup_v2.k8s",
            "openstack_compute_secgroup_v2.k8s_master",
            "openstack_networking_floatingip_v2.k8s_master"
          ],
          "primary": {
            "id": "f4a44f6e-33ff-4e35-b593-34f3dfd80dc9",
            "attributes": {
              "access_ip_v4": "173.247.105.12",
              "access_ip_v6": "",
              "flavor_id": "3",
              "flavor_name": "m1.medium",
              "floating_ip": "173.247.105.12",
              "id": "f4a44f6e-33ff-4e35-b593-34f3dfd80dc9",
              "image_id": "1525c3f3-1224-4958-bd07-da9feaedf18b",
              "image_name": "ubuntu-14.04",
              "key_pair": "kubernetes-example",
              "metadata.#": "2",
              "metadata.kubespray_groups": "etcd,kube-master,kube-node,k8s-cluster",
              "metadata.ssh_user": "ubuntu",
              "name": "example-k8s-master-1",
              "network.#": "1",
              "network.0.access_network": "false",
              "network.0.fixed_ip_v4": "10.230.7.86",
              "network.0.fixed_ip_v6": "",
              "network.0.floating_ip": "173.247.105.12",
              "network.0.mac": "fa:16:3e:fb:82:1d",
              "network.0.name": "internal",
              "network.0.port": "",
              "network.0.uuid": "ba0fdd03-72b5-41eb-bb67-fef437fd6cb4",
              "security_groups.#": "2",
              "security_groups.2779334175": "example-k8s",
              "security_groups.3772290257": "example-k8s-master",
              "volume.#": "0"
            }
          }
        },
        "openstack_compute_instance_v2.k8s_master.1": {
          "type": "openstack_compute_instance_v2",
          "depends_on": [
            "openstack_compute_keypair_v2.k8s",
            "openstack_compute_secgroup_v2.k8s",
            "openstack_compute_secgroup_v2.k8s_master",
            "openstack_networking_floatingip_v2.k8s_master"
          ],
          "primary": {
            "id": "cbb565fe-a3b6-44ff-8f81-8ec29704d11b",
            "attributes": {
              "access_ip_v4": "173.247.105.70",
              "access_ip_v6": "",
              "flavor_id": "3",
              "flavor_name": "m1.medium",
              "floating_ip": "173.247.105.70",
              "id": "cbb565fe-a3b6-44ff-8f81-8ec29704d11b",
              "image_id": "1525c3f3-1224-4958-bd07-da9feaedf18b",
              "image_name": "ubuntu-14.04",
              "key_pair": "kubernetes-example",
              "metadata.#": "2",
              "metadata.kubespray_groups": "etcd,kube-master,kube-node,k8s-cluster",
              "metadata.ssh_user": "ubuntu",
              "name": "example-k8s-master-2",
              "network.#": "1",
              "network.0.access_network": "false",
              "network.0.fixed_ip_v4": "10.230.7.85",
              "network.0.fixed_ip_v6": "",
              "network.0.floating_ip": "173.247.105.70",
              "network.0.mac": "fa:16:3e:33:98:e6",
              "network.0.name": "internal",
              "network.0.port": "",
              "network.0.uuid": "ba0fdd03-72b5-41eb-bb67-fef437fd6cb4",
              "security_groups.#": "2",
              "security_groups.2779334175": "example-k8s",
              "security_groups.3772290257": "example-k8s-master",
              "volume.#": "0"
            }
          }
        },
        "openstack_compute_instance_v2.k8s_node": {
          "type": "openstack_compute_instance_v2",
          "depends_on": [
            "openstack_compute_keypair_v2.k8s",
            "openstack_compute_secgroup_v2.k8s",
            "openstack_networking_floatingip_v2.k8s_node"
          ],
          "primary": {
            "id": "39deed7e-8307-4b62-b56c-ce2b405a03fa",
            "attributes": {
              "access_ip_v4": "173.247.105.76",
              "access_ip_v6": "",
              "flavor_id": "3",
              "flavor_name": "m1.medium",
              "floating_ip": "173.247.105.76",
              "id": "39deed7e-8307-4b62-b56c-ce2b405a03fa",
              "image_id": "1525c3f3-1224-4958-bd07-da9feaedf18b",
              "image_name": "ubuntu-14.04",
              "key_pair": "kubernetes-example",
              "metadata.#": "2",
              "metadata.kubespray_groups": "kube-node,k8s-cluster",
              "metadata.ssh_user": "ubuntu",
              "name": "example-k8s-node-1",
              "network.#": "1",
              "network.0.access_network": "false",
              "network.0.fixed_ip_v4": "10.230.7.84",
              "network.0.fixed_ip_v6": "",
              "network.0.floating_ip": "173.247.105.76",
              "network.0.mac": "fa:16:3e:53:57:bc",
              "network.0.name": "internal",
              "network.0.port": "",
              "network.0.uuid": "ba0fdd03-72b5-41eb-bb67-fef437fd6cb4",
              "security_groups.#": "1",
              "security_groups.2779334175": "example-k8s",
              "volume.#": "0"
            }
          }
        },
        "openstack_compute_keypair_v2.k8s": {
          "type": "openstack_compute_keypair_v2",
          "primary": {
            "id": "kubernetes-example",
            "attributes": {
              "id": "kubernetes-example",
              "name": "kubernetes-example",
              "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9nU6RPYCabjLH1LvJfpp9L8r8q5RZ6niS92zD95xpm2b2obVydWe0tCSFdmULBuvT8Q8YQ4qOG2g/oJlsGOsia+4CQjYEUV9CgTH9H5HK3vUOwtO5g2eFnYKSmI/4znHa0WYpQFnQK2kSSeCs2beTlJhc8vjfN/2HHmuny6SxNSbnCk/nZdwamxEONIVdjlm3CSBlq4PChT/D/uUqm/nOm0Zqdk9ZlTBkucsjiOCJeEzg4HioKmIH8ewqsKuS7kMADHPH98JMdBhTKbYbLrxTC/RfiaON58WJpmdOA935TT5Td5aVQZoqe/i/5yFRp5fMG239jtfbM0Igu44TEIib pczarkowski@Pauls-MacBook-Pro.local\n"
            }
          }
        },
        "openstack_compute_secgroup_v2.k8s": {
          "type": "openstack_compute_secgroup_v2",
          "primary": {
            "id": "418394e2-b4be-4953-b7a3-b309bf28fbdb",
            "attributes": {
              "description": "example - Kubernetes",
              "id": "418394e2-b4be-4953-b7a3-b309bf28fbdb",
              "name": "example-k8s",
              "rule.#": "5",
              "rule.112275015.cidr": "",
              "rule.112275015.from_group_id": "",
              "rule.112275015.from_port": "1",
              "rule.112275015.id": "597170c9-b35a-45c0-8717-652a342f3fd6",
              "rule.112275015.ip_protocol": "tcp",
              "rule.112275015.self": "true",
              "rule.112275015.to_port": "65535",
              "rule.2180185248.cidr": "0.0.0.0/0",
              "rule.2180185248.from_group_id": "",
              "rule.2180185248.from_port": "-1",
              "rule.2180185248.id": "ffdcdd5e-f18b-4537-b502-8849affdfed9",
              "rule.2180185248.ip_protocol": "icmp",
              "rule.2180185248.self": "false",
              "rule.2180185248.to_port": "-1",
              "rule.3267409695.cidr": "",
              "rule.3267409695.from_group_id": "",
              "rule.3267409695.from_port": "-1",
              "rule.3267409695.id": "4f91d9ca-940c-4f4d-9ce1-024cbd7d9c54",
              "rule.3267409695.ip_protocol": "icmp",
              "rule.3267409695.self": "true",
              "rule.3267409695.to_port": "-1",
              "rule.635693822.cidr": "",
              "rule.635693822.from_group_id": "",
              "rule.635693822.from_port": "1",
              "rule.635693822.id": "c6816e5b-a1a4-4071-acce-d09b92d14d49",
              "rule.635693822.ip_protocol": "udp",
              "rule.635693822.self": "true",
              "rule.635693822.to_port": "65535",
              "rule.836640770.cidr": "0.0.0.0/0",
              "rule.836640770.from_group_id": "",
              "rule.836640770.from_port": "22",
              "rule.836640770.id": "8845acba-636b-4c23-b9e2-5bff76d9008d",
              "rule.836640770.ip_protocol": "tcp",
              "rule.836640770.self": "false",
              "rule.836640770.to_port": "22"
            }
          }
        },
        "openstack_compute_secgroup_v2.k8s_master": {
          "type": "openstack_compute_secgroup_v2",
          "primary": {
            "id": "c74aed25-6161-46c4-a488-dfc7f49a228e",
            "attributes": {
              "description": "example - Kubernetes Master",
              "id": "c74aed25-6161-46c4-a488-dfc7f49a228e",
              "name": "example-k8s-master",
              "rule.#": "0"
            }
          }
        },
        "openstack_networking_floatingip_v2.k8s_master.0": {
          "type": "openstack_networking_floatingip_v2",
          "primary": {
            "id": "2a320c67-214d-4631-a840-2de82505ed3f",
            "attributes": {
              "address": "173.247.105.12",
              "id": "2a320c67-214d-4631-a840-2de82505ed3f",
              "pool": "external",
              "port_id": ""
            }
          }
        },
        "openstack_networking_floatingip_v2.k8s_master.1": {
          "type": "openstack_networking_floatingip_v2",
          "primary": {
            "id": "3adbfc13-e7ae-4bcf-99d3-3ba9db056e1f",
            "attributes": {
              "address": "173.247.105.70",
              "id": "3adbfc13-e7ae-4bcf-99d3-3ba9db056e1f",
              "pool": "external",
              "port_id": ""
            }
          }
        },
        "openstack_networking_floatingip_v2.k8s_node": {
          "type": "openstack_networking_floatingip_v2",
          "primary": {
            "id": "a3f77aa6-5c3a-4edf-b97e-ee211dfa81e1",
            "attributes": {
              "address": "173.247.105.76",
              "id": "a3f77aa6-5c3a-4edf-b97e-ee211dfa81e1",
              "pool": "external",
              "port_id": ""
            }
          }
        }
      }
    }
  ]
}
13 contrib/terraform/openstack/terraform.tfstate.backup Normal file
@@ -0,0 +1,13 @@
{
  "version": 1,
  "serial": 16,
  "modules": [
    {
      "path": [
        "root"
      ],
      "outputs": {},
      "resources": {}
    }
  ]
}
61 contrib/terraform/openstack/variables.tf Normal file
@@ -0,0 +1,61 @@
variable "cluster_name" {
|
||||
default = "example"
|
||||
}
|
||||
|
||||
variable "number_of_k8s_masters" {
|
||||
default = 2
|
||||
}
|
||||
|
||||
variable "number_of_k8s_nodes" {
|
||||
default = 1
|
||||
}
|
||||
|
||||
variable "public_key_path" {
|
||||
description = "The path of the ssh pub key"
|
||||
default = "~/.ssh/id_rsa.pub"
|
||||
}
|
||||
|
||||
variable "image" {
|
||||
description = "the image to use"
|
||||
default = "ubuntu-14.04"
|
||||
}
|
||||
|
||||
variable "ssh_user" {
|
||||
description = "used to fill out tags for ansible inventory"
|
||||
default = "ubuntu"
|
||||
}
|
||||
|
||||
variable "flavor_k8s_master" {
|
||||
default = 3
|
||||
}
|
||||
|
||||
variable "flavor_k8s_node" {
|
||||
default = 3
|
||||
}
|
||||
|
||||
|
||||
variable "network_name" {
|
||||
description = "name of the internal network to use"
|
||||
default = "internal"
|
||||
}
|
||||
|
||||
variable "floatingip_pool" {
|
||||
description = "name of the floating ip pool to use"
|
||||
default = "external"
|
||||
}
|
||||
|
||||
variable "username" {
|
||||
description = "Your openstack username"
|
||||
}
|
||||
|
||||
variable "password" {
|
||||
description = "Your openstack password"
|
||||
}
|
||||
|
||||
variable "tenant" {
|
||||
description = "Your openstack tenant/project"
|
||||
}
|
||||
|
||||
variable "auth_url" {
|
||||
description = "Your openstack auth URL"
|
||||
}
|
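As a sketch, these defaults could be overridden with a terraform.tfvars placed alongside these files; all values below are illustrative:

```
cluster_name = "demo"
number_of_k8s_masters = 2
number_of_k8s_nodes = 3
image = "ubuntu-14.04"
network_name = "internal"
floatingip_pool = "external"
ssh_user = "ubuntu"
```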
736 contrib/terraform/terraform.py Executable file
@@ -0,0 +1,736 @@
#!/usr/bin/env python
#
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# original: https://github.com/CiscoCloud/terraform.py

"""\
Dynamic inventory for Terraform - finds all `.tfstate` files below the working
directory and generates an inventory based on them.
"""
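# Example invocations (assuming at least one .tfstate exists below the chosen
# --root; all flags are defined in main() further down):
#
#   ./terraform.py --list --pretty    # full inventory as pretty-printed JSON
#   ./terraform.py --host NAME        # hostvars for a single host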
from __future__ import unicode_literals, print_function
import argparse
from collections import defaultdict
from functools import wraps
import json
import os
import re

VERSION = '0.3.0pre'


def tfstates(root=None):
    root = root or os.getcwd()
    for dirpath, _, filenames in os.walk(root):
        for name in filenames:
            if os.path.splitext(name)[-1] == '.tfstate':
                yield os.path.join(dirpath, name)


def iterresources(filenames):
    for filename in filenames:
        with open(filename, 'r') as json_file:
            state = json.load(json_file)
            for module in state['modules']:
                name = module['path'][-1]
                for key, resource in module['resources'].items():
                    yield name, key, resource


## READ RESOURCES
PARSERS = {}
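# PARSERS maps a Terraform resource type (e.g. 'openstack_compute_instance_v2')
# to the function that turns one resource of that type into a
# (name, attributes, groups) host tuple. Entries are registered with the
# @parses(...) decorator below; iterhosts() skips resource types that have no
# registered parser.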
def _clean_dc(dcname):
    # Consul DCs are strictly alphanumeric with underscores and hyphens -
    # ensure that the consul_dc attribute meets these requirements.
    return re.sub(r'[^\w_\-]', '-', dcname)


def iterhosts(resources):
    '''yield host tuples of (name, attributes, groups)'''
    for module_name, key, resource in resources:
        resource_type, name = key.split('.', 1)
        try:
            parser = PARSERS[resource_type]
        except KeyError:
            continue

        yield parser(resource, module_name)


def parses(prefix):
    def inner(func):
        PARSERS[prefix] = func
        return func

    return inner


def calculate_mantl_vars(func):
    """calculate Mantl vars"""

    @wraps(func)
    def inner(*args, **kwargs):
        name, attrs, groups = func(*args, **kwargs)

        # attrs
        if attrs.get('role', '') == 'control':
            attrs['consul_is_server'] = True
        else:
            attrs['consul_is_server'] = False

        # groups
        if attrs.get('publicly_routable', False):
            groups.append('publicly_routable')

        return name, attrs, groups

    return inner


def _parse_prefix(source, prefix, sep='.'):
    for compkey, value in source.items():
        try:
            curprefix, rest = compkey.split(sep, 1)
        except ValueError:
            continue

        if curprefix != prefix or rest == '#':
            continue

        yield rest, value


def parse_attr_list(source, prefix, sep='.'):
    attrs = defaultdict(dict)
    for compkey, value in _parse_prefix(source, prefix, sep):
        idx, key = compkey.split(sep, 1)
        attrs[idx][key] = value

    return attrs.values()


def parse_dict(source, prefix, sep='.'):
    return dict(_parse_prefix(source, prefix, sep))


def parse_list(source, prefix, sep='.'):
    return [value for _, value in _parse_prefix(source, prefix, sep)]


def parse_bool(string_form):
    token = string_form.lower()[0]

    if token == 't':
        return True
    elif token == 'f':
        return False
    else:
        raise ValueError('could not convert %r to a bool' % string_form)


@parses('triton_machine')
@calculate_mantl_vars
def triton_machine(resource, module_name):
    raw_attrs = resource['primary']['attributes']
    name = raw_attrs.get('name')
    groups = []

    attrs = {
        'id': raw_attrs['id'],
        'dataset': raw_attrs['dataset'],
        'disk': raw_attrs['disk'],
        'firewall_enabled': parse_bool(raw_attrs['firewall_enabled']),
        'image': raw_attrs['image'],
        'ips': parse_list(raw_attrs, 'ips'),
        'memory': raw_attrs['memory'],
        'name': raw_attrs['name'],
        'networks': parse_list(raw_attrs, 'networks'),
        'package': raw_attrs['package'],
        'primary_ip': raw_attrs['primaryip'],
        'root_authorized_keys': raw_attrs['root_authorized_keys'],
        'state': raw_attrs['state'],
        'tags': parse_dict(raw_attrs, 'tags'),
        'type': raw_attrs['type'],
        'user_data': raw_attrs['user_data'],
        'user_script': raw_attrs['user_script'],

        # ansible
        'ansible_ssh_host': raw_attrs['primaryip'],
        'ansible_ssh_port': 22,
        'ansible_ssh_user': 'root',  # it's "root" on Triton by default

        # generic
        'public_ipv4': raw_attrs['primaryip'],
        'provider': 'triton',
    }

    # private IPv4
    for ip in attrs['ips']:
        if ip.startswith('10') or ip.startswith('192.168'):  # private IPs
            attrs['private_ipv4'] = ip
            break

    if 'private_ipv4' not in attrs:
        attrs['private_ipv4'] = attrs['public_ipv4']

    # attrs specific to Mantl
    attrs.update({
        'consul_dc': _clean_dc(attrs['tags'].get('dc', 'none')),
        'role': attrs['tags'].get('role', 'none'),
        'ansible_python_interpreter': attrs['tags'].get('python_bin', 'python')
    })

    # add groups based on attrs
    groups.append('triton_image=' + attrs['image'])
    groups.append('triton_package=' + attrs['package'])
    groups.append('triton_state=' + attrs['state'])
    groups.append('triton_firewall_enabled=%s' % attrs['firewall_enabled'])
    groups.extend('triton_tags_%s=%s' % item
                  for item in attrs['tags'].items())
    groups.extend('triton_network=' + network
                  for network in attrs['networks'])

    # groups specific to Mantl
    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])

    return name, attrs, groups


@parses('digitalocean_droplet')
@calculate_mantl_vars
def digitalocean_host(resource, tfvars=None):
    raw_attrs = resource['primary']['attributes']
    name = raw_attrs['name']
    groups = []

    attrs = {
        'id': raw_attrs['id'],
        'image': raw_attrs['image'],
        'ipv4_address': raw_attrs['ipv4_address'],
        'locked': parse_bool(raw_attrs['locked']),
        'metadata': json.loads(raw_attrs.get('user_data', '{}')),
        'region': raw_attrs['region'],
        'size': raw_attrs['size'],
        'ssh_keys': parse_list(raw_attrs, 'ssh_keys'),
        'status': raw_attrs['status'],
        # ansible
        'ansible_ssh_host': raw_attrs['ipv4_address'],
        'ansible_ssh_port': 22,
        'ansible_ssh_user': 'root',  # it's always "root" on DO
        # generic
        'public_ipv4': raw_attrs['ipv4_address'],
        'private_ipv4': raw_attrs.get('ipv4_address_private',
                                      raw_attrs['ipv4_address']),
        'provider': 'digitalocean',
    }

    # attrs specific to Mantl
    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['region'])),
        'role': attrs['metadata'].get('role', 'none'),
        'ansible_python_interpreter': attrs['metadata'].get('python_bin', 'python')
    })

    # add groups based on attrs
    groups.append('do_image=' + attrs['image'])
    groups.append('do_locked=%s' % attrs['locked'])
    groups.append('do_region=' + attrs['region'])
    groups.append('do_size=' + attrs['size'])
    groups.append('do_status=' + attrs['status'])
    groups.extend('do_metadata_%s=%s' % item
                  for item in attrs['metadata'].items())

    # groups specific to Mantl
    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])

    return name, attrs, groups


@parses('softlayer_virtualserver')
@calculate_mantl_vars
def softlayer_host(resource, module_name):
    raw_attrs = resource['primary']['attributes']
    name = raw_attrs['name']
    groups = []

    attrs = {
        'id': raw_attrs['id'],
        'image': raw_attrs['image'],
        'ipv4_address': raw_attrs['ipv4_address'],
        'metadata': json.loads(raw_attrs.get('user_data', '{}')),
        'region': raw_attrs['region'],
        'ram': raw_attrs['ram'],
        'cpu': raw_attrs['cpu'],
        'ssh_keys': parse_list(raw_attrs, 'ssh_keys'),
        'public_ipv4': raw_attrs['ipv4_address'],
        'private_ipv4': raw_attrs['ipv4_address_private'],
        'ansible_ssh_host': raw_attrs['ipv4_address'],
        'ansible_ssh_port': 22,
        'ansible_ssh_user': 'root',
        'provider': 'softlayer',
    }

    # attrs specific to Mantl
    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['region'])),
        'role': attrs['metadata'].get('role', 'none'),
        'ansible_python_interpreter': attrs['metadata'].get('python_bin', 'python')
    })

    # groups specific to Mantl
    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])

    return name, attrs, groups


@parses('openstack_compute_instance_v2')
@calculate_mantl_vars
def openstack_host(resource, module_name):
    raw_attrs = resource['primary']['attributes']
    name = raw_attrs['name']
    groups = []

    attrs = {
        'access_ip_v4': raw_attrs['access_ip_v4'],
        'access_ip_v6': raw_attrs['access_ip_v6'],
        'flavor': parse_dict(raw_attrs, 'flavor',
                             sep='_'),
        'id': raw_attrs['id'],
        'image': parse_dict(raw_attrs, 'image',
                            sep='_'),
        'key_pair': raw_attrs['key_pair'],
        'metadata': parse_dict(raw_attrs, 'metadata'),
        'network': parse_attr_list(raw_attrs, 'network'),
        'region': raw_attrs.get('region', ''),
        'security_groups': parse_list(raw_attrs, 'security_groups'),
        # ansible
        'ansible_ssh_port': 22,
        # workaround for an OpenStack bug where hosts have a different domain
        # after they're restarted
        'host_domain': 'novalocal',
        'use_host_domain': True,
        # generic
        'public_ipv4': raw_attrs['access_ip_v4'],
        'private_ipv4': raw_attrs['access_ip_v4'],
        'provider': 'openstack',
    }

    if 'floating_ip' in raw_attrs:
        attrs['private_ipv4'] = raw_attrs['network.0.fixed_ip_v4']

    try:
        attrs.update({
            'ansible_ssh_host': raw_attrs['access_ip_v4'],
            'publicly_routable': True,
        })
    except (KeyError, ValueError):
        attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})

    # attrs specific to Ansible
    if 'metadata.ssh_user' in raw_attrs:
        attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']

    # attrs specific to Mantl
    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
        'role': attrs['metadata'].get('role', 'none'),
        'ansible_python_interpreter': attrs['metadata'].get('python_bin', 'python')
    })

    # add groups based on attrs
    groups.append('os_image=' + attrs['image']['name'])
    groups.append('os_flavor=' + attrs['flavor']['name'])
    groups.extend('os_metadata_%s=%s' % item
                  for item in attrs['metadata'].items())
    groups.append('os_region=' + attrs['region'])

    # groups specific to Mantl
    groups.append('role=' + attrs['metadata'].get('role', 'none'))
    groups.append('dc=' + attrs['consul_dc'])

    # groups specific to kubespray
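    # (kubespray_groups comes from the instance metadata set by the Terraform
    # configs, e.g. contrib/terraform/openstack/kubespray.tf sets
    # "kube-node,k8s-cluster" on nodes)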
for group in attrs['metadata'].get('kubespray_groups', "").split(","):
|
||||
groups.append(group)
|
||||
|
||||
return name, attrs, groups
|
||||
|
||||
|
||||
@parses('aws_instance')
|
||||
@calculate_mantl_vars
|
||||
def aws_host(resource, module_name):
|
||||
name = resource['primary']['attributes']['tags.Name']
|
||||
raw_attrs = resource['primary']['attributes']
|
||||
|
||||
groups = []
|
||||
|
||||
attrs = {
|
||||
'ami': raw_attrs['ami'],
|
||||
'availability_zone': raw_attrs['availability_zone'],
|
||||
'ebs_block_device': parse_attr_list(raw_attrs, 'ebs_block_device'),
|
||||
'ebs_optimized': parse_bool(raw_attrs['ebs_optimized']),
|
||||
'ephemeral_block_device': parse_attr_list(raw_attrs,
|
||||
'ephemeral_block_device'),
|
||||
'id': raw_attrs['id'],
|
||||
'key_name': raw_attrs['key_name'],
|
||||
'private': parse_dict(raw_attrs, 'private',
|
||||
sep='_'),
|
||||
'public': parse_dict(raw_attrs, 'public',
|
||||
sep='_'),
|
||||
'root_block_device': parse_attr_list(raw_attrs, 'root_block_device'),
|
||||
'security_groups': parse_list(raw_attrs, 'security_groups'),
|
||||
'subnet': parse_dict(raw_attrs, 'subnet',
|
||||
sep='_'),
|
||||
'tags': parse_dict(raw_attrs, 'tags'),
|
||||
'tenancy': raw_attrs['tenancy'],
|
||||
'vpc_security_group_ids': parse_list(raw_attrs,
|
||||
'vpc_security_group_ids'),
|
||||
# ansible-specific
|
||||
'ansible_ssh_port': 22,
|
||||
'ansible_ssh_host': raw_attrs['public_ip'],
|
||||
# generic
|
||||
'public_ipv4': raw_attrs['public_ip'],
|
||||
'private_ipv4': raw_attrs['private_ip'],
|
||||
'provider': 'aws',
|
||||
}
|
||||
|
||||
# attrs specific to Ansible
|
||||
if 'tags.sshUser' in raw_attrs:
|
||||
attrs['ansible_ssh_user'] = raw_attrs['tags.sshUser']
|
||||
if 'tags.sshPrivateIp' in raw_attrs:
|
||||
attrs['ansible_ssh_host'] = raw_attrs['private_ip']
|
||||
|
||||
# attrs specific to Mantl
|
||||
attrs.update({
|
||||
'consul_dc': _clean_dc(attrs['tags'].get('dc', module_name)),
|
||||
'role': attrs['tags'].get('role', 'none'),
|
||||
'ansible_python_interpreter': attrs['tags'].get('python_bin','python')
|
||||
})
|
||||
|
||||
# groups specific to Mantl
|
||||
groups.extend(['aws_ami=' + attrs['ami'],
|
||||
'aws_az=' + attrs['availability_zone'],
|
||||
'aws_key_name=' + attrs['key_name'],
|
||||
'aws_tenancy=' + attrs['tenancy']])
|
||||
groups.extend('aws_tag_%s=%s' % item for item in attrs['tags'].items())
|
||||
groups.extend('aws_vpc_security_group=' + group
|
||||
for group in attrs['vpc_security_group_ids'])
|
||||
groups.extend('aws_subnet_%s=%s' % subnet
|
||||
for subnet in attrs['subnet'].items())
|
||||
|
||||
# groups specific to Mantl
|
||||
groups.append('role=' + attrs['role'])
|
||||
groups.append('dc=' + attrs['consul_dc'])
|
||||
|
||||
return name, attrs, groups
|
||||
|
||||
|
||||
@parses('google_compute_instance')
|
||||
@calculate_mantl_vars
|
||||
def gce_host(resource, module_name):
|
||||
name = resource['primary']['id']
|
||||
raw_attrs = resource['primary']['attributes']
|
||||
groups = []
|
||||
|
||||
# network interfaces
|
||||
interfaces = parse_attr_list(raw_attrs, 'network_interface')
|
||||
for interface in interfaces:
|
||||
interface['access_config'] = parse_attr_list(interface,
|
||||
'access_config')
|
||||
for key in interface.keys():
|
||||
if '.' in key:
|
||||
del interface[key]
|
||||
|
||||
# general attrs
|
||||
attrs = {
|
||||
'can_ip_forward': raw_attrs['can_ip_forward'] == 'true',
|
||||
'disks': parse_attr_list(raw_attrs, 'disk'),
|
||||
'machine_type': raw_attrs['machine_type'],
|
||||
'metadata': parse_dict(raw_attrs, 'metadata'),
|
||||
'network': parse_attr_list(raw_attrs, 'network'),
|
||||
'network_interface': interfaces,
|
||||
'self_link': raw_attrs['self_link'],
|
||||
'service_account': parse_attr_list(raw_attrs, 'service_account'),
|
||||
'tags': parse_list(raw_attrs, 'tags'),
|
||||
'zone': raw_attrs['zone'],
|
||||
# ansible
|
||||
'ansible_ssh_port': 22,
|
||||
'provider': 'gce',
|
||||
}
|
||||
|
||||
# attrs specific to Ansible
|
||||
if 'metadata.ssh_user' in raw_attrs:
|
||||
attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
|
||||
|
||||
# attrs specific to Mantl
|
||||
attrs.update({
|
||||
'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
|
||||
'role': attrs['metadata'].get('role', 'none'),
|
||||
'ansible_python_interpreter': attrs['metadata'].get('python_bin','python')
|
||||
})
|
||||
|
||||
try:
|
||||
attrs.update({
|
||||
'ansible_ssh_host': interfaces[0]['access_config'][0]['nat_ip'] or interfaces[0]['access_config'][0]['assigned_nat_ip'],
|
||||
'public_ipv4': interfaces[0]['access_config'][0]['nat_ip'] or interfaces[0]['access_config'][0]['assigned_nat_ip'],
|
||||
'private_ipv4': interfaces[0]['address'],
|
||||
'publicly_routable': True,
|
||||
})
|
||||
except (KeyError, ValueError):
|
||||
attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
|
||||
|
||||
# add groups based on attrs
|
||||
groups.extend('gce_image=' + disk['image'] for disk in attrs['disks'])
|
||||
groups.append('gce_machine_type=' + attrs['machine_type'])
|
||||
groups.extend('gce_metadata_%s=%s' % (key, value)
|
||||
for (key, value) in attrs['metadata'].items()
|
||||
if key not in set(['sshKeys']))
|
||||
groups.extend('gce_tag=' + tag for tag in attrs['tags'])
|
||||
groups.append('gce_zone=' + attrs['zone'])
|
||||
|
||||
if attrs['can_ip_forward']:
|
||||
groups.append('gce_ip_forward')
|
||||
if attrs['publicly_routable']:
|
||||
groups.append('gce_publicly_routable')
|
||||
|
||||
# groups specific to Mantl
|
||||
groups.append('role=' + attrs['metadata'].get('role', 'none'))
|
||||
groups.append('dc=' + attrs['consul_dc'])
|
||||
|
||||
return name, attrs, groups
|
||||
|
||||
|
||||
@parses('vsphere_virtual_machine')
|
||||
@calculate_mantl_vars
|
||||
def vsphere_host(resource, module_name):
|
||||
raw_attrs = resource['primary']['attributes']
|
||||
network_attrs = parse_dict(raw_attrs, 'network_interface')
|
||||
network = parse_dict(network_attrs, '0')
|
||||
ip_address = network.get('ipv4_address', network['ip_address'])
|
||||
name = raw_attrs['name']
|
||||
groups = []
|
||||
|
||||
attrs = {
|
||||
'id': raw_attrs['id'],
|
||||
'ip_address': ip_address,
|
||||
'private_ipv4': ip_address,
|
||||
'public_ipv4': ip_address,
|
||||
'metadata': parse_dict(raw_attrs, 'custom_configuration_parameters'),
|
||||
'ansible_ssh_port': 22,
|
||||
'provider': 'vsphere',
|
||||
}
|
||||
|
||||
try:
|
||||
attrs.update({
|
||||
'ansible_ssh_host': ip_address,
|
||||
})
|
||||
except (KeyError, ValueError):
|
||||
attrs.update({'ansible_ssh_host': '', })
|
||||
|
||||
attrs.update({
|
||||
'consul_dc': _clean_dc(attrs['metadata'].get('consul_dc', module_name)),
|
||||
'role': attrs['metadata'].get('role', 'none'),
|
||||
'ansible_python_interpreter': attrs['metadata'].get('python_bin','python')
|
||||
})
|
||||
|
||||
# attrs specific to Ansible
|
||||
if 'ssh_user' in attrs['metadata']:
|
||||
attrs['ansible_ssh_user'] = attrs['metadata']['ssh_user']
|
||||
|
||||
groups.append('role=' + attrs['role'])
|
||||
groups.append('dc=' + attrs['consul_dc'])
|
||||
|
||||
return name, attrs, groups
|
||||
|
||||
@parses('azure_instance')
|
||||
@calculate_mantl_vars
|
||||
def azure_host(resource, module_name):
|
||||
name = resource['primary']['attributes']['name']
|
||||
raw_attrs = resource['primary']['attributes']
|
||||
|
||||
groups = []
|
||||
|
||||
attrs = {
|
||||
'automatic_updates': raw_attrs['automatic_updates'],
|
||||
'description': raw_attrs['description'],
|
||||
'hosted_service_name': raw_attrs['hosted_service_name'],
|
||||
'id': raw_attrs['id'],
|
||||
'image': raw_attrs['image'],
|
||||
'ip_address': raw_attrs['ip_address'],
|
||||
'location': raw_attrs['location'],
|
||||
'name': raw_attrs['name'],
|
||||
'reverse_dns': raw_attrs['reverse_dns'],
|
||||
'security_group': raw_attrs['security_group'],
|
||||
'size': raw_attrs['size'],
|
||||
'ssh_key_thumbprint': raw_attrs['ssh_key_thumbprint'],
|
||||
'subnet': raw_attrs['subnet'],
|
||||
'username': raw_attrs['username'],
|
||||
'vip_address': raw_attrs['vip_address'],
|
||||
'virtual_network': raw_attrs['virtual_network'],
|
||||
'endpoint': parse_attr_list(raw_attrs, 'endpoint'),
|
||||
# ansible
|
||||
'ansible_ssh_port': 22,
|
||||
'ansible_ssh_user': raw_attrs['username'],
|
||||
'ansible_ssh_host': raw_attrs['vip_address'],
|
||||
}
|
||||
|
||||
# attrs specific to mantl
|
||||
attrs.update({
|
||||
'consul_dc': attrs['location'].lower().replace(" ", "-"),
|
||||
'role': attrs['description']
|
||||
})
|
||||
|
||||
# groups specific to mantl
|
||||
groups.extend(['azure_image=' + attrs['image'],
|
||||
'azure_location=' + attrs['location'].lower().replace(" ", "-"),
|
||||
'azure_username=' + attrs['username'],
|
||||
'azure_security_group=' + attrs['security_group']])
|
||||
|
||||
# groups specific to mantl
|
||||
groups.append('role=' + attrs['role'])
|
||||
groups.append('dc=' + attrs['consul_dc'])
|
||||
|
||||
return name, attrs, groups

@parses('clc_server')
@calculate_mantl_vars
def clc_server(resource, module_name):
    raw_attrs = resource['primary']['attributes']
    name = raw_attrs.get('id')
    groups = []
    md = parse_dict(raw_attrs, 'metadata')
    attrs = {
        'metadata': md,
        'ansible_ssh_port': md.get('ssh_port', 22),
        'ansible_ssh_user': md.get('ssh_user', 'root'),
        'provider': 'clc',
        'publicly_routable': False,
    }

    try:
        attrs.update({
            'public_ipv4': raw_attrs['public_ip_address'],
            'private_ipv4': raw_attrs['private_ip_address'],
            'ansible_ssh_host': raw_attrs['public_ip_address'],
            'publicly_routable': True,
        })
    except (KeyError, ValueError):
        attrs.update({
            'ansible_ssh_host': raw_attrs['private_ip_address'],
            'private_ipv4': raw_attrs['private_ip_address'],
        })

    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
        'role': attrs['metadata'].get('role', 'none'),
    })

    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])
    return name, attrs, groups


## QUERY TYPES
def query_host(hosts, target):
    for name, attrs, _ in hosts:
        if name == target:
            return attrs

    return {}


def query_list(hosts):
    groups = defaultdict(dict)
    meta = {}

    for name, attrs, hostgroups in hosts:
        for group in set(hostgroups):
            groups[group].setdefault('hosts', [])
            groups[group]['hosts'].append(name)

        meta[name] = attrs

    groups['_meta'] = {'hostvars': meta}
    return groups


def query_hostfile(hosts):
    out = ['## begin hosts generated by terraform.py ##']
    out.extend(
        '{}\t{}'.format(attrs['ansible_ssh_host'].ljust(16), name)
        for name, attrs, _ in hosts
    )

    out.append('## end hosts generated by terraform.py ##')
    return '\n'.join(out)
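

# For reference, a sketch (with made-up host tuples) of the `--list`
# inventory shape that query_list() produces:
#
#     hosts = [
#         ('node1', {'ansible_ssh_host': '10.0.0.1'}, ['role=master', 'dc=dc1']),
#         ('node2', {'ansible_ssh_host': '10.0.0.2'}, ['role=worker', 'dc=dc1']),
#     ]
#     query_list(hosts) == {
#         'role=master': {'hosts': ['node1']},
#         'role=worker': {'hosts': ['node2']},
#         'dc=dc1': {'hosts': ['node1', 'node2']},
#         '_meta': {'hostvars': {
#             'node1': {'ansible_ssh_host': '10.0.0.1'},
#             'node2': {'ansible_ssh_host': '10.0.0.2'},
#         }},
#     }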


def main():
    parser = argparse.ArgumentParser(
        __file__, __doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter, )
    modes = parser.add_mutually_exclusive_group(required=True)
    modes.add_argument('--list',
                       action='store_true',
                       help='list all variables')
    modes.add_argument('--host', help='list variables for a single host')
    modes.add_argument('--version',
                       action='store_true',
                       help='print version and exit')
    modes.add_argument('--hostfile',
                       action='store_true',
                       help='print hosts as a /etc/hosts snippet')
    parser.add_argument('--pretty',
                        action='store_true',
                        help='pretty-print output JSON')
    parser.add_argument('--nometa',
                        action='store_true',
                        help='with --list, exclude hostvars')
    default_root = os.environ.get('TERRAFORM_STATE_ROOT',
                                  os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                               '..', '..', )))
    parser.add_argument('--root',
                        default=default_root,
                        help='custom root to search for `.tfstate`s in')

    args = parser.parse_args()

    if args.version:
        print('%s %s' % (__file__, VERSION))
        parser.exit()

    hosts = iterhosts(iterresources(tfstates(args.root)))
    if args.list:
        output = query_list(hosts)
        if args.nometa:
            del output['_meta']
        print(json.dumps(output, indent=4 if args.pretty else None))
    elif args.host:
        output = query_host(hosts, args.host)
        print(json.dumps(output, indent=4 if args.pretty else None))
    elif args.hostfile:
        output = query_hostfile(hosts)
        print(output)

    parser.exit()


if __name__ == '__main__':
    main()
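
# Example invocations, using the modes defined above (run from a tree that
# contains at least one .tfstate file, or set TERRAFORM_STATE_ROOT):
#
#     ./terraform.py --list --pretty   # full dynamic inventory as JSON
#     ./terraform.py --host node1      # hostvars for one host ("node1" is made up)
#     ./terraform.py --hostfile        # /etc/hosts style snippet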
docs/ansible.md (new file, 50 lines)
@ -0,0 +1,50 @@

Ansible variables
===============

Inventory
-------------
The inventory is composed of 3 groups:

* **kube-node**: list of Kubernetes nodes where the pods will run.
* **kube-master**: list of servers where the Kubernetes master components (apiserver, scheduler, controller) will run.
  Note: if you want a server to act as both master and node, it must be defined in both the _kube-master_ and _kube-node_ groups.
* **etcd**: list of servers composing the etcd cluster. You should have at least 3 servers for failover purposes.

Below is a complete inventory example:

```
## Configure 'ip' variable to bind kubernetes services on a
## different ip than the default iface
node1 ansible_ssh_host=95.54.0.12 # ip=10.3.0.1
node2 ansible_ssh_host=95.54.0.13 # ip=10.3.0.2
node3 ansible_ssh_host=95.54.0.14 # ip=10.3.0.3
node4 ansible_ssh_host=95.54.0.15 # ip=10.3.0.4
node5 ansible_ssh_host=95.54.0.16 # ip=10.3.0.5
node6 ansible_ssh_host=95.54.0.17 # ip=10.3.0.6

[kube-master]
node1
node2

[etcd]
node1
node2
node3

[kube-node]
node2
node3
node4
node5
node6

[k8s-cluster:children]
kube-node
kube-master
etcd
```

Group vars
--------------
The main variables to change are located in the file ```inventory/group_vars/all.yml```.
docs/aws.md (new file, 10 lines)
@ -0,0 +1,10 @@

AWS
===============

To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`.

Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes/kubernetes/tree/master/cluster/aws/templates/iam). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for etcd do not need a role.

The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.

You can now create your cluster!
docs/calico.md (new file, 39 lines)
@ -0,0 +1,39 @@

Calico
===========

Check that the calico-node container is running:

```
docker ps | grep calico
```

The **calicoctl** command allows you to check the status of the network workloads.

* Check the status of Calico nodes:

```
calicoctl status
```

* Show the configured network subnet for containers:

```
calicoctl pool show
```

* Show the workloads (IP addresses of containers and where they are located):

```
calicoctl endpoint show --detail
```

##### Optional: BGP peering with border routers

In some cases you may want to route the pods' subnet directly, so that NAT is not needed on the nodes.
For instance, if you have a cluster spread across different locations and you want your pods to talk to each other no matter where they are located.
The following variable needs to be set:
`peer_with_router`, to enable peering with the datacenter's border router (default value: false).
You'll also need to edit the inventory and add a hostvar `local_as` per node:

```
node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx
```
docs/cloud.md (new file, 22 lines)
@ -0,0 +1,22 @@

Cloud providers
==============

#### Provisioning

You can use kargo-cli to start new instances on cloud providers.
Here's an example:
```
kargo [aws|gce] --nodes 2 --etcd 3 --cluster-name test-smana
```

#### Deploy kubernetes

With kargo-cli:
```
kargo deploy [--aws|--gce] -u admin
```

Or with the ansible-playbook command:
```
ansible-playbook -u smana -e ansible_ssh_user=admin -e cloud_provider=[aws|gce] -b --become-user=root -i inventory/single.cfg cluster.yml
```
docs/coreos.md (new file, 24 lines)
@ -0,0 +1,24 @@

CoreOS bootstrap
===============

Example with **kargo-cli**:

```
kargo deploy --gce --coreos
```

Or with Ansible:

Before running the cluster playbook you must satisfy the following requirements:

* Each CoreOS node needs a writable directory **/opt/bin** (~400M disk space)

* Uncomment the variable **ansible\_python\_interpreter** in the file `inventory/group_vars/all.yml`

* Run the Python bootstrap playbook:

```
ansible-playbook -u smana -e ansible_ssh_user=smana -b --become-user=root -i inventory/inventory.cfg coreos-bootstrap.yml
```

Then you can proceed to [cluster deployment](#run-deployment)
docs/dns-stack.md (new file, 92 lines)
@ -0,0 +1,92 @@

K8s DNS stack by Kargo
======================

Kargo configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
[cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md)
to serve as an authoritative DNS server for a given ``dns_domain`` and its
``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).

Note that additional search (sub)domains may be defined in the ``searchdomains``
and ``ndots`` vars, and additional recursive DNS resolvers in the
``upstream_dns_servers`` and ``nameservers`` vars. Intranet DNS resolvers
should be specified first, followed by external resolvers, for example:

```
skip_dnsmasq: true
nameservers: [8.8.8.8]
upstream_dns_servers: [172.18.32.6]
```
or
```
skip_dnsmasq: false
upstream_dns_servers: [172.18.32.6, 172.18.32.7, 8.8.8.8, 8.8.8.4]
```
The vars are explained below as well.

DNS configuration details
-------------------------

Here is an approximate picture of how DNS is configured
by the Kargo ansible playbooks:

![Image](figures/dns.jpeg?raw=true)

Note that an additional dnsmasq daemon set is installed by Kargo
by default. Kubelet will configure the DNS base of all pods to use the
given dnsmasq cluster IP, which is defined via the ``dns_server`` var.
The dnsmasq forwards requests for a given cluster ``dns_domain`` to
Kubedns's SkyDns service. The SkyDns server is configured to be an
authoritative DNS server for the given cluster domain (and its subdomains
up to ``ndots:5`` depth). Note: you should scale its replication controller
up if SkyDns chokes. These two layered DNS forwarders provide HA for the
DNS cluster IP endpoint, which is a critical moving part for Kubernetes apps.

Nameservers are also configured in the hosts' ``/etc/resolv.conf`` files,
as the given DNS cluster IP merged with the ``nameservers`` values, while the
DNS cluster IP merged with the ``upstream_dns_servers`` defines additional
nameservers for the aforementioned dnsmasq daemon set running on all hosts.
This mitigates the existing Linux limitation of max 3 nameservers in
``/etc/resolv.conf`` and also brings an additional caching layer for the
clustered DNS services.
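To make the merging concrete, here is a tiny illustrative Python sketch; the function name and the IPs are made up, it is not Kargo's actual template logic:

```
def resolvconf_nameservers(dns_server, nameservers, limit=3):
    # the DNS cluster IP always comes first; /etc/resolv.conf honors
    # only `limit` entries, so the tail is dropped
    merged = [dns_server] + [ns for ns in nameservers if ns != dns_server]
    return merged[:limit]

# with the cluster IP taking one slot, only two custom resolvers survive
print(resolvconf_nameservers('10.233.0.2', ['172.18.32.6', '8.8.8.8', '8.8.4.4']))
# -> ['10.233.0.2', '172.18.32.6', '8.8.8.8']
```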

You can skip the dnsmasq daemon set install steps by setting
``skip_dnsmasq: true``. This may be the case if you're fine with
the nameservers limitation. Sadly, there is no way to work around the
search domain limitations of 256 chars and 6 domains. Thus, you can
use the ``searchdomains`` var to define no more than three custom domains.
The remaining three slots are reserved for the K8s cluster default subdomains.

When dnsmasq is skipped, Kargo redefines the DNS cluster IP to point directly
to the SkyDns cluster IP ``skydns_server`` and configures Kubelet's
``--dns_cluster`` to use that IP as well. While this greatly simplifies
things, it comes at the price of limited nameservers: as you know now,
the DNS cluster IP takes a slot in ``/etc/resolv.conf``, thus you can
specify no more than two nameservers for infra and/or external use.
Those may be specified either in ``nameservers`` or ``upstream_dns_servers``
and will be merged together with the ``skydns_server`` IP into the hosts'
``/etc/resolv.conf``.

Limitations
-----------

* Kargo has no way yet to configure the Kubedns add-on to forward requests
SkyDns cannot answer with authority to arbitrary recursive resolvers. This
task is left for the future. See the [official SkyDns docs](https://github.com/skynetservices/skydns)
for details.

* There is
[no way to specify a custom value](https://github.com/kubernetes/kubernetes/issues/33554)
for the SkyDNS ``ndots`` param via an
[option for KubeDNS](https://github.com/kubernetes/kubernetes/blob/master/cmd/kube-dns/app/options/options.go)
add-on, although SkyDNS supports it. Thus, DNS SRV records may not work
as expected as they require ``ndots:7``.

* The ``searchdomains`` have a limitation of 6 names and 256 chars
in length. Due to the default ``svc, default.svc`` subdomains, the actual
limits are 4 names and 239 chars respectively.

* The ``nameservers`` have a limitation of 3 servers, although there
is a way to mitigate that with the ``upstream_dns_servers``;
see below. In any case, the ``nameservers`` can take no more than two
custom DNS servers because one slot is reserved for the Kubernetes
cluster's needs.
docs/figures/dns.jpeg (new binary file, 654 KiB; not shown)
docs/figures/loadbalancer_localhost.png (new binary file, 57 KiB; not shown)
docs/flannel.md (new file, 51 lines)
@ -0,0 +1,51 @@

Flannel
==============

* A flannel configuration file should have been created on each node:

```
cat /run/flannel/subnet.env
FLANNEL_NETWORK=10.233.0.0/18
FLANNEL_SUBNET=10.233.16.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=false
```

* Check that the network interface has been created:

```
ip a show dev flannel.1
4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
    link/ether e2:f3:a7:0f:bf:cb brd ff:ff:ff:ff:ff:ff
    inet 10.233.16.0/18 scope global flannel.1
       valid_lft forever preferred_lft forever
    inet6 fe80::e0f3:a7ff:fe0f:bfcb/64 scope link
       valid_lft forever preferred_lft forever
```

* Docker must be configured with a bridge IP in the flannel subnet:

```
ps aux | grep docker
root     20196  1.7  2.7 1260616 56840 ?  Ssl  10:18  0:07 /usr/bin/docker daemon --bip=10.233.16.1/24 --mtu=1450
```

* Try to run a container and check its IP address:

```
kubectl run test --image=busybox --command -- tail -f /dev/null
replicationcontroller "test" created

kubectl describe po test-34ozs | grep ^IP
IP:       10.233.16.2
```

```
kubectl exec test-34ozs -- ip a show dev eth0
8: eth0@if9: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1450 qdisc noqueue
    link/ether 02:42:0a:e9:2b:03 brd ff:ff:ff:ff:ff:ff
    inet 10.233.16.2/24 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:aff:fee9:2b03/64 scope link tentative flags 08
       valid_lft forever preferred_lft forever
```
docs/getting-started.md (new file, 19 lines)
@ -0,0 +1,19 @@

Getting started
===============

The easiest way to run the deployment is to use the **kargo-cli** tool.
Complete documentation can be found in its [github repository](https://github.com/kubespray/kargo-cli).

Here is a simple example on AWS:

* Create instances and generate the inventory:

```
kargo aws --instances 3
```

* Run the deployment:

```
kargo deploy --aws -u centos -n calico
```
docs/ha-mode.md (new file, 112 lines)
@ -0,0 +1,112 @@

HA endpoints for K8s
====================

The following components require highly available endpoints:
* the etcd cluster,
* the kube-apiserver service instances.

The former provides the
[etcd-proxy](https://coreos.com/etcd/docs/latest/proxy.html) service to access
the cluster members in an HA fashion.

The latter relies on third-party reverse proxies, like Nginx or HAProxy, to
achieve the same goal.

Etcd
----

Etcd proxies are deployed on each node in the `k8s-cluster` group. A proxy is
a separate etcd process. It has a `localhost:2379` frontend and all of the etcd
cluster members as backends. Note that the `access_ip` is used as the backend
IP, if specified. Frontend endpoints cannot be accessed externally as they are
bound to localhost only.

The `etcd_access_endpoint` fact provides an access pattern for clients, and the
`etcd_multiaccess` group var (defaults to `false`) controls that behavior.
When enabled, it makes deployed components access the etcd cluster members
directly: `http://ip1:2379, http://ip2:2379,...`. This mode assumes the clients
do their own load balancing and handle HA for connections. Note that a pod
definition of the flannel networking plugin always uses a single
`--etcd-server` endpoint!
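To make the two access modes concrete, here is a small illustrative Python sketch; the function name, `member_ips`, and the addresses are made up, this is not Kargo's actual fact logic:

```
def etcd_access_endpoint(member_ips, multiaccess=False, port=2379):
    # multiaccess: clients hit every member and load-balance themselves
    if multiaccess:
        return ','.join('http://{}:{}'.format(ip, port) for ip in member_ips)
    # default: go through the local etcd-proxy frontend on each node
    return 'http://127.0.0.1:{}'.format(port)

print(etcd_access_endpoint(['10.0.0.1', '10.0.0.2', '10.0.0.3'], multiaccess=True))
# -> http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379
```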

Kube-apiserver
--------------

K8s components require a loadbalancer to access the apiservers via a reverse
proxy. Kargo includes support for an nginx-based proxy that resides on each
non-master Kubernetes node. This is referred to as localhost loadbalancing. It
is less efficient than a dedicated load balancer because it creates extra
health checks on the Kubernetes apiserver, but is more practical for scenarios
where an external LB or virtual IP management is inconvenient.

This option is configured by the variable `loadbalancer_apiserver_localhost`.
If you disable it, you will need to configure your own loadbalancer to achieve
HA. Note that deploying a loadbalancer is up to the user and is not covered by
the ansible roles in Kargo. By default, it only configures a non-HA endpoint,
which points to the `access_ip` or IP address of the first server node in the
`kube-master` group. It can also configure clients to use endpoints for a given
loadbalancer type. The following diagram shows how traffic to the apiserver is
directed.

![Image](figures/loadbalancer_localhost.png?raw=true)

**Note:** Kubernetes master nodes still use insecure localhost access because
there are bugs in Kubernetes <1.5.0 in using TLS auth on master role
services.

A user may opt to use an external loadbalancer (LB) instead. An external LB
provides access for external clients, while the internal LB accepts client
connections only on localhost, similarly to the etcd-proxy HA endpoints.
Given a frontend `VIP` address and `IP1, IP2` addresses of backends, here is
an example configuration for a HAProxy service acting as an external LB:
```
listen kubernetes-apiserver-https
  bind <VIP>:8383
  option ssl-hello-chk
  mode tcp
  timeout client 3h
  timeout server 3h
  server master1 <IP1>:443
  server master2 <IP2>:443
  balance roundrobin
```

And the corresponding example global vars config:
```
apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
loadbalancer_apiserver:
  address: <VIP>
  port: 8383
```

This domain name, or the default "lb-apiserver.kubernetes.local", will be
inserted into the `/etc/hosts` file of all servers in the `k8s-cluster` group.
Note that the HAProxy service should be HA as well and requires VIP management,
which is out of scope of this doc.

Specifying an external LB overrides any internal localhost LB configuration.
Note that for this example, the `kubernetes-apiserver-http` endpoint
has backends receiving unencrypted traffic, which may be a security issue
when interconnecting different nodes, or maybe not, if those belong to an
isolated management network without external access.

In order to achieve HA for the HAProxy instances, those must be running on
each node in the `k8s-cluster` group as well, but require no VIP and thus
no VIP management.

Access endpoints are evaluated automatically, as follows:

| Endpoint type                | kube-master   | non-master          |
|------------------------------|---------------|---------------------|
| Local LB                     | http://lc:p   | http://lc:sp        |
| External LB, no internal     | http://lc:p   | https://lb:lp       |
| No ext/int LB (default)      | http://lc:p   | https://m[0].aip:sp |

Where:
* `m[0]` - the first node in the `kube-master` group;
* `lb` - LB FQDN, `apiserver_loadbalancer_domain_name`;
* `lc` - localhost;
* `p` - insecure port, `kube_apiserver_insecure_port`;
* `sp` - secure port, `kube_apiserver_port`;
* `lp` - LB port, `loadbalancer_apiserver.port`, defers to the secure port;
* `ip` - the node IP, defers to the ansible IP;
* `aip` - `access_ip`, defers to the ip.
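
The same table expressed as a small Python sketch, purely for illustration; the function, its defaults, and the `10.0.0.1` address are made up:

```
def apiserver_endpoint(is_master, local_lb, external_lb,
                       lb='lb-apiserver.kubernetes.local',
                       first_master_aip='10.0.0.1',
                       p=8080, sp=443, lp=None):
    # masters always talk to the insecure localhost port
    if is_master:
        return 'http://localhost:{}'.format(p)
    if local_lb:
        return 'http://localhost:{}'.format(sp)
    if external_lb:
        return 'https://{}:{}'.format(lb, lp or sp)  # lp defers to sp
    # default: the first master's access_ip on the secure port
    return 'https://{}:{}'.format(first_master_aip, sp)

print(apiserver_endpoint(is_master=False, local_lb=False, external_lb=True))
# -> https://lb-apiserver.kubernetes.local:443
```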
docs/large-deployments.md (new file, 19 lines)
@ -0,0 +1,19 @@

Large deployments of K8s
========================

For large-scale deployments, consider the following configuration changes:

* Tune the [ansible settings](http://docs.ansible.com/ansible/intro_configuration.html)
for the `forks` and `timeout` vars to fit large numbers of nodes being deployed.

* Override the containers' `foo_image_repo` vars to point to an intranet registry.

* Set ``download_run_once: true`` to download binaries and container
images only once, then push them to nodes in batches.

* Adjust the `retry_stagger` global var as appropriate. It should provide sane
load on the delegate (the first K8s master node) when retrying failed
push or download operations.

For example, when deploying 200 nodes, you may want to run ansible with
``--forks=50``, ``--timeout=600`` and define ``retry_stagger: 60``.
docs/openstack.md (new file, 48 lines)
@ -0,0 +1,48 @@

OpenStack
===============

To deploy kubespray on [OpenStack](https://www.openstack.org/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'openstack'`.

After that, make sure to source in your OpenStack credentials like you would do when using `nova-client`, by using `source path/to/your/openstack-rc`.

The next step is to make sure the hostnames in your `inventory` file are identical to your instance names in OpenStack.
Otherwise [cinder](https://wiki.openstack.org/wiki/Cinder) won't work as expected.

Unless you are using calico you can now run the playbook.

**Additional step needed when using calico:**

Calico does not encapsulate all packets with the host's IP address. Instead, packets are routed with the pods' IP addresses directly.
OpenStack will filter and drop all packets from IPs it does not know, to prevent spoofing.

In order to make calico work on OpenStack, you will need to tell OpenStack to allow calico's packets by allowing the network it uses.

First you will need the IDs of your OpenStack instances that will run kubernetes:

    nova list --tenant Your-Tenant
    +--------------------------------------+--------+----------------------------------+--------+-------------+
    | ID                                   | Name   | Tenant ID                        | Status | Power State |
    +--------------------------------------+--------+----------------------------------+--------+-------------+
    | e1f48aad-df96-4bce-bf61-62ae12bf3f95 | k8s-1  | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running     |
    | 725cd548-6ea3-426b-baaa-e7306d3c8052 | k8s-2  | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running     |

Then you can use the instance IDs to find the connected [neutron](https://wiki.openstack.org/wiki/Neutron) ports:

    neutron port-list -c id -c device_id
    +--------------------------------------+--------------------------------------+
    | id                                   | device_id                            |
    +--------------------------------------+--------------------------------------+
    | 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
    | e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |

Given the port IDs on the left, you can set the `allowed_address_pairs` in neutron:

    # allow kube_service_addresses network
    neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18
    neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18

    # allow kube_pods_subnet network
    neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.64.0/18
    neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.64.0/18

Now you can finally run the playbook.
docs/roadmap.md (new file, 87 lines)
@ -0,0 +1,87 @@

Kargo's roadmap
=================

### Self deployment (pull-mode) [#320](https://github.com/kubespray/kargo/issues/320)
- the playbook would install and configure docker/rkt and the etcd cluster
- the following data would be inserted into etcd: certs, tokens, users, inventory, group_vars.
- a "kubespray" container would be deployed (kargo-cli, ansible-playbook, kpm)
- to be discussed: a way to provide the inventory
- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kargo/issues/321)

### Provisioning and cloud providers
- Terraform to provision instances on **GCE, AWS, Openstack, Digital Ocean, Azure**
- On AWS: autoscaling, multi AZ
- On Azure: autoscaling, create a loadbalancer [#297](https://github.com/kubespray/kargo/issues/297)
- On GCE: be able to create a loadbalancer automatically (IAM?) [#280](https://github.com/kubespray/kargo/issues/280)
- **TLS bootstrap** support for kubelet [#234](https://github.com/kubespray/kargo/issues/234)
  (related issues: https://github.com/kubernetes/kubernetes/pull/20439 <br>
  https://github.com/kubernetes/kubernetes/issues/18112)

### Tests
- Run the kubernetes e2e tests
- migrate to jenkins
  (a test is currently a deployment on a 3 node cluster, testing the k8s api and pinging between 2 pods)
- Full tests on GCE per day (all OS's, all network plugins)
- trigger a single test per pull request
- single test with the Ansible version n-1 per day
- Test idempotency on a single OS but for all network plugins/container engines
- single test on AWS per day
- test different architectures:
  - 3 instances, 3 are members of the etcd cluster, 2 of them acting as master and node, 1 as node
  - 5 instances, 3 are etcd and nodes, 2 are masters only
  - 7 instances, 3 etcd only, 2 masters, 2 nodes
- test scaling up the cluster: +1 etcd, +1 master, +1 node

### Lifecycle
- Drain a worker node when upgrading k8s components on it. [#154](https://github.com/kubespray/kargo/issues/154)
- Drain a worker node when shutting down/deleting an instance

### Networking
- romana.io support [#160](https://github.com/kubespray/kargo/issues/160)
- Configure network policy for Calico. [#159](https://github.com/kubespray/kargo/issues/159)
- Opencontrail
- Canal

### High availability
- (to be discussed) option to set up a loadbalancer for the apiservers like ucarp/pacemaker/keepalived,
  while waiting for the issue [kubernetes/kubernetes#18174](https://github.com/kubernetes/kubernetes/issues/18174) to be fixed.

### Kargo-cli
- Delete instances
- `kargo vagrant` to set up a test cluster locally
- `kargo azure` for Microsoft Azure support
- switch to Terraform instead of Ansible for provisioning
- update $HOME/.kube/config when a cluster is deployed. Optionally switch to this context

### Kargo API
- Perform all actions through an **API**
- Store inventories / configurations of multiple clusters
- make sure that the state of a cluster is completely saved in no more than one config file beyond the hosts inventory

### Addons (with kpm)
Include optional deployments to init the cluster:
##### Monitoring
- Heapster / Grafana ....
- **Prometheus**

##### Others

##### Dashboards:
- kubernetes-dashboard
- Fabric8
- Tectonic
- Cockpit

##### Paas like
- Openshift Origin
- Openstack
- Deis Workflow

### Others
- remove nodes (adding is already supported)
- being able to choose any k8s version (almost done)
- **rkt** support [#59](https://github.com/kubespray/kargo/issues/59)
- Review documentation (split into categories)
- **consul** -> if officially supported by k8s
- flex volumes options (e.g. **torrus** support) [#312](https://github.com/kubespray/kargo/issues/312)
- Clusters federation option (aka **ubernetes**) [#329](https://github.com/kubespray/kargo/issues/329)
docs/vagrant.md (new file, 41 lines)
@ -0,0 +1,41 @@

Vagrant Install
=================

Assuming you have Vagrant (1.8+) installed with virtualbox (it may work
with vmware, but is untested), you should be able to launch a 3-node
Kubernetes cluster by simply running `$ vagrant up`.<br />

This will spin up 3 VMs and install kubernetes on them. Once they are
up you can connect to any of them by running <br />
`$ vagrant ssh k8s-0[1..3]`.

```
$ vagrant up
Bringing machine 'k8s-01' up with 'virtualbox' provider...
Bringing machine 'k8s-02' up with 'virtualbox' provider...
Bringing machine 'k8s-03' up with 'virtualbox' provider...
==> k8s-01: Box 'bento/ubuntu-14.04' could not be found. Attempting to find and install...
...
...
    k8s-03: Running ansible-playbook...

PLAY [k8s-cluster] *************************************************************

TASK [setup] *******************************************************************
ok: [k8s-03]
ok: [k8s-01]
ok: [k8s-02]
...
...
PLAY RECAP *********************************************************************
k8s-01                     : ok=157  changed=66   unreachable=0    failed=0
k8s-02                     : ok=137  changed=59   unreachable=0    failed=0
k8s-03                     : ok=86   changed=51   unreachable=0    failed=0

$ vagrant ssh k8s-01
vagrant@k8s-01:~$ kubectl get nodes
NAME      STATUS    AGE
k8s-01    Ready     45s
k8s-02    Ready     45s
k8s-03    Ready     45s
```
@ -1,27 +1,75 @@
# Directory where the binaries will be installed
# Valid bootstrap options (required): xenial, coreos, none
bootstrap_os: none

# Directory where the binaries will be installed
bin_dir: /usr/local/bin

# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5

# Uncomment this line for CoreOS only.
# Directory where python binary is installed
# ansible_python_interpreter: "/opt/bin/python"

# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert

# Cluster Loglevel configuration
kube_log_level: 2

# Users to create for basic auth in Kubernetes API via HTTP
kube_api_pwd: "changeme"
kube_users:
  kube:
    pass: changeme
    pass: "{{kube_api_pwd}}"
    role: admin
  root:
    pass: "changeme"
    role: admin
#  root:
#    pass: changeme
#    role: admin

# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf
ndots: 5

# set this variable to calico if needed. keep it empty if flannel is used
kube_network_plugin: calico
# For some environments, each node has a publicly accessible
# address and an address it should bind services to. These are
# really inventory level variables, but described here for consistency.
#
# When advertising access, the access_ip will be used, but will defer to
# ip and then the default ansible ip when unspecified.
#
# When binding to restrict access, the ip variable will be used, but will
# defer to the default ansible ip when unspecified.
#
# The ip variable is used for specific address binding, e.g. listen address
# for etcd. This is used to help with environments like Vagrant or multi-nic
# systems where one address should be preferred over another.
# ip: 10.2.2.2
#
# The access_ip variable is used to define how other nodes should access
# the node. This is used in flannel to allow other flannel nodes to see
# this node, for example. The access_ip is really useful in AWS and Google
# environments where the nodes are accessed remotely by the "public" ip,
# but don't know about that address themselves.
# access_ip: 1.1.1.1

# Etcd access modes:
# Enable multiaccess to configure clients to access all of the etcd members directly
# as "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
# This may be the case if clients support and loadbalance multiple etcd servers natively.
etcd_multiaccess: false

# Assume no internal loadbalancers for apiservers exist and listen on
# kube_apiserver_port (default 443)
loadbalancer_apiserver_localhost: true

# Choose network plugin (calico, weave or flannel)
kube_network_plugin: flannel

# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
@ -58,29 +106,39 @@ kube_apiserver_insecure_port: 8080 # (http)
# You still must manually configure all your containers to use this DNS server,
# Kubernetes won't do this for you (yet).

# Do not install additional dnsmasq
skip_dnsmasq: false
# Upstream dns servers used by dnsmasq
upstream_dns_servers:
  - 8.8.8.8
  - 4.4.8.8
#upstream_dns_servers:
#  - 8.8.8.8
#  - 8.8.4.4
#
# # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
dns_setup: true
dns_domain: "{{ cluster_name }}"
#
# # Ip address of the kubernetes dns service
# # Ip address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
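# For illustration only: the two ipaddr expressions above resolve to fixed
# service IPs inside kube_service_addresses. The equivalent computation with
# netaddr (already listed in requirements.txt) looks like:
#   >>> from netaddr import IPNetwork
#   >>> net = IPNetwork('10.233.0.0/18')   # kube_service_addresses
#   >>> net[3]   # skydns_server -> IPAddress('10.233.0.3')
#   >>> net[2]   # dns_server    -> IPAddress('10.233.0.2')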

# For multi-master architectures:
# kube-proxy doesn't support multiple apiservers for the time being so you'll need to configure your own loadbalancer
# This domain name will be inserted into the /etc/hosts file of all servers
# configuration example with haproxy:
# listen kubernetes-apiserver-https
#   bind 10.99.0.21:8383
#   option ssl-hello-chk
#   mode tcp
#   timeout client 3h
#   timeout server 3h
#   server master1 10.99.0.26:443
#   server master2 10.99.0.27:443
#   balance roundrobin
# apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
# There are some changes specific to the cloud providers
# for instance we need to encapsulate packets with some network plugins
# If set, the possible values are either 'gce', 'aws' or 'openstack'
# When openstack is used make sure to source in the openstack credentials
# like you would do when using nova-client before starting the playbook.
# cloud_provider:

## Set these proxy values in order to update docker daemon to use proxies
# http_proxy: ""
# https_proxy: ""
# no_proxy: ""

## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self hosted registries like so:
docker_options: "--insecure-registry={{ kube_service_addresses }}"

# default packages to install within the cluster
kpm_packages: []
# - name: kube-system/grafana
@ -1,32 +1,29 @@
[downloader]
localhost ansible_connection=local ansible_python_interpreter=python2

[kube-master]
node1 ansible_ssh_host=10.99.0.26
node2 ansible_ssh_host=10.99.0.27

[etcd]
node1 ansible_ssh_host=10.99.0.26
node2 ansible_ssh_host=10.99.0.27
node3 ansible_ssh_host=10.99.0.4

[kube-node]
node2 ansible_ssh_host=10.99.0.27
node3 ansible_ssh_host=10.99.0.4
node4 ansible_ssh_host=10.99.0.5
node5 ansible_ssh_host=10.99.0.36
node6 ansible_ssh_host=10.99.0.37

[paris]
node1 ansible_ssh_host=10.99.0.26
node3 ansible_ssh_host=10.99.0.4 local_as=xxxxxxxx
node4 ansible_ssh_host=10.99.0.5 local_as=xxxxxxxx

[new-york]
node2 ansible_ssh_host=10.99.0.27
node5 ansible_ssh_host=10.99.0.36 local_as=xxxxxxxx
node6 ansible_ssh_host=10.99.0.37 local_as=xxxxxxxx

[k8s-cluster:children]
kube-node
kube-master
#[kube-master]
#node1 ansible_ssh_host=10.99.0.26
#node2 ansible_ssh_host=10.99.0.27
#
#[etcd]
#node1 ansible_ssh_host=10.99.0.26
#node2 ansible_ssh_host=10.99.0.27
#node3 ansible_ssh_host=10.99.0.4
#
#[kube-node]
#node2 ansible_ssh_host=10.99.0.27
#node3 ansible_ssh_host=10.99.0.4
#node4 ansible_ssh_host=10.99.0.5
#node5 ansible_ssh_host=10.99.0.36
#node6 ansible_ssh_host=10.99.0.37
#
#[paris]
#node1 ansible_ssh_host=10.99.0.26
#node3 ansible_ssh_host=10.99.0.4 local_as=xxxxxxxx
#node4 ansible_ssh_host=10.99.0.5 local_as=xxxxxxxx
#
#[new-york]
#node2 ansible_ssh_host=10.99.0.27
#node5 ansible_ssh_host=10.99.0.36 local_as=xxxxxxxx
#node6 ansible_ssh_host=10.99.0.37 local_as=xxxxxxxx
#
#[k8s-cluster:children]
#kube-node
#kube-master
@ -1,8 +1,5 @@
node1 ansible_connection=local local_release_dir={{ansible_env.HOME}}/releases

[downloader]
node1

[kube-master]
node1

requirements.txt (new file, 2 lines)
@ -0,0 +1,2 @@
ansible
netaddr
@ -1,41 +1,48 @@
---
- src: https://github.com/ansibl8s/k8s-common.git
- src: https://gitlab.com/kubespray-ansibl8s/k8s-common.git
  path: roles/apps
  version: v1.0
  scm: git

- src: https://github.com/ansibl8s/k8s-kubedns.git
  path: roles/apps
  version: v1.0

#- src: https://github.com/ansibl8s/k8s-kube-ui.git
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-dashboard.git
#  path: roles/apps
#  version: v1.0
#  scm: git
#
#- src: https://github.com/ansibl8s/k8s-fabric8.git
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-kubedns.git
#  path: roles/apps
#  version: v1.0
#  scm: git
#
#- src: https://github.com/ansibl8s/k8s-elasticsearch.git
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-elasticsearch.git
#  path: roles/apps
#  # version: v1.0
#  scm: git
#
#- src: https://github.com/ansibl8s/k8s-redis.git
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-redis.git
#  path: roles/apps
#  # version: v1.0
#  scm: git
#
#- src: https://github.com/ansibl8s/k8s-memcached.git
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-memcached.git
#  path: roles/apps
#  version: v1.0
#  scm: git
#
#- src: https://github.com/ansibl8s/k8s-postgres.git
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-postgres.git
#  path: roles/apps
#  version: v1.0
#  scm: git
#
#- src: https://github.com/ansibl8s/k8s-heapster.git
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-pgbouncer.git
#  path: roles/apps
#  scm: git
#
#- src: https://github.com/ansibl8s/k8s-influxdb.git
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-heapster.git
#  path: roles/apps
#  scm: git
#
#- src: https://github.com/ansibl8s/k8s-kubedash.git
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-influxdb.git
#  path: roles/apps
#  scm: git
#
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-kubedash.git
#  path: roles/apps
#  scm: git
#
#- src: https://gitlab.com/kubespray-ansibl8s/k8s-kube-logstash.git
#  path: roles/apps
#  scm: git
roles/adduser/defaults/main.yml (new file, 24 lines)
@ -0,0 +1,24 @@
---
addusers:
  etcd:
    name: etcd
    comment: "Etcd user"
    createhome: yes
    home: "/var/lib/etcd"
    system: yes
    shell: /bin/nologin
  kube:
    name: kube
    comment: "Kubernetes user"
    shell: /sbin/nologin
    system: yes
    group: "{{ kube_cert_group }}"
    createhome: no

adduser:
  name: "{{ user.name }}"
  group: "{{ user.name|default(None) }}"
  comment: "{{ user.comment|default(None) }}"
  shell: "{{ user.shell|default(None) }}"
  system: "{{ user.system|default(None) }}"
  createhome: "{{ user.createhome|default(None) }}"
roles/adduser/tasks/main.yml (new file, 13 lines)
@ -0,0 +1,13 @@
---
- name: User | Create User Group
  group: name={{user.group|default(user.name)}} system={{user.system|default(omit)}}

- name: User | Create User
  user:
    comment: "{{user.comment|default(omit)}}"
    createhome: "{{user.createhome|default(omit)}}"
    group: "{{user.group|default(user.name)}}"
    home: "{{user.home|default(omit)}}"
    shell: "{{user.shell|default(omit)}}"
    name: "{{user.name}}"
    system: "{{user.system|default(omit)}}"
|
8
roles/adduser/vars/coreos.yml
Normal file
8
roles/adduser/vars/coreos.yml
Normal file
@ -0,0 +1,8 @@
|
||||
---
|
||||
addusers:
|
||||
- name: kube
|
||||
comment: "Kubernetes user"
|
||||
shell: /sbin/nologin
|
||||
system: yes
|
||||
group: "{{ kube_cert_group }}"
|
||||
createhome: no
|
15
roles/adduser/vars/debian.yml
Normal file
15
roles/adduser/vars/debian.yml
Normal file
@ -0,0 +1,15 @@
|
||||
---
|
||||
addusers:
|
||||
- name: etcd
|
||||
comment: "Etcd user"
|
||||
createhome: yes
|
||||
home: "/var/lib/etcd"
|
||||
system: yes
|
||||
shell: /bin/nologin
|
||||
|
||||
- name: kube
|
||||
comment: "Kubernetes user"
|
||||
shell: /sbin/nologin
|
||||
system: yes
|
||||
group: "{{ kube_cert_group }}"
|
||||
createhome: no
|
15
roles/adduser/vars/redhat.yml
Normal file
15
roles/adduser/vars/redhat.yml
Normal file
@ -0,0 +1,15 @@
|
||||
---
|
||||
addusers:
|
||||
- name: etcd
|
||||
comment: "Etcd user"
|
||||
createhome: yes
|
||||
home: "/var/lib/etcd"
|
||||
system: yes
|
||||
shell: /bin/nologin
|
||||
|
||||
- name: kube
|
||||
comment: "Kubernetes user"
|
||||
shell: /sbin/nologin
|
||||
system: yes
|
||||
group: "{{ kube_cert_group }}"
|
||||
createhome: no
|
Submodule roles/apps/k8s-common deleted from c69c5f881f
Submodule roles/apps/k8s-elasticsearch deleted from 3d74c70a4a
Submodule roles/apps/k8s-etcd deleted from abd61ee91a
Submodule roles/apps/k8s-fabric8 deleted from 82ca8293b0
Submodule roles/apps/k8s-heapster deleted from 44a6519bf8
Submodule roles/apps/k8s-influxdb deleted from 38d54c48e7
Submodule roles/apps/k8s-kube-logstash deleted from 256fa156e4
Submodule roles/apps/k8s-kube-ui deleted from b81a2848d9
Submodule roles/apps/k8s-kubedash deleted from 64385696a9
Submodule roles/apps/k8s-kubedns deleted from b5015aed8f
Submodule roles/apps/k8s-memcached deleted from 563b35f3b6
Submodule roles/apps/k8s-postgres deleted from e219c91391
Submodule roles/apps/k8s-rabbitmq deleted from b91f96bb9c
Submodule roles/apps/k8s-redis deleted from a4e134fef3
roles/bootstrap-os/defaults/main.yml (new file, 4 lines)
@ -0,0 +1,4 @@
---
pypy_version: 2.4.0
pip_python_modules:
  - httplib2
@ -1,7 +1,9 @@
#!/bin/bash
set -e

BINDIR="/usr/local/bin"
BINDIR="/opt/bin"

mkdir -p $BINDIR

cd $BINDIR

@ -9,7 +11,7 @@ if [[ -e $BINDIR/.bootstrapped ]]; then
  exit 0
fi

PYPY_VERSION=2.4.0
PYPY_VERSION=5.1.0

wget -O - https://bitbucket.org/pypy/pypy/downloads/pypy-$PYPY_VERSION-linux64.tar.bz2 |tar -xjf -
mv -n pypy-$PYPY_VERSION-linux64 pypy
roles/bootstrap-os/files/get-pip.py (new file, 19017 lines)
File diff suppressed because it is too large
@ -1,3 +1,3 @@
#!/bin/bash
BINDIR="/usr/local/bin"
BINDIR="/opt/bin"
LD_LIBRARY_PATH=$BINDIR/pypy/lib:$LD_LIBRARY_PATH $BINDIR/pypy/bin/$(basename $0) $@
roles/bootstrap-os/tasks/bootstrap-coreos.yml (new file, 49 lines)
@ -0,0 +1,49 @@
---
- name: Bootstrap | Check if bootstrap is needed
  raw: stat /opt/bin/.bootstrapped
  register: need_bootstrap
  ignore_errors: True

- name: Bootstrap | Run bootstrap.sh
  script: bootstrap.sh
  when: (need_bootstrap | failed)

- set_fact:
    ansible_python_interpreter: "/opt/bin/python"

- name: Bootstrap | Check if we need to install pip
  shell: "{{ansible_python_interpreter}} -m pip --version"
  register: need_pip
  ignore_errors: True
  changed_when: false
  when: (need_bootstrap | failed)

- name: Bootstrap | Copy get-pip.py
  copy: src=get-pip.py dest=~/get-pip.py
  when: (need_pip | failed)

- name: Bootstrap | Install pip
  shell: "{{ansible_python_interpreter}} ~/get-pip.py"
  when: (need_pip | failed)

- name: Bootstrap | Remove get-pip.py
  file: path=~/get-pip.py state=absent
  when: (need_pip | failed)

- name: Bootstrap | Install pip launcher
  copy: src=runner dest=/opt/bin/pip mode=0755
  when: (need_pip | failed)

- name: Install required python modules
  pip:
    name: "{{ item }}"
  with_items: "{{pip_python_modules}}"

- name: Check configured hostname
  shell: hostname
  register: configured_hostname

- name: Assign inventory name to unconfigured hostnames
  shell: sh -c "echo \"{{inventory_hostname}}\" > /etc/hostname; hostname \"{{inventory_hostname}}\""
  when: (configured_hostname.stdout == 'localhost')
|
14
roles/bootstrap-os/tasks/bootstrap-ubuntu.yml
Normal file
14
roles/bootstrap-os/tasks/bootstrap-ubuntu.yml
Normal file
@ -0,0 +1,14 @@
|
||||
---
|
||||
# raw: cat /etc/issue.net | grep '{{ bootstrap_versions }}'
|
||||
|
||||
- name: Bootstrap | Check if bootstrap is needed
|
||||
raw: which python
|
||||
register: need_bootstrap
|
||||
ignore_errors: True
|
||||
|
||||
- name: Bootstrap | Install python 2.x
|
||||
raw: DEBIAN_FRONTEND=noninteractive apt-get install -y python-minimal
|
||||
when: need_bootstrap | failed
|
||||
|
||||
- set_fact:
|
||||
ansible_python_interpreter: "/usr/bin/python"
|
6
roles/bootstrap-os/tasks/main.yml
Normal file
6
roles/bootstrap-os/tasks/main.yml
Normal file
@ -0,0 +1,6 @@
|
||||
---
|
||||
- include: bootstrap-ubuntu.yml
|
||||
when: bootstrap_os == "ubuntu"
|
||||
|
||||
- include: bootstrap-coreos.yml
|
||||
when: bootstrap_os == "coreos"
|
2
roles/bootstrap-os/templates/python_shim.j2
Normal file
2
roles/bootstrap-os/templates/python_shim.j2
Normal file
@ -0,0 +1,2 @@
|
||||
#!/bin/bash
|
||||
LD_LIBRARY_PATH={{ pypy_install_path }}/lib:$LD_LIBRARY_PATH exec {{ pypy_install_path }}/bin/{{ item.src }} "$@"
|
roles/dnsmasq/defaults/main.yml (new file, 12 lines)
@ -0,0 +1,12 @@
---
# Existing search/nameserver resolvconf entries will be purged and
# ensured by this additional data:

# A max of 4 names is allowed and no more than 256 - 17 chars total
# (2 names are reserved for 'default.svc.' and 'svc.')
#searchdomains:
#  - foo.bar.lc

# A max of 2 is allowed here (1 slot is reserved for the dns_server)
#nameservers:
#  - 127.0.0.1
roles/dnsmasq/handlers/main.yml (new file, 34 lines)
@ -0,0 +1,34 @@
- name: Dnsmasq | restart network
  command: /bin/true
  notify:
    - Dnsmasq | reload network
    - Dnsmasq | update resolvconf
  when: ansible_os_family != "CoreOS"

- name: Dnsmasq | reload network
  service:
    name: >-
      {% if ansible_os_family == "RedHat" -%}
      network
      {%- elif ansible_os_family == "Debian" -%}
      networking
      {%- endif %}
    state: restarted
  when: ansible_os_family != "RedHat" and ansible_os_family != "CoreOS"

- name: Dnsmasq | update resolvconf
  command: /bin/true
  notify:
    - Dnsmasq | reload resolvconf
    - Dnsmasq | reload kubelet

- name: Dnsmasq | reload resolvconf
  command: /sbin/resolvconf -u
  ignore_errors: true

- name: Dnsmasq | reload kubelet
  service:
    name: kubelet
    state: restarted
  when: "{{ inventory_hostname in groups['kube-master'] }}"
  ignore_errors: true
|
305
roles/dnsmasq/library/kube.py
Normal file
305
roles/dnsmasq/library/kube.py
Normal file
@ -0,0 +1,305 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
DOCUMENTATION = """
|
||||
---
|
||||
module: kube
|
||||
short_description: Manage Kubernetes Cluster
|
||||
description:
|
||||
- Create, replace, remove, and stop resources within a Kubernetes Cluster
|
||||
version_added: "2.0"
|
||||
options:
|
||||
name:
|
||||
required: false
|
||||
default: null
|
||||
description:
|
||||
- The name associated with resource
|
||||
filename:
|
||||
required: false
|
||||
default: null
|
||||
description:
|
||||
- The path and filename of the resource(s) definition file.
|
||||
kubectl:
|
||||
required: false
|
||||
default: null
|
||||
description:
|
||||
- The path to the kubectl bin
|
||||
namespace:
|
||||
required: false
|
||||
default: null
|
||||
description:
|
||||
- The namespace associated with the resource(s)
|
||||
resource:
|
||||
required: false
|
||||
default: null
|
||||
description:
|
||||
- The resource to perform an action on. pods (po), replicationControllers (rc), services (svc)
|
||||
label:
|
||||
required: false
|
||||
default: null
|
||||
description:
|
||||
- The labels used to filter specific resources.
|
||||
server:
|
||||
required: false
|
||||
default: null
|
||||
description:
|
||||
- The url for the API server that commands are executed against.
|
||||
force:
|
||||
required: false
|
||||
default: false
|
||||
description:
|
||||
- A flag to indicate to force delete, replace, or stop.
|
||||
all:
|
||||
required: false
|
||||
default: false
|
||||
description:
|
||||
- A flag to indicate delete all, stop all, or all namespaces when checking exists.
|
||||
log_level:
|
||||
required: false
|
||||
default: 0
|
||||
description:
|
||||
- Indicates the level of verbosity of logging by kubectl.
|
||||
state:
|
||||
required: false
|
||||
choices: ['present', 'absent', 'latest', 'reloaded', 'stopped']
|
||||
default: present
|
||||
description:
|
||||
- present handles checking existence or creating if definition file provided,
|
||||
absent handles deleting resource(s) based on other options,
|
||||
latest handles creating ore updating based on existence,
|
||||
reloaded handles updating resource(s) definition using definition file,
|
||||
stopped handles stopping resource(s) based on other options.
|
||||
requirements:
|
||||
- kubectl
|
||||
author: "Kenny Jones (@kenjones-cisco)"
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
- name: test nginx is present
|
||||
kube: name=nginx resource=rc state=present
|
||||
|
||||
- name: test nginx is stopped
|
||||
kube: name=nginx resource=rc state=stopped
|
||||
|
||||
- name: test nginx is absent
|
||||
kube: name=nginx resource=rc state=absent
|
||||
|
||||
- name: test nginx is present
|
||||
kube: filename=/tmp/nginx.yml
|
||||
"""
|
||||
|
||||
|
||||
class KubeManager(object):
|
||||
|
||||
def __init__(self, module):
|
||||
|
||||
self.module = module
|
||||
|
||||
self.kubectl = module.params.get('kubectl')
|
||||
if self.kubectl is None:
|
||||
self.kubectl = module.get_bin_path('kubectl', True)
|
||||
self.base_cmd = [self.kubectl]
|
||||
|
||||
if module.params.get('server'):
|
||||
self.base_cmd.append('--server=' + module.params.get('server'))
|
||||
|
||||
if module.params.get('log_level'):
|
||||
self.base_cmd.append('--v=' + str(module.params.get('log_level')))
|
||||
|
||||
if module.params.get('namespace'):
|
||||
self.base_cmd.append('--namespace=' + module.params.get('namespace'))
|
||||
|
||||
self.all = module.params.get('all')
|
||||
self.force = module.params.get('force')
|
||||
self.name = module.params.get('name')
|
||||
self.filename = module.params.get('filename')
|
||||
self.resource = module.params.get('resource')
|
||||
self.label = module.params.get('label')
|
||||
|
||||
def _execute(self, cmd):
|
||||
args = self.base_cmd + cmd
|
||||
try:
|
||||
rc, out, err = self.module.run_command(args)
|
||||
if rc != 0:
|
||||
self.module.fail_json(
|
||||
msg='error running kubectl (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
|
||||
except Exception as exc:
|
||||
self.module.fail_json(
|
||||
msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc)))
|
||||
return out.splitlines()
|
||||
|
||||
def _execute_nofail(self, cmd):
|
||||
args = self.base_cmd + cmd
|
||||
rc, out, err = self.module.run_command(args)
|
||||
if rc != 0:
|
||||
return None
|
||||
return out.splitlines()
|
||||
|
||||
def create(self, check=True):
|
||||
if check and self.exists():
|
||||
return []
|
||||
|
||||
cmd = ['create']
|
||||
|
||||
if not self.filename:
|
||||
self.module.fail_json(msg='filename required to create')
|
||||
|
||||
cmd.append('--filename=' + self.filename)
|
||||
|
||||
return self._execute(cmd)
|
||||
|
||||
def replace(self):
|
||||
|
||||
if not self.force and not self.exists():
|
||||
return []
|
||||
|
||||
cmd = ['replace']
|
||||
|
||||
if self.force:
|
||||
cmd.append('--force')
|
||||
|
||||
if not self.filename:
|
||||
self.module.fail_json(msg='filename required to reload')
|
||||
|
||||
cmd.append('--filename=' + self.filename)
|
||||
|
||||
return self._execute(cmd)
|
||||
|
||||
def delete(self):
|
||||
|
||||
if not self.force and not self.exists():
|
||||
return []
|
||||
|
||||
cmd = ['delete']
|
||||
|
||||
if self.filename:
|
||||
cmd.append('--filename=' + self.filename)
|
||||
else:
|
||||
if not self.resource:
|
||||
self.module.fail_json(msg='resource required to delete without filename')
|
||||
|
||||
cmd.append(self.resource)
|
||||
|
||||
if self.name:
|
||||
cmd.append(self.name)
|
||||
|
||||
if self.label:
|
||||
cmd.append('--selector=' + self.label)
|
||||
|
||||
if self.all:
|
||||
cmd.append('--all')
|
||||
|
||||
if self.force:
|
||||
cmd.append('--ignore-not-found')
|
||||
|
||||
return self._execute(cmd)
|
||||
|
||||
def exists(self):
|
||||
cmd = ['get']
|
||||
|
||||
if not self.resource:
|
||||
return False
|
||||
|
||||
cmd.append(self.resource)
|
||||
|
||||
if self.name:
|
||||
cmd.append(self.name)
|
||||
|
||||
cmd.append('--no-headers')
|
||||
|
||||
if self.label:
|
||||
cmd.append('--selector=' + self.label)
|
||||
|
||||
if self.all:
|
||||
cmd.append('--all-namespaces')
|
||||
|
||||
result = self._execute_nofail(cmd)
|
||||
if not result:
|
||||
return False
|
||||
return True
|
||||
|
||||
def stop(self):
|
||||
|
||||
if not self.force and not self.exists():
|
||||
return []
|
||||
|
||||
cmd = ['stop']
|
||||
|
||||
if self.filename:
|
||||
cmd.append('--filename=' + self.filename)
|
||||
else:
|
||||
if not self.resource:
|
||||
self.module.fail_json(msg='resource required to stop without filename')
|
||||
|
||||
cmd.append(self.resource)
|
||||
|
||||
if self.name:
|
||||
cmd.append(self.name)
|
||||
|
||||
if self.label:
|
||||
cmd.append('--selector=' + self.label)
|
||||
|
||||
if self.all:
|
||||
cmd.append('--all')
|
||||
|
||||
if self.force:
|
||||
cmd.append('--ignore-not-found')
|
||||
|
||||
return self._execute(cmd)
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
name=dict(),
|
||||
filename=dict(),
|
||||
namespace=dict(),
|
||||
resource=dict(),
|
||||
label=dict(),
|
||||
server=dict(),
|
||||
kubectl=dict(),
|
||||
force=dict(default=False, type='bool'),
|
||||
all=dict(default=False, type='bool'),
|
||||
log_level=dict(default=0, type='int'),
|
||||
state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']),
|
||||
)
|
||||
)
|
||||
|
||||
changed = False
|
||||
|
||||
manager = KubeManager(module)
|
||||
state = module.params.get('state')
|
||||
|
||||
if state == 'present':
|
||||
result = manager.create()
|
||||
|
||||
elif state == 'absent':
|
||||
result = manager.delete()
|
||||
|
||||
elif state == 'reloaded':
|
||||
result = manager.replace()
|
||||
|
||||
elif state == 'stopped':
|
||||
result = manager.stop()
|
||||
|
||||
elif state == 'latest':
|
||||
if manager.exists():
|
||||
manager.force = True
|
||||
result = manager.replace()
|
||||
else:
|
||||
result = manager.create(check=False)
|
||||
|
||||
else:
|
||||
module.fail_json(msg='Unrecognized state %s.' % state)
|
||||
|
||||
if result:
|
||||
changed = True
|
||||
module.exit_json(changed=changed,
|
||||
msg='success: %s' % (' '.join(result))
|
||||
)
|
||||
|
||||
|
||||
from ansible.module_utils.basic import * # noqa
|
||||
if __name__ == '__main__':
|
||||
main()
|
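The EXAMPLES block above only exercises the k=v shorthand. A fuller task combining the documented options could look like the sketch below; the option names come from the DOCUMENTATION string, while the playbook context and all values are illustrative assumptions:

- name: force-reload the nginx rc in the staging namespace
  kube:
    name: nginx
    namespace: staging
    resource: rc
    filename: /tmp/nginx.yml
    force: yes
    state: reloaded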
58
roles/dnsmasq/tasks/dnsmasq.yml
Normal file
@@ -0,0 +1,58 @@
|
||||
---
|
||||
- name: ensure dnsmasq.d directory exists
|
||||
file:
|
||||
path: /etc/dnsmasq.d
|
||||
state: directory
|
||||
|
||||
- name: ensure dnsmasq.d-available directory exists
|
||||
file:
|
||||
path: /etc/dnsmasq.d-available
|
||||
state: directory
|
||||
|
||||
- name: Write dnsmasq configuration
|
||||
template:
|
||||
src: 01-kube-dns.conf.j2
|
||||
dest: /etc/dnsmasq.d-available/01-kube-dns.conf
|
||||
mode: 0755
|
||||
backup: yes
|
||||
|
||||
- name: Stat dnsmasq configuration
|
||||
stat: path=/etc/dnsmasq.d/01-kube-dns.conf
|
||||
register: sym
|
||||
|
||||
- name: Move previous configuration
|
||||
command: mv /etc/dnsmasq.d/01-kube-dns.conf /etc/dnsmasq.d-available/01-kube-dns.conf.bak
|
||||
changed_when: False
|
||||
when: sym.stat.islnk is defined and sym.stat.islnk == False
|
||||
|
||||
- name: Enable dnsmasq configuration
|
||||
file:
|
||||
src: /etc/dnsmasq.d-available/01-kube-dns.conf
|
||||
dest: /etc/dnsmasq.d/01-kube-dns.conf
|
||||
state: link
|
||||
|
||||
- name: Create dnsmasq manifests
|
||||
template: src={{item.file}} dest=/etc/kubernetes/{{item.file}}
|
||||
with_items:
|
||||
- {file: dnsmasq-ds.yml, type: ds}
|
||||
- {file: dnsmasq-svc.yml, type: svc}
|
||||
register: manifests
|
||||
when: inventory_hostname == groups['kube-master'][0]
|
||||
|
||||
- name: Start Resources
|
||||
kube:
|
||||
name: dnsmasq
|
||||
namespace: kube-system
|
||||
kubectl: "{{bin_dir}}/kubectl"
|
||||
resource: "{{item.item.type}}"
|
||||
filename: /etc/kubernetes/{{item.item.file}}
|
||||
state: "{{item.changed | ternary('latest','present') }}"
|
||||
with_items: "{{ manifests.results }}"
|
||||
when: inventory_hostname == groups['kube-master'][0]
|
||||
|
||||
- name: Check for dnsmasq port (pulling image and running container)
|
||||
wait_for:
|
||||
host: "{{dns_server}}"
|
||||
port: 53
|
||||
delay: 5
|
||||
when: inventory_hostname == groups['kube-node'][0]
|
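The "Start Resources" task above drives the kube module's state from the registered template result: a changed manifest maps to latest (a kubectl replace), an unchanged one to present (an existence check only). The same idempotency pattern in a minimal, illustrative form:

- name: Render a manifest
  template: src=app.yml.j2 dest=/etc/kubernetes/app.yml
  register: manifest

- name: Reconcile it
  kube:
    filename: /etc/kubernetes/app.yml
    # replace when the rendered file changed, otherwise just verify existence
    state: "{{ manifest.changed | ternary('latest', 'present') }}"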
@@ -1,101 +1,5 @@
|
||||
---
|
||||
- name: populate inventory into hosts file
|
||||
lineinfile:
|
||||
dest: /etc/hosts
|
||||
regexp: "^{{ hostvars[item].ansible_default_ipv4.address }} {{ item }}$"
|
||||
line: "{{ hostvars[item].ansible_default_ipv4.address }} {{ item }}"
|
||||
state: present
|
||||
backup: yes
|
||||
when: hostvars[item].ansible_default_ipv4.address is defined
|
||||
with_items: groups['all']
|
||||
- include: dnsmasq.yml
|
||||
when: "{{ not skip_dnsmasq|bool }}"
|
||||
|
||||
- name: populate kubernetes loadbalancer address into hosts file
|
||||
lineinfile:
|
||||
dest: /etc/hosts
|
||||
regexp: ".*{{ apiserver_loadbalancer_domain_name }}$"
|
||||
line: "{{ loadbalancer_apiserver.address }} lb-apiserver.kubernetes.local"
|
||||
state: present
|
||||
backup: yes
|
||||
when: loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined
|
||||
|
||||
- name: clean hosts file
|
||||
lineinfile:
|
||||
dest: /etc/hosts
|
||||
regexp: "{{ item }}"
|
||||
state: absent
|
||||
backup: yes
|
||||
with_items:
|
||||
- '^127\.0\.0\.1(\s+){{ inventory_hostname }}.*'
|
||||
- '^::1(\s+){{ inventory_hostname }}.*'
|
||||
|
||||
- name: ensure dnsmasq.d directory exists
|
||||
file:
|
||||
path: /etc/dnsmasq.d
|
||||
state: directory
|
||||
when: inventory_hostname in groups['kube-master']
|
||||
|
||||
- name: configure dnsmasq
|
||||
template:
|
||||
src: 01-kube-dns.conf.j2
|
||||
dest: /etc/dnsmasq.d/01-kube-dns.conf
|
||||
mode: 755
|
||||
backup: yes
|
||||
when: inventory_hostname in groups['kube-master']
|
||||
|
||||
- name: create dnsmasq pod template
|
||||
template: src=dnsmasq-pod.yml dest=/etc/kubernetes/manifests/dnsmasq-pod.manifest
|
||||
when: inventory_hostname in groups['kube-master']
|
||||
|
||||
- name: Check for dnsmasq port
|
||||
wait_for:
|
||||
port: 53
|
||||
delay: 5
|
||||
timeout: 100
|
||||
when: inventory_hostname in groups['kube-master']
|
||||
|
||||
- name: check resolvconf
|
||||
stat: path=/etc/resolvconf/resolv.conf.d/head
|
||||
register: resolvconf
|
||||
|
||||
- name: target resolv.conf file
|
||||
set_fact:
|
||||
resolvconffile: >
|
||||
{%- if resolvconf.stat.exists == True -%}
|
||||
/etc/resolvconf/resolv.conf.d/head
|
||||
{%- else -%}
|
||||
/etc/resolv.conf
|
||||
{%- endif -%}
|
||||
|
||||
- name: Add search resolv.conf
|
||||
lineinfile:
|
||||
line: search {{ [ 'default.svc.' + dns_domain, 'svc.' + dns_domain, dns_domain ] | join(' ') }}
|
||||
dest: "{{resolvconffile}}"
|
||||
state: present
|
||||
insertafter: EOF
|
||||
backup: yes
|
||||
follow: yes
|
||||
|
||||
- name: Add all masters as nameserver
|
||||
lineinfile:
|
||||
line: nameserver {{ hostvars[item]['ansible_default_ipv4']['address'] }}
|
||||
dest: "{{resolvconffile}}"
|
||||
state: present
|
||||
insertafter: EOF
|
||||
backup: yes
|
||||
follow: yes
|
||||
with_items: groups['kube-master']
|
||||
|
||||
- name: disable resolv.conf modification by dhclient
|
||||
copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient-enter-hooks.d/nodnsupdate mode=u+x backup=yes
|
||||
when: ansible_os_family == "Debian"
|
||||
|
||||
- name: disable resolv.conf modification by dhclient
|
||||
copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient.d/nodnsupdate mode=u+x backup=yes
|
||||
when: ansible_os_family == "RedHat"
|
||||
|
||||
- name: update resolvconf
|
||||
command: resolvconf -u
|
||||
changed_when: False
|
||||
when: resolvconf.stat.exists == True
|
||||
|
||||
- meta: flush_handlers
|
||||
- include: resolvconf.yml
|
||||
|
101
roles/dnsmasq/tasks/resolvconf.yml
Normal file
@@ -0,0 +1,101 @@
|
||||
---
|
||||
- name: check resolvconf
|
||||
shell: which resolvconf
|
||||
register: resolvconf
|
||||
ignore_errors: yes
|
||||
|
||||
- name: target resolv.conf file
|
||||
set_fact:
|
||||
resolvconffile: >-
|
||||
{%- if resolvconf.rc == 0 -%}/etc/resolvconf/resolv.conf.d/head{%- else -%}/etc/resolv.conf{%- endif -%}
|
||||
|
||||
- name: generate search domains to resolvconf
|
||||
set_fact:
|
||||
searchentries:
|
||||
"{{ ([ 'default.svc.' + dns_domain, 'svc.' + dns_domain ] + searchdomains|default([])) | join(' ') }}"
|
||||
|
||||
- name: pick dnsmasq cluster IP
|
||||
set_fact:
|
||||
dnsmasq_server: >-
|
||||
{%- if skip_dnsmasq|bool -%}{{ [ skydns_server ] + upstream_dns_servers|default([]) }}{%- else -%}{{ [ dns_server ] }}{%- endif -%}
|
||||
|
||||
- name: generate nameservers to resolvconf
|
||||
set_fact:
|
||||
nameserverentries:
|
||||
"{{ dnsmasq_server|default([]) + nameservers|default([]) }}"
|
||||
|
||||
- name: Remove search and nameserver options from resolvconf head
|
||||
lineinfile:
|
||||
dest: /etc/resolvconf/resolv.conf.d/head
|
||||
state: absent
|
||||
regexp: "^{{ item }}.*$"
|
||||
backup: yes
|
||||
follow: yes
|
||||
with_items:
|
||||
- search
|
||||
- nameserver
|
||||
when: resolvconf.rc == 0
|
||||
notify: Dnsmasq | update resolvconf
|
||||
|
||||
- name: Add search domains to resolv.conf
|
||||
lineinfile:
|
||||
line: "search {{searchentries}}"
|
||||
dest: "{{resolvconffile}}"
|
||||
state: present
|
||||
insertbefore: BOF
|
||||
backup: yes
|
||||
follow: yes
|
||||
notify: Dnsmasq | update resolvconf
|
||||
|
||||
- name: Add nameservers to resolv.conf
|
||||
blockinfile:
|
||||
dest: "{{resolvconffile}}"
|
||||
block: |-
|
||||
{% for item in nameserverentries -%}
|
||||
nameserver {{ item }}
|
||||
{% endfor %}
|
||||
state: present
|
||||
insertafter: "^search.*$"
|
||||
create: yes
|
||||
backup: yes
|
||||
follow: yes
|
||||
marker: "# Ansible nameservers {mark}"
|
||||
notify: Dnsmasq | update resolvconf
|
||||
|
||||
- name: Add options to resolv.conf
|
||||
lineinfile:
|
||||
line: options {{ item }}
|
||||
dest: "{{resolvconffile}}"
|
||||
state: present
|
||||
regexp: "^options.*{{ item }}$"
|
||||
insertafter: EOF
|
||||
backup: yes
|
||||
follow: yes
|
||||
with_items:
|
||||
- ndots:{{ ndots }}
|
||||
- timeout:2
|
||||
- attempts:2
|
||||
notify: Dnsmasq | update resolvconf
|
||||
|
||||
- name: Remove search and nameserver options from resolvconf base
|
||||
lineinfile:
|
||||
dest: /etc/resolvconf/resolv.conf.d/base
|
||||
state: absent
|
||||
regexp: "^{{ item }}.*$"
|
||||
backup: yes
|
||||
follow: yes
|
||||
with_items:
|
||||
- search
|
||||
- nameserver
|
||||
when: resolvconf.rc == 0
|
||||
notify: Dnsmasq | update resolvconf
|
||||
|
||||
- name: disable resolv.conf modification by dhclient
|
||||
copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient-enter-hooks.d/znodnsupdate mode=0755
|
||||
notify: Dnsmasq | restart network
|
||||
when: ansible_os_family == "Debian"
|
||||
|
||||
- name: disable resolv.conf modification by dhclient
|
||||
copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient.d/nodnsupdate mode=u+x
|
||||
notify: Dnsmasq | restart network
|
||||
when: ansible_os_family == "RedHat"
|
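With the tasks above, /etc/resolv.conf (or the resolvconf head file, when resolvconf is installed) converges to roughly the following shape; the values are assumed examples for dns_domain=cluster.local, a dnsmasq cluster IP of 10.233.0.2 and ndots=5:

search default.svc.cluster.local svc.cluster.local
# Ansible nameservers BEGIN
nameserver 10.233.0.2
# Ansible nameservers END
options ndots:5
options timeout:2
options attempts:2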
@@ -1,19 +1,29 @@
|
||||
#Listen on all interfaces
|
||||
interface=*
|
||||
#Bind only to the addresses dnsmasq listens on
|
||||
bind-interfaces
|
||||
listen-address=0.0.0.0
|
||||
|
||||
addn-hosts=/etc/hosts
|
||||
|
||||
bogus-priv
|
||||
strict-order
|
||||
# Forward k8s domain to kube-dns
|
||||
server=/{{ dns_domain }}/{{ skydns_server }}
|
||||
|
||||
#Set upstream dns servers
|
||||
{% if upstream_dns_servers is defined %}
|
||||
{% for srv in upstream_dns_servers %}
|
||||
server={{ srv }}
|
||||
{% endfor %}
|
||||
{% elif cloud_provider == "gce" %}
|
||||
server=169.254.169.254
|
||||
{% else %}
|
||||
server=8.8.8.8
|
||||
server=8.8.4.4
|
||||
{% endif %}
|
||||
|
||||
# Forward k8s domain to kube-dns
|
||||
server=/{{ dns_domain }}/{{ dns_server }}
|
||||
bogus-priv
|
||||
no-resolv
|
||||
no-negcache
|
||||
cache-size=1000
|
||||
max-cache-ttl=10
|
||||
max-ttl=20
|
||||
log-facility=-
|
||||
|
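The template's forwarding logic reduces to: send the cluster domain to kube-dns/skydns, and everything else to the configured upstreams (the metadata server on GCE, Google public DNS otherwise). With assumed example values (dns_domain=cluster.local, skydns_server=10.233.0.3, no upstream_dns_servers, cloud_provider unset), the relevant rendered lines are approximately:

server=/cluster.local/10.233.0.3
server=8.8.8.8
server=8.8.4.4
no-resolv
no-negcache
cache-size=1000
max-cache-ttl=10
max-ttl=20
log-facility=-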
53
roles/dnsmasq/templates/dnsmasq-ds.yml
Normal file
@@ -0,0 +1,53 @@
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: dnsmasq
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: dnsmasq
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: dnsmasq
|
||||
spec:
|
||||
containers:
|
||||
- name: dnsmasq
|
||||
image: andyshinn/dnsmasq:2.72
|
||||
command:
|
||||
- dnsmasq
|
||||
args:
|
||||
- -k
|
||||
- "-7"
|
||||
- /etc/dnsmasq.d
|
||||
securityContext:
|
||||
capabilities:
|
||||
add:
|
||||
- NET_ADMIN
|
||||
imagePullPolicy: IfNotPresent
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 256M
|
||||
ports:
|
||||
- name: dns
|
||||
containerPort: 53
|
||||
protocol: UDP
|
||||
- name: dns-tcp
|
||||
containerPort: 53
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: etcdnsmasqd
|
||||
mountPath: /etc/dnsmasq.d
|
||||
- name: etcdnsmasqdavailable
|
||||
mountPath: /etc/dnsmasq.d-available
|
||||
|
||||
volumes:
|
||||
- name: etcdnsmasqd
|
||||
hostPath:
|
||||
path: /etc/dnsmasq.d
|
||||
- name: etcdnsmasqdavailable
|
||||
hostPath:
|
||||
path: /etc/dnsmasq.d-available
|
||||
dnsPolicy: Default # Don't use cluster DNS.
|
@@ -1,49 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: dnsmasq
|
||||
namespace: kube-system
|
||||
spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: dnsmasq
|
||||
image: andyshinn/dnsmasq:2.72
|
||||
command:
|
||||
- dnsmasq
|
||||
args:
|
||||
- -k
|
||||
- "-7"
|
||||
- /etc/dnsmasq.d
|
||||
- --local-service
|
||||
securityContext:
|
||||
capabilities:
|
||||
add:
|
||||
- NET_ADMIN
|
||||
imagePullPolicy: Always
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 256M
|
||||
ports:
|
||||
- name: dns
|
||||
containerPort: 53
|
||||
hostPort: 53
|
||||
protocol: UDP
|
||||
- name: dns-tcp
|
||||
containerPort: 53
|
||||
hostPort: 53
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: etcdnsmasqd
|
||||
mountPath: /etc/dnsmasq.d
|
||||
- name: etcdnsmasqdavailable
|
||||
mountPath: /etc/dnsmasq.d-available
|
||||
|
||||
volumes:
|
||||
- name: etcdnsmasqd
|
||||
hostPath:
|
||||
path: /etc/dnsmasq.d
|
||||
- name: etcdnsmasqdavailable
|
||||
hostPath:
|
||||
path: /etc/dnsmasq.d-available
|
23
roles/dnsmasq/templates/dnsmasq-svc.yml
Normal file
@@ -0,0 +1,23 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
kubernetes.io/cluster-service: 'true'
|
||||
k8s-app: dnsmasq
|
||||
name: dnsmasq
|
||||
namespace: kube-system
|
||||
spec:
|
||||
ports:
|
||||
- port: 53
|
||||
name: dns-tcp
|
||||
targetPort: 53
|
||||
protocol: TCP
|
||||
- port: 53
|
||||
name: dns
|
||||
targetPort: 53
|
||||
protocol: UDP
|
||||
type: ClusterIP
|
||||
clusterIP: {{dns_server}}
|
||||
selector:
|
||||
k8s-app: dnsmasq
|
10
roles/docker/defaults/main.yml
Normal file
@@ -0,0 +1,10 @@
|
||||
docker_version: 1.10
|
||||
|
||||
docker_package_info:
|
||||
pkgs:
|
||||
|
||||
docker_repo_key_info:
|
||||
repo_keys:
|
||||
|
||||
docker_repo_info:
|
||||
repos:
|
6
roles/docker/files/rh_docker.repo
Normal file
@@ -0,0 +1,6 @@
|
||||
[dockerrepo]
|
||||
name=Docker Repository
|
||||
baseurl=https://yum.dockerproject.org/repo/main/centos/7
|
||||
enabled=1
|
||||
gpgcheck=1
|
||||
gpgkey=https://yum.dockerproject.org/gpg
|
27
roles/docker/handlers/main.yml
Normal file
@@ -0,0 +1,27 @@
|
||||
---
|
||||
- name: restart docker
|
||||
command: /bin/true
|
||||
notify:
|
||||
- Docker | reload systemd
|
||||
- Docker | reload docker
|
||||
- Docker | pause while Docker restarts
|
||||
- Docker | wait for docker
|
||||
|
||||
- name: Docker | reload systemd
|
||||
shell: systemctl daemon-reload
|
||||
when: ansible_service_mgr == "systemd"
|
||||
|
||||
- name: Docker | reload docker
|
||||
service:
|
||||
name: docker
|
||||
state: restarted
|
||||
|
||||
- name: Docker | pause while Docker restarts
|
||||
pause: seconds=10 prompt="Waiting for docker restart"
|
||||
|
||||
- name: Docker | wait for docker
|
||||
command: /usr/bin/docker images
|
||||
register: docker_ready
|
||||
retries: 10
|
||||
delay: 5
|
||||
until: docker_ready.rc == 0
|
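The "restart docker" handler is a deliberate no-op (/bin/true); its only job is to fan out, in order, to the four named handlers below it, which serializes daemon-reload, restart, a grace period, and a readiness poll. A configuration task only ever has to notify the entry point, as in this illustrative sketch (file names assumed):

- name: Drop a docker daemon drop-in
  template:
    src: docker-options.conf.j2
    dest: /etc/systemd/system/docker.service.d/options.conf
  notify: restart docker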
@@ -11,6 +11,7 @@
|
||||
- defaults.yml
|
||||
paths:
|
||||
- ../vars
|
||||
skip: true
|
||||
|
||||
- name: check for minimum kernel version
|
||||
fail:
|
||||
@@ -18,31 +19,63 @@
|
||||
docker requires a minimum kernel version of
|
||||
{{ docker_kernel_min_version }} on
|
||||
{{ ansible_distribution }}-{{ ansible_distribution_version }}
|
||||
when: ansible_kernel|version_compare(docker_kernel_min_version, "<")
|
||||
|
||||
- name: ensure docker requirements packages are installed
|
||||
action: "{{ docker_package_info.pkg_mgr }}"
|
||||
args: docker_package_info.args
|
||||
with_items: docker_package_info.pre_pkgs
|
||||
when: docker_package_info.pre_pkgs|length > 0
|
||||
when: (ansible_os_family != "CoreOS") and (ansible_kernel|version_compare(docker_kernel_min_version, "<"))
|
||||
|
||||
- name: ensure docker repository public key is installed
|
||||
action: "{{ docker_repo_key_info.pkg_key }}"
|
||||
args: docker_repo_key_info.args
|
||||
with_items: docker_repo_key_info.repo_keys
|
||||
when: docker_repo_key_info.repo_keys|length > 0
|
||||
args:
|
||||
id: "{{item}}"
|
||||
keyserver: "{{docker_repo_key_info.keyserver}}"
|
||||
state: present
|
||||
register: keyserver_task_result
|
||||
until: keyserver_task_result|success
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
with_items: "{{ docker_repo_key_info.repo_keys }}"
|
||||
when: ansible_os_family != "CoreOS"
|
||||
|
||||
- name: ensure docker repository is enabled
|
||||
action: "{{ docker_repo_info.pkg_repo }}"
|
||||
args: docker_repo_info.args
|
||||
with_items: docker_repo_info.repos
|
||||
when: docker_repo_info.repos|length > 0
|
||||
args:
|
||||
repo: "{{item}}"
|
||||
state: present
|
||||
with_items: "{{ docker_repo_info.repos }}"
|
||||
when: (ansible_os_family != "CoreOS") and (docker_repo_info.repos|length > 0)
|
||||
|
||||
- name: Configure docker repository on RedHat/CentOS
|
||||
copy:
|
||||
src: "rh_docker.repo"
|
||||
dest: "/etc/yum.repos.d/docker.repo"
|
||||
when: ansible_distribution in ["CentOS","RedHat"] and
|
||||
ansible_distribution_major_version|int >= 7
|
||||
|
||||
- name: ensure docker packages are installed
|
||||
action: "{{ docker_package_info.pkg_mgr }}"
|
||||
args: docker_package_info.args
|
||||
with_items: docker_package_info.pkgs
|
||||
when: docker_package_info.pkgs|length > 0
|
||||
args:
|
||||
pkg: "{{item.name}}"
|
||||
force: "{{item.force|default(omit)}}"
|
||||
state: present
|
||||
register: docker_task_result
|
||||
until: docker_task_result|success
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
with_items: "{{ docker_package_info.pkgs }}"
|
||||
when: (ansible_os_family != "CoreOS") and (docker_package_info.pkgs|length > 0)
|
||||
|
||||
- name: allow for proxies on systems using systemd
|
||||
include: systemd-proxies.yml
|
||||
when: ansible_service_mgr == "systemd" and
|
||||
(http_proxy is defined or https_proxy is defined or no_proxy is defined)
|
||||
|
||||
- name: Write docker.service systemd file
|
||||
template:
|
||||
src: systemd-docker.service.j2
|
||||
dest: /etc/systemd/system/docker.service
|
||||
register: docker_service_file
|
||||
notify: restart docker
|
||||
when: ansible_service_mgr == "systemd" and ansible_os_family != "CoreOS"
|
||||
|
||||
- meta: flush_handlers
|
||||
|
||||
- name: ensure docker service is started and enabled
|
||||
service:
|
||||
|
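The key and package tasks above all retry with delay: "{{ retry_stagger | random + 3 }}": the random filter turns retry_stagger (assumed to be an integer variable, e.g. 5) into a per-host random pause of 3 to retry_stagger+2 seconds, so a whole cluster does not retry against one mirror in lockstep. The idiom in isolation, with illustrative names:

- name: fetch an artifact with staggered retries
  get_url:
    url: https://example.com/artifact    # illustrative URL
    dest: /tmp/artifact
  register: fetch_result
  until: fetch_result|success
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"  # random 3..(retry_stagger+2) second pause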
9
roles/docker/tasks/systemd-proxies.yml
Normal file
@@ -0,0 +1,9 @@
|
||||
---
|
||||
- name: create docker service directory for systemd
|
||||
file: path=/etc/systemd/system/docker.service.d state=directory
|
||||
|
||||
- name: drop docker environment conf to enable proxy usage
|
||||
template:
|
||||
src: http-proxy.conf.j2
|
||||
dest: /etc/systemd/system/docker.service.d/http-proxy.conf
|
||||
notify: restart docker
|
3
roles/docker/templates/http-proxy.conf.j2
Normal file
@@ -0,0 +1,3 @@
|
||||
[Service]
|
||||
|
||||
Environment={% if http_proxy %}"HTTP_PROXY={{ http_proxy }}"{% endif %} {% if https_proxy %}"HTTPS_PROXY={{ https_proxy }}"{% endif %} {% if no_proxy %}"NO_PROXY={{ no_proxy }}"{% endif %}
|
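With assumed example values (http_proxy=http://proxy.example.com:3128, no_proxy=localhost,127.0.0.1, https_proxy unset), the template renders to a systemd drop-in such as:

[Service]
Environment="HTTP_PROXY=http://proxy.example.com:3128"  "NO_PROXY=localhost,127.0.0.1"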
40
roles/docker/templates/systemd-docker.service.j2
Normal file
@@ -0,0 +1,40 @@
|
||||
[Unit]
|
||||
Description=Docker Application Container Engine
|
||||
Documentation=http://docs.docker.com
|
||||
{% if ansible_os_family == "RedHat" %}
|
||||
After=network.target docker-storage-setup.service
|
||||
Wants=docker-storage-setup.service
|
||||
{% elif ansible_os_family == "Debian" %}
|
||||
After=network.target docker.socket
|
||||
Wants=docker.socket
|
||||
{% endif %}
|
||||
|
||||
[Service]
|
||||
Type=notify
|
||||
{% if ansible_os_family == "RedHat" %}
|
||||
EnvironmentFile=-/etc/default/docker
|
||||
EnvironmentFile=-/etc/sysconfig/docker
|
||||
EnvironmentFile=-/etc/sysconfig/docker-network
|
||||
EnvironmentFile=-/etc/sysconfig/docker-storage
|
||||
{% elif ansible_os_family == "Debian" %}
|
||||
EnvironmentFile=-/etc/default/docker
|
||||
{% endif %}
|
||||
Environment=GOTRACEBACK=crash
|
||||
ExecReload=/bin/kill -s HUP $MAINPID
|
||||
Delegate=yes
|
||||
KillMode=process
|
||||
ExecStart=/usr/bin/docker daemon \
|
||||
$OPTIONS \
|
||||
$DOCKER_STORAGE_OPTIONS \
|
||||
$DOCKER_NETWORK_OPTIONS \
|
||||
$INSECURE_REGISTRY \
|
||||
$DOCKER_OPTS
|
||||
TasksMax=infinity
|
||||
LimitNOFILE=1048576
|
||||
LimitNPROC=1048576
|
||||
LimitCORE=infinity
|
||||
TimeoutStartSec=1min
|
||||
Restart=on-abnormal
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
@@ -1,24 +1,16 @@
|
||||
docker_kernel_min_version: '2.6.32-431'
|
||||
|
||||
# versioning: docker-io itself is pinned at docker 1.5
|
||||
|
||||
docker_package_info:
|
||||
pkg_mgr: yum
|
||||
args:
|
||||
name: "{{ item }}"
|
||||
state: latest
|
||||
update_cache: yes
|
||||
pre_pkgs:
|
||||
- epel-release
|
||||
- curl
|
||||
- device-mapper-libs
|
||||
pkgs:
|
||||
- docker-io
|
||||
- name: docker-io
|
||||
|
||||
docker_repo_key_info:
|
||||
pkg_key: ''
|
||||
args: {}
|
||||
repo_keys: []
|
||||
|
||||
docker_repo_info:
|
||||
pkg_repo: ''
|
||||
args: {}
|
||||
repos: []
|
||||
|
@@ -1,36 +1,29 @@
|
||||
docker_kernel_min_version: '3.2'
|
||||
|
||||
# https://apt.dockerproject.org/repo/dists/debian-wheezy/main/filelist
|
||||
docker_versioned_pkg:
|
||||
latest: docker-engine
|
||||
1.9: docker-engine=1.9.1-0~{{ ansible_distribution_release|lower }}
|
||||
1.10: docker-engine=1.10.3-0~{{ ansible_distribution_release|lower }}
|
||||
1.11: docker-engine=1.11.2-0~{{ ansible_distribution_release|lower }}
|
||||
1.12: docker-engine=1.12.1-0~{{ ansible_distribution_release|lower }}
|
||||
|
||||
docker_package_info:
|
||||
pkg_mgr: apt
|
||||
args:
|
||||
pkg: "{{ item }}"
|
||||
update_cache: yes
|
||||
cache_valid_time: 600
|
||||
state: latest
|
||||
pre_pkgs:
|
||||
- apt-transport-https
|
||||
- curl
|
||||
- software-properties-common
|
||||
pkgs:
|
||||
- docker-engine
|
||||
- name: "{{ docker_versioned_pkg[docker_version] }}"
|
||||
force: yes
|
||||
|
||||
docker_repo_key_info:
|
||||
pkg_key: apt_key
|
||||
args:
|
||||
id: "{{ item }}"
|
||||
keyserver: hkp://p80.pool.sks-keyservers.net:80
|
||||
state: present
|
||||
keyserver: hkp://p80.pool.sks-keyservers.net:80
|
||||
repo_keys:
|
||||
- 58118E89F3A912897C070ADBF76221572C52609D
|
||||
- 58118E89F3A912897C070ADBF76221572C52609D
|
||||
|
||||
docker_repo_info:
|
||||
pkg_repo: apt_repository
|
||||
args:
|
||||
repo: "{{ item }}"
|
||||
update_cache: yes
|
||||
state: present
|
||||
repos:
|
||||
- >
|
||||
deb https://apt.dockerproject.org/repo
|
||||
deb https://apt.dockerproject.org/repo
|
||||
{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
|
||||
main
|
||||
|
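The docker_versioned_pkg map pins an exact apt package per minor release, and docker_package_info.pkgs indexes it with docker_version. For example, docker_version: 1.10 on a Debian jessie host resolves as sketched below; note that YAML parses the unquoted keys 1.9 and 1.10 as floats, which still matches because docker_version is parsed the same way:

# docker_version: 1.10, ansible_distribution_release: jessie (assumed values)
# docker_versioned_pkg[docker_version]  =>  docker-engine=1.10.3-0~jessie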
@@ -1,22 +1,16 @@
|
||||
docker_kernel_min_version: '0'
|
||||
|
||||
# versioning: docker-io itself is pinned at docker 1.5
|
||||
|
||||
docker_package_info:
|
||||
pkg_mgr: yum
|
||||
args:
|
||||
name: "{{ item }}"
|
||||
state: latest
|
||||
update_cache: yes
|
||||
pre_pkgs:
|
||||
- curl
|
||||
pkgs:
|
||||
- docker-io
|
||||
- name: docker-io
|
||||
|
||||
docker_repo_key_info:
|
||||
pkg_key: ''
|
||||
args: {}
|
||||
repo_keys: []
|
||||
|
||||
docker_repo_info:
|
||||
pkg_repo: ''
|
||||
args: {}
|
||||
repos: []
|
||||
|
21
roles/docker/vars/fedora.yml
Normal file
@@ -0,0 +1,21 @@
|
||||
docker_kernel_min_version: '0'
|
||||
|
||||
docker_versioned_pkg:
|
||||
latest: docker
|
||||
1.9: docker-1:1.9.1
|
||||
1.10: docker-1:1.10.1
|
||||
1.11: docker-1:1.11.2
|
||||
1.12: docker-1:1.12.1
|
||||
|
||||
docker_package_info:
|
||||
pkg_mgr: dnf
|
||||
pkgs:
|
||||
- name: "{{ docker_versioned_pkg[docker_version] }}"
|
||||
|
||||
docker_repo_key_info:
|
||||
pkg_key: ''
|
||||
repo_keys: []
|
||||
|
||||
docker_repo_info:
|
||||
pkg_repo: ''
|
||||
repos: []
|
@@ -2,21 +2,13 @@ docker_kernel_min_version: '0'
|
||||
|
||||
docker_package_info:
|
||||
pkg_mgr: yum
|
||||
args:
|
||||
name: "{{ item }}"
|
||||
state: latest
|
||||
update_cache: yes
|
||||
pre_pkgs:
|
||||
- curl
|
||||
pkgs:
|
||||
- docker
|
||||
- name: docker-engine
|
||||
|
||||
docker_repo_key_info:
|
||||
pkg_key: ''
|
||||
args: {}
|
||||
repo_keys: []
|
||||
|
||||
docker_repo_info:
|
||||
pkg_repo: ''
|
||||
args: {}
|
||||
repos: []
|
||||
|
29
roles/docker/vars/ubuntu-16.04.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
---
|
||||
docker_version: 1.11
|
||||
docker_kernel_min_version: '3.2'
|
||||
|
||||
# https://apt.dockerproject.org/repo/dists/ubuntu-xenial/main/filelist
|
||||
docker_versioned_pkg:
|
||||
latest: docker-engine
|
||||
1.11: docker-engine=1.11.1-0~{{ ansible_distribution_release|lower }}
|
||||
1.12: docker-engine=1.12.1-0~{{ ansible_distribution_release|lower }}
|
||||
|
||||
docker_package_info:
|
||||
pkg_mgr: apt
|
||||
pkgs:
|
||||
- name: "{{ docker_versioned_pkg[docker_version] }}"
|
||||
force: yes
|
||||
|
||||
docker_repo_key_info:
|
||||
pkg_key: apt_key
|
||||
keyserver: hkp://p80.pool.sks-keyservers.net:80
|
||||
repo_keys:
|
||||
- 58118E89F3A912897C070ADBF76221572C52609D
|
||||
|
||||
docker_repo_info:
|
||||
pkg_repo: apt_repository
|
||||
repos:
|
||||
- >
|
||||
deb https://apt.dockerproject.org/repo
|
||||
{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
|
||||
main
|
30
roles/docker/vars/ubuntu.yml
Normal file
@@ -0,0 +1,30 @@
|
||||
|
||||
docker_kernel_min_version: '3.2'
|
||||
|
||||
# https://apt.dockerproject.org/repo/dists/ubuntu-trusty/main/filelist
|
||||
docker_versioned_pkg:
|
||||
latest: docker-engine
|
||||
1.9: docker-engine=1.9.0-0~{{ ansible_distribution_release|lower }}
|
||||
1.10: docker-engine=1.10.3-0~{{ ansible_distribution_release|lower }}
|
||||
1.11: docker-engine=1.11.1-0~{{ ansible_distribution_release|lower }}
|
||||
1.12: docker-engine=1.12.1-0~{{ ansible_distribution_release|lower }}
|
||||
|
||||
docker_package_info:
|
||||
pkg_mgr: apt
|
||||
pkgs:
|
||||
- name: "{{ docker_versioned_pkg[docker_version] }}"
|
||||
force: yes
|
||||
|
||||
docker_repo_key_info:
|
||||
pkg_key: apt_key
|
||||
keyserver: hkp://p80.pool.sks-keyservers.net:80
|
||||
repo_keys:
|
||||
- 58118E89F3A912897C070ADBF76221572C52609D
|
||||
|
||||
docker_repo_info:
|
||||
pkg_repo: apt_repository
|
||||
repos:
|
||||
- >
|
||||
deb https://apt.dockerproject.org/repo
|
||||
{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
|
||||
main
|
@@ -1,42 +1,124 @@
|
||||
---
|
||||
local_release_dir: /tmp
|
||||
|
||||
# if this is set to true, files will only be downloaded once
|
||||
download_run_once: False
|
||||
|
||||
# Versions
|
||||
kube_version: v1.4.0
|
||||
|
||||
etcd_version: v3.0.6
|
||||
#TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
|
||||
# after migration to container download
|
||||
calico_version: v0.20.0
|
||||
calico_cni_version: v1.4.2
|
||||
weave_version: v1.6.1
|
||||
flannel_version: 0.5.5
|
||||
calico_version: v0.13.0
|
||||
calico_plugin_version: v0.7.0
|
||||
kube_version: v1.1.3
|
||||
flannel_server_helper_version: 0.1
|
||||
|
||||
kubectl_checksum: "01b9bea18061a27b1cf30e34fd8ab45cfc096c9a9d57d0ed21072abb40dd3d1d"
|
||||
kubelet_checksum: "62191c66f2d670dd52ddf1d88ef81048977abf1ffaa95ee6333299447eb6a482"
|
||||
# Download URLs
|
||||
etcd_download_url: "https://storage.googleapis.com/kargo/{{etcd_version}}_etcd"
|
||||
calico_cni_download_url: "https://storage.googleapis.com/kargo/{{calico_cni_version}}_calico-cni-plugin"
|
||||
calico_cni_ipam_download_url: "https://storage.googleapis.com/kargo/{{calico_cni_version}}_calico-cni-plugin-ipam"
|
||||
weave_download_url: "https://storage.googleapis.com/kargo/{{weave_version}}_weave"
|
||||
|
||||
kube_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64"
|
||||
# Checksums
|
||||
calico_cni_checksum: "9cab29764681e9d80da826e4b2cd10841cc01a749e0018867d96dd76a4691548"
|
||||
calico_cni_ipam_checksum: "09d076b15b791956efee91646e47fdfdcf382db16082cef4f542a9fff7bae172"
|
||||
weave_checksum: "9bf9d6e5a839e7bcbb28cc00c7acae9d09284faa3e7a3720ca9c2b9e93c68580"
|
||||
etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485"
|
||||
|
||||
flannel_download_url: "https://github.com/coreos/flannel/releases/download/v{{ flannel_version }}/flannel-{{ flannel_version }}-linux-amd64.tar.gz"
|
||||
|
||||
calico_download_url: "https://github.com/Metaswitch/calico-docker/releases/download/{{calico_version}}/calicoctl"
|
||||
|
||||
calico_plugin_download_url: "https://github.com/projectcalico/calico-kubernetes/releases/download/{{calico_plugin_version}}/calico_kubernetes"
|
||||
# Containers
|
||||
# Possible values: host, docker
|
||||
etcd_deployment_type: "docker"
|
||||
etcd_image_repo: "quay.io/coreos/etcd"
|
||||
etcd_image_tag: "{{ etcd_version }}"
|
||||
flannel_server_helper_image_repo: "gcr.io/google_containers/flannel-server-helper"
|
||||
flannel_server_helper_image_tag: "{{ flannel_server_helper_version }}"
|
||||
flannel_image_repo: "quay.io/coreos/flannel"
|
||||
flannel_image_tag: "{{ flannel_version }}"
|
||||
calicoctl_image_repo: "calico/ctl"
|
||||
calicoctl_image_tag: "{{ calico_version }}"
|
||||
calico_node_image_repo: "calico/node"
|
||||
calico_node_image_tag: "{{ calico_version }}"
|
||||
hyperkube_image_repo: "quay.io/coreos/hyperkube"
|
||||
hyperkube_image_tag: "{{ kube_version }}_coreos.0"
|
||||
|
||||
downloads:
|
||||
- name: calico
|
||||
dest: calico/bin/calicoctl
|
||||
url: "{{calico_download_url}}"
|
||||
|
||||
- name: calico-plugin
|
||||
calico_cni_plugin:
|
||||
dest: calico/bin/calico
|
||||
url: "{{calico_plugin_download_url}}"
|
||||
version: "{{calico_cni_version}}"
|
||||
sha256: "{{ calico_cni_checksum }}"
|
||||
source_url: "{{ calico_cni_download_url }}"
|
||||
url: "{{ calico_cni_download_url }}"
|
||||
owner: "root"
|
||||
mode: "0755"
|
||||
enabled: "{{ kube_network_plugin == 'calico' }}"
|
||||
calico_cni_plugin_ipam:
|
||||
dest: calico/bin/calico-ipam
|
||||
version: "{{calico_cni_version}}"
|
||||
sha256: "{{ calico_cni_ipam_checksum }}"
|
||||
source_url: "{{ calico_cni_ipam_download_url }}"
|
||||
url: "{{ calico_cni_ipam_download_url }}"
|
||||
owner: "root"
|
||||
mode: "0755"
|
||||
enabled: "{{ kube_network_plugin == 'calico' }}"
|
||||
weave:
|
||||
dest: weave/bin/weave
|
||||
version: "{{weave_version}}"
|
||||
source_url: "{{weave_download_url}}"
|
||||
url: "{{weave_download_url}}"
|
||||
sha256: "{{ weave_checksum }}"
|
||||
owner: "root"
|
||||
mode: "0755"
|
||||
enabled: "{{ kube_network_plugin == 'weave' }}"
|
||||
etcd:
|
||||
version: "{{etcd_version}}"
|
||||
dest: "etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
|
||||
sha256: "{{ etcd_checksum }}"
|
||||
source_url: "{{ etcd_download_url }}"
|
||||
url: "{{ etcd_download_url }}"
|
||||
unarchive: true
|
||||
owner: "etcd"
|
||||
mode: "0755"
|
||||
container: "{{ etcd_deployment_type == 'docker' }}"
|
||||
repo: "{{ etcd_image_repo }}"
|
||||
tag: "{{ etcd_image_tag }}"
|
||||
hyperkube:
|
||||
container: true
|
||||
repo: "{{ hyperkube_image_repo }}"
|
||||
tag: "{{ hyperkube_image_tag }}"
|
||||
flannel:
|
||||
container: true
|
||||
repo: "{{ flannel_image_repo }}"
|
||||
tag: "{{ flannel_image_tag }}"
|
||||
enabled: "{{ kube_network_plugin == 'flannel' }}"
|
||||
flannel_server_helper:
|
||||
container: true
|
||||
repo: "{{ flannel_server_helper_image_repo }}"
|
||||
tag: "{{ flannel_server_helper_image_tag }}"
|
||||
enabled: "{{ kube_network_plugin == 'flannel' }}"
|
||||
calicoctl:
|
||||
container: true
|
||||
repo: "{{ calicoctl_image_repo }}"
|
||||
tag: "{{ calicoctl_image_tag }}"
|
||||
enabled: "{{ kube_network_plugin == 'calico' }}"
|
||||
calico_node:
|
||||
container: true
|
||||
repo: "{{ calico_node_image_repo }}"
|
||||
tag: "{{ calico_node_image_tag }}"
|
||||
enabled: "{{ kube_network_plugin == 'calico' }}"
|
||||
|
||||
- name: flannel
|
||||
dest: flannel/flannel-{{ flannel_version }}-linux-amd64.tar.gz
|
||||
url: "{{flannel_download_url}}"
|
||||
unarchive: yes
|
||||
|
||||
- name: kubernetes-kubelet
|
||||
dest: kubernetes/bin/kubelet
|
||||
sha256: "{{kubelet_checksum}}"
|
||||
url: "{{ kube_download_url }}/kubelet"
|
||||
|
||||
- name: kubernetes-kubectl
|
||||
dest: kubernetes/bin/kubectl
|
||||
sha256: "{{kubectl_checksum}}"
|
||||
url: "{{ kube_download_url }}/kubectl"
|
||||
download:
|
||||
container: "{{ file.container|default('false') }}"
|
||||
repo: "{{ file.repo|default(None) }}"
|
||||
tag: "{{ file.tag|default(None) }}"
|
||||
enabled: "{{ file.enabled|default('true') }}"
|
||||
dest: "{{ file.dest|default(None) }}"
|
||||
version: "{{ file.version|default(None) }}"
|
||||
sha256: "{{ file.sha256|default(None) }}"
|
||||
source_url: "{{ file.source_url|default(None) }}"
|
||||
url: "{{ file.url|default(None) }}"
|
||||
unarchive: "{{ file.unarchive|default('false') }}"
|
||||
owner: "{{ file.owner|default('kube') }}"
|
||||
mode: "{{ file.mode|default(None) }}"
|
||||
|
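The refactor replaces the flat downloads list with a dict of named entries, plus a download mapping that back-fills defaults through file.*|default(...). A consumer selects one entry and passes it in as file; that wiring is not part of this diff, so the invocation below is a hypothetical sketch:

roles:
  - { role: download, file: "{{ downloads.weave }}" }
  - { role: download, file: "{{ downloads.etcd }}" }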
@@ -1,19 +1,83 @@
|
||||
---
|
||||
- name: downloading...
|
||||
debug:
|
||||
msg: "{{ download.url }}"
|
||||
when: "{{ download.enabled|bool and not download.container|bool }}"
|
||||
|
||||
- name: Create dest directories
|
||||
file: path={{local_release_dir}}/{{item.dest|dirname}} state=directory recurse=yes
|
||||
with_items: downloads
|
||||
file: path={{local_release_dir}}/{{download.dest|dirname}} state=directory recurse=yes
|
||||
when: "{{ download.enabled|bool and not download.container|bool }}"
|
||||
|
||||
- name: Download items
|
||||
get_url:
|
||||
url: "{{item.url}}"
|
||||
dest: "{{local_release_dir}}/{{item.dest}}"
|
||||
sha256sum: "{{item.sha256 | default(omit)}}"
|
||||
with_items: downloads
|
||||
url: "{{download.url}}"
|
||||
dest: "{{local_release_dir}}/{{download.dest}}"
|
||||
sha256sum: "{{download.sha256 | default(omit)}}"
|
||||
owner: "{{ download.owner|default(omit) }}"
|
||||
mode: "{{ download.mode|default(omit) }}"
|
||||
register: get_url_result
|
||||
until: "'OK' in get_url_result.msg or 'file already exists' in get_url_result.msg"
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
when: "{{ download.enabled|bool and not download.container|bool }}"
|
||||
|
||||
- name: Extract archives
|
||||
unarchive:
|
||||
src: "{{ local_release_dir }}/{{item.dest}}"
|
||||
dest: "{{ local_release_dir }}/{{item.dest|dirname}}"
|
||||
copy: no
|
||||
when: "{{item.unarchive is defined and item.unarchive == True}}"
|
||||
with_items: downloads
|
||||
src: "{{ local_release_dir }}/{{download.dest}}"
|
||||
dest: "{{ local_release_dir }}/{{download.dest|dirname}}"
|
||||
owner: "{{ download.owner|default(omit) }}"
|
||||
mode: "{{ download.mode|default(omit) }}"
|
||||
copy: no
|
||||
when: "{{ download.enabled|bool and not download.container|bool and download.unarchive is defined and download.unarchive == True }}"
|
||||
|
||||
- name: Fix permissions
|
||||
file:
|
||||
state: file
|
||||
path: "{{local_release_dir}}/{{download.dest}}"
|
||||
owner: "{{ download.owner|default(omit) }}"
|
||||
mode: "{{ download.mode|default(omit) }}"
|
||||
when: "{{ download.enabled|bool and not download.container|bool and (download.unarchive is not defined or download.unarchive == False) }}"
|
||||
|
||||
- name: pulling...
|
||||
debug:
|
||||
msg: "{{ download.repo }}:{{ download.tag }}"
|
||||
when: "{{ download.enabled|bool and download.container|bool }}"
|
||||
|
||||
- name: Create dest directory for saved/loaded container images
|
||||
file: path="{{local_release_dir}}/containers" state=directory recurse=yes
|
||||
when: "{{ download.enabled|bool and download.container|bool }}"
|
||||
|
||||
#NOTE(bogdando) this brings no docker-py deps for nodes
|
||||
- name: Download containers
|
||||
command: "/usr/bin/docker pull {{ download.repo }}:{{ download.tag }}"
|
||||
register: pull_task_result
|
||||
until: pull_task_result.rc == 0
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
when: "{{ download.enabled|bool and download.container|bool }}"
|
||||
delegate_to: "{{ groups['kube-master'][0] if download_run_once|bool else inventory_hostname }}"
|
||||
run_once: "{{ download_run_once|bool }}"
|
||||
|
||||
- set_fact:
|
||||
fname: "{{local_release_dir}}/containers/{{download.repo|regex_replace('/|\0|:', '_')}}:{{download.tag|regex_replace('/|\0|:', '_')}}.tar"
|
||||
|
||||
- name: Download | save container images
|
||||
shell: docker save "{{ download.repo }}:{{ download.tag }}" > "{{ fname }}"
|
||||
delegate_to: "{{groups['kube-master'][0]}}"
|
||||
run_once: true
|
||||
when: ansible_os_family != "CoreOS" and download_run_once|bool and download.enabled|bool and download.container|bool
|
||||
|
||||
- name: Download | get container images
|
||||
synchronize:
|
||||
src: "{{ fname }}"
|
||||
dest: "{{local_release_dir}}/containers"
|
||||
mode: push
|
||||
register: get_task
|
||||
until: get_task|success
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
when: ansible_os_family != "CoreOS" and inventory_hostname != groups['kube-master'][0] and download_run_once|bool and download.enabled|bool and download.container|bool
|
||||
|
||||
- name: Download | load container images
|
||||
shell: docker load < "{{ fname }}"
|
||||
when: ansible_os_family != "CoreOS" and inventory_hostname != groups['kube-master'][0] and download_run_once|bool and download.enabled|bool and download.container|bool
|
||||
|
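Taken together, the container tasks implement a pull-once/distribute-everywhere flow when download_run_once is true, so only the first master needs registry access:

# 1. first master:    docker pull <repo>:<tag>             (Download containers)
# 2. first master:    docker save > containers/<name>.tar  (save container images)
# 3. all other hosts: synchronize pushes the tar over      (get container images)
# 4. all other hosts: docker load < containers/<name>.tar  (load container images)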
2
roles/etcd/defaults/main.yml
Normal file
@@ -0,0 +1,2 @@
|
||||
---
|
||||
etcd_bin_dir: "{{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64/"
|
Some files were not shown because too many files have changed in this diff.