Compare commits
527 commits: v2.14.1 ... release-2.
[Commit list omitted: the compare page listed 527 commit SHA1s, but the author, date, and message columns were empty in this extract.]
.github/ISSUE_TEMPLATE/support.md (vendored, 2 changes)

@@ -1,7 +1,7 @@
 ---
 name: Support Request
 about: Support request or question relating to Kubespray
-labels: triage/support
+labels: kind/support

 ---

.gitignore (vendored, 1 change)

@@ -15,6 +15,7 @@ contrib/terraform/aws/credentials.tfvars
 **/*.sw[pon]
 *~
 vagrant/
+plugins/mitogen

 # Ansible inventory
 inventory/*
.gitlab-ci.yml

@@ -8,13 +8,14 @@ stages:
   - deploy-special

 variables:
-  KUBESPRAY_VERSION: v2.13.3
+  KUBESPRAY_VERSION: v2.15.1
   FAILFASTCI_NAMESPACE: 'kargo-ci'
   GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
   ANSIBLE_FORCE_COLOR: "true"
   MAGIC: "ci check this"
   TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
   CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
+  CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
   GS_ACCESS_KEY_ID: $GS_KEY
   GS_SECRET_ACCESS_KEY: $GS_SECRET
   CONTAINER_ENGINE: docker

@@ -29,7 +30,9 @@ variables:
   MITOGEN_ENABLE: "false"
   ANSIBLE_LOG_LEVEL: "-vv"
   RECOVER_CONTROL_PLANE_TEST: "false"
-  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"
+  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
+  TERRAFORM_14_VERSION: 0.14.10
+  TERRAFORM_13_VERSION: 0.13.6

 before_script:
   - ./tests/scripts/rebase.sh
.gitlab-ci/lint.yml

@@ -14,7 +14,7 @@ vagrant-validate:
   stage: unit-tests
   tags: [light]
   variables:
-    VAGRANT_VERSION: 2.2.4
+    VAGRANT_VERSION: 2.2.15
   script:
     - ./tests/scripts/vagrant-validate.sh
   except: ['triggers', 'master']

@@ -64,9 +64,9 @@ markdownlint:
   tags: [light]
   image: node
   before_script:
-    - npm install -g markdownlint-cli
+    - npm install -g markdownlint-cli@0.22.0
   script:
-    - markdownlint README.md docs --ignore docs/_sidebar.md
+    - markdownlint $(find . -name '*.md' | grep -vF './.git') --ignore docs/_sidebar.md --ignore contrib/dind/README.md

 ci-matrix:
   stage: unit-tests
.gitlab-ci/packet.yml

@@ -1,43 +1,54 @@
 ---
-.packet: &packet
+.packet:
   extends: .testcases
   variables:
-    CI_PLATFORM: "packet"
-    SSH_USER: "kubespray"
+    CI_PLATFORM: packet
+    SSH_USER: kubespray
   tags:
     - packet
-  except: [triggers]
+
+# CI template for PRs
+.packet_pr:
+  only: [/^pr-.*$/]
+  except: ['triggers']
+  extends: .packet
+
+# CI template for periodic CI jobs
+# Enabled when PERIODIC_CI_ENABLED var is set
+.packet_periodic:
+  only:
+    variables:
+      - $PERIODIC_CI_ENABLED
+  allow_failure: true
+  extends: .packet

 packet_ubuntu18-calico-aio:
   stage: deploy-part1
-  extends: .packet
+  extends: .packet_pr
   when: on_success

 # Future AIO job
 packet_ubuntu20-calico-aio:
   stage: deploy-part1
-  extends: .packet
+  extends: .packet_pr
   when: on_success

 # ### PR JOBS PART2

 packet_centos7-flannel-containerd-addons-ha:
-  extends: .packet
+  extends: .packet_pr
   stage: deploy-part2
   when: on_success
   variables:
     MITOGEN_ENABLE: "true"

-packet_centos7-crio:
-  extends: .packet
+packet_centos8-crio:
+  extends: .packet_pr
   stage: deploy-part2
   when: on_success
   variables:
     MITOGEN_ENABLE: "true"

 packet_ubuntu18-crio:
-  extends: .packet
+  extends: .packet_pr
   stage: deploy-part2
   when: manual
   variables:

@@ -45,44 +56,44 @@ packet_ubuntu18-crio:

 packet_ubuntu16-canal-kubeadm-ha:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_periodic
   when: on_success

 packet_ubuntu16-canal-sep:
   stage: deploy-special
-  extends: .packet
+  extends: .packet_pr
   when: manual

 packet_ubuntu16-flannel-ha:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_pr
   when: manual

 packet_ubuntu16-kube-router-sep:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_pr
   when: manual

 packet_ubuntu16-kube-router-svc-proxy:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_pr
   when: manual

 packet_debian10-cilium-svc-proxy:
   stage: deploy-part2
-  extends: .packet
-  when: manual
+  extends: .packet_periodic
+  when: on_success

 packet_debian10-containerd:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_pr
   when: on_success
   variables:
     MITOGEN_ENABLE: "true"

 packet_centos7-calico-ha-once-localhost:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_pr
   when: on_success
   variables:
     # This will instruct Docker not to start over TLS.

@@ -92,97 +103,91 @@ packet_centos7-calico-ha-once-localhost:

 packet_centos8-kube-ovn:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_periodic
   when: on_success

 packet_centos8-calico:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_pr
   when: on_success

 packet_fedora32-weave:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_pr
   when: on_success

 packet_opensuse-canal:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_periodic
   when: on_success

 packet_ubuntu18-ovn4nfv:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_periodic
   when: on_success

 # Contiv does not work in k8s v1.16
 # packet_ubuntu16-contiv-sep:
 #   stage: deploy-part2
 #   extends: .packet
 #   when: on_success

 # ### MANUAL JOBS

 packet_ubuntu16-weave-sep:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_pr
   when: manual

 packet_ubuntu18-cilium-sep:
   stage: deploy-special
-  extends: .packet
+  extends: .packet_pr
   when: manual

 packet_ubuntu18-flannel-containerd-ha:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_pr
   when: manual

 packet_ubuntu18-flannel-containerd-ha-once:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_pr
   when: manual

 packet_debian9-macvlan:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_pr
   when: manual

 packet_centos7-calico-ha:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_pr
   when: manual

 packet_centos7-kube-router:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_pr
   when: manual

 packet_centos7-multus-calico:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_pr
   when: manual

 packet_oracle7-canal-ha:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_pr
   when: manual

-packet_fedora31-flannel:
+packet_fedora33-calico:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_periodic
   when: on_success
   variables:
     MITOGEN_ENABLE: "true"

 packet_amazon-linux-2-aio:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_pr
   when: manual

 packet_fedora32-kube-ovn-containerd:
   stage: deploy-part2
-  extends: .packet
+  extends: .packet_periodic
   when: on_success

 # ### PR JOBS PART3

@@ -190,7 +195,7 @@ packet_fedora32-kube-ovn-containerd:

 packet_centos7-weave-upgrade-ha:
   stage: deploy-part3
-  extends: .packet
+  extends: .packet_periodic
   when: on_success
   variables:
     UPGRADE_TEST: basic

@@ -198,7 +203,7 @@ packet_centos7-weave-upgrade-ha:

 packet_debian9-calico-upgrade:
   stage: deploy-part3
-  extends: .packet
+  extends: .packet_pr
   when: on_success
   variables:
     UPGRADE_TEST: graceful

@@ -206,7 +211,7 @@ packet_debian9-calico-upgrade:

 packet_debian9-calico-upgrade-once:
   stage: deploy-part3
-  extends: .packet
+  extends: .packet_periodic
   when: on_success
   variables:
     UPGRADE_TEST: graceful

@@ -214,16 +219,16 @@ packet_debian9-calico-upgrade-once:

 packet_ubuntu18-calico-ha-recover:
   stage: deploy-part3
-  extends: .packet
+  extends: .packet_periodic
   when: on_success
   variables:
     RECOVER_CONTROL_PLANE_TEST: "true"
-    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"
+    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"

 packet_ubuntu18-calico-ha-recover-noquorum:
   stage: deploy-part3
-  extends: .packet
+  extends: .packet_periodic
   when: on_success
   variables:
     RECOVER_CONTROL_PLANE_TEST: "true"
-    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube-master[1:]"
+    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube_control_plane[1:]"
.gitlab-ci/shellcheck.yml

@@ -4,7 +4,7 @@ shellcheck:
   stage: unit-tests
   tags: [light]
   variables:
-    SHELLCHECK_VERSION: v0.6.0
+    SHELLCHECK_VERSION: v0.7.1
   before_script:
     - ./tests/scripts/rebase.sh
     - curl --silent --location "https://github.com/koalaman/shellcheck/releases/download/"${SHELLCHECK_VERSION}"/shellcheck-"${SHELLCHECK_VERSION}".linux.x86_64.tar.xz" | tar -xJv

@@ -12,5 +12,5 @@ shellcheck:
     - shellcheck --version
   script:
     # Run shellcheck for all *.sh except contrib/
-    - find . -name '*.sh' -not -path './contrib/*' | xargs shellcheck --severity error
+    - find . -name '*.sh' -not -path './contrib/*' -not -path './.git/*' | xargs shellcheck --severity error
   except: ['triggers', 'master']
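To reproduce the updated shellcheck gate locally (assuming shellcheck v0.7.1 is on your PATH, the version CI now pins):

```ShellSession
# Same invocation as the updated CI script line above
find . -name '*.sh' -not -path './contrib/*' -not -path './.git/*' | xargs shellcheck --severity error
```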
.gitlab-ci/terraform.yml

@@ -53,31 +53,92 @@
     # Cleanup regardless of exit code
     - chronic ./tests/scripts/testcases_cleanup.sh

-tf-validate-openstack:
+tf-0.13.x-validate-openstack:
   extends: .terraform_validate
   variables:
-    TF_VERSION: 0.12.24
+    TF_VERSION: $TERRAFORM_13_VERSION
     PROVIDER: openstack
     CLUSTER: $CI_COMMIT_REF_NAME

-tf-validate-packet:
+tf-0.13.x-validate-packet:
   extends: .terraform_validate
   variables:
-    TF_VERSION: 0.12.24
+    TF_VERSION: $TERRAFORM_13_VERSION
     PROVIDER: packet
     CLUSTER: $CI_COMMIT_REF_NAME

-tf-validate-aws:
+tf-0.13.x-validate-aws:
   extends: .terraform_validate
   variables:
-    TF_VERSION: 0.12.24
+    TF_VERSION: $TERRAFORM_13_VERSION
     PROVIDER: aws
     CLUSTER: $CI_COMMIT_REF_NAME

+tf-0.13.x-validate-exoscale:
+  extends: .terraform_validate
+  variables:
+    TF_VERSION: $TERRAFORM_13_VERSION
+    PROVIDER: exoscale
+
+tf-0.13.x-validate-vsphere:
+  extends: .terraform_validate
+  variables:
+    TF_VERSION: $TERRAFORM_13_VERSION
+    PROVIDER: vsphere
+    CLUSTER: $CI_COMMIT_REF_NAME
+
+tf-0.13.x-validate-upcloud:
+  extends: .terraform_validate
+  variables:
+    TF_VERSION: $TERRAFORM_13_VERSION
+    PROVIDER: upcloud
+    CLUSTER: $CI_COMMIT_REF_NAME
+
+tf-0.14.x-validate-openstack:
+  extends: .terraform_validate
+  variables:
+    TF_VERSION: $TERRAFORM_14_VERSION
+    PROVIDER: openstack
+    CLUSTER: $CI_COMMIT_REF_NAME
+
+tf-0.14.x-validate-packet:
+  extends: .terraform_validate
+  variables:
+    TF_VERSION: $TERRAFORM_14_VERSION
+    PROVIDER: packet
+    CLUSTER: $CI_COMMIT_REF_NAME
+
+tf-0.14.x-validate-aws:
+  extends: .terraform_validate
+  variables:
+    TF_VERSION: $TERRAFORM_14_VERSION
+    PROVIDER: aws
+    CLUSTER: $CI_COMMIT_REF_NAME
+
+tf-0.14.x-validate-exoscale:
+  extends: .terraform_validate
+  variables:
+    TF_VERSION: $TERRAFORM_14_VERSION
+    PROVIDER: exoscale
+
+tf-0.14.x-validate-vsphere:
+  extends: .terraform_validate
+  variables:
+    TF_VERSION: $TERRAFORM_14_VERSION
+    PROVIDER: vsphere
+    CLUSTER: $CI_COMMIT_REF_NAME
+
+tf-0.14.x-validate-upcloud:
+  extends: .terraform_validate
+  variables:
+    TF_VERSION: $TERRAFORM_14_VERSION
+    PROVIDER: upcloud
+    CLUSTER: $CI_COMMIT_REF_NAME
+
 # tf-packet-ubuntu16-default:
 #   extends: .terraform_apply
 #   variables:
-#     TF_VERSION: 0.12.24
+#     TF_VERSION: $TERRAFORM_14_VERSION
 #     PROVIDER: packet
 #     CLUSTER: $CI_COMMIT_REF_NAME
 #     TF_VAR_number_of_k8s_masters: "1"

@@ -91,7 +152,7 @@ tf-validate-aws:
 # tf-packet-ubuntu18-default:
 #   extends: .terraform_apply
 #   variables:
-#     TF_VERSION: 0.12.24
+#     TF_VERSION: $TERRAFORM_14_VERSION
 #     PROVIDER: packet
 #     CLUSTER: $CI_COMMIT_REF_NAME
 #     TF_VAR_number_of_k8s_masters: "1"

@@ -146,9 +207,10 @@ tf-elastx_ubuntu18-calico:
   extends: .terraform_apply
   stage: deploy-part3
   when: on_success
+  allow_failure: true
   variables:
     <<: *elastx_variables
-    TF_VERSION: 0.12.24
+    TF_VERSION: $TERRAFORM_14_VERSION
     PROVIDER: openstack
     CLUSTER: $CI_COMMIT_REF_NAME
     ANSIBLE_TIMEOUT: "60"

@@ -174,44 +236,45 @@ tf-elastx_ubuntu18-calico:
     TF_VAR_image: ubuntu-18.04-server-latest
     TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

+# OVH voucher expired, commenting job until things are sorted out

-tf-ovh_cleanup:
-  stage: unit-tests
-  tags: [light]
-  image: python
-  environment: ovh
-  variables:
-    <<: *ovh_variables
-  before_script:
-    - pip install -r scripts/openstack-cleanup/requirements.txt
-  script:
-    - ./scripts/openstack-cleanup/main.py
+# tf-ovh_cleanup:
+#   stage: unit-tests
+#   tags: [light]
+#   image: python
+#   environment: ovh
+#   variables:
+#     <<: *ovh_variables
+#   before_script:
+#     - pip install -r scripts/openstack-cleanup/requirements.txt
+#   script:
+#     - ./scripts/openstack-cleanup/main.py

-tf-ovh_ubuntu18-calico:
-  extends: .terraform_apply
-  when: on_success
-  environment: ovh
-  variables:
-    <<: *ovh_variables
-    TF_VERSION: 0.12.24
-    PROVIDER: openstack
-    CLUSTER: $CI_COMMIT_REF_NAME
-    ANSIBLE_TIMEOUT: "60"
-    SSH_USER: ubuntu
-    TF_VAR_number_of_k8s_masters: "0"
-    TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
-    TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
-    TF_VAR_number_of_etcd: "0"
-    TF_VAR_number_of_k8s_nodes: "0"
-    TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
-    TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
-    TF_VAR_number_of_bastions: "0"
-    TF_VAR_number_of_k8s_masters_no_etcd: "0"
-    TF_VAR_use_neutron: "0"
-    TF_VAR_floatingip_pool: "Ext-Net"
-    TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
-    TF_VAR_network_name: "Ext-Net"
-    TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
-    TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
-    TF_VAR_image: "Ubuntu 18.04"
-    TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
+# tf-ovh_ubuntu18-calico:
+#   extends: .terraform_apply
+#   when: on_success
+#   environment: ovh
+#   variables:
+#     <<: *ovh_variables
+#     TF_VERSION: $TERRAFORM_14_VERSION
+#     PROVIDER: openstack
+#     CLUSTER: $CI_COMMIT_REF_NAME
+#     ANSIBLE_TIMEOUT: "60"
+#     SSH_USER: ubuntu
+#     TF_VAR_number_of_k8s_masters: "0"
+#     TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
+#     TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
+#     TF_VAR_number_of_etcd: "0"
+#     TF_VAR_number_of_k8s_nodes: "0"
+#     TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
+#     TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
+#     TF_VAR_number_of_bastions: "0"
+#     TF_VAR_number_of_k8s_masters_no_etcd: "0"
+#     TF_VAR_use_neutron: "0"
+#     TF_VAR_floatingip_pool: "Ext-Net"
+#     TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
+#     TF_VAR_network_name: "Ext-Net"
+#     TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
+#     TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
+#     TF_VAR_image: "Ubuntu 18.04"
+#     TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
.gitlab-ci/vagrant.yml

@@ -38,6 +38,11 @@ molecule_tests:
   after_script:
     - chronic ./tests/scripts/testcases_cleanup.sh

+vagrant_ubuntu18-calico-dual-stack:
+  stage: deploy-part2
+  extends: .vagrant
+  when: on_success
+
 vagrant_ubuntu18-flannel:
   stage: deploy-part2
   extends: .vagrant
.yamllint

@@ -1,6 +1,9 @@
 ---
 extends: default

+ignore: |
+  .git/
+
 rules:
   braces:
     min-spaces-inside: 0
CONTRIBUTING.md

@@ -10,7 +10,7 @@ To install development dependencies you can use `pip install -r tests/requirements.txt`

 #### Linting

-Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `./tests/scripts/ansible-lint.sh`
+Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `ansible-lint`

 #### Molecule

Dockerfile (33 changes)

@@ -1,21 +1,30 @@
-FROM ubuntu:18.04
+# Use imutable image tags rather than mutable tags (like ubuntu:18.04)
+FROM ubuntu:bionic-20200807

 RUN mkdir /kubespray
 WORKDIR /kubespray
-RUN apt update -y && \
-    apt install -y \
+RUN apt update -y \
+    && apt install -y \
     libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
-    ca-certificates curl gnupg2 software-properties-common python3-pip rsync
-RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
-    add-apt-repository \
+    ca-certificates curl gnupg2 software-properties-common python3-pip rsync \
+    && rm -rf /var/lib/apt/lists/*
+RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
+    && add-apt-repository \
     "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
     $(lsb_release -cs) \
     stable" \
-    && apt update -y && apt-get install docker-ce -y
+    && apt update -y && apt-get install --no-install-recommends -y docker-ce \
+    && rm -rf /var/lib/apt/lists/*

 WORKDIR /kubespray
 COPY . .
-RUN /usr/bin/python3 -m pip install pip -U && /usr/bin/python3 -m pip install -r tests/requirements.txt && python3 -m pip install -r requirements.txt && update-alternatives --install /usr/bin/python python /usr/bin/python3 1
-RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.17.5/bin/linux/amd64/kubectl \
-    && chmod a+x kubectl && cp kubectl /usr/local/bin/kubectl
+RUN /usr/bin/python3 -m pip install pip -U \
+    && /usr/bin/python3 -m pip install -r tests/requirements.txt \
+    && python3 -m pip install -r requirements.txt \
+    && update-alternatives --install /usr/bin/python python /usr/bin/python3 1
+
+RUN KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
+    && curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/amd64/kubectl \
+    && chmod a+x kubectl \
+    && mv kubectl /usr/local/bin/kubectl
+
+# Some tools like yamllint need this
+ENV LANG=C.UTF-8
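The rewritten kubectl step pins the download to the repository's own default Kubernetes version rather than a hard-coded v1.17.5. A quick local check of what that `sed` expression resolves to (a sketch, run from a kubespray checkout):

```ShellSession
# Prints the default kube_version the Dockerfile will download, e.g. v1.20.7 per this compare's README
sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml
```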
OWNERS (2 changes)

@@ -4,3 +4,5 @@ approvers:
   - kubespray-approvers
 reviewers:
   - kubespray-reviewers
+emeritus_approvers:
+  - kubespray-emeritus_approvers
OWNERS_ALIASES

@@ -1,19 +1,18 @@
 aliases:
   kubespray-approvers:
-    - ant31
     - mattymo
-    - atoms
     - chadswen
     - mirwan
     - miouge1
-    - riverzhang
-    - verwilst
     - woopstar
     - luckysb
+    - floryut
   kubespray-reviewers:
-    - jjungnickel
-    - archifleks
     - holmsten
     - bozzo
-    - floryut
+    - eppo
+    - oomichi
+  kubespray-emeritus_approvers:
+    - riverzhang
+    - atoms
+    - ant31
README.md (71 changes)

@@ -32,7 +32,7 @@ CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py
 # Review and change parameters under ``inventory/mycluster/group_vars``
 cat inventory/mycluster/group_vars/all/all.yml
-cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml
+cat inventory/mycluster/group_vars/k8s_cluster/k8s_cluster.yml

 # Deploy Kubespray with Ansible Playbook - run the playbook as root
 # The option `--become` is required, as for example writing SSL keys in /etc/,

@@ -48,11 +48,23 @@ As a consequence, `ansible-playbook` command will fail with:
 ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
 ```

-probably pointing on a task depending on a module present in requirements.txt (i.e. "unseal vault").
+probably pointing on a task depending on a module present in requirements.txt.

 One way of solving this would be to uninstall the Ansible package and then, to install it via pip but it is not always possible.
 A workaround consists of setting `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` environment variables respectively to the `ansible/modules` and `ansible/module_utils` subdirectories of pip packages installation location, which can be found in the Location field of the output of `pip show [package]` before executing `ansible-playbook`.

+A simple way to ensure you get all the correct version of Ansible is to use the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
+You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:
+
+```ShellSession
+docker pull quay.io/kubespray/kubespray:v2.15.1
+docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
+  --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
+  quay.io/kubespray/kubespray:v2.15.1 bash
+# Inside the container you may now run the kubespray playbooks:
+ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
+```
+
 ### Vagrant

 For Vagrant we need to install python dependencies for provisioning tasks.
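The `ANSIBLE_LIBRARY`/`ANSIBLE_MODULE_UTILS` workaround described in the hunk above can be scripted; the following is a minimal sketch (not from the repo) that assumes a pip-installed `ansible` package whose `Location:` field points at its site-packages directory:

```ShellSession
# Sketch of the workaround; the resolved path depends on your environment
ANSIBLE_LOCATION="$(pip show ansible | sed -n 's/^Location: //p')"
export ANSIBLE_LIBRARY="${ANSIBLE_LOCATION}/ansible/modules"
export ANSIBLE_MODULE_UTILS="${ANSIBLE_LOCATION}/ansible/module_utils"
ansible-playbook -i inventory/mycluster/hosts.yaml --become cluster.yml
```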
@@ -105,51 +117,55 @@ vagrant up
 - **Flatcar Container Linux by Kinvolk**
 - **Debian** Buster, Jessie, Stretch, Wheezy
 - **Ubuntu** 16.04, 18.04, 20.04
-- **CentOS/RHEL** 7, 8 (experimental: see [centos 8 notes](docs/centos8.md))
-- **Fedora** 31, 32
+- **CentOS/RHEL** 7, [8](docs/centos8.md)
+- **Fedora** 32, 33
 - **Fedora CoreOS** (experimental: see [fcos Note](docs/fcos.md))
-- **openSUSE** Leap 42.3/Tumbleweed
-- **Oracle Linux** 7, 8 (experimental: [centos 8 notes](docs/centos8.md) apply)
+- **openSUSE** Leap 15.x/Tumbleweed
+- **Oracle Linux** 7, [8](docs/centos8.md)
+- **Alma Linux** [8](docs/centos8.md)
 - **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md)

 Note: Upstart/SysV init based OS types are not supported.

 ## Supported Components

 - Core
-  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.18.8
-  - [etcd](https://github.com/coreos/etcd) v3.4.3
+  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.20.7
+  - [etcd](https://github.com/coreos/etcd) v3.4.13
   - [docker](https://www.docker.com/) v19.03 (see note)
-  - [containerd](https://containerd.io/) v1.2.13
-  - [cri-o](http://cri-o.io/) v1.17 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
+  - [containerd](https://containerd.io/) v1.4.4
+  - [cri-o](http://cri-o.io/) v1.20 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
 - Network Plugin
-  - [cni-plugins](https://github.com/containernetworking/plugins) v0.8.6
-  - [calico](https://github.com/projectcalico/calico) v3.15.2
+  - [cni-plugins](https://github.com/containernetworking/plugins) v0.9.1
+  - [calico](https://github.com/projectcalico/calico) v3.17.4
   - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
-  - [cilium](https://github.com/cilium/cilium) v1.8.3
-  - [contiv](https://github.com/contiv/install) v1.2.1
-  - [flanneld](https://github.com/coreos/flannel) v0.12.0
-  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.3.0
-  - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.0.1
-  - [multus](https://github.com/intel/multus-cni) v3.6.0
+  - [cilium](https://github.com/cilium/cilium) v1.8.9
+  - [flanneld](https://github.com/coreos/flannel) v0.13.0
+  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.6.2
+  - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.2.2
+  - [multus](https://github.com/intel/multus-cni) v3.7.0
   - [ovn4nfv](https://github.com/opnfv/ovn4nfv-k8s-plugin) v1.1.0
-  - [weave](https://github.com/weaveworks/weave) v2.7.0
+  - [weave](https://github.com/weaveworks/weave) v2.8.1
 - Application
   - [ambassador](https://github.com/datawire/ambassador): v1.5
   - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
   - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
   - [cert-manager](https://github.com/jetstack/cert-manager) v0.16.1
-  - [coredns](https://github.com/coredns/coredns) v1.6.7
-  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.35.0
+  - [coredns](https://github.com/coredns/coredns) v1.7.0
+  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.43.0

-Note: The list of validated [docker versions](https://kubernetes.io/docs/setup/production-environment/container-runtimes/#docker) is 1.13.1, 17.03, 17.06, 17.09, 18.06, 18.09 and 19.03. The recommended docker version is 19.03. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
+## Container Runtime Notes
+
+- The list of available docker version is 18.09, 19.03 and 20.10. The recommended docker version is 19.03. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
+- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)

 ## Requirements

-- **Minimum required version of Kubernetes is v1.17**
-- **Ansible v2.9+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
+- **Minimum required version of Kubernetes is v1.19**
+- **Ansible v2.9.x, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands, Ansible 2.10.x is experimentally supported for now**
 - The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
 - The target servers are configured to allow **IPv4 forwarding**.
 - **Your ssh key must be copied** to all the servers part of your inventory.
 - If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
 - The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
   in order to avoid any issue during deployment you should disable your firewall.
 - If kubespray is ran from non-root user account, correct privilege escalation method

@@ -179,9 +195,6 @@ You can choose between 10 network plugins. (default: `calico`, except Vagrant uses `flannel`)
 - [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.

-- [contiv](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
-  apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.
-
 - [ovn4nfv](docs/ovn4nfv.md): [ovn4nfv-k8s-plugins](https://github.com/opnfv/ovn4nfv-k8s-plugin) is the network controller, OVS agent and CNI server to offer basic SFC and OVN overlay networking.

 - [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.

@@ -208,6 +221,8 @@ See also [Network checker](docs/netcheck.md).
 - [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller.

+- [metallb](docs/metallb.md): the MetalLB bare-metal service LoadBalancer provider.
+
 ## Community docs and resources

 - [kubernetes.io/docs/setup/production-environment/tools/kubespray/](https://kubernetes.io/docs/setup/production-environment/tools/kubespray/)
Vagrantfile (vendored, 60 changes)

@@ -26,12 +26,14 @@ SUPPORTED_OS = {
   "centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
   "centos8" => {box: "centos/8", user: "vagrant"},
   "centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
-  "fedora31" => {box: "fedora/31-cloud-base", user: "vagrant"},
   "fedora32" => {box: "fedora/32-cloud-base", user: "vagrant"},
-  "opensuse" => {box: "bento/opensuse-leap-15.1", user: "vagrant"},
+  "fedora33" => {box: "fedora/33-cloud-base", user: "vagrant"},
+  "opensuse" => {box: "bento/opensuse-leap-15.2", user: "vagrant"},
   "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
   "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
   "oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
+  "rhel7" => {box: "generic/rhel7", user: "vagrant"},
+  "rhel8" => {box: "generic/rhel8", user: "vagrant"},
 }

 if File.exist?(CONFIG)

@@ -47,6 +49,7 @@ $vm_cpus ||= 2
 $shared_folders ||= {}
 $forwarded_ports ||= {}
 $subnet ||= "172.18.8"
+$subnet_ipv6 ||= "fd3c:b398:0698:0756"
 $os ||= "ubuntu1804"
 $network_plugin ||= "flannel"
 # Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni

@@ -83,16 +86,16 @@ $inventory = File.absolute_path($inventory, File.dirname(__FILE__))
 if ! File.exist?(File.join(File.dirname($inventory), "hosts.ini"))
   $vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant", "provisioners", "ansible")
   FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
-  if ! File.exist?(File.join($vagrant_ansible,"inventory"))
-    FileUtils.ln_s($inventory, File.join($vagrant_ansible,"inventory"))
-  end
+  $vagrant_inventory = File.join($vagrant_ansible,"inventory")
+  FileUtils.rm_f($vagrant_inventory)
+  FileUtils.ln_s($inventory, $vagrant_inventory)
 end

 if Vagrant.has_plugin?("vagrant-proxyconf")
-  $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
-  (1..$num_instances).each do |i|
-    $no_proxy += ",#{$subnet}.#{i+100}"
-  end
+  $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
+  (1..$num_instances).each do |i|
+    $no_proxy += ",#{$subnet}.#{i+100}"
+  end
 end

 Vagrant.configure("2") do |config|

@@ -142,6 +145,7 @@ Vagrant.configure("2") do |config|
       vb.gui = $vm_gui
       vb.linked_clone = true
       vb.customize ["modifyvm", :id, "--vram", "8"] # ubuntu defaults to 256 MB which is a waste of precious RAM
+      vb.customize ["modifyvm", :id, "--audio", "none"]
     end

     node.vm.provider :libvirt do |lv|

@@ -176,19 +180,39 @@ Vagrant.configure("2") do |config|
         node.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
       end

-      node.vm.synced_folder ".", "/vagrant", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] , rsync__exclude: ['.git','venv']
-      $shared_folders.each do |src, dst|
-        node.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
-      end
+      if ["rhel7","rhel8"].include? $os
+        # Vagrant synced_folder rsync options cannot be used for RHEL boxes as Rsync package cannot
+        # be installed until the host is registered with a valid Red Hat support subscription
+        node.vm.synced_folder ".", "/vagrant", disabled: false
+        $shared_folders.each do |src, dst|
+          node.vm.synced_folder src, dst
+        end
+      else
+        node.vm.synced_folder ".", "/vagrant", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] , rsync__exclude: ['.git','venv']
+        $shared_folders.each do |src, dst|
+          node.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
+        end
+      end

       ip = "#{$subnet}.#{i+100}"
-      node.vm.network :private_network, ip: ip
+      node.vm.network :private_network, ip: ip,
+        :libvirt__guest_ipv6 => 'yes',
+        :libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
+        :libvirt__ipv6_prefix => "64",
+        :libvirt__forward_mode => "none",
+        :libvirt__dhcp_enabled => false

       # Disable swap for each vm
       node.vm.provision "shell", inline: "swapoff -a"

-      # Disable firewalld on oraclelinux vms
-      if ["oraclelinux","oraclelinux8"].include? $os
+      # ubuntu1804 and ubuntu2004 have IPv6 explicitly disabled. This undoes that.
+      if ["ubuntu1804", "ubuntu2004"].include? $os
+        node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
+        node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
+      end
+
+      # Disable firewalld on oraclelinux/redhat vms
+      if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os
         node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
       end

@@ -229,9 +253,9 @@ Vagrant.configure("2") do |config|
         #ansible.tags = ['download']
         ansible.groups = {
           "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
-          "kube-master" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
-          "kube-node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
-          "k8s-cluster:children" => ["kube-master", "kube-node"],
+          "kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
+          "kube_node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
+          "k8s_cluster:children" => ["kube_control_plane", "kube_node"],
         }
       end
     end
ansible_version.yml

@@ -3,13 +3,30 @@
   gather_facts: false
   become: no
   vars:
-    minimal_ansible_version: 2.8.0
+    minimal_ansible_version: 2.9.0
+    maximal_ansible_version: 2.11.0
     ansible_connection: local
   tasks:
-    - name: "Check ansible version >={{ minimal_ansible_version }}"
+    - name: "Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}"
       assert:
-        msg: "Ansible must be {{ minimal_ansible_version }} or higher"
+        msg: "Ansible must be between {{ minimal_ansible_version }} and {{ maximal_ansible_version }}"
         that:
           - ansible_version.string is version(minimal_ansible_version, ">=")
+          - ansible_version.string is version(maximal_ansible_version, "<")
       tags:
         - check
+
+    - name: "Check that python netaddr is installed"
+      assert:
+        msg: "Python netaddr is not present"
+        that: "'127.0.0.1' | ipaddr"
+      tags:
+        - check
+
+    # CentOS 7 provides too old jinja version
+    - name: "Check that jinja is not too old (install via pip)"
+      assert:
+        msg: "Your Jinja version is too old, install via pip"
+        that: "{% set test %}It works{% endset %}{{ test == 'It works' }}"
+      tags:
+        - check
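A quick pre-flight sketch (not part of the repo) to confirm a local install satisfies the new `2.9.0 <= version < 2.11.0` window enforced above:

```ShellSession
# Both should report an Ansible version inside the 2.9.x-2.10.x range
python3 -c "import ansible; print(ansible.__version__)"
ansible --version | head -n1
```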
cluster.yml (57 changes)

@@ -2,31 +2,21 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml

-- hosts: all
-  gather_facts: false
-  tags: always
-  tasks:
-    - name: "Set up proxy environment"
-      set_fact:
-        proxy_env:
-          http_proxy: "{{ http_proxy | default ('') }}"
-          HTTP_PROXY: "{{ http_proxy | default ('') }}"
-          https_proxy: "{{ https_proxy | default ('') }}"
-          HTTPS_PROXY: "{{ https_proxy | default ('') }}"
-          no_proxy: "{{ no_proxy | default ('') }}"
-          NO_PROXY: "{{ no_proxy | default ('') }}"
-      no_log: true
+- name: Ensure compatibility with old groups
+  import_playbook: legacy_groups.yml

 - hosts: bastion[0]
   gather_facts: False
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

-- hosts: k8s-cluster:etcd
+- hosts: k8s_cluster:etcd
   strategy: linear
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   gather_facts: false
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: bootstrap-os, tags: bootstrap-os}

@@ -35,19 +25,20 @@
   tags: always
   import_playbook: facts.yml

-- hosts: k8s-cluster:etcd
+- hosts: k8s_cluster:etcd
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: kubernetes/preinstall, tags: preinstall }
     - { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) }
     - { role: download, tags: download, when: "not skip_downloads" }
-  environment: "{{ proxy_env }}"

 - hosts: etcd
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - role: etcd

@@ -57,9 +48,10 @@
       etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
   when: not etcd_kubeadm_enabled| default(false)

-- hosts: k8s-cluster
+- hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - role: etcd

@@ -69,50 +61,54 @@
       etcd_events_cluster_setup: false
   when: not etcd_kubeadm_enabled| default(false)

-- hosts: k8s-cluster
+- hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: kubernetes/node, tags: node }
-  environment: "{{ proxy_env }}"

-- hosts: kube-master
+- hosts: kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
-    - { role: kubernetes/master, tags: master }
+    - { role: kubernetes/control-plane, tags: master }
     - { role: kubernetes/client, tags: client }
     - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }

-- hosts: k8s-cluster
+- hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: kubernetes/kubeadm, tags: kubeadm}
     - { role: network_plugin, tags: network }
     - { role: kubernetes/node-label, tags: node-label }

-- hosts: calico-rr
+- hosts: calico_rr
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }

-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
     - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }

-- hosts: kube-master
+- hosts: kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }

@@ -121,17 +117,18 @@
     - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
     - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }

-- hosts: kube-master
+- hosts: kube_control_plane
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: kubernetes-apps, tags: apps }
-  environment: "{{ proxy_env }}"

-- hosts: k8s-cluster
+- hosts: k8s_cluster
   gather_facts: False
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults }
     - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
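Most of the changes in this compare follow the inventory group rename (`kube-master` → `kube_control_plane`, `kube-node` → `kube_node`, `k8s-cluster` → `k8s_cluster`, `calico-rr` → `calico_rr`), with `legacy_groups.yml` mapping the old names onto the new ones for compatibility. A minimal hypothetical inventory using the new names, for orientation:

```ShellSession
# Hypothetical hosts.ini; node names are illustrative
cat > /tmp/hosts.ini <<'EOF'
[kube_control_plane]
node1

[etcd]
node1

[kube_node]
node1
node2

[k8s_cluster:children]
kube_control_plane
kube_node
EOF
```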
contrib/aws_inventory/kubespray-aws-inventory.py

@@ -35,7 +35,7 @@ class SearchEC2Tags(object):
     hosts['_meta'] = { 'hostvars': {} }

     ##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
-    for group in ["kube-master", "kube-node", "etcd"]:
+    for group in ["kube_control_plane", "kube_node", "etcd"]:
       hosts[group] = []
       tag_key = "kubespray-role"
       tag_value = ["*"+group+"*"]

@@ -70,7 +70,7 @@ class SearchEC2Tags(object):
       hosts[group].append(dns_name)
       hosts['_meta']['hostvars'][dns_name] = ansible_host

-    hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
+    hosts['k8s_cluster'] = {'children':['kube_control_plane', 'kube_node']}
     print(json.dumps(hosts, sort_keys=True, indent=2))

 SearchEC2Tags()
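A hedged usage sketch for the updated dynamic inventory; it assumes working AWS credentials and instances tagged with a `kubespray-role` key (`jq` is only used here for display):

```ShellSession
pip install -r contrib/aws_inventory/requirements.txt
python3 contrib/aws_inventory/kubespray-aws-inventory.py | jq '.k8s_cluster'
# expected shape: { "children": ["kube_control_plane", "kube_node"] }
```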
contrib/aws_inventory/requirements.txt (new file)

@@ -0,0 +1 @@
+boto3 # Apache-2.0
contrib/azurerm/README.md

@@ -24,14 +24,14 @@ experience.
 You can enable the use of a Bastion Host by changing **use_bastion** in group_vars/all to **true**. The generated
 templates will then include an additional bastion VM which can then be used to connect to the masters and nodes. The option
-also removes all public IPs from all other VMs.
+also removes all public IPs from all other VMs.

 ## Generating and applying

 To generate and apply the templates, call:

 ```shell
-$ ./apply-rg.sh <resource_group_name>
+./apply-rg.sh <resource_group_name>
 ```

 If you change something in the configuration (e.g. number of nodes) later, you can call this again and Azure will

@@ -42,25 +42,23 @@ take care about creating/modifying whatever is needed.
 If you need to delete all resources from a resource group, simply call:

 ```shell
-$ ./clear-rg.sh <resource_group_name>
+./clear-rg.sh <resource_group_name>
 ```

 **WARNING** this really deletes everything from your resource group, including everything that was later created by you!

-
 ## Generating an inventory for kubespray

 After you have applied the templates, you can generate an inventory with this call:

 ```shell
-$ ./generate-inventory.sh <resource_group_name>
+./generate-inventory.sh <resource_group_name>
 ```

 It will create the file ./inventory which can then be used with kubespray, e.g.:

 ```shell
-$ cd kubespray-root-dir
-$ sudo pip3 install -r requirements.txt
-$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
+cd kubespray-root-dir
+sudo pip3 install -r requirements.txt
+ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
 ```
contrib/azurerm/roles/generate-inventory/templates/inventory.j2

@@ -7,9 +7,9 @@
 {% endif %}
 {% endfor %}

-[kube-master]
+[kube_control_plane]
 {% for vm in vm_list %}
-{% if 'kube-master' in vm.tags.roles %}
+{% if 'kube_control_plane' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}

@@ -21,13 +21,13 @@
 {% endif %}
 {% endfor %}

-[kube-node]
+[kube_node]
 {% for vm in vm_list %}
-{% if 'kube-node' in vm.tags.roles %}
+{% if 'kube_node' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}

-[k8s-cluster:children]
-kube-node
-kube-master
+[k8s_cluster:children]
+kube_node
+kube_control_plane
contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2

@@ -7,9 +7,9 @@
 {% endif %}
 {% endfor %}

-[kube-master]
+[kube_control_plane]
 {% for vm in vm_roles_list %}
-{% if 'kube-master' in vm.tags.roles %}
+{% if 'kube_control_plane' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}

@@ -21,14 +21,14 @@
 {% endif %}
 {% endfor %}

-[kube-node]
+[kube_node]
 {% for vm in vm_roles_list %}
-{% if 'kube-node' in vm.tags.roles %}
+{% if 'kube_node' in vm.tags.roles %}
 {{ vm.name }}
 {% endif %}
 {% endfor %}

-[k8s-cluster:children]
-kube-node
-kube-master
+[k8s_cluster:children]
+kube_node
+kube_control_plane

||||
|
@@ -144,7 +144,7 @@
  "[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]"
],
"tags": {
-  "roles": "kube-master,etcd"
+  "roles": "kube_control_plane,etcd"
},
"apiVersion": "{{apiVersion}}",
"properties": {
@@ -61,7 +61,7 @@
  "[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]"
],
"tags": {
-  "roles": "kube-node"
+  "roles": "kube_node"
},
"apiVersion": "{{apiVersion}}",
"properties": {
@@ -112,4 +112,4 @@
} {% if not loop.last %},{% endif %}
{% endfor %}
]
}
@@ -6,6 +6,7 @@ to serve as Kubernetes "nodes", which in turn will run
called DIND (Docker-IN-Docker).

The playbook has two roles:

- dind-host: creates the "nodes" as containers in localhost, with
  appropriate settings for DIND (privileged, volume mapping for dind
  storage, etc).
@@ -27,7 +28,7 @@ See below for a complete successful run:

1. Create the node containers

-~~~~
+```shell
# From the kubespray root dir
cd contrib/dind
pip install -r requirements.txt
@@ -36,15 +37,15 @@ ansible-playbook -i hosts dind-cluster.yaml

# Back to kubespray root
cd ../..
-~~~~
+```

NOTE: if the playbook run fails with an error message like the one below,
you may need to explicitly set `ansible_python_interpreter`;
see the `./hosts` file for an example expanded localhost entry.

-~~~
+```shell
failed: [localhost] (item=kube-node1) => {"changed": false, "item": "kube-node1", "msg": "Failed to import docker or docker-py - No module named requests.exceptions. Try `pip install docker` or `pip install docker-py` (Python 2.6)"}
-~~~
+```

2. Customize kubespray-dind.yaml

@@ -52,33 +53,33 @@ Note that there's coupling between above created node containers
and `kubespray-dind.yaml` settings, in particular regarding selected `node_distro`
(as set in `group_vars/all/all.yaml`), and docker settings.

-~~~
+```shell
$EDITOR contrib/dind/kubespray-dind.yaml
-~~~
+```

3. Prepare the inventory and run the playbook

-~~~
+```shell
INVENTORY_DIR=inventory/local-dind
mkdir -p ${INVENTORY_DIR}
rm -f ${INVENTORY_DIR}/hosts.ini
CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh

ansible-playbook --become -e ansible_ssh_user=debian -i ${INVENTORY_DIR}/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml
-~~~
+```

NOTE: You could also test other distros without editing files by
passing `--extra-vars` as per the command line below,
replacing `DISTRO` by either `debian`, `ubuntu`, `centos`, `fedora`:

-~~~
+```shell
cd contrib/dind
ansible-playbook -i hosts dind-cluster.yaml --extra-vars node_distro=DISTRO

cd ../..
CONFIG_FILE=inventory/local-dind/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
ansible-playbook --become -e ansible_ssh_user=DISTRO -i inventory/local-dind/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml --extra-vars bootstrap_os=DISTRO
-~~~
+```

## Resulting deployment

@@ -89,7 +90,7 @@ from the host where you ran kubespray playbooks.

Running from an Ubuntu Xenial host:

-~~~
+```shell
$ uname -a
Linux ip-xx-xx-xx-xx 4.4.0-1069-aws #79-Ubuntu SMP Mon Sep 24
15:01:41 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
@@ -149,14 +150,14 @@ kube-system weave-net-xr46t 2/2 Running 0

$ docker exec kube-node1 curl -s http://localhost:31081/api/v1/connectivity_check
{"Message":"All 10 pods successfully reported back to the server","Absent":null,"Outdated":null}
-~~~
+```

## Using ./run-test-distros.sh

You can use `./run-test-distros.sh` to run a set of tests via DIND;
an excerpt from this script, to give an idea:

-~~~
+```shell
# The SPEC file(s) must have two arrays as e.g.
# DISTROS=(debian centos)
# EXTRAS=(
@@ -169,7 +170,7 @@ and excerpt from this script, to get an idea:
#
# Each $EXTRAS element will be whitespace split, and passed as --extra-vars
# to main kubespray ansible-playbook run.
-~~~
+```
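For instance, a SPEC file in the format the excerpt describes might look like this (a hypothetical example; `DISTROS` and `EXTRAS` are the arrays named above, the values are illustrative):

```shell
# my_spec.env -- hypothetical SPEC file for ./run-test-distros.sh
DISTROS=(debian centos)
EXTRAS=(
  'kube_network_plugin=calico'
  'kube_network_plugin=weave'
)
```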

See e.g. `test-some_distros-most_CNIs.env` and
`test-some_distros-kube_router_combo.env` in particular for a richer
@@ -46,7 +46,7 @@ test_distro() {
  pass_or_fail "$prefix: netcheck" || return 1
}

-NODES=($(egrep ^kube-node hosts))
+NODES=($(egrep ^kube_node hosts))
NETCHECKER_HOST=localhost

: ${OUTPUT_DIR:=./out}
@@ -44,8 +44,8 @@ import re
import subprocess
import sys

-ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster',
-         'calico-rr']
+ROLES = ['all', 'kube_control_plane', 'kube_node', 'etcd', 'k8s_cluster',
+         'calico_rr']
PROTECTED_NAMES = ROLES
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
                      'load']
@@ -63,10 +63,12 @@ def get_var_as_bool(name, default):


CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.yaml")
-KUBE_MASTERS = int(os.environ.get("KUBE_MASTERS_MASTERS", 2))
+# Remove the reference of KUBE_MASTERS after some deprecation cycles.
+KUBE_CONTROL_HOSTS = int(os.environ.get("KUBE_CONTROL_HOSTS",
+                                        os.environ.get("KUBE_MASTERS", 2)))
# Reconfigures cluster distribution at scale
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
-MASSIVE_SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 200))
+MASSIVE_SCALE_THRESHOLD = int(os.environ.get("MASSIVE_SCALE_THRESHOLD", 200))

DEBUG = get_var_as_bool("DEBUG", True)
HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
@@ -102,10 +104,11 @@ class KubesprayInventory(object):
            etcd_hosts_count = 3 if len(self.hosts.keys()) >= 3 else 1
            self.set_etcd(list(self.hosts.keys())[:etcd_hosts_count])
            if len(self.hosts) >= SCALE_THRESHOLD:
-                self.set_kube_master(list(self.hosts.keys())[
-                    etcd_hosts_count:(etcd_hosts_count + KUBE_MASTERS)])
+                self.set_kube_control_plane(list(self.hosts.keys())[
+                    etcd_hosts_count:(etcd_hosts_count + KUBE_CONTROL_HOSTS)])
            else:
-                self.set_kube_master(list(self.hosts.keys())[:KUBE_MASTERS])
+                self.set_kube_control_plane(
+                    list(self.hosts.keys())[:KUBE_CONTROL_HOSTS])
            self.set_kube_node(self.hosts.keys())
            if len(self.hosts) >= SCALE_THRESHOLD:
                self.set_calico_rr(list(self.hosts.keys())[:etcd_hosts_count])
@@ -266,7 +269,7 @@ class KubesprayInventory(object):

    def purge_invalid_hosts(self, hostnames, protected_names=[]):
        for role in self.yaml_config['all']['children']:
-            if role != 'k8s-cluster' and self.yaml_config['all']['children'][role]['hosts']:  # noqa
+            if role != 'k8s_cluster' and self.yaml_config['all']['children'][role]['hosts']:  # noqa
                all_hosts = self.yaml_config['all']['children'][role]['hosts'].copy()  # noqa
                for host in all_hosts.keys():
                    if host not in hostnames and host not in protected_names:
@@ -287,52 +290,54 @@ class KubesprayInventory(object):
            if self.yaml_config['all']['hosts'] is None:
                self.yaml_config['all']['hosts'] = {host: None}
            self.yaml_config['all']['hosts'][host] = opts
-        elif group != 'k8s-cluster:children':
+        elif group != 'k8s_cluster:children':
            if self.yaml_config['all']['children'][group]['hosts'] is None:
                self.yaml_config['all']['children'][group]['hosts'] = {
                    host: None}
            else:
                self.yaml_config['all']['children'][group]['hosts'][host] = None  # noqa

-    def set_kube_master(self, hosts):
+    def set_kube_control_plane(self, hosts):
        for host in hosts:
-            self.add_host_to_group('kube-master', host)
+            self.add_host_to_group('kube_control_plane', host)

    def set_all(self, hosts):
        for host, opts in hosts.items():
            self.add_host_to_group('all', host, opts)

    def set_k8s_cluster(self):
-        k8s_cluster = {'children': {'kube-master': None, 'kube-node': None}}
-        self.yaml_config['all']['children']['k8s-cluster'] = k8s_cluster
+        k8s_cluster = {'children': {'kube_control_plane': None,
+                                    'kube_node': None}}
+        self.yaml_config['all']['children']['k8s_cluster'] = k8s_cluster

    def set_calico_rr(self, hosts):
        for host in hosts:
-            if host in self.yaml_config['all']['children']['kube-master']:
-                self.debug("Not adding {0} to calico-rr group because it "
-                           "conflicts with kube-master group".format(host))
+            if host in self.yaml_config['all']['children']['kube_control_plane']:  # noqa
+                self.debug("Not adding {0} to calico_rr group because it "
+                           "conflicts with kube_control_plane "
+                           "group".format(host))
                continue
-            if host in self.yaml_config['all']['children']['kube-node']:
-                self.debug("Not adding {0} to calico-rr group because it "
-                           "conflicts with kube-node group".format(host))
+            if host in self.yaml_config['all']['children']['kube_node']:
+                self.debug("Not adding {0} to calico_rr group because it "
+                           "conflicts with kube_node group".format(host))
                continue
-            self.add_host_to_group('calico-rr', host)
+            self.add_host_to_group('calico_rr', host)

    def set_kube_node(self, hosts):
        for host in hosts:
            if len(self.yaml_config['all']['hosts']) >= SCALE_THRESHOLD:
                if host in self.yaml_config['all']['children']['etcd']['hosts']:  # noqa
-                    self.debug("Not adding {0} to kube-node group because of "
+                    self.debug("Not adding {0} to kube_node group because of "
                               "scale deployment and host is in etcd "
                               "group.".format(host))
                    continue
            if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD:  # noqa
-                if host in self.yaml_config['all']['children']['kube-master']['hosts']:  # noqa
-                    self.debug("Not adding {0} to kube-node group because of "
-                               "scale deployment and host is in kube-master "
-                               "group.".format(host))
+                if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']:  # noqa
+                    self.debug("Not adding {0} to kube_node group because of "
+                               "scale deployment and host is in "
+                               "kube_control_plane group.".format(host))
                    continue
-            self.add_host_to_group('kube-node', host)
+            self.add_host_to_group('kube_node', host)

    def set_etcd(self, hosts):
        for host in hosts:
@@ -402,8 +407,9 @@ Configurable env vars:
    DEBUG                   Enable debug printing. Default: True
    CONFIG_FILE             File to write config to Default: ./inventory/sample/hosts.yaml
    HOST_PREFIX             Host prefix for generated hosts. Default: node
+    KUBE_CONTROL_HOSTS      Set the number of kube-control-planes. Default: 2
    SCALE_THRESHOLD         Separate ETCD role if # of nodes >= 50
-    MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
+    MASSIVE_SCALE_THRESHOLD Separate K8s control-plane and ETCD if # of nodes >= 200
'''  # noqa
    print(help_text)
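As a usage sketch of the renamed knob (hypothetical IP addresses; `CONFIG_FILE` and `KUBE_CONTROL_HOSTS` are the environment variables documented in the help text above):

```shell
# Build a 5-host inventory with 3 control-plane hosts
CONFIG_FILE=inventory/mycluster/hosts.yaml KUBE_CONTROL_HOSTS=3 \
  python3 contrib/inventory_builder/inventory.py 10.0.0.1 10.0.0.2 10.0.0.3 10.0.0.4 10.0.0.5
```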
@@ -13,8 +13,8 @@
# under the License.

import inventory
-import mock
import unittest
+from unittest import mock

from collections import OrderedDict
import sys
@@ -51,7 +51,7 @@ class TestInventory(unittest.TestCase):
        groups = ['group1', 'group2']
        self.inv.ensure_required_groups(groups)
        for group in groups:
-            self.assertTrue(group in self.inv.yaml_config['all']['children'])
+            self.assertIn(group, self.inv.yaml_config['all']['children'])

    def test_get_host_id(self):
        hostnames = ['node99', 'no99de01', '01node01', 'node1.domain',
@@ -209,8 +209,8 @@ class TestInventory(unittest.TestCase):
            ('doesnotbelong2', {'whateveropts=ilike'})])
        self.inv.yaml_config['all']['hosts'] = existing_hosts
        self.inv.purge_invalid_hosts(proper_hostnames)
-        self.assertTrue(
-            bad_host not in self.inv.yaml_config['all']['hosts'].keys())
+        self.assertNotIn(
+            bad_host, self.inv.yaml_config['all']['hosts'].keys())

    def test_add_host_to_group(self):
        group = 'etcd'
@@ -222,13 +222,13 @@ class TestInventory(unittest.TestCase):
            self.inv.yaml_config['all']['children'][group]['hosts'].get(host),
            None)

-    def test_set_kube_master(self):
-        group = 'kube-master'
+    def test_set_kube_control_plane(self):
+        group = 'kube_control_plane'
        host = 'node1'

-        self.inv.set_kube_master([host])
-        self.assertTrue(
-            host in self.inv.yaml_config['all']['children'][group]['hosts'])
+        self.inv.set_kube_control_plane([host])
+        self.assertIn(
+            host, self.inv.yaml_config['all']['children'][group]['hosts'])

    def test_set_all(self):
        hosts = OrderedDict([
@@ -241,30 +241,30 @@ class TestInventory(unittest.TestCase):
            self.inv.yaml_config['all']['hosts'].get(host), opt)

    def test_set_k8s_cluster(self):
-        group = 'k8s-cluster'
-        expected_hosts = ['kube-node', 'kube-master']
+        group = 'k8s_cluster'
+        expected_hosts = ['kube_node', 'kube_control_plane']

        self.inv.set_k8s_cluster()
        for host in expected_hosts:
-            self.assertTrue(
-                host in
+            self.assertIn(
+                host,
                self.inv.yaml_config['all']['children'][group]['children'])

    def test_set_kube_node(self):
-        group = 'kube-node'
+        group = 'kube_node'
        host = 'node1'

        self.inv.set_kube_node([host])
-        self.assertTrue(
-            host in self.inv.yaml_config['all']['children'][group]['hosts'])
+        self.assertIn(
+            host, self.inv.yaml_config['all']['children'][group]['hosts'])

    def test_set_etcd(self):
        group = 'etcd'
        host = 'node1'

        self.inv.set_etcd([host])
-        self.assertTrue(
-            host in self.inv.yaml_config['all']['children'][group]['hosts'])
+        self.assertIn(
+            host, self.inv.yaml_config['all']['children'][group]['hosts'])

    def test_scale_scenario_one(self):
        num_nodes = 50
@@ -275,12 +275,12 @@ class TestInventory(unittest.TestCase):

        self.inv.set_all(hosts)
        self.inv.set_etcd(list(hosts.keys())[0:3])
-        self.inv.set_kube_master(list(hosts.keys())[0:2])
+        self.inv.set_kube_control_plane(list(hosts.keys())[0:2])
        self.inv.set_kube_node(hosts.keys())
        for h in range(3):
            self.assertFalse(
                list(hosts.keys())[h] in
-                self.inv.yaml_config['all']['children']['kube-node']['hosts'])
+                self.inv.yaml_config['all']['children']['kube_node']['hosts'])

    def test_scale_scenario_two(self):
        num_nodes = 500
@@ -291,12 +291,12 @@ class TestInventory(unittest.TestCase):

        self.inv.set_all(hosts)
        self.inv.set_etcd(list(hosts.keys())[0:3])
-        self.inv.set_kube_master(list(hosts.keys())[3:5])
+        self.inv.set_kube_control_plane(list(hosts.keys())[3:5])
        self.inv.set_kube_node(hosts.keys())
        for h in range(5):
            self.assertFalse(
                list(hosts.keys())[h] in
-                self.inv.yaml_config['all']['children']['kube-node']['hosts'])
+                self.inv.yaml_config['all']['children']['kube_node']['hosts'])

    def test_range2ips_range(self):
        changed_hosts = ['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8']
@@ -5,7 +5,7 @@ deployment on VMs.

This playbook does not create Virtual Machines, nor does it run Kubespray itself.

-### User creation
+## User creation

If you want to create a user for running Kubespray deployment, you should specify
both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`.
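A hypothetical invocation passing both variables as extra vars (the playbook path and key path are illustrative, not taken from this changeset):

```shell
ansible-playbook -i hosts kvm-setup.yml \
  -e k8s_deployment_user=kubespray \
  -e k8s_deployment_user_pkey_path=/tmp/ssh_key
```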
@@ -8,19 +8,19 @@ In the same directory of this ReadMe file you should find a file named `inventor

Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/sample/k8s_gfs_inventory`. Make sure that the settings on `inventory/sample/group_vars/all.yml` make sense with your deployment. Then change to the kubespray root folder, and execute (supposing that the machines are all using ubuntu):

-```
+```shell
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./cluster.yml
```

This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, from the same directory execute:

-```
+```shell
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
```

If your machines are not using Ubuntu, you need to change the `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines are using one OS and your GlusterFS a different one, you can instead specify the `ansible_ssh_user=<correct-user>` variable in the inventory file that you just created, for each machine/VM:

-```
+```shell
k8s-master-1 ansible_ssh_host=192.168.0.147 ip=192.168.0.147 ansible_ssh_user=core
k8s-master-node-1 ansible_ssh_host=192.168.0.148 ip=192.168.0.148 ansible_ssh_user=core
k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_user=core
@@ -30,7 +30,7 @@ k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_us

First step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:

-```
+```ini
cluster_name = "cluster1"
number_of_k8s_masters = "1"
number_of_k8s_masters_no_floating_ip = "2"
@@ -39,7 +39,7 @@ number_of_k8s_nodes = "0"
public_key_path = "~/.ssh/my-desired-key.pub"
image = "Ubuntu 16.04"
ssh_user = "ubuntu"
flavor_k8s_node = "node-flavor-id-in-your-openstack"
flavor_k8s_master = "master-flavor-id-in-your-openstack"
network_name = "k8s-network"
floatingip_pool = "net_external"
@@ -54,7 +54,7 @@ ssh_user_gfs = "ubuntu"

As explained in the general terraform/openstack guide, you need to source your OpenStack credentials file, add your ssh-key to the ssh-agent and setup environment variables for terraform:

-```
+```shell
$ source ~/.stackrc
$ eval $(ssh-agent -s)
$ ssh-add ~/.ssh/my-desired-key
@@ -67,7 +67,7 @@ $ echo Setting up Terraform creds && \

Then, standing on the kubespray directory (root base of the Git checkout), issue the following terraform command to create the VMs for the cluster:

-```
+```shell
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
```

@@ -75,18 +75,18 @@ This will create both your Kubernetes and Gluster VMs. Make sure that the ansibl

Then, provision your Kubernetes (kubespray) cluster with the following ansible call:

-```
+```shell
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
```

Finally, provision the glusterfs nodes and add the Persistent Volume setup for GlusterFS in Kubernetes through the following ansible call:

-```
+```shell
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
```

If you need to destroy the cluster, you can run:

-```
+```shell
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
```
@@ -15,10 +15,10 @@
  roles:
    - { role: glusterfs/server }

-- hosts: k8s-cluster
+- hosts: k8s_cluster
  roles:
    - { role: glusterfs/client }

-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
  roles:
    - { role: kubernetes-pv }
@@ -14,7 +14,7 @@
# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9

-# [kube-master]
+# [kube_control_plane]
# node1
# node2

@@ -23,16 +23,16 @@
# node2
# node3

-# [kube-node]
+# [kube_node]
# node2
# node3
# node4
# node5
# node6

-# [k8s-cluster:children]
-# kube-node
-# kube-master
+# [k8s_cluster:children]
+# kube_node
+# kube_control_plane

# [gfs-cluster]
# gfs_node1
@@ -8,7 +8,7 @@ Installs and configures GlusterFS on Linux.

For GlusterFS to connect between servers, TCP ports `24007`, `24008`, and `24009`/`49152`+ (that port, plus an additional incremented port for each additional server in the cluster; the latter if GlusterFS is version 3.4+), and TCP/UDP port `111` must be open. You can open these using whatever firewall you wish (this can easily be configured using the `geerlingguy.firewall` role).

-This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/gluster_volume_module.html) module to ease the management of Gluster volumes.
+This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/ansible/latest/collections/gluster/gluster/gluster_volume_module.html) module to ease the management of Gluster volumes.

## Role Variables
@@ -8,7 +8,7 @@
    - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
    - { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
  register: gluster_pv
-  when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
+  when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined

- name: Kubernetes Apps | Set GlusterFS endpoint and PV
  kube:
@@ -19,4 +19,4 @@
    filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
    state: "{{ item.changed | ternary('latest','present') }}"
  with_items: "{{ gluster_pv.results }}"
-  when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined
+  when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
@@ -1,17 +1,26 @@
# Deploy Heketi/Glusterfs into Kubespray/Kubernetes

This playbook aims to automate [this](https://github.com/heketi/heketi/blob/master/docs/admin/install-kubernetes.md) tutorial. It deploys heketi/glusterfs into kubernetes and sets up a storageclass.

+## Important notice
+
+> Due to resource limits on the current project maintainers and general lack of contributions we are considering placing Heketi into a [near-maintenance mode](https://github.com/heketi/heketi#important-notice)
+
## Client Setup

Heketi provides a CLI that gives users a means to administer the deployment and configuration of GlusterFS in Kubernetes. [Download and install the heketi-cli](https://github.com/heketi/heketi/releases) on your client machine.

## Install

Copy the inventory.yml.sample over to inventory/sample/k8s_heketi_inventory.yml and change it according to your setup.
-```
+
+```shell
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi.yml
```

## Tear down
-```
+
+```shell
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml
```
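Once deployed, a quick sanity check with the CLI might look like this (a sketch; the endpoint is illustrative, and the admin secret comes from `heketi_admin_key` in your inventory):

```shell
heketi-cli --server http://<heketi-endpoint>:8080 --user admin --secret <heketi_admin_key> cluster list
```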
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
- hosts: kube-master[0]
|
||||
- hosts: kube_control_plane[0]
|
||||
roles:
|
||||
- { role: tear-down }
|
||||
|
||||
|
@@ -3,7 +3,7 @@
  roles:
    - { role: prepare }

-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
  tags:
    - "provision"
  roles:
@@ -3,17 +3,17 @@ all:
    heketi_admin_key: "11elfeinhundertundelf"
    heketi_user_key: "!!einseinseins"
  children:
-    k8s-cluster:
+    k8s_cluster:
      vars:
        kubelet_fail_swap_on: false
      children:
-        kube-master:
+        kube_control_plane:
          hosts:
            node1:
        etcd:
          hosts:
            node2:
-        kube-node:
+        kube_node:
          hosts: &kube_nodes
            node1:
            node2:
43 contrib/offline/README.md Normal file
@@ -0,0 +1,43 @@
# Offline deployment

## manage-offline-container-images.sh

Container image collecting script for offline deployment

This script has two features:
(1) Get container images from an environment which is deployed online.
(2) Deploy a local container registry and register the container images to the registry.

Step (1) should be done on an online site as a preparation; we then bring the collected images
to the target offline environment.
Then we run step (2) to register the images to the local registry.

Step (1) can be run with:

```shell
manage-offline-container-images.sh create
```

Step (2) can be run with:

```shell
manage-offline-container-images.sh register
```

## generate_list.sh

This script generates the list of downloaded files and the list of container images from the `roles/download/defaults/main.yml` file.

Running this script generates three files: all downloaded file URLs in files.list, all container images in images.list, and all component versions in generate.sh.

```shell
bash generate_list.sh
tree temp
temp
├── files.list
├── generate.sh
└── images.list
0 directories, 3 files
```

In some cases you may want to update a component version; you can edit the `generate.sh` file, then run `bash generate.sh | grep 'https' > files.list` to update files.list, or run `bash generate.sh | grep -v 'https' > images.list` to update images.list.
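A typical end-to-end flow might look like the following (hypothetical host name; `container-images.tar.gz` is the archive the script produces):

```shell
# On the online host
./manage-offline-container-images.sh create

# Copy the archive (and this script directory) to the offline host
scp container-images.tar.gz offline-host:/tmp/

# On the offline host, from the directory holding the script and archive
./manage-offline-container-images.sh register
```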
1 contrib/offline/docker-daemon.json Normal file
@@ -0,0 +1 @@
{ "insecure-registries":["HOSTNAME:5000"] }
57 contrib/offline/generate_list.sh Normal file
@@ -0,0 +1,57 @@
#!/bin/bash
set -eo pipefail

CURRENT_DIR=$(cd $(dirname $0); pwd)
TEMP_DIR="${CURRENT_DIR}/temp"
REPO_ROOT_DIR="${CURRENT_DIR%/contrib/offline}"

: ${IMAGE_ARCH:="amd64"}
: ${ANSIBLE_SYSTEM:="linux"}
: ${ANSIBLE_ARCHITECTURE:="x86_64"}
: ${DOWNLOAD_YML:="roles/download/defaults/main.yml"}
: ${KUBE_VERSION_YAML:="roles/kubespray-defaults/defaults/main.yaml"}

mkdir -p ${TEMP_DIR}

# ARCH used in convert {%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%} to {{arch}}
if [ "${IMAGE_ARCH}" != "amd64" ]; then ARCH="${IMAGE_ARCH}"; fi

cat > ${TEMP_DIR}/generate.sh << EOF
arch=${ARCH}
image_arch=${IMAGE_ARCH}
ansible_system=${ANSIBLE_SYSTEM}
ansible_architecture=${ANSIBLE_ARCHITECTURE}
EOF

# generate all component versions from $DOWNLOAD_YML
grep 'kube_version:' ${REPO_ROOT_DIR}/${KUBE_VERSION_YAML} \
    | sed 's/: /=/g' >> ${TEMP_DIR}/generate.sh
grep '_version:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
    | sed 's/: /=/g;s/{{/${/g;s/}}/}/g' | tr -d ' ' >> ${TEMP_DIR}/generate.sh
sed -i 's/kube_major_version=.*/kube_major_version=${kube_version%.*}/g' ${TEMP_DIR}/generate.sh
sed -i 's/crictl_version=.*/crictl_version=${kube_version%.*}.0/g' ${TEMP_DIR}/generate.sh

# generate all download file urls
grep 'download_url:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
    | sed 's/: /=/g;s/ //g;s/{{/${/g;s/}}/}/g;s/|lower//g;s/^.*_url=/echo /g' >> ${TEMP_DIR}/generate.sh

# generate the images list
grep -E '_repo:|_tag:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
    | sed "s#{%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%}#{{arch}}#g" \
    | sed 's/: /=/g;s/{{/${/g;s/}}/}/g' | tr -d ' ' >> ${TEMP_DIR}/generate.sh
sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
    | sed -n "s/repo: //p;s/tag: //p" | tr -d ' ' | sed 's/{{/${/g;s/}}/}/g' \
    | sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/^/echo /g' >> ${TEMP_DIR}/generate.sh

# special handling for https://github.com/kubernetes-sigs/kubespray/pull/7570
sed -i 's#^coredns_image_repo=.*#coredns_image_repo=${kube_image_repo}$(if printf "%s\\n%s\\n" v1.21 ${kube_version%.*} | sort --check=quiet --version-sort; then echo -n /coredns/coredns;else echo -n /coredns; fi)#' ${TEMP_DIR}/generate.sh
sed -i 's#^coredns_image_tag=.*#coredns_image_tag=$(if printf "%s\\n%s\\n" v1.21 ${kube_version%.*} | sort --check=quiet --version-sort; then echo -n ${coredns_version};else echo -n ${coredns_version/v/}; fi)#' ${TEMP_DIR}/generate.sh

# add kube-* images to the images list
KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"
echo "${KUBE_IMAGES}" | tr ' ' '\n' | xargs -L1 -I {} \
    echo 'echo ${kube_image_repo}/{}:${kube_version}' >> ${TEMP_DIR}/generate.sh

# print files.list and images.list
bash ${TEMP_DIR}/generate.sh | grep 'https' | sort > ${TEMP_DIR}/files.list
bash ${TEMP_DIR}/generate.sh | grep -v 'https' | sort > ${TEMP_DIR}/images.list
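Each `: ${VAR:=default}` line above makes a knob overridable from the environment; for example (an illustrative invocation):

```shell
# Generate lists for an arm64 target instead of the default amd64
IMAGE_ARCH=arm64 ANSIBLE_ARCHITECTURE=aarch64 bash contrib/offline/generate_list.sh
```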
170 contrib/offline/manage-offline-container-images.sh Executable file
@@ -0,0 +1,170 @@
#!/bin/bash

OPTION=$1
CURRENT_DIR=$(cd $(dirname $0); pwd)
TEMP_DIR="${CURRENT_DIR}/temp"

IMAGE_TAR_FILE="${CURRENT_DIR}/container-images.tar.gz"
IMAGE_DIR="${CURRENT_DIR}/container-images"
IMAGE_LIST="${IMAGE_DIR}/container-images.txt"
RETRY_COUNT=5

function create_container_image_tar() {
    set -e

    IMAGES=$(kubectl describe pods --all-namespaces | grep " Image:" | awk '{print $2}' | sort | uniq)
    # NOTE: etcd and pause cannot be seen as pods.
    # The pause image is used for --pod-infra-container-image option of kubelet.
    EXT_IMAGES=$(kubectl cluster-info dump | egrep "quay.io/coreos/etcd:|k8s.gcr.io/pause:" | sed s@\"@@g)
    IMAGES="${IMAGES} ${EXT_IMAGES}"

    rm -f ${IMAGE_TAR_FILE}
    rm -rf ${IMAGE_DIR}
    mkdir ${IMAGE_DIR}
    cd ${IMAGE_DIR}

    sudo docker pull registry:latest
    sudo docker save -o registry-latest.tar registry:latest

    for image in ${IMAGES}
    do
        FILE_NAME="$(echo ${image} | sed s@"/"@"-"@g | sed s/":"/"-"/g)".tar
        set +e
        for step in $(seq 1 ${RETRY_COUNT})
        do
            sudo docker pull ${image}
            if [ $? -eq 0 ]; then
                break
            fi
            echo "Failed to pull ${image} at step ${step}"
            if [ ${step} -eq ${RETRY_COUNT} ]; then
                exit 1
            fi
        done
        set -e
        sudo docker save -o ${FILE_NAME} ${image}

        # NOTE: This removes the following repo parts from each image
        # so that these parts can be replaced with Kubespray's repo options:
        # - kube_image_repo: "k8s.gcr.io"
        # - gcr_image_repo: "gcr.io"
        # - docker_image_repo: "docker.io"
        # - quay_image_repo: "quay.io"
        FIRST_PART=$(echo ${image} | awk -F"/" '{print $1}')
        if [ "${FIRST_PART}" = "k8s.gcr.io" ] ||
           [ "${FIRST_PART}" = "gcr.io" ] ||
           [ "${FIRST_PART}" = "docker.io" ] ||
           [ "${FIRST_PART}" = "quay.io" ]; then
            image=$(echo ${image} | sed s@"${FIRST_PART}/"@@)
        fi
        echo "${FILE_NAME} ${image}" >> ${IMAGE_LIST}
    done

    cd ..
    sudo chown ${USER} ${IMAGE_DIR}/*
    tar -zcvf ${IMAGE_TAR_FILE} ./container-images
    rm -rf ${IMAGE_DIR}

    echo ""
    echo "${IMAGE_TAR_FILE} is created to contain your container images."
    echo "Please keep this file and bring it to your offline environment."
}

function register_container_images() {
    if [ ! -f ${IMAGE_TAR_FILE} ]; then
        echo "${IMAGE_TAR_FILE} should exist."
        exit 1
    fi
    if [ ! -d ${TEMP_DIR} ]; then
        mkdir ${TEMP_DIR}
    fi

    # To avoid "http: server gave http response to https client" error.
    LOCALHOST_NAME=$(hostname)
    if [ -d /etc/docker/ ]; then
        set -e
        # Ubuntu18.04, RHEL7/CentOS7
        cp ${CURRENT_DIR}/docker-daemon.json ${TEMP_DIR}/docker-daemon.json
        sed -i s@"HOSTNAME"@"${LOCALHOST_NAME}"@ ${TEMP_DIR}/docker-daemon.json
        sudo cp ${TEMP_DIR}/docker-daemon.json /etc/docker/daemon.json
    elif [ -d /etc/containers/ ]; then
        set -e
        # RHEL8/CentOS8
        cp ${CURRENT_DIR}/registries.conf ${TEMP_DIR}/registries.conf
        sed -i s@"HOSTNAME"@"${LOCALHOST_NAME}"@ ${TEMP_DIR}/registries.conf
        sudo cp ${TEMP_DIR}/registries.conf /etc/containers/registries.conf
    else
        echo "docker package(docker-ce, etc.) should be installed"
        exit 1
    fi

    tar -zxvf ${IMAGE_TAR_FILE}
    sudo docker load -i ${IMAGE_DIR}/registry-latest.tar
    set +e
    sudo docker container inspect registry >/dev/null 2>&1
    if [ $? -ne 0 ]; then
        sudo docker run --restart=always -d -p 5000:5000 --name registry registry:latest
    fi
    set -e

    while read -r line; do
        file_name=$(echo ${line} | awk '{print $1}')
        raw_image=$(echo ${line} | awk '{print $2}')
        new_image="${LOCALHOST_NAME}:5000/${raw_image}"
        org_image=$(sudo docker load -i ${IMAGE_DIR}/${file_name} | head -n1 | awk '{print $3}')
        image_id=$(sudo docker image inspect ${org_image} | grep "\"Id\":" | awk -F: '{print $3}'| sed s/'\",'//)
        if [ -z "${file_name}" ]; then
            echo "Failed to get file_name for line ${line}"
            exit 1
        fi
        if [ -z "${raw_image}" ]; then
            echo "Failed to get raw_image for line ${line}"
            exit 1
        fi
        if [ -z "${org_image}" ]; then
            echo "Failed to get org_image for line ${line}"
            exit 1
        fi
        if [ -z "${image_id}" ]; then
            echo "Failed to get image_id for file ${file_name}"
            exit 1
        fi
        sudo docker load -i ${IMAGE_DIR}/${file_name}
        sudo docker tag ${image_id} ${new_image}
        sudo docker push ${new_image}
    done <<< "$(cat ${IMAGE_LIST})"

    echo "Succeeded in registering container images to the local registry."
    echo "Please specify ${LOCALHOST_NAME}:5000 for the following options in your inventory:"
    echo "- kube_image_repo"
    echo "- gcr_image_repo"
    echo "- docker_image_repo"
    echo "- quay_image_repo"
}

if [ "${OPTION}" == "create" ]; then
    create_container_image_tar
elif [ "${OPTION}" == "register" ]; then
    register_container_images
else
    echo "This script has two features:"
    echo "(1) Get container images from an environment which is deployed online."
    echo "(2) Deploy a local container registry and register the container images to the registry."
    echo ""
    echo "Step (1) should be done on an online site as a preparation; we then bring"
    echo "the collected images to the target offline environment."
    echo "Then we run step (2) to register the images to the local registry."
    echo ""
    echo "${IMAGE_TAR_FILE} is created to contain your container images."
    echo "Please keep this file and bring it to your offline environment."
    echo ""
    echo "Step (1) can be run with:"
    echo "  $ ./manage-offline-container-images.sh create"
    echo ""
    echo "Step (2) can be run with:"
    echo "  $ ./manage-offline-container-images.sh register"
    echo ""
    echo "Please specify 'create' or 'register'."
    echo ""
    exit 1
fi
8 contrib/offline/registries.conf Normal file
@@ -0,0 +1,8 @@
[registries.search]
registries = ['registry.access.redhat.com', 'registry.redhat.io', 'docker.io']

[registries.insecure]
registries = ['HOSTNAME:5000']

[registries.block]
registries = []
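After the register step, one way to verify the local registry is serving the images (a sketch; the script substitutes `HOSTNAME` with the actual host name, and `/v2/_catalog` is the standard Docker Registry HTTP API endpoint):

```shell
curl -s http://$(hostname):5000/v2/_catalog
```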
4 contrib/os-services/os-services.yml Normal file
@@ -0,0 +1,4 @@
---
- hosts: all
  roles:
    - { role: prepare }
2 contrib/os-services/roles/prepare/defaults/main.yml Normal file
@@ -0,0 +1,2 @@
---
disable_service_firewall: false
23 contrib/os-services/roles/prepare/tasks/main.yml Normal file
@@ -0,0 +1,23 @@
---
- block:
    - name: List services
      service_facts:

    - name: Disable service firewalld
      systemd:
        name: firewalld
        state: stopped
        enabled: no
      when:
        "'firewalld.service' in services"

    - name: Disable service ufw
      systemd:
        name: ufw
        state: stopped
        enabled: no
      when:
        "'ufw.service' in services"

  when:
    - disable_service_firewall
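Taken together with the defaults file above, running the playbook with the flag flipped would look roughly like this (an assumed invocation; the playbook path is the file added earlier in this changeset):

```shell
ansible-playbook -i inventory/sample/hosts.yaml contrib/os-services/os-services.yml \
  -e disable_service_firewall=true
```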
@@ -51,7 +51,7 @@ export SKIP_PIP_INSTALL=1
%doc %{_docdir}/%{name}/inventory/sample/hosts.ini
%config %{_sysconfdir}/%{name}/ansible.cfg
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/all.yml
-%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s-cluster.yml
+%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s_cluster.yml
%license %{_docdir}/%{name}/LICENSE
%{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info
%{_datarootdir}/%{name}/roles/
@@ -1,40 +1,44 @@
-## Kubernetes on AWS with Terraform
+# Kubernetes on AWS with Terraform

-**Overview:**
+## Overview

This project will create:
-* VPC with Public and Private Subnets in # Availability Zones
-* Bastion Hosts and NAT Gateways in the Public Subnet
-* A dynamic number of masters, etcd, and worker nodes in the Private Subnet
-* even distributed over the # of Availability Zones
-* AWS ELB in the Public Subnet for accessing the Kubernetes API from the internet

-**Requirements**
+- VPC with Public and Private Subnets in # Availability Zones
+- Bastion Hosts and NAT Gateways in the Public Subnet
+- A dynamic number of masters, etcd, and worker nodes in the Private Subnet
+- evenly distributed over the # of Availability Zones
+- AWS ELB in the Public Subnet for accessing the Kubernetes API from the internet
+
+## Requirements

- Terraform 0.12.0 or newer

-**How to Use:**
+## How to Use

- Export the variables for your AWS credentials or edit `credentials.tfvars`:

-```
+```commandline
export TF_VAR_AWS_ACCESS_KEY_ID="www"
export TF_VAR_AWS_SECRET_ACCESS_KEY ="xxx"
export TF_VAR_AWS_SSH_KEY_NAME="yyy"
export TF_VAR_AWS_DEFAULT_REGION="zzz"
```

- Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use Ubuntu 18.04 LTS (Bionic) as base image. If you want to change this behaviour, see the note "Using other distrib than Ubuntu" below.
- Create an AWS EC2 SSH Key
- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending on whether you exported your AWS credentials

Example:

```commandline
terraform apply -var-file=credentials.tfvars
```

- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`

- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using the bastion host, use the generated ssh-bastion.conf.
  Ansible automatically detects the bastion and changes ssh_args

```commandline
ssh -F ./ssh-bastion.conf user@$ip
```
@@ -42,14 +46,19 @@ ssh -F ./ssh-bastion.conf user@$ip
- Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag.

Example (this one assumes you are using Ubuntu)

```commandline
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_user=ubuntu -b --become-user=root --flush-cache
```

***Using other distrib than Ubuntu***
If you want to use another distribution than Ubuntu 18.04 (Bionic) LTS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.

For example, to use:

- Debian Jessie, replace 'data "aws_ami" "distro"' in variables.tf with

```ini
data "aws_ami" "distro" {
  most_recent = true

@@ -65,8 +74,11 @@ data "aws_ami" "distro" {

  owners = ["379101102735"]
}
```

- Ubuntu 16.04, replace 'data "aws_ami" "distro"' in variables.tf with

```ini
data "aws_ami" "distro" {
  most_recent = true

@@ -82,8 +94,11 @@ data "aws_ami" "distro" {

  owners = ["099720109477"]
}
```

- Centos 7, replace 'data "aws_ami" "distro"' in variables.tf with

```ini
data "aws_ami" "distro" {
  most_recent = true

@@ -99,23 +114,49 @@ data "aws_ami" "distro" {

  owners = ["688023202711"]
}
```

-**Troubleshooting**
+## Connecting to Kubernetes

-***Remaining AWS IAM Instance Profile***:
+You can use the following set of commands to get the kubeconfig file from your newly created cluster. Before running the commands, make sure you are in the project's root folder.
+
+```commandline
+# Get the controller's IP address.
+CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube_control_plane\]" -A 1 | tail -n 1)
+CONTROLLER_IP=$(cat ./inventory/hosts | grep $CONTROLLER_HOST_NAME | grep ansible_host | cut -d'=' -f2)
+
+# Get the hostname of the load balancer.
+LB_HOST=$(cat inventory/hosts | grep apiserver_loadbalancer_domain_name | cut -d'"' -f2)
+
+# Get the controller's SSH fingerprint.
+ssh-keygen -R $CONTROLLER_IP > /dev/null 2>&1
+ssh-keyscan -H $CONTROLLER_IP >> ~/.ssh/known_hosts 2>/dev/null
+
+# Get the kubeconfig from the controller.
+mkdir -p ~/.kube
+ssh -F ssh-bastion.conf centos@$CONTROLLER_IP "sudo chmod 644 /etc/kubernetes/admin.conf"
+scp -F ssh-bastion.conf centos@$CONTROLLER_IP:/etc/kubernetes/admin.conf ~/.kube/config
+sed -i "s^server:.*^server: https://$LB_HOST:6443^" ~/.kube/config
+kubectl get nodes
+```
+
+## Troubleshooting
+
+### Remaining AWS IAM Instance Profile

If the cluster was destroyed without using Terraform it is possible that
the AWS IAM Instance Profiles still remain. To delete them you can use
the `AWS CLI` with the following command:

-```
+
+```commandline
aws iam delete-instance-profile --region <region_name> --instance-profile-name <profile_name>
```

-***Ansible Inventory doesn't get created:***
+### Ansible Inventory doesn't get created

It could happen that Terraform doesn't create an Ansible Inventory file automatically. If this is the case, copy the output after `inventory=`, create a file named `hosts` in the directory `inventory`, and paste the inventory into the file.

-**Architecture**
+## Architecture

Pictured is an AWS Infrastructure created with this Terraform project distributed over two Availability Zones.
@ -61,11 +61,11 @@ resource "aws_instance" "bastion-server" {
|
||||
|
||||
key_name = var.AWS_SSH_KEY_NAME
|
||||
|
||||
tags = merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-bastion-${count.index}",
|
||||
"Cluster", "${var.aws_cluster_name}",
|
||||
"Role", "bastion-${var.aws_cluster_name}-${count.index}"
|
||||
))
|
||||
tags = merge(var.default_tags, tomap({
|
||||
Name = "kubernetes-${var.aws_cluster_name}-bastion-${count.index}"
|
||||
Cluster = var.aws_cluster_name
|
||||
Role = "bastion-${var.aws_cluster_name}-${count.index}"
|
||||
}))
|
||||
}
|
||||
|
||||
/*
|
||||
@ -84,14 +84,14 @@ resource "aws_instance" "k8s-master" {
|
||||
|
||||
vpc_security_group_ids = module.aws-vpc.aws_security_group
|
||||
|
||||
iam_instance_profile = module.aws-iam.kube-master-profile
|
||||
iam_instance_profile = module.aws-iam.kube_control_plane-profile
|
||||
key_name = var.AWS_SSH_KEY_NAME
|
||||
|
||||
tags = merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
|
||||
"Role", "master"
|
||||
))
|
||||
tags = merge(var.default_tags, tomap({
|
||||
Name = "kubernetes-${var.aws_cluster_name}-master${count.index}"
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}" = "member"
|
||||
Role = "master"
|
||||
}))
|
||||
}
|
||||
|
||||
resource "aws_elb_attachment" "attach_master_nodes" {
|
||||
@ -113,11 +113,11 @@ resource "aws_instance" "k8s-etcd" {
|
||||
|
||||
key_name = var.AWS_SSH_KEY_NAME
|
||||
|
||||
tags = merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}",
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
|
||||
"Role", "etcd"
|
||||
))
|
||||
tags = merge(var.default_tags, tomap({
|
||||
Name = "kubernetes-${var.aws_cluster_name}-etcd${count.index}"
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}" = "member"
|
||||
Role = "etcd"
|
||||
}))
|
||||
}
|
||||
|
||||
resource "aws_instance" "k8s-worker" {
|
||||
@ -134,11 +134,11 @@ resource "aws_instance" "k8s-worker" {
|
||||
iam_instance_profile = module.aws-iam.kube-worker-profile
|
||||
key_name = var.AWS_SSH_KEY_NAME
|
||||
|
||||
tags = merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
|
||||
"Role", "worker"
|
||||
))
|
||||
tags = merge(var.default_tags, tomap({
|
||||
Name = "kubernetes-${var.aws_cluster_name}-worker${count.index}"
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}" = "member"
|
||||
Role = "worker"
|
||||
}))
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -2,9 +2,9 @@ resource "aws_security_group" "aws-elb" {
  name   = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
  vpc_id = var.aws_vpc_id

-  tags = merge(var.default_tags, map(
-    "Name", "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
-  ))
+  tags = merge(var.default_tags, tomap({
+    Name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
+  }))
}

resource "aws_security_group_rule" "aws-allow-api-access" {
@@ -42,7 +42,7 @@ resource "aws_elb" "aws-elb-api" {
    healthy_threshold   = 2
    unhealthy_threshold = 2
    timeout             = 3
-    target              = "TCP:${var.k8s_secure_api_port}"
+    target              = "HTTPS:${var.k8s_secure_api_port}/healthz"
    interval            = 30
  }

@@ -51,7 +51,7 @@ resource "aws_elb" "aws-elb-api" {
  connection_draining         = true
  connection_draining_timeout = 400

-  tags = merge(var.default_tags, map(
-    "Name", "kubernetes-${var.aws_cluster_name}-elb-api"
-  ))
+  tags = merge(var.default_tags, tomap({
+    Name = "kubernetes-${var.aws_cluster_name}-elb-api"
+  }))
}
@@ -16,15 +16,15 @@ variable "k8s_secure_api_port" {

variable "aws_avail_zones" {
  description = "Availability Zones Used"
-  type        = "list"
+  type        = list(string)
}

variable "aws_subnet_ids_public" {
  description = "IDs of Public Subnets"
-  type        = "list"
+  type        = list(string)
}

variable "default_tags" {
  description = "Tags for all resources"
-  type        = "map"
+  type        = map(string)
}
@@ -1,6 +1,6 @@
#Add AWS Roles for Kubernetes

-resource "aws_iam_role" "kube-master" {
+resource "aws_iam_role" "kube_control_plane" {
  name = "kubernetes-${var.aws_cluster_name}-master"

  assume_role_policy = <<EOF
@@ -40,9 +40,9 @@ EOF

#Add AWS Policies for Kubernetes

-resource "aws_iam_role_policy" "kube-master" {
+resource "aws_iam_role_policy" "kube_control_plane" {
  name = "kubernetes-${var.aws_cluster_name}-master"
-  role = aws_iam_role.kube-master.id
+  role = aws_iam_role.kube_control_plane.id

  policy = <<EOF
{
@@ -130,9 +130,9 @@ EOF

#Create AWS Instance Profiles

-resource "aws_iam_instance_profile" "kube-master" {
+resource "aws_iam_instance_profile" "kube_control_plane" {
  name = "kube_${var.aws_cluster_name}_master_profile"
-  role = aws_iam_role.kube-master.name
+  role = aws_iam_role.kube_control_plane.name
}

resource "aws_iam_instance_profile" "kube-worker" {
@@ -1,5 +1,5 @@
-output "kube-master-profile" {
-  value = aws_iam_instance_profile.kube-master.name
+output "kube_control_plane-profile" {
+  value = aws_iam_instance_profile.kube_control_plane.name
}

output "kube-worker-profile" {
@@ -5,9 +5,9 @@ resource "aws_vpc" "cluster-vpc" {
  enable_dns_support   = true
  enable_dns_hostnames = true

-  tags = merge(var.default_tags, map(
-    "Name", "kubernetes-${var.aws_cluster_name}-vpc"
-  ))
+  tags = merge(var.default_tags, tomap({
+    Name = "kubernetes-${var.aws_cluster_name}-vpc"
+  }))
}

resource "aws_eip" "cluster-nat-eip" {
@@ -18,9 +18,9 @@ resource "aws_eip" "cluster-nat-eip" {
resource "aws_internet_gateway" "cluster-vpc-internetgw" {
  vpc_id = aws_vpc.cluster-vpc.id

-  tags = merge(var.default_tags, map(
-    "Name", "kubernetes-${var.aws_cluster_name}-internetgw"
-  ))
+  tags = merge(var.default_tags, tomap({
+    Name = "kubernetes-${var.aws_cluster_name}-internetgw"
+  }))
}

resource "aws_subnet" "cluster-vpc-subnets-public" {
@@ -29,10 +29,10 @@ resource "aws_subnet" "cluster-vpc-subnets-public" {
  availability_zone = element(var.aws_avail_zones, count.index)
  cidr_block        = element(var.aws_cidr_subnets_public, count.index)

-  tags = merge(var.default_tags, map(
-    "Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public",
-    "kubernetes.io/cluster/${var.aws_cluster_name}", "member"
-  ))
+  tags = merge(var.default_tags, tomap({
+    Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public"
+    "kubernetes.io/cluster/${var.aws_cluster_name}" = "member"
+  }))
}

resource "aws_nat_gateway" "cluster-nat-gateway" {
@@ -47,9 +47,9 @@ resource "aws_subnet" "cluster-vpc-subnets-private" {
  availability_zone = element(var.aws_avail_zones, count.index)
  cidr_block        = element(var.aws_cidr_subnets_private, count.index)

-  tags = merge(var.default_tags, map(
-    "Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
-  ))
+  tags = merge(var.default_tags, tomap({
+    Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
+  }))
}

#Routing in VPC
@@ -64,9 +64,9 @@ resource "aws_route_table" "kubernetes-public" {
    gateway_id = aws_internet_gateway.cluster-vpc-internetgw.id
  }

-  tags = merge(var.default_tags, map(
-    "Name", "kubernetes-${var.aws_cluster_name}-routetable-public"
-  ))
+  tags = merge(var.default_tags, tomap({
+    Name = "kubernetes-${var.aws_cluster_name}-routetable-public"
+  }))
}

resource "aws_route_table" "kubernetes-private" {
@@ -78,9 +78,9 @@ resource "aws_route_table" "kubernetes-private" {
    nat_gateway_id = element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)
  }

-  tags = merge(var.default_tags, map(
-    "Name", "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
-  ))
+  tags = merge(var.default_tags, tomap({
+    Name = "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
+  }))
}

resource "aws_route_table_association" "kubernetes-public" {
@@ -101,9 +101,9 @@ resource "aws_security_group" "kubernetes" {
  name   = "kubernetes-${var.aws_cluster_name}-securitygroup"
  vpc_id = aws_vpc.cluster-vpc.id

-  tags = merge(var.default_tags, map(
-    "Name", "kubernetes-${var.aws_cluster_name}-securitygroup"
-  ))
+  tags = merge(var.default_tags, tomap({
+    Name = "kubernetes-${var.aws_cluster_name}-securitygroup"
+  }))
}

resource "aws_security_group_rule" "allow-all-ingress" {
@@ -8,20 +8,20 @@ variable "aws_cluster_name" {

variable "aws_avail_zones" {
  description = "AWS Availability Zones Used"
-  type        = "list"
+  type        = list(string)
}

variable "aws_cidr_subnets_private" {
  description = "CIDR Blocks for private subnets in Availability zones"
-  type        = "list"
+  type        = list(string)
}

variable "aws_cidr_subnets_public" {
  description = "CIDR Blocks for public subnets in Availability zones"
-  type        = "list"
+  type        = list(string)
}

variable "default_tags" {
  description = "Default tags for all resources"
-  type        = "map"
+  type        = map(string)
}
@@ -7,11 +7,11 @@ ${public_ip_address_bastion}
 [bastion]
 ${public_ip_address_bastion}

-[kube-master]
+[kube_control_plane]
 ${list_master}


-[kube-node]
+[kube_node]
 ${list_node}


@@ -19,10 +19,10 @@ ${list_node}
 ${list_etcd}


-[k8s-cluster:children]
-kube-node
-kube-master
+[k8s_cluster:children]
+kube_node
+kube_control_plane


-[k8s-cluster:vars]
+[k8s_cluster:vars]
 ${elb_api_fqdn}
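
These inventory group renames (hyphens to underscores) also apply to inventories created before the rename. A minimal migration sketch for a hand-maintained ini inventory; the file path here is hypothetical, so review the result before using it:

```bash
# Rename the Ansible groups in place (GNU sed)
sed -i \
  -e 's/kube-master/kube_control_plane/g' \
  -e 's/kube-node/kube_node/g' \
  -e 's/k8s-cluster/k8s_cluster/g' \
  inventory/mycluster/inventory.ini
```
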
@@ -44,12 +44,12 @@ variable "aws_vpc_cidr_block" {

 variable "aws_cidr_subnets_private" {
   description = "CIDR Blocks for private subnets in Availability Zones"
-  type = "list"
+  type = list(string)
 }

 variable "aws_cidr_subnets_public" {
   description = "CIDR Blocks for public subnets in Availability Zones"
-  type = "list"
+  type = list(string)
 }

 //AWS EC2 Settings

@@ -101,7 +101,7 @@ variable "k8s_secure_api_port" {

 variable "default_tags" {
   description = "Default tags for all resources"
-  type = "map"
+  type = map(string)
 }

 variable "inventory_file" {
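
The `type = "list"` and `type = "map"` edits above replace quoted type names, which are legacy Terraform 0.11 syntax, with the first-class type expressions (`list(string)`, `map(string)`) introduced in Terraform 0.12. A hedged way to check a module after this kind of migration (`terraform 0.12upgrade` exists only in the 0.12.x CLI series):

```bash
# Rewrites remaining 0.11-style constructs in the current module, then re-validates
terraform 0.12upgrade
terraform init
terraform validate
```
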
154 contrib/terraform/exoscale/README.md (new file)

@@ -0,0 +1,154 @@
# Kubernetes on Exoscale with Terraform

Provision a Kubernetes cluster on [Exoscale](https://www.exoscale.com/) using Terraform and Kubespray

## Overview

The setup looks like the following:

```text
                                  Kubernetes cluster
                              +-----------------------+
+---------------+             |   +--------------+    |
|               |             |   | +--------------+  |
| API server LB +--------->   |   | |              |  |
|               |             |   | | Master/etcd  |  |
+---------------+             |   | | node(s)      |  |
                              |   +-+              |  |
                              |     +--------------+  |
                              |         ^             |
                              |         |             |
                              |         v             |
+---------------+             |   +--------------+    |
|               |             |   | +--------------+  |
|  Ingress LB   +--------->   |   | |              |  |
|               |             |   | | Worker       |  |
+---------------+             |   | | node(s)      |  |
                              |   +-+              |  |
                              |     +--------------+  |
                              +-----------------------+
```

## Requirements

* Terraform 0.13.0 or newer

*0.12 also works if you modify the provider block to include version and remove all `versions.tf` files*

## Quickstart

NOTE: *Assumes you are at the root of the kubespray repo*

Copy the sample inventory for your cluster and copy the default terraform variables.

```bash
CLUSTER=my-exoscale-cluster
cp -r inventory/sample inventory/$CLUSTER
cp contrib/terraform/exoscale/default.tfvars inventory/$CLUSTER/
cd inventory/$CLUSTER
```

Edit `default.tfvars` to match your setup. You MUST, at the very least, change `ssh_public_keys`.

```bash
# Ensure $EDITOR points to your favorite editor, e.g., vim, emacs, VS Code, etc.
$EDITOR default.tfvars
```

For authentication you can use the credentials file `~/.cloudstack.ini` or `./cloudstack.ini`.
The file should look something like this:

```ini
[cloudstack]
key = <API key>
secret = <API secret>
```

Follow the [Exoscale IAM Quick-start](https://community.exoscale.com/documentation/iam/quick-start/) to learn how to generate API keys.

### Encrypted credentials

To have the credentials encrypted at rest, you can use [sops](https://github.com/mozilla/sops) and only decrypt the credentials at runtime.

```bash
cat << EOF > cloudstack.ini
[cloudstack]
key =
secret =
EOF
sops --encrypt --in-place --pgp <PGP key fingerprint> cloudstack.ini
sops cloudstack.ini
```

Run terraform to create the infrastructure:

```bash
terraform init ../../contrib/terraform/exoscale
terraform apply -var-file default.tfvars ../../contrib/terraform/exoscale
```

If your cloudstack credentials file is encrypted using sops, run the following:

```bash
terraform init ../../contrib/terraform/exoscale
sops exec-file -no-fifo cloudstack.ini 'CLOUDSTACK_CONFIG={} terraform apply -var-file default.tfvars ../../contrib/terraform/exoscale'
```

You should now have an inventory file named `inventory.ini` that you can copy and use with kubespray to set up a cluster.
You can type `terraform output` to find out the IP addresses of the nodes, as well as the control-plane and data-plane load balancers.
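
For example, to look up individual addresses (output names as declared in this contrib directory's `output.tf`):

```bash
# All outputs at once
terraform output
# Or a single value, e.g. the API server and ingress load-balancers
terraform output control_plane_lb_ip_address
terraform output ingress_controller_lb_ip_address
```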

It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by:

```bash
ansible -i inventory.ini -m ping all
```

Example to use this with the default sample inventory:

```bash
ansible-playbook -i inventory.ini ../../cluster.yml -b -v
```

## Teardown

Since the Kubernetes cluster itself cannot create any load-balancers or disks, teardown is as simple as a Terraform destroy:

```bash
terraform destroy -var-file default.tfvars ../../contrib/terraform/exoscale
```
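
If the credentials file is sops-encrypted, the destroy needs the same wrapper as the apply above; this mirrors that example and assumes the same `CLOUDSTACK_CONFIG` convention:

```bash
sops exec-file -no-fifo cloudstack.ini 'CLOUDSTACK_CONFIG={} terraform destroy -var-file default.tfvars ../../contrib/terraform/exoscale'
```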

## Variables

### Required

* `ssh_public_keys`: List of public SSH keys to install on all machines
* `zone`: The zone where to run the cluster
* `machines`: Machines to provision. Key of this object will be used as the name of the machine
  * `node_type`: The role of this node *(master|worker)*
  * `size`: The size to use
  * `boot_disk`: The boot disk to use
    * `image_name`: Name of the image
    * `root_partition_size`: Size *(in GB)* for the root partition
    * `ceph_partition_size`: Size *(in GB)* for the partition for rook to use as ceph storage *(Set to 0 to disable)*
    * `node_local_partition_size`: Size *(in GB)* for the partition for node-local-storage *(Set to 0 to disable)*
* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to ssh to the nodes
* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on ports 30000-32767 (kubernetes nodeports)

### Optional

* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project *(Defaults to `default`)*

An example variables file can be found in `default.tfvars`.

## Known limitations

### Only single disk

Since Exoscale doesn't support attaching additional disks to an instance, this script can instead create partitions on the boot disk for [Rook](https://rook.io/) and [node-local-storage](https://kubernetes.io/docs/concepts/storage/volumes/#local).

### No Kubernetes API

The current solution doesn't use the [Exoscale Kubernetes cloud controller](https://github.com/exoscale/exoscale-cloud-controller-manager).
This means that we need to set up an HTTP(S) loadbalancer in front of all workers and set the Ingress controller to DaemonSet mode.
65 contrib/terraform/exoscale/default.tfvars (new file)

@@ -0,0 +1,65 @@
prefix = "default"
zone   = "ch-gva-2"

inventory_file = "inventory.ini"

ssh_public_keys = [
  # Put your public SSH key here
  "ssh-rsa I-did-not-read-the-docs",
  "ssh-rsa I-did-not-read-the-docs 2",
]

machines = {
  "master-0" : {
    "node_type" : "master",
    "size" : "Medium",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  },
  "worker-0" : {
    "node_type" : "worker",
    "size" : "Large",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  },
  "worker-1" : {
    "node_type" : "worker",
    "size" : "Large",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  },
  "worker-2" : {
    "node_type" : "worker",
    "size" : "Large",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  }
}

nodeport_whitelist = [
  "0.0.0.0/0"
]

ssh_whitelist = [
  "0.0.0.0/0"
]

api_server_whitelist = [
  "0.0.0.0/0"
]
49 contrib/terraform/exoscale/main.tf (new file)

@@ -0,0 +1,49 @@
provider "exoscale" {}

module "kubernetes" {
  source = "./modules/kubernetes-cluster"

  prefix = var.prefix

  machines = var.machines

  ssh_public_keys = var.ssh_public_keys

  ssh_whitelist        = var.ssh_whitelist
  api_server_whitelist = var.api_server_whitelist
  nodeport_whitelist   = var.nodeport_whitelist
}

#
# Generate ansible inventory
#

data "template_file" "inventory" {
  template = file("${path.module}/templates/inventory.tpl")

  vars = {
    connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d",
      keys(module.kubernetes.master_ip_addresses),
      values(module.kubernetes.master_ip_addresses).*.public_ip,
      values(module.kubernetes.master_ip_addresses).*.private_ip,
      range(1, length(module.kubernetes.master_ip_addresses) + 1)))
    connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s",
      keys(module.kubernetes.worker_ip_addresses),
      values(module.kubernetes.worker_ip_addresses).*.public_ip,
      values(module.kubernetes.worker_ip_addresses).*.private_ip))

    list_master       = join("\n", keys(module.kubernetes.master_ip_addresses))
    list_worker       = join("\n", keys(module.kubernetes.worker_ip_addresses))
    api_lb_ip_address = module.kubernetes.control_plane_lb_ip_address
  }
}

resource "null_resource" "inventories" {
  provisioner "local-exec" {
    command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
  }

  triggers = {
    template = data.template_file.inventory.rendered
  }
}
193 contrib/terraform/exoscale/modules/kubernetes-cluster/main.tf (new file)

@@ -0,0 +1,193 @@
data "exoscale_compute_template" "os_image" {
  for_each = var.machines

  zone = var.zone
  name = each.value.boot_disk.image_name
}

data "exoscale_compute" "master_nodes" {
  for_each = exoscale_compute.master

  id = each.value.id

  # Since private IP address is not assigned until the nics are created we need this
  depends_on = [exoscale_nic.master_private_network_nic]
}

data "exoscale_compute" "worker_nodes" {
  for_each = exoscale_compute.worker

  id = each.value.id

  # Since private IP address is not assigned until the nics are created we need this
  depends_on = [exoscale_nic.worker_private_network_nic]
}

resource "exoscale_network" "private_network" {
  zone = var.zone
  name = "${var.prefix}-network"

  start_ip = cidrhost(var.private_network_cidr, 1)
  # cidr -1 = Broadcast address
  # cidr -2 = DHCP server address (exoscale specific)
  end_ip  = cidrhost(var.private_network_cidr, -3)
  netmask = cidrnetmask(var.private_network_cidr)
}

resource "exoscale_compute" "master" {
  for_each = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "master"
  }

  display_name    = "${var.prefix}-${each.key}"
  template_id     = data.exoscale_compute_template.os_image[each.key].id
  size            = each.value.size
  disk_size       = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
  state           = "Running"
  zone            = var.zone
  security_groups = [exoscale_security_group.master_sg.name]

  user_data = templatefile(
    "${path.module}/templates/cloud-init.tmpl",
    {
      eip_ip_address            = exoscale_ipaddress.ingress_controller_lb.ip_address
      node_local_partition_size = each.value.boot_disk.node_local_partition_size
      ceph_partition_size       = each.value.boot_disk.ceph_partition_size
      root_partition_size       = each.value.boot_disk.root_partition_size
      node_type                 = "master"
      ssh_public_keys           = var.ssh_public_keys
    }
  )
}

resource "exoscale_compute" "worker" {
  for_each = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "worker"
  }

  display_name    = "${var.prefix}-${each.key}"
  template_id     = data.exoscale_compute_template.os_image[each.key].id
  size            = each.value.size
  disk_size       = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
  state           = "Running"
  zone            = var.zone
  security_groups = [exoscale_security_group.worker_sg.name]

  user_data = templatefile(
    "${path.module}/templates/cloud-init.tmpl",
    {
      eip_ip_address            = exoscale_ipaddress.ingress_controller_lb.ip_address
      node_local_partition_size = each.value.boot_disk.node_local_partition_size
      ceph_partition_size       = each.value.boot_disk.ceph_partition_size
      root_partition_size       = each.value.boot_disk.root_partition_size
      node_type                 = "worker"
      ssh_public_keys           = var.ssh_public_keys
    }
  )
}

resource "exoscale_nic" "master_private_network_nic" {
  for_each = exoscale_compute.master

  compute_id = each.value.id
  network_id = exoscale_network.private_network.id
}

resource "exoscale_nic" "worker_private_network_nic" {
  for_each = exoscale_compute.worker

  compute_id = each.value.id
  network_id = exoscale_network.private_network.id
}

resource "exoscale_security_group" "master_sg" {
  name        = "${var.prefix}-master-sg"
  description = "Security group for Kubernetes masters"
}

resource "exoscale_security_group_rules" "master_sg_rules" {
  security_group_id = exoscale_security_group.master_sg.id

  # SSH
  ingress {
    protocol  = "TCP"
    cidr_list = var.ssh_whitelist
    ports     = ["22"]
  }

  # Kubernetes API
  ingress {
    protocol  = "TCP"
    cidr_list = var.api_server_whitelist
    ports     = ["6443"]
  }
}

resource "exoscale_security_group" "worker_sg" {
  name        = "${var.prefix}-worker-sg"
  description = "security group for kubernetes worker nodes"
}

resource "exoscale_security_group_rules" "worker_sg_rules" {
  security_group_id = exoscale_security_group.worker_sg.id

  # SSH
  ingress {
    protocol  = "TCP"
    cidr_list = var.ssh_whitelist
    ports     = ["22"]
  }

  # HTTP(S)
  ingress {
    protocol  = "TCP"
    cidr_list = ["0.0.0.0/0"]
    ports     = ["80", "443"]
  }

  # Kubernetes Nodeport
  ingress {
    protocol  = "TCP"
    cidr_list = var.nodeport_whitelist
    ports     = ["30000-32767"]
  }
}

resource "exoscale_ipaddress" "ingress_controller_lb" {
  zone                     = var.zone
  healthcheck_mode         = "http"
  healthcheck_port         = 80
  healthcheck_path         = "/healthz"
  healthcheck_interval     = 10
  healthcheck_timeout      = 2
  healthcheck_strikes_ok   = 2
  healthcheck_strikes_fail = 3
}

resource "exoscale_secondary_ipaddress" "ingress_controller_lb" {
  for_each = exoscale_compute.worker

  compute_id = each.value.id
  ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
}

resource "exoscale_ipaddress" "control_plane_lb" {
  zone                     = var.zone
  healthcheck_mode         = "tcp"
  healthcheck_port         = 6443
  healthcheck_interval     = 10
  healthcheck_timeout      = 2
  healthcheck_strikes_ok   = 2
  healthcheck_strikes_fail = 3
}

resource "exoscale_secondary_ipaddress" "control_plane_lb" {
  for_each = exoscale_compute.master

  compute_id = each.value.id
  ip_address = exoscale_ipaddress.control_plane_lb.ip_address
}
@@ -0,0 +1,31 @@
output "master_ip_addresses" {
  value = {
    for key, instance in exoscale_compute.master :
    instance.name => {
      "private_ip" = contains(keys(data.exoscale_compute.master_nodes), key) ? data.exoscale_compute.master_nodes[key].private_network_ip_addresses[0] : ""
      "public_ip"  = exoscale_compute.master[key].ip_address
    }
  }
}

output "worker_ip_addresses" {
  value = {
    for key, instance in exoscale_compute.worker :
    instance.name => {
      "private_ip" = contains(keys(data.exoscale_compute.worker_nodes), key) ? data.exoscale_compute.worker_nodes[key].private_network_ip_addresses[0] : ""
      "public_ip"  = exoscale_compute.worker[key].ip_address
    }
  }
}

output "cluster_private_network_cidr" {
  value = var.private_network_cidr
}

output "ingress_controller_lb_ip_address" {
  value = exoscale_ipaddress.ingress_controller_lb.ip_address
}

output "control_plane_lb_ip_address" {
  value = exoscale_ipaddress.control_plane_lb.ip_address
}
@@ -0,0 +1,52 @@
#cloud-config
%{ if ceph_partition_size > 0 || node_local_partition_size > 0}
bootcmd:
  - [ cloud-init-per, once, move-second-header, sgdisk, --move-second-header, /dev/vda ]
%{ if node_local_partition_size > 0 }
  # Create partition for node local storage
  - [ cloud-init-per, once, create-node-local-part, parted, --script, /dev/vda, 'mkpart extended ext4 ${root_partition_size}GB %{ if ceph_partition_size == 0 }-1%{ else }${root_partition_size + node_local_partition_size}GB%{ endif }' ]
  - [ cloud-init-per, once, create-fs-node-local-part, mkfs.ext4, /dev/vda2 ]
%{ endif }
%{ if ceph_partition_size > 0 }
  # Create partition for rook to use for ceph
  - [ cloud-init-per, once, create-ceph-part, parted, --script, /dev/vda, 'mkpart extended ${root_partition_size + node_local_partition_size}GB -1' ]
%{ endif }
%{ endif }

ssh_authorized_keys:
%{ for ssh_public_key in ssh_public_keys ~}
  - ${ssh_public_key}
%{ endfor ~}

write_files:
  - path: /etc/netplan/eth1.yaml
    content: |
      network:
        version: 2
        ethernets:
          eth1:
            dhcp4: true
%{ if node_type == "worker" }
  # TODO: When a VM is seen as healthy and is added to the EIP loadbalancer
  # pool it no longer can send traffic back to itself via the EIP IP
  # address.
  # Remove this if it ever gets solved.
  - path: /etc/netplan/20-eip-fix.yaml
    content: |
      network:
        version: 2
        ethernets:
          "lo:0":
            match:
              name: lo
            dhcp4: false
            addresses:
              - ${eip_ip_address}/32
%{ endif }
runcmd:
  - netplan apply
%{ if node_local_partition_size > 0 }
  - mkdir -p /mnt/disks/node-local-storage
  - chown nobody:nogroup /mnt/disks/node-local-storage
  - mount /dev/vda2 /mnt/disks/node-local-storage
%{ endif }
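
To see what this template renders to without creating anything, one option is evaluating `templatefile()` in `terraform console` from the repository root; every value below is a made-up placeholder:

```bash
# Pipe a single templatefile() expression into the console; prints the rendered cloud-init
echo 'templatefile("contrib/terraform/exoscale/modules/kubernetes-cluster/templates/cloud-init.tmpl", { eip_ip_address = "203.0.113.10", node_local_partition_size = 0, ceph_partition_size = 0, root_partition_size = 50, node_type = "worker", ssh_public_keys = ["ssh-rsa AAAA-example"] })' | terraform console
```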
@@ -0,0 +1,42 @@
variable "zone" {
  type = string
  # This is currently the only zone that is supposed to support
  # so-called "managed private networks".
  # See: https://www.exoscale.com/syslog/introducing-managed-private-networks
  default = "ch-gva-2"
}

variable "prefix" {}

variable "machines" {
  type = map(object({
    node_type = string
    size      = string
    boot_disk = object({
      image_name                = string
      root_partition_size      = number
      ceph_partition_size       = number
      node_local_partition_size = number
    })
  }))
}

variable "ssh_public_keys" {
  type = list(string)
}

variable "ssh_whitelist" {
  type = list(string)
}

variable "api_server_whitelist" {
  type = list(string)
}

variable "nodeport_whitelist" {
  type = list(string)
}

variable "private_network_cidr" {
  default = "172.0.10.0/24"
}
@@ -0,0 +1,9 @@
terraform {
  required_providers {
    exoscale = {
      source  = "exoscale/exoscale"
      version = ">= 0.21"
    }
  }
  required_version = ">= 0.13"
}
15 contrib/terraform/exoscale/output.tf (new file)

@@ -0,0 +1,15 @@
output "master_ips" {
  value = module.kubernetes.master_ip_addresses
}

output "worker_ips" {
  value = module.kubernetes.worker_ip_addresses
}

output "ingress_controller_lb_ip_address" {
  value = module.kubernetes.ingress_controller_lb_ip_address
}

output "control_plane_lb_ip_address" {
  value = module.kubernetes.control_plane_lb_ip_address
}
65 contrib/terraform/exoscale/sample-inventory/cluster.tfvars (new file)

@@ -0,0 +1,65 @@
prefix = "default"
zone   = "ch-gva-2"

inventory_file = "inventory.ini"

ssh_public_keys = [
  # Put your public SSH key here
  "ssh-rsa I-did-not-read-the-docs",
  "ssh-rsa I-did-not-read-the-docs 2",
]

machines = {
  "master-0" : {
    "node_type" : "master",
    "size" : "Small",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  },
  "worker-0" : {
    "node_type" : "worker",
    "size" : "Large",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  },
  "worker-1" : {
    "node_type" : "worker",
    "size" : "Large",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  },
  "worker-2" : {
    "node_type" : "worker",
    "size" : "Large",
    "boot_disk" : {
      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
      "root_partition_size" : 50,
      "node_local_partition_size" : 0,
      "ceph_partition_size" : 0
    }
  }
}

nodeport_whitelist = [
  "0.0.0.0/0"
]

ssh_whitelist = [
  "0.0.0.0/0"
]

api_server_whitelist = [
  "0.0.0.0/0"
]
1 contrib/terraform/exoscale/sample-inventory/group_vars (symbolic link)

@@ -0,0 +1 @@
../../../../inventory/sample/group_vars
19 contrib/terraform/exoscale/templates/inventory.tpl (new file)

@@ -0,0 +1,19 @@
[all]
${connection_strings_master}
${connection_strings_worker}

[kube_control_plane]
${list_master}

[kube_control_plane:vars]
supplementary_addresses_in_ssl_keys = [ "${api_lb_ip_address}" ]

[etcd]
${list_master}

[kube_node]
${list_worker}

[k8s_cluster:children]
kube_control_plane
kube_node
46 contrib/terraform/exoscale/variables.tf (new file)

@@ -0,0 +1,46 @@
variable "zone" {
  description = "The zone where to run the cluster"
}

variable "prefix" {
  description = "Prefix for resource names"
  default     = "default"
}

variable "machines" {
  description = "Cluster machines"
  type = map(object({
    node_type = string
    size      = string
    boot_disk = object({
      image_name                = string
      root_partition_size      = number
      ceph_partition_size       = number
      node_local_partition_size = number
    })
  }))
}

variable "ssh_public_keys" {
  description = "List of public SSH keys which are injected into the VMs."
  type        = list(string)
}

variable "ssh_whitelist" {
  description = "List of IP ranges (CIDR) to whitelist for ssh"
  type        = list(string)
}

variable "api_server_whitelist" {
  description = "List of IP ranges (CIDR) to whitelist for kubernetes api server"
  type        = list(string)
}

variable "nodeport_whitelist" {
  description = "List of IP ranges (CIDR) to whitelist for kubernetes nodeports"
  type        = list(string)
}

variable "inventory_file" {
  description = "Where to store the generated inventory file"
}
15 contrib/terraform/exoscale/versions.tf (new file)

@@ -0,0 +1,15 @@
terraform {
  required_providers {
    exoscale = {
      source  = "exoscale/exoscale"
      version = ">= 0.21"
    }
    null = {
      source = "hashicorp/null"
    }
    template = {
      source = "hashicorp/template"
    }
  }
  required_version = ">= 0.13"
}
90 contrib/terraform/gcp/README.md (new file)

@@ -0,0 +1,90 @@
# Kubernetes on GCP with Terraform

Provision a Kubernetes cluster on GCP using Terraform and Kubespray

## Overview

The setup looks like the following:

```text
                                  Kubernetes cluster
                              +-----------------------+
+---------------+             |   +--------------+    |
|               |             |   | +--------------+  |
| API server LB +--------->   |   | |              |  |
|               |             |   | | Master/etcd  |  |
+---------------+             |   | | node(s)      |  |
                              |   +-+              |  |
                              |     +--------------+  |
                              |         ^             |
                              |         |             |
                              |         v             |
+---------------+             |   +--------------+    |
|               |             |   | +--------------+  |
|  Ingress LB   +--------->   |   | |              |  |
|               |             |   | | Worker       |  |
+---------------+             |   | | node(s)      |  |
                              |   +-+              |  |
                              |     +--------------+  |
                              +-----------------------+
```

## Requirements

* Terraform 0.12.0 or newer

## Quickstart

To get a cluster up and running you'll need a JSON keyfile.
Set the path to the file in the `tfvars.json` file and run the following:

```bash
terraform apply -var-file tfvars.json -state dev-cluster.tfstate -var gcp_project_id=<ID of your GCP project> -var keyfile_location=<location of the json keyfile>
```
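
If you want to review the changes before applying, the same flags work with `terraform plan` (standard Terraform workflow, not specific to this contrib directory):

```bash
terraform plan -var-file tfvars.json -state dev-cluster.tfstate -var gcp_project_id=<ID of your GCP project> -var keyfile_location=<location of the json keyfile>
```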

To generate kubespray inventory based on the terraform state file you can run the following:

```bash
./generate-inventory.sh dev-cluster.tfstate > inventory.ini
```

You should now have an inventory file named `inventory.ini` that you can use with kubespray, e.g.

```bash
ansible-playbook -i contrib/terraform/gcp/inventory.ini cluster.yml -b -v
```

## Variables

### Required

* `keyfile_location`: Location of the keyfile to use as credentials for the google terraform provider
* `gcp_project_id`: ID of the GCP project to deploy the cluster in
* `ssh_pub_key`: Path to public ssh key to use for all machines
* `region`: The region where to run the cluster
* `machines`: Machines to provision. Key of this object will be used as the name of the machine
  * `node_type`: The role of this node *(master|worker)*
  * `size`: The size to use
  * `zone`: The zone the machine should run in
  * `additional_disks`: Extra disks to add to the machine. Key of this object will be used as the disk name
    * `size`: Size of the disk (in GB)
  * `boot_disk`: The boot disk to use
    * `image_name`: Name of the image
    * `size`: Size of the boot disk (in GB)
* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to ssh to the nodes
* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on ports 30000-32767 (kubernetes nodeports)

### Optional

* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project *(Defaults to `default`)*
* `master_sa_email`: Service account email to use for the master nodes *(Defaults to `""`, auto generate one)*
* `master_sa_scopes`: Service account scopes to use for the master nodes *(Defaults to `["https://www.googleapis.com/auth/cloud-platform"]`)*
* `worker_sa_email`: Service account email to use for the worker nodes *(Defaults to `""`, auto generate one)*
* `worker_sa_scopes`: Service account scopes to use for the worker nodes *(Defaults to `["https://www.googleapis.com/auth/cloud-platform"]`)*

An example variables file can be found in `tfvars.json`.

## Known limitations

This solution does not support a bastion host. Thus all the nodes must expose a public IP for kubespray to work.
76 contrib/terraform/gcp/generate-inventory.sh (executable file)

@@ -0,0 +1,76 @@
#!/bin/bash

#
# Generates an inventory file based on the terraform output.
# After provisioning a cluster, simply run this command and supply the terraform state file.
# Default state file is terraform.tfstate.
#

set -e

usage () {
  echo "Usage: $0 <state file>" >&2
  exit 1
}

if [[ $# -ne 1 ]]; then
  usage
fi

TF_STATE_FILE=${1}

if [[ ! -f "${TF_STATE_FILE}" ]]; then
  echo "ERROR: state file ${TF_STATE_FILE} doesn't exist" >&2
  usage
fi

TF_OUT=$(terraform output -state "${TF_STATE_FILE}" -json)

MASTERS=$(jq -r '.master_ips.value | to_entries[]' <(echo "${TF_OUT}"))
WORKERS=$(jq -r '.worker_ips.value | to_entries[]' <(echo "${TF_OUT}"))
mapfile -t MASTER_NAMES < <(jq -r '.key' <(echo "${MASTERS}"))
mapfile -t WORKER_NAMES < <(jq -r '.key' <(echo "${WORKERS}"))

API_LB=$(jq -r '.control_plane_lb_ip_address.value' <(echo "${TF_OUT}"))

# Generate master hosts
i=1
for name in "${MASTER_NAMES[@]}"; do
  private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${MASTERS}"))
  public_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.public_ip' <(echo "${MASTERS}"))
  echo "${name} ansible_user=ubuntu ansible_host=${public_ip} ip=${private_ip} etcd_member_name=etcd${i}"
  i=$(( i + 1 ))
done

# Generate worker hosts
for name in "${WORKER_NAMES[@]}"; do
  private_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.private_ip' <(echo "${WORKERS}"))
  public_ip=$(jq -r '. | select( .key=='"\"${name}\""' ) | .value.public_ip' <(echo "${WORKERS}"))
  echo "${name} ansible_user=ubuntu ansible_host=${public_ip} ip=${private_ip}"
done

echo ""
echo "[kube_control_plane]"
for name in "${MASTER_NAMES[@]}"; do
  echo "${name}"
done

echo ""
echo "[kube_control_plane:vars]"
echo "supplementary_addresses_in_ssl_keys = [ '${API_LB}' ]" # Add LB address to API server certificate
echo ""
echo "[etcd]"
for name in "${MASTER_NAMES[@]}"; do
  echo "${name}"
done

echo ""
echo "[kube_node]"
for name in "${WORKER_NAMES[@]}"; do
  echo "${name}"
done

echo ""
echo "[k8s_cluster:children]"
echo "kube_control_plane"
echo "kube_node"
24 contrib/terraform/gcp/main.tf (new file)

@@ -0,0 +1,24 @@
provider "google" {
  credentials = file(var.keyfile_location)
  region      = var.region
  project     = var.gcp_project_id
  version     = "~> 3.48"
}

module "kubernetes" {
  source = "./modules/kubernetes-cluster"
  region = var.region
  prefix = var.prefix

  machines    = var.machines
  ssh_pub_key = var.ssh_pub_key

  master_sa_email  = var.master_sa_email
  master_sa_scopes = var.master_sa_scopes
  worker_sa_email  = var.worker_sa_email
  worker_sa_scopes = var.worker_sa_scopes

  ssh_whitelist        = var.ssh_whitelist
  api_server_whitelist = var.api_server_whitelist
  nodeport_whitelist   = var.nodeport_whitelist
}
360 contrib/terraform/gcp/modules/kubernetes-cluster/main.tf (new file)

@@ -0,0 +1,360 @@
#################################################
##
## General
##

resource "google_compute_network" "main" {
  name = "${var.prefix}-network"
}

resource "google_compute_subnetwork" "main" {
  name          = "${var.prefix}-subnet"
  network       = google_compute_network.main.name
  ip_cidr_range = var.private_network_cidr
  region        = var.region
}

resource "google_compute_firewall" "deny_all" {
  name    = "${var.prefix}-default-firewall"
  network = google_compute_network.main.name

  priority = 1000

  deny {
    protocol = "all"
  }
}

resource "google_compute_firewall" "allow_internal" {
  name    = "${var.prefix}-internal-firewall"
  network = google_compute_network.main.name

  priority = 500

  source_ranges = [var.private_network_cidr]

  allow {
    protocol = "all"
  }
}

resource "google_compute_firewall" "ssh" {
  name    = "${var.prefix}-ssh-firewall"
  network = google_compute_network.main.name

  priority = 100

  source_ranges = var.ssh_whitelist

  allow {
    protocol = "tcp"
    ports    = ["22"]
  }
}

resource "google_compute_firewall" "api_server" {
  name    = "${var.prefix}-api-server-firewall"
  network = google_compute_network.main.name

  priority = 100

  source_ranges = var.api_server_whitelist

  allow {
    protocol = "tcp"
    ports    = ["6443"]
  }
}

resource "google_compute_firewall" "nodeport" {
  name    = "${var.prefix}-nodeport-firewall"
  network = google_compute_network.main.name

  priority = 100

  source_ranges = var.nodeport_whitelist

  allow {
    protocol = "tcp"
    ports    = ["30000-32767"]
  }
}

resource "google_compute_firewall" "ingress_http" {
  name    = "${var.prefix}-http-ingress-firewall"
  network = google_compute_network.main.name

  priority = 100

  allow {
    protocol = "tcp"
    ports    = ["80"]
  }
}

resource "google_compute_firewall" "ingress_https" {
  name    = "${var.prefix}-https-ingress-firewall"
  network = google_compute_network.main.name

  priority = 100

  allow {
    protocol = "tcp"
    ports    = ["443"]
  }
}

#################################################
##
## Local variables
##

locals {
  master_target_list = [
    for name, machine in google_compute_instance.master :
    "${machine.zone}/${machine.name}"
  ]

  worker_target_list = [
    for name, machine in google_compute_instance.worker :
    "${machine.zone}/${machine.name}"
  ]

  master_disks = flatten([
    for machine_name, machine in var.machines : [
      for disk_name, disk in machine.additional_disks : {
        "${machine_name}-${disk_name}" = {
          "machine_name" : machine_name,
          "machine" : machine,
          "disk_size" : disk.size,
          "disk_name" : disk_name
        }
      }
    ]
    if machine.node_type == "master"
  ])

  worker_disks = flatten([
    for machine_name, machine in var.machines : [
      for disk_name, disk in machine.additional_disks : {
        "${machine_name}-${disk_name}" = {
          "machine_name" : machine_name,
          "machine" : machine,
          "disk_size" : disk.size,
          "disk_name" : disk_name
        }
      }
    ]
    if machine.node_type == "worker"
  ])
}

#################################################
##
## Master
##

resource "google_compute_address" "master" {
  for_each = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "master"
  }

  name         = "${var.prefix}-${each.key}-pip"
  address_type = "EXTERNAL"
  region       = var.region
}

resource "google_compute_disk" "master" {
  for_each = {
    for item in local.master_disks :
    keys(item)[0] => values(item)[0]
  }

  name = "${var.prefix}-${each.key}"
  type = "pd-ssd"
  zone = each.value.machine.zone
  size = each.value.disk_size

  physical_block_size_bytes = 4096
}

resource "google_compute_attached_disk" "master" {
  for_each = {
    for item in local.master_disks :
    keys(item)[0] => values(item)[0]
  }

  disk     = google_compute_disk.master[each.key].id
  instance = google_compute_instance.master[each.value.machine_name].id
}

resource "google_compute_instance" "master" {
  for_each = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "master"
  }

  name         = "${var.prefix}-${each.key}"
  machine_type = each.value.size
  zone         = each.value.zone

  tags = ["master"]

  boot_disk {
    initialize_params {
      image = each.value.boot_disk.image_name
      size  = each.value.boot_disk.size
    }
  }

  network_interface {
    subnetwork = google_compute_subnetwork.main.name

    access_config {
      nat_ip = google_compute_address.master[each.key].address
    }
  }

  metadata = {
    ssh-keys = "ubuntu:${trimspace(file(pathexpand(var.ssh_pub_key)))}"
  }

  service_account {
    email  = var.master_sa_email
    scopes = var.master_sa_scopes
  }

  # Since we use google_compute_attached_disk we need to ignore this
  lifecycle {
    ignore_changes = ["attached_disk"]
  }
}

resource "google_compute_forwarding_rule" "master_lb" {
  name = "${var.prefix}-master-lb-forward-rule"

  port_range = "6443"

  target = google_compute_target_pool.master_lb.id
}

resource "google_compute_target_pool" "master_lb" {
  name      = "${var.prefix}-master-lb-pool"
  instances = local.master_target_list
}

#################################################
##
## Worker
##

resource "google_compute_disk" "worker" {
  for_each = {
    for item in local.worker_disks :
    keys(item)[0] => values(item)[0]
  }

  name = "${var.prefix}-${each.key}"
  type = "pd-ssd"
  zone = each.value.machine.zone
  size = each.value.disk_size

  physical_block_size_bytes = 4096
}

resource "google_compute_attached_disk" "worker" {
  for_each = {
    for item in local.worker_disks :
    keys(item)[0] => values(item)[0]
  }

  disk     = google_compute_disk.worker[each.key].id
  instance = google_compute_instance.worker[each.value.machine_name].id
}

resource "google_compute_address" "worker" {
  for_each = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "worker"
  }

  name         = "${var.prefix}-${each.key}-pip"
  address_type = "EXTERNAL"
  region       = var.region
}

resource "google_compute_instance" "worker" {
  for_each = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "worker"
  }

  name         = "${var.prefix}-${each.key}"
  machine_type = each.value.size
  zone         = each.value.zone

  tags = ["worker"]

  boot_disk {
    initialize_params {
      image = each.value.boot_disk.image_name
      size  = each.value.boot_disk.size
    }
  }

  network_interface {
    subnetwork = google_compute_subnetwork.main.name

    access_config {
      nat_ip = google_compute_address.worker[each.key].address
    }
  }

  metadata = {
    ssh-keys = "ubuntu:${trimspace(file(pathexpand(var.ssh_pub_key)))}"
  }

  service_account {
    email  = var.worker_sa_email
    scopes = var.worker_sa_scopes
  }

  # Since we use google_compute_attached_disk we need to ignore this
  lifecycle {
    ignore_changes = ["attached_disk"]
  }
}

resource "google_compute_address" "worker_lb" {
  name         = "${var.prefix}-worker-lb-address"
  address_type = "EXTERNAL"
  region       = var.region
}

resource "google_compute_forwarding_rule" "worker_http_lb" {
  name = "${var.prefix}-worker-http-lb-forward-rule"

  ip_address = google_compute_address.worker_lb.address
  port_range = "80"

  target = google_compute_target_pool.worker_lb.id
}

resource "google_compute_forwarding_rule" "worker_https_lb" {
  name = "${var.prefix}-worker-https-lb-forward-rule"

  ip_address = google_compute_address.worker_lb.address
  port_range = "443"

  target = google_compute_target_pool.worker_lb.id
}

resource "google_compute_target_pool" "worker_lb" {
  name      = "${var.prefix}-worker-lb-pool"
  instances = local.worker_target_list
}
27 contrib/terraform/gcp/modules/kubernetes-cluster/output.tf (new file)

@@ -0,0 +1,27 @@
output "master_ip_addresses" {
  value = {
    for key, instance in google_compute_instance.master :
    instance.name => {
      "private_ip" = instance.network_interface.0.network_ip
      "public_ip"  = instance.network_interface.0.access_config.0.nat_ip
    }
  }
}

output "worker_ip_addresses" {
  value = {
    for key, instance in google_compute_instance.worker :
    instance.name => {
      "private_ip" = instance.network_interface.0.network_ip
      "public_ip"  = instance.network_interface.0.access_config.0.nat_ip
    }
  }
}

output "ingress_controller_lb_ip_address" {
  value = google_compute_address.worker_lb.address
}

output "control_plane_lb_ip_address" {
  value = google_compute_forwarding_rule.master_lb.ip_address
}
@@ -0,0 +1,54 @@
variable "region" {
  type = string
}

variable "prefix" {}

variable "machines" {
  type = map(object({
    node_type = string
    size      = string
    zone      = string
    additional_disks = map(object({
      size = number
    }))
    boot_disk = object({
      image_name = string
      size       = number
    })
  }))
}

variable "master_sa_email" {
  type = string
}

variable "master_sa_scopes" {
  type = list(string)
}

variable "worker_sa_email" {
  type = string
}

variable "worker_sa_scopes" {
  type = list(string)
}

variable "ssh_pub_key" {}

variable "ssh_whitelist" {
  type = list(string)
}

variable "api_server_whitelist" {
  type = list(string)
}

variable "nodeport_whitelist" {
  type = list(string)
}

variable "private_network_cidr" {
  default = "10.0.10.0/24"
}
15 contrib/terraform/gcp/output.tf (new file)

@@ -0,0 +1,15 @@
output "master_ips" {
  value = module.kubernetes.master_ip_addresses
}

output "worker_ips" {
  value = module.kubernetes.worker_ip_addresses
}

output "ingress_controller_lb_ip_address" {
  value = module.kubernetes.ingress_controller_lb_ip_address
}

output "control_plane_lb_ip_address" {
  value = module.kubernetes.control_plane_lb_ip_address
}
60 contrib/terraform/gcp/tfvars.json (new file)

@@ -0,0 +1,60 @@
{
  "gcp_project_id": "GCP_PROJECT_ID",
  "region": "us-central1",
  "ssh_pub_key": "~/.ssh/id_rsa.pub",

  "keyfile_location": "service-account.json",

  "prefix": "development",

  "ssh_whitelist": [
    "1.2.3.4/32"
  ],
  "api_server_whitelist": [
    "1.2.3.4/32"
  ],
  "nodeport_whitelist": [
    "1.2.3.4/32"
  ],

  "machines": {
    "master-0": {
      "node_type": "master",
      "size": "n1-standard-2",
      "zone": "us-central1-a",
      "additional_disks": {},
      "boot_disk": {
        "image_name": "ubuntu-os-cloud/ubuntu-1804-bionic-v20201116",
        "size": 50
      }
    },
    "worker-0": {
      "node_type": "worker",
      "size": "n1-standard-8",
      "zone": "us-central1-a",
      "additional_disks": {
        "extra-disk-1": {
          "size": 100
        }
      },
      "boot_disk": {
        "image_name": "ubuntu-os-cloud/ubuntu-1804-bionic-v20201116",
        "size": 50
      }
    },
    "worker-1": {
      "node_type": "worker",
      "size": "n1-standard-8",
      "zone": "us-central1-a",
      "additional_disks": {
        "extra-disk-1": {
          "size": 100
        }
      },
      "boot_disk": {
        "image_name": "ubuntu-os-cloud/ubuntu-1804-bionic-v20201116",
        "size": 50
      }
    }
  }
}
72 contrib/terraform/gcp/variables.tf (new file)

@@ -0,0 +1,72 @@
variable keyfile_location {
  description = "Location of the json keyfile to use with the google provider"
  type        = string
}

variable region {
  description = "Region of all resources"
  type        = string
}

variable gcp_project_id {
  description = "ID of the project"
  type        = string
}

variable prefix {
  description = "Prefix for resource names"
  default     = "default"
}

variable machines {
  description = "Cluster machines"
  type = map(object({
    node_type = string
    size      = string
    zone      = string
    additional_disks = map(object({
      size = number
    }))
    boot_disk = object({
      image_name = string
      size       = number
    })
  }))
}

variable "master_sa_email" {
  type    = string
  default = ""
}

variable "master_sa_scopes" {
  type    = list(string)
  default = ["https://www.googleapis.com/auth/cloud-platform"]
}

variable "worker_sa_email" {
  type    = string
  default = ""
}

variable "worker_sa_scopes" {
  type    = list(string)
  default = ["https://www.googleapis.com/auth/cloud-platform"]
}

variable ssh_pub_key {
  description = "Path to public SSH key file which is injected into the VMs."
  type        = string
}

variable ssh_whitelist {
  type = list(string)
}

variable api_server_whitelist {
  type = list(string)
}

variable nodeport_whitelist {
  type = list(string)
}
@ -9,6 +9,7 @@ This will install a Kubernetes cluster on an OpenStack Cloud. It should work on
|
||||
most modern installs of OpenStack that support the basic services.
|
||||
|
||||
### Known compatible public clouds
|
||||
|
||||
- [Auro](https://auro.io/)
|
||||
- [Betacloud](https://www.betacloud.io/)
|
||||
- [CityCloud](https://www.citycloud.com/)
|
||||
@ -23,8 +24,8 @@ most modern installs of OpenStack that support the basic services.
|
||||
- [VexxHost](https://vexxhost.com/)
|
||||
- [Zetta](https://www.zetta.io/)
|
||||
|
||||
|
||||
## Approach
|
||||
|
||||
The terraform configuration inspects variables found in
|
||||
[variables.tf](variables.tf) to create resources in your OpenStack cluster.
|
||||
There is a [python script](../terraform.py) that reads the generated`.tfstate`
|
||||
@ -32,6 +33,7 @@ file to generate a dynamic inventory that is consumed by the main ansible script
|
||||
to actually install kubernetes and stand up the cluster.
|
||||
|
||||
### Networking
|
||||
|
||||
The configuration includes creating a private subnet with a router to the
|
||||
external net. It will allocate floating IPs from a pool and assign them to the
|
||||
hosts where that makes sense. You have the option of creating bastion hosts
|
||||
@ -39,19 +41,23 @@ inside the private subnet to access the nodes there. Alternatively, a node with
|
||||
a floating IP can be used as a jump host to nodes without.
|
||||
|
||||
#### Using an existing router
|
||||
|
||||
It is possible to use an existing router instead of creating one. To use an
|
||||
existing router set the router\_id variable to the uuid of the router you wish
|
||||
to use.
|
||||
|
||||
For example:
|
||||
```
|
||||
|
||||
```ShellSession
|
||||
router_id = "00c542e7-6f46-4535-ae95-984c7f0391a3"
|
||||
```
|
||||
|
||||
### Kubernetes Nodes
|
||||
|
||||
You can create many different kubernetes topologies by setting the number of
|
||||
different classes of hosts. For each class there are options for allocating
|
||||
floating IP addresses or not.
|
||||
|
||||
- Master nodes with etcd
|
||||
- Master nodes without etcd
|
||||
- Standalone etcd hosts
|
||||
@ -64,10 +70,12 @@ master nodes with etcd replicas. As an example, if you have three master nodes w
|
||||
etcd replicas and three standalone etcd nodes, the script will fail since there are
|
||||
now six total etcd replicas.
|
||||
|
||||
### GlusterFS
|
||||
### GlusterFS shared file system
|
||||
|
||||
The Terraform configuration supports provisioning of an optional GlusterFS
|
||||
shared file system based on a separate set of VMs. To enable this, you need to
|
||||
specify:
|
||||
|
||||
- the number of Gluster hosts (minimum 2)
|
||||
- Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks
|
||||
- Other properties related to provisioning the hosts
|
||||
@ -87,7 +95,9 @@ binaries available on hyperkube v1.4.3_coreos.0 or higher.
|
||||
- you have a pair of keys generated that can be used to secure the new hosts
|
||||
|
||||
## Module Architecture
|
||||
|
||||
The configuration is divided into three modules:
|
||||
|
||||
- Network
|
||||
- IPs
|
||||
- Compute
|
||||
@ -100,12 +110,13 @@ to be updated.
|
||||
You can force your existing IPs by modifying the compute variables in
|
||||
`kubespray.tf` as follows:
|
||||
|
||||
```
|
||||
```ini
|
||||
k8s_master_fips = ["151.101.129.67"]
|
||||
k8s_node_fips = ["151.101.129.68"]
|
||||
```
|
||||
|
||||
## Terraform
|
||||
|
||||
Terraform will be used to provision all of the OpenStack resources with base software as appropriate.
|
||||
|
||||
### Configuration
|
||||
@ -115,10 +126,10 @@ Terraform will be used to provision all of the OpenStack resources with base sof
|
||||
Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state):
|
||||
|
||||
```ShellSession
|
||||
$ cp -LRp contrib/terraform/openstack/sample-inventory inventory/$CLUSTER
|
||||
$ cd inventory/$CLUSTER
|
||||
$ ln -s ../../contrib/terraform/openstack/hosts
|
||||
$ ln -s ../../contrib
|
||||
cp -LRp contrib/terraform/openstack/sample-inventory inventory/$CLUSTER
|
||||
cd inventory/$CLUSTER
|
||||
ln -s ../../contrib/terraform/openstack/hosts
|
||||
ln -s ../../contrib
|
||||
```
|
||||
|
||||
This will be the base for subsequent Terraform commands.
|
||||
@ -138,13 +149,13 @@ please read the [OpenStack provider documentation](https://www.terraform.io/docs
|
||||
|
||||
The recommended authentication method is to describe credentials in a YAML file `clouds.yaml` that can be stored in:
|
||||
|
||||
* the current directory
|
||||
* `~/.config/openstack`
|
||||
* `/etc/openstack`
|
||||
- the current directory
|
||||
- `~/.config/openstack`
|
||||
- `/etc/openstack`
|
||||
|
||||
`clouds.yaml`:
|
||||
|
||||
```
|
||||
```yaml
|
||||
clouds:
|
||||
mycloud:
|
||||
auth:
|
||||
@ -162,7 +173,7 @@ clouds:
|
||||
If you have multiple clouds defined in your `clouds.yaml` file you can choose
|
||||
the one you want to use with the environment variable `OS_CLOUD`:
|
||||
|
||||
```
|
||||
```ShellSession
|
||||
export OS_CLOUD=mycloud
|
||||
```
|
||||
|
||||
@@ -174,7 +185,7 @@ from Horizon under *Project* -> *Compute* -> *Access & Security* -> *API Access*

With identity v2:

-```
+```ShellSession
source openrc

env | grep OS

@@ -191,7 +202,7 @@ OS_IDENTITY_API_VERSION=2

With identity v3:

-```
+```ShellSession
source openrc

env | grep OS

@@ -208,24 +219,24 @@ OS_IDENTITY_API_VERSION=3
OS_USER_DOMAIN_NAME=Default
```

-Terraform does not support a mix of DomainName and DomainID, choose one or the
-other:
+Terraform does not support a mix of DomainName and DomainID, choose one or the other:

-```
-* provider.openstack: You must provide exactly one of DomainID or DomainName to authenticate by Username
-```
+- provider.openstack: You must provide exactly one of DomainID or DomainName to authenticate by Username

-```
+```ShellSession
unset OS_USER_DOMAIN_NAME
export OS_USER_DOMAIN_ID=default
```

or

```ShellSession
unset OS_PROJECT_DOMAIN_ID
export OS_PROJECT_DOMAIN_NAME=Default
```

#### Cluster variables

The construction of the cluster is driven by values found in
[variables.tf](variables.tf).

@@ -239,6 +250,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.

|`network_dns_domain` | (Optional) The dns_domain for the internal network that will be generated |
|`dns_nameservers`| An array of DNS name server names to be used by hosts in the internal subnet. |
|`floatingip_pool` | Name of the pool from which floating IPs will be allocated |
+|`k8s_master_fips` | A list of floating IPs that you have already pre-allocated; they will be attached to master nodes instead of creating new random floating IPs. |
|`external_net` | UUID of the external network that will be routed to |
|`flavor_k8s_master`,`flavor_k8s_node`,`flavor_etcd`, `flavor_bastion`,`flavor_gfs_node` | Flavor depends on your OpenStack installation, you can get available flavor IDs through `openstack flavor list` |
|`image`,`image_gfs` | Name of the image to use in provisioning the compute resources. Should already be loaded into glance. |

@@ -251,12 +263,13 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.

|`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one |
|`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. |
|`gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks |
-|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube-node` for tainting them as nodes, empty by default. |
-|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube-ingress` for running ingress controller pods, empty by default. |
+|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube_node` for tainting them as nodes, empty by default. |
+|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube_ingress` for running ingress controller pods, empty by default. |
|`bastion_allowed_remote_ips` | List of CIDR blocks allowed to initiate an SSH connection, `["0.0.0.0/0"]` by default |
|`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default |
|`k8s_allowed_remote_ips` | List of CIDR blocks allowed to initiate an SSH connection, empty by default |
|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
|`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |
|`wait_for_floatingip` | Let Terraform poll the instance until the floating IP has been associated, `false` by default. |
|`node_root_volume_size_in_gb` | Size of the root volume for nodes, 0 to use ephemeral storage |
|`master_root_volume_size_in_gb` | Size of the root volume for masters, 0 to use ephemeral storage |

@@ -264,16 +277,19 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.

|`etcd_root_volume_size_in_gb` | Size of the root volume for etcd nodes, 0 to use ephemeral storage |
|`bastion_root_volume_size_in_gb` | Size of the root volume for bastions, 0 to use ephemeral storage |
|`use_server_group` | Create and use OpenStack nova servergroups, default: false |
|`use_access_ip` | If 1, nodes with floating IPs will transmit internal cluster traffic via floating IPs; if 0 private IPs will be used instead. Default value is 1. |
+|`k8s_nodes` | Map containing worker node definitions, see explanation below |

##### k8s_nodes

-Allows a custom defintion of worker nodes giving the operator full control over individual node flavor and
+Allows a custom definition of worker nodes giving the operator full control over individual node flavor and
availability zone placement. To enable the use of this mode set the `number_of_k8s_nodes` and
`number_of_k8s_nodes_no_floating_ip` variables to 0. Then define your desired worker node configuration
using the `k8s_nodes` variable.

For example:

-```
+```ini
k8s_nodes = {
  "1" = {
    "az" = "sto1"

@@ -294,14 +310,16 @@ k8s_nodes = {
```

Would result in the same configuration as:

-```
+```ini
number_of_k8s_nodes = 3
flavor_k8s_node = "83d8b44a-26a0-4f02-a981-079446926445"
az_list = ["sto1", "sto2", "sto3"]
```

And:

-```
+```ini
k8s_nodes = {
  "ing-1" = {
    "az" = "sto1"

@@ -355,7 +373,8 @@ Would result in three nodes in each availability zone each with their own separate
flavor and floating ip configuration.

The "schema":

-```
+```ini
k8s_nodes = {
  "key | node name suffix, must be unique" = {
    "az" = string

@@ -364,6 +383,7 @@ k8s_nodes = {
  },
}
```

All values are required.

#### Terraform state files

@@ -372,10 +392,10 @@ In the cluster's inventory folder, the following files might be created (either by Terraform
or manually), to prevent you from pushing them accidentally they are in a
`.gitignore` file in the `terraform/openstack` directory:

-* `.terraform`
-* `.tfvars`
-* `.tfstate`
-* `.tfstate.backup`
+- `.terraform`
+- `.tfvars`
+- `.tfstate`
+- `.tfstate.backup`

You can still add them manually if you want to.

@@ -385,39 +405,43 @@ Before Terraform can operate on your cluster you need to install the required
plugins. This is accomplished as follows:

```ShellSession
-$ cd inventory/$CLUSTER
-$ terraform init ../../contrib/terraform/openstack
+cd inventory/$CLUSTER
+terraform init ../../contrib/terraform/openstack
```

This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules.

### Provisioning cluster

You can apply the Terraform configuration to your cluster with the following command
issued from your cluster's inventory directory (`inventory/$CLUSTER`):

```ShellSession
-$ terraform apply -var-file=cluster.tfvars ../../contrib/terraform/openstack
+terraform apply -var-file=cluster.tfvars ../../contrib/terraform/openstack
```

If you chose to create a bastion host, this script will create
-`contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for Ansible to
+`contrib/terraform/openstack/k8s_cluster.yml` with an ssh command for Ansible to
be able to access your machines tunneling through the bastion's IP address. If
you want to manually handle the ssh tunneling to these machines, please delete
or move that file. If you want to use this, just leave it there, as Ansible will
pick it up automatically.

### Destroying cluster

You can destroy your new cluster with the following command issued from the cluster's inventory directory:

```ShellSession
-$ terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/openstack
+terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/openstack
```

If you've started the Ansible run, it may also be a good idea to do some manual cleanup:

-* remove SSH keys from the destroyed cluster from your `~/.ssh/known_hosts` file
-* clean up any temporary cache files: `rm /tmp/$CLUSTER-*`
+- remove SSH keys from the destroyed cluster from your `~/.ssh/known_hosts` file
+- clean up any temporary cache files: `rm /tmp/$CLUSTER-*`

### Debugging

You can enable debugging output from Terraform by setting
`OS_DEBUG` to 1 and `TF_LOG` to `DEBUG` before running the Terraform command.
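For example, to capture verbose logs for a single run from the inventory directory:

```ShellSession
export OS_DEBUG=1
export TF_LOG=DEBUG
terraform apply -var-file=cluster.tfvars ../../contrib/terraform/openstack
```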
@@ -425,8 +449,8 @@ You can enable debugging output from Terraform by setting

Terraform can output values that are useful for configuring Neutron/Octavia LBaaS or Cinder persistent volume provisioning as part of your Kubernetes deployment:

- `private_subnet_id`: the subnet where your instances are running, used for `openstack_lbaas_subnet_id`
- `floating_network_id`: the network_id where the floating IPs are provisioned, used for `openstack_lbaas_floating_network_id`
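You can read these values back from the state at any time, for example:

```ShellSession
terraform output private_subnet_id
terraform output floating_network_id
```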
## Ansible

@@ -437,9 +461,9 @@ Terraform can output values that are useful for configuring Neutron/Octavia LBaaS

Ensure your local ssh-agent is running and your ssh key has been added. This
step is required by the terraform provisioner:

-```
-$ eval $(ssh-agent -s)
-$ ssh-add ~/.ssh/id_rsa
+```ShellSession
+eval $(ssh-agent -s)
+ssh-add ~/.ssh/id_rsa
```

If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file (`~/.ssh/known_hosts`).

@@ -451,13 +475,13 @@ generated `.tfstate` file to generate a dynamic inventory recognizes
some variables within a "metadata" block, defined in a "resource"
block (example):

-```
+```ini
resource "openstack_compute_instance_v2" "example" {
  ...
  metadata {
    ssh_user = "ubuntu"
    prefer_ipv6 = true
    python_bin = "/usr/bin/python3"
  }
  ...
}
```
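Assuming the linked `hosts` script follows the usual Ansible dynamic-inventory contract (a reasonable assumption, since it is driven by the same Terraform-state reader), you can dump the generated inventory, including the variables derived from these metadata entries, from the inventory directory:

```ShellSession
./hosts --list
```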
@@ -472,8 +496,8 @@ instance should be preferred over IPv4.

Bastion access will be determined by:

- Your choice on the amount of bastion hosts (set by `number_of_bastions` terraform variable).
- The existence of nodes/masters with floating IPs (set by `number_of_k8s_masters`, `number_of_k8s_nodes`, `number_of_k8s_masters_no_etcd` terraform variables).

If you have a bastion host, your ssh traffic will be directly routed through it. This is regardless of whether you have masters/nodes with a floating IP assigned.
If you don't have a bastion host, but at least one of your masters/nodes has a floating IP, then ssh traffic will be tunneled by one of these machines.

@@ -484,7 +508,7 @@ So, either a bastion host, or at least one master/node with a floating IP are required.

Make sure you can connect to the hosts. Note that Flatcar Container Linux by Kinvolk will have a state `FAILED` due to Python not being present. This is okay, because Python will be installed during bootstrapping, so long as the hosts are not `UNREACHABLE`.

-```
+```ShellSession
$ ansible -i inventory/$CLUSTER/hosts -m ping all
example-k8s_node-1 | SUCCESS => {
  "changed": false,

@@ -505,48 +529,55 @@ If it fails try to connect manually via SSH. It could be something as simple as

### Configure cluster variables

Edit `inventory/$CLUSTER/group_vars/all/all.yml`:

- **bin_dir**:

-```
+```yml
# Directory where the binaries will be installed
# Default:
# bin_dir: /usr/local/bin
# For Flatcar Container Linux by Kinvolk:
bin_dir: /opt/bin
```

- and **cloud_provider**:

-```
+```yml
cloud_provider: openstack
```

-Edit `inventory/$CLUSTER/group_vars/k8s-cluster/k8s-cluster.yml`:
+Edit `inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml`:

- Set variable **kube_network_plugin** to your desired networking plugin.
  - **flannel** works out-of-the-box
  - **calico** requires [configuring OpenStack Neutron ports](/docs/openstack.md) to allow service and pod subnets

-```
+```yml
# Choose network plugin (calico, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: flannel
```

- Set variable **resolvconf_mode**

-```
+```yml
# Can be docker_dns, host_resolvconf or none
# Default:
# resolvconf_mode: docker_dns
# For Flatcar Container Linux by Kinvolk:
resolvconf_mode: host_resolvconf
```

- Set the maximum amount of attached Cinder volumes per host (default 256)

-```
+```yml
node_volume_attach_limit: 26
```

- Disable access_ip; this will make all internal cluster traffic go over the local network when a floating IP is attached (by default this value is set to 1)

```yml
use_access_ip: 0
```

### Deploy Kubernetes

-```
-$ ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml
+```ShellSession
+ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml
```

This will take some time as there are many tasks to run.

@@ -554,26 +585,36 @@ This will take some time as there are many tasks to run.

## Kubernetes

### Set up kubectl

1. [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your workstation
2. Add a route to the internal IP of a master node (if needed):

-```
+```ShellSession
sudo route add [master-internal-ip] gw [router-ip]
```

or

-```
+```ShellSession
sudo route add -net [internal-subnet]/24 gw [router-ip]
```

-3. List Kubernetes certificates & keys:
-```
+1. List Kubernetes certificates & keys:
+
+```ShellSession
ssh [os-user]@[master-ip] sudo ls /etc/kubernetes/ssl/
```

-4. Get `admin`'s certificates and keys:
-```
+1. Get `admin`'s certificates and keys:
+
+```ShellSession
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-kube-master-1-key.pem > admin-key.pem
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-kube-master-1.pem > admin.pem
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/ca.pem > ca.pem
```

-5. Configure kubectl:
+1. Configure kubectl:

```ShellSession
$ kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \
    --certificate-authority=ca.pem

@@ -586,21 +627,24 @@ $ kubectl config set-credentials default-admin \
$ kubectl config set-context default-system --cluster=default-cluster --user=default-admin
$ kubectl config use-context default-system
```

-7. Check it:
-```
+1. Check it:
+
+```ShellSession
kubectl version
```

## GlusterFS

-GlusterFS is not deployed by the standard`cluster.yml` playbook, see the
+GlusterFS is not deployed by the standard `cluster.yml` playbook, see the
[GlusterFS playbook documentation](../../network-storage/glusterfs/README.md)
for instructions.

Basically you will install Gluster as follows:

```ShellSession
-$ ansible-playbook --become -i inventory/$CLUSTER/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
+ansible-playbook --become -i inventory/$CLUSTER/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
```

## What's next

@@ -609,6 +653,7 @@ Try out your new Kubernetes cluster with the [Hello Kubernetes service](https://

## Appendix

### Migration from `number_of_k8s_nodes*` to `k8s_nodes`

If you currently have a cluster defined using the `number_of_k8s_nodes*` variables and wish
to migrate to the `k8s_nodes` style you can do it like so:
contrib/terraform/openstack/kubespray.tf

@@ -1,7 +1,3 @@
-provider "openstack" {
-  version = "~> 1.17"
-}
-
module "network" {
  source = "./modules/network"

@@ -27,6 +23,8 @@ module "ips" {
  network_name            = var.network_name
  router_id               = module.network.router_id
  k8s_nodes               = var.k8s_nodes
+ k8s_master_fips         = var.k8s_master_fips
+ router_internal_port_id = module.network.router_internal_port_id
}

module "compute" {

@@ -54,7 +52,11 @@ module "compute" {
  master_volume_type = var.master_volume_type
  public_key_path    = var.public_key_path
  image              = var.image
+ image_uuid         = var.image_uuid
  image_gfs          = var.image_gfs
+ image_master       = var.image_master
+ image_master_uuid  = var.image_master_uuid
+ image_gfs_uuid     = var.image_gfs_uuid
  ssh_user           = var.ssh_user
  ssh_user_gfs       = var.ssh_user_gfs
  flavor_k8s_master  = var.flavor_k8s_master

@@ -79,6 +81,8 @@ module "compute" {
  wait_for_floatingip   = var.wait_for_floatingip
  use_access_ip         = var.use_access_ip
  use_server_groups     = var.use_server_groups
+ extra_sec_groups      = var.extra_sec_groups
+ extra_sec_groups_name = var.extra_sec_groups_name

  network_id = module.network.router_id
}

contrib/terraform/openstack/modules/compute/main.tf

@@ -1,11 +1,20 @@
data "openstack_images_image_v2" "vm_image" {
+  count       = var.image_uuid == "" ? 1 : 0
  most_recent = true
  name        = var.image
}

+data "openstack_images_image_v2" "gfs_image" {
+  count       = var.image_gfs_uuid == "" ? var.image_uuid == "" ? 1 : 0 : 0
+  most_recent = true
+  name        = var.image_gfs == "" ? var.image : var.image_gfs
+}
+
+data "openstack_images_image_v2" "image_master" {
+  count = var.image_master_uuid == "" ? var.image_uuid == "" ? 1 : 0 : 0
+  name  = var.image_master == "" ? var.image : var.image_master
+}
+
resource "openstack_compute_keypair_v2" "k8s" {
  name       = "kubernetes-${var.cluster_name}"
  public_key = chomp(file(var.public_key_path))

@@ -17,6 +26,13 @@ resource "openstack_networking_secgroup_v2" "k8s_master" {
  delete_default_rules = true
}

+resource "openstack_networking_secgroup_v2" "k8s_master_extra" {
+  count                = "%{if var.extra_sec_groups}1%{else}0%{endif}"
+  name                 = "${var.cluster_name}-k8s-master-${var.extra_sec_groups_name}"
+  description          = "${var.cluster_name} - Kubernetes Master nodes - rules not managed by terraform"
+  delete_default_rules = true
+}
+
resource "openstack_networking_secgroup_rule_v2" "k8s_master" {
  count     = length(var.master_allowed_remote_ips)
  direction = "ingress"

@@ -95,6 +111,13 @@ resource "openstack_networking_secgroup_v2" "worker" {
  delete_default_rules = true
}

+resource "openstack_networking_secgroup_v2" "worker_extra" {
+  count                = "%{if var.extra_sec_groups}1%{else}0%{endif}"
+  name                 = "${var.cluster_name}-k8s-worker-${var.extra_sec_groups_name}"
+  description          = "${var.cluster_name} - Kubernetes worker nodes - rules not managed by terraform"
+  delete_default_rules = true
+}
+
resource "openstack_networking_secgroup_rule_v2" "worker" {
  count     = length(var.worker_allowed_ports)
  direction = "ingress"

@@ -124,17 +147,39 @@ resource "openstack_compute_servergroup_v2" "k8s_etcd" {
  policies = ["anti-affinity"]
}

+locals {
+  # master groups
+  master_sec_groups = compact([
+    openstack_networking_secgroup_v2.k8s_master.name,
+    openstack_networking_secgroup_v2.k8s.name,
+    var.extra_sec_groups ? openstack_networking_secgroup_v2.k8s_master_extra[0].name : "",
+  ])
+  # worker groups
+  worker_sec_groups = compact([
+    openstack_networking_secgroup_v2.k8s.name,
+    openstack_networking_secgroup_v2.worker.name,
+    var.extra_sec_groups ? openstack_networking_secgroup_v2.worker_extra[0].name : "",
+  ])
+
+  # image uuid
+  image_to_use_node = var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.vm_image[0].id
+  # image_gfs uuid
+  image_to_use_gfs = var.image_gfs_uuid != "" ? var.image_gfs_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.gfs_image[0].id
+  # image_master uuid
+  image_to_use_master = var.image_master_uuid != "" ? var.image_master_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.image_master[0].id
+}
+
resource "openstack_compute_instance_v2" "bastion" {
  name       = "${var.cluster_name}-bastion-${count.index + 1}"
  count      = var.number_of_bastions
-  image_name = var.image
+  image_id   = var.bastion_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
  flavor_id  = var.flavor_bastion
  key_pair   = openstack_compute_keypair_v2.k8s.name

  dynamic "block_device" {
-    for_each = var.bastion_root_volume_size_in_gb > 0 ? [var.image] : []
+    for_each = var.bastion_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
    content {
-      uuid        = data.openstack_images_image_v2.vm_image.id
+      uuid        = local.image_to_use_node
      source_type = "image"
      volume_size = var.bastion_root_volume_size_in_gb
      boot_index  = 0

@@ -159,7 +204,7 @@ resource "openstack_compute_instance_v2" "bastion" {
  }

  provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > group_vars/no_floating.yml"
  }
}
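The bastion's `local-exec` provisioner above only does string substitution: `ansible_bastion_template.txt` is not part of this diff, but the rendered `group_vars/no_floating.yml` is expected to carry SSH proxying settings for Ansible. A sketch of what the output could look like (illustrative only; the real template may differ, and the user name and address below are placeholders):

```yml
# hypothetical rendered group_vars/no_floating.yml, after sed replaced USER and BASTION_ADDRESS
ansible_ssh_common_args: "-o ProxyCommand='ssh -W %h:%p -q ubuntu@203.0.113.5'"
```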
@@ -167,15 +212,15 @@ resource "openstack_compute_instance_v2" "k8s_master" {
  name              = "${var.cluster_name}-k8s-master-${count.index + 1}"
  count             = var.number_of_k8s_masters
  availability_zone = element(var.az_list, count.index)
-  image_name        = var.image
+  image_id          = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
  flavor_id         = var.flavor_k8s_master
  key_pair          = openstack_compute_keypair_v2.k8s.name

  dynamic "block_device" {
-    for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
+    for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
    content {
-      uuid        = data.openstack_images_image_v2.vm_image.id
+      uuid        = local.image_to_use_master
      source_type = "image"
      volume_size = var.master_root_volume_size_in_gb
      volume_type = var.master_volume_type

@@ -189,9 +234,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
    name = var.network_name
  }

-  security_groups = [openstack_networking_secgroup_v2.k8s_master.name,
-    openstack_networking_secgroup_v2.k8s.name,
-  ]
+  security_groups = local.master_sec_groups

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []

@@ -202,13 +245,13 @@ resource "openstack_compute_instance_v2" "k8s_master" {

  metadata = {
    ssh_user         = var.ssh_user
-    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
+    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster"
    depends_on       = var.network_id
    use_access_ip    = var.use_access_ip
  }

  provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no_floating.yml"
  }
}
@@ -216,15 +259,15 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
  name              = "${var.cluster_name}-k8s-master-ne-${count.index + 1}"
  count             = var.number_of_k8s_masters_no_etcd
  availability_zone = element(var.az_list, count.index)
-  image_name        = var.image
+  image_id          = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
  flavor_id         = var.flavor_k8s_master
  key_pair          = openstack_compute_keypair_v2.k8s.name

  dynamic "block_device" {
-    for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
+    for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
    content {
-      uuid        = data.openstack_images_image_v2.vm_image.id
+      uuid        = local.image_to_use_master
      source_type = "image"
      volume_size = var.master_root_volume_size_in_gb
      volume_type = var.master_volume_type

@@ -238,9 +281,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
    name = var.network_name
  }

-  security_groups = [openstack_networking_secgroup_v2.k8s_master.name,
-    openstack_networking_secgroup_v2.k8s.name,
-  ]
+  security_groups = local.master_sec_groups

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []

@@ -251,13 +292,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {

  metadata = {
    ssh_user         = var.ssh_user
-    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
+    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster"
    depends_on       = var.network_id
    use_access_ip    = var.use_access_ip
  }

  provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no_floating.yml"
  }
}
@@ -265,14 +306,14 @@ resource "openstack_compute_instance_v2" "etcd" {
  name              = "${var.cluster_name}-etcd-${count.index + 1}"
  count             = var.number_of_etcd
  availability_zone = element(var.az_list, count.index)
-  image_name        = var.image
+  image_id          = var.etcd_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
  flavor_id         = var.flavor_etcd
  key_pair          = openstack_compute_keypair_v2.k8s.name

  dynamic "block_device" {
-    for_each = var.etcd_root_volume_size_in_gb > 0 ? [var.image] : []
+    for_each = var.etcd_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
    content {
-      uuid        = data.openstack_images_image_v2.vm_image.id
+      uuid        = local.image_to_use_master
      source_type = "image"
      volume_size = var.etcd_root_volume_size_in_gb
      boot_index  = 0

@@ -296,7 +337,7 @@ resource "openstack_compute_instance_v2" "etcd" {

  metadata = {
    ssh_user         = var.ssh_user
-    kubespray_groups = "etcd,vault,no-floating"
+    kubespray_groups = "etcd,no_floating"
    depends_on       = var.network_id
    use_access_ip    = var.use_access_ip
  }
@@ -306,14 +347,14 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
  name              = "${var.cluster_name}-k8s-master-nf-${count.index + 1}"
  count             = var.number_of_k8s_masters_no_floating_ip
  availability_zone = element(var.az_list, count.index)
-  image_name        = var.image
+  image_id          = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
  flavor_id         = var.flavor_k8s_master
  key_pair          = openstack_compute_keypair_v2.k8s.name

  dynamic "block_device" {
-    for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
+    for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
    content {
-      uuid        = data.openstack_images_image_v2.vm_image.id
+      uuid        = local.image_to_use_master
      source_type = "image"
      volume_size = var.master_root_volume_size_in_gb
      volume_type = var.master_volume_type

@@ -327,9 +368,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
    name = var.network_name
  }

-  security_groups = [openstack_networking_secgroup_v2.k8s_master.name,
-    openstack_networking_secgroup_v2.k8s.name,
-  ]
+  security_groups = local.master_sec_groups

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []

@@ -340,7 +379,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {

  metadata = {
    ssh_user         = var.ssh_user
-    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
+    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating"
    depends_on       = var.network_id
    use_access_ip    = var.use_access_ip
  }
@@ -350,14 +389,14 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
  name              = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}"
  count             = var.number_of_k8s_masters_no_floating_ip_no_etcd
  availability_zone = element(var.az_list, count.index)
-  image_name        = var.image
+  image_id          = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
  flavor_id         = var.flavor_k8s_master
  key_pair          = openstack_compute_keypair_v2.k8s.name

  dynamic "block_device" {
-    for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
+    for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
    content {
-      uuid        = data.openstack_images_image_v2.vm_image.id
+      uuid        = local.image_to_use_master
      source_type = "image"
      volume_size = var.master_root_volume_size_in_gb
      volume_type = var.master_volume_type

@@ -371,9 +410,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
    name = var.network_name
  }

-  security_groups = [openstack_networking_secgroup_v2.k8s_master.name,
-    openstack_networking_secgroup_v2.k8s.name,
-  ]
+  security_groups = local.master_sec_groups

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []

@@ -384,7 +421,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {

  metadata = {
    ssh_user         = var.ssh_user
-    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
+    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating"
    depends_on       = var.network_id
    use_access_ip    = var.use_access_ip
  }
@@ -394,14 +431,14 @@ resource "openstack_compute_instance_v2" "k8s_node" {
  name              = "${var.cluster_name}-k8s-node-${count.index + 1}"
  count             = var.number_of_k8s_nodes
  availability_zone = element(var.az_list_node, count.index)
-  image_name        = var.image
+  image_id          = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
  flavor_id         = var.flavor_k8s_node
  key_pair          = openstack_compute_keypair_v2.k8s.name

  dynamic "block_device" {
-    for_each = var.node_root_volume_size_in_gb > 0 ? [var.image] : []
+    for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
    content {
-      uuid        = data.openstack_images_image_v2.vm_image.id
+      uuid        = local.image_to_use_node
      source_type = "image"
      volume_size = var.node_root_volume_size_in_gb
      boot_index  = 0

@@ -414,9 +451,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
    name = var.network_name
  }

-  security_groups = [openstack_networking_secgroup_v2.k8s.name,
-    openstack_networking_secgroup_v2.worker.name,
-  ]
+  security_groups = local.worker_sec_groups

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []

@@ -427,13 +462,13 @@ resource "openstack_compute_instance_v2" "k8s_node" {

  metadata = {
    ssh_user         = var.ssh_user
-    kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
+    kubespray_groups = "kube_node,k8s_cluster,${var.supplementary_node_groups}"
    depends_on       = var.network_id
    use_access_ip    = var.use_access_ip
  }

  provisioner "local-exec" {
-    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no-floating.yml"
+    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no_floating.yml"
  }
}
@@ -441,14 +476,14 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
  name              = "${var.cluster_name}-k8s-node-nf-${count.index + 1}"
  count             = var.number_of_k8s_nodes_no_floating_ip
  availability_zone = element(var.az_list_node, count.index)
-  image_name        = var.image
+  image_id          = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
  flavor_id         = var.flavor_k8s_node
  key_pair          = openstack_compute_keypair_v2.k8s.name

  dynamic "block_device" {
-    for_each = var.node_root_volume_size_in_gb > 0 ? [var.image] : []
+    for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
    content {
-      uuid        = data.openstack_images_image_v2.vm_image.id
+      uuid        = local.image_to_use_node
      source_type = "image"
      volume_size = var.node_root_volume_size_in_gb
      boot_index  = 0

@@ -461,9 +496,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
    name = var.network_name
  }

-  security_groups = [openstack_networking_secgroup_v2.k8s.name,
-    openstack_networking_secgroup_v2.worker.name,
-  ]
+  security_groups = local.worker_sec_groups

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []

@@ -474,7 +507,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {

  metadata = {
    ssh_user         = var.ssh_user
-    kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
+    kubespray_groups = "kube_node,k8s_cluster,no_floating,${var.supplementary_node_groups}"
    depends_on       = var.network_id
    use_access_ip    = var.use_access_ip
  }
@@ -484,14 +517,14 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
  for_each          = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
  name              = "${var.cluster_name}-k8s-node-${each.key}"
  availability_zone = each.value.az
-  image_name        = var.image
+  image_id          = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
  flavor_id         = each.value.flavor
  key_pair          = openstack_compute_keypair_v2.k8s.name

  dynamic "block_device" {
-    for_each = var.node_root_volume_size_in_gb > 0 ? [var.image] : []
+    for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
    content {
-      uuid        = data.openstack_images_image_v2.vm_image.id
+      uuid        = local.image_to_use_node
      source_type = "image"
      volume_size = var.node_root_volume_size_in_gb
      boot_index  = 0

@@ -504,9 +537,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
    name = var.network_name
  }

-  security_groups = [openstack_networking_secgroup_v2.k8s.name,
-    openstack_networking_secgroup_v2.worker.name,
-  ]
+  security_groups = local.worker_sec_groups

  dynamic "scheduler_hints" {
    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []

@@ -517,13 +548,13 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {

  metadata = {
    ssh_user         = var.ssh_user
-    kubespray_groups = "kube-node,k8s-cluster,%{if each.value.floating_ip == false}no-floating,%{endif}${var.supplementary_node_groups}"
+    kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}"
    depends_on       = var.network_id
    use_access_ip    = var.use_access_ip
  }

  provisioner "local-exec" {
-    command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ > group_vars/no-floating.yml%{else}true%{endif}"
+    command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ > group_vars/no_floating.yml%{else}true%{endif}"
  }
}
@@ -531,14 +562,14 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
  name              = "${var.cluster_name}-gfs-node-nf-${count.index + 1}"
  count             = var.number_of_gfs_nodes_no_floating_ip
  availability_zone = element(var.az_list, count.index)
-  image_name        = var.image_gfs
+  image_name        = var.gfs_root_volume_size_in_gb == 0 ? local.image_to_use_gfs : null
  flavor_id         = var.flavor_gfs_node
  key_pair          = openstack_compute_keypair_v2.k8s.name

  dynamic "block_device" {
-    for_each = var.gfs_root_volume_size_in_gb > 0 ? [var.image] : []
+    for_each = var.gfs_root_volume_size_in_gb > 0 ? [local.image_to_use_gfs] : []
    content {
-      uuid        = data.openstack_images_image_v2.vm_image.id
+      uuid        = local.image_to_use_gfs
      source_type = "image"
      volume_size = var.gfs_root_volume_size_in_gb
      boot_index  = 0

@@ -562,7 +593,7 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {

  metadata = {
    ssh_user         = var.ssh_user_gfs
-    kubespray_groups = "gfs-cluster,network-storage,no-floating"
+    kubespray_groups = "gfs-cluster,network-storage,no_floating"
    depends_on       = var.network_id
    use_access_ip    = var.use_access_ip
  }
contrib/terraform/openstack/modules/compute/variables.tf

@@ -127,3 +127,27 @@ variable "use_access_ip" {}

variable "use_server_groups" {
  type = bool
}

+variable "extra_sec_groups" {
+  type = bool
+}
+
+variable "extra_sec_groups_name" {
+  type = string
+}
+
+variable "image_uuid" {
+  type = string
+}
+
+variable "image_gfs_uuid" {
+  type = string
+}
+
+variable "image_master" {
+  type = string
+}
+
+variable "image_master_uuid" {
+  type = string
+}
contrib/terraform/openstack/modules/compute/versions.tf (new file, 8 lines)

@@ -0,0 +1,8 @@
terraform {
  required_providers {
    openstack = {
      source = "terraform-provider-openstack/openstack"
    }
  }
  required_version = ">= 0.12.26"
}
contrib/terraform/openstack/modules/ips/main.tf

@@ -2,16 +2,21 @@ resource "null_resource" "dummy_dependency" {
  triggers = {
    dependency_id = var.router_id
  }
+ depends_on = [
+   var.router_internal_port_id
+ ]
}

+# If user specifies pre-existing IPs to use in k8s_master_fips, do not create new ones.
resource "openstack_networking_floatingip_v2" "k8s_master" {
-  count      = var.number_of_k8s_masters
+  count      = length(var.k8s_master_fips) > 0 ? 0 : var.number_of_k8s_masters
  pool       = var.floatingip_pool
  depends_on = [null_resource.dummy_dependency]
}

+# If user specifies pre-existing IPs to use in k8s_master_fips, do not create new ones.
resource "openstack_networking_floatingip_v2" "k8s_master_no_etcd" {
-  count      = var.number_of_k8s_masters_no_etcd
+  count      = length(var.k8s_master_fips) > 0 ? 0 : var.number_of_k8s_masters_no_etcd
  pool       = var.floatingip_pool
  depends_on = [null_resource.dummy_dependency]
}
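In other words, with these `count` ternaries in place, listing pre-allocated addresses in `cluster.tfvars` (illustrative values below) suppresses creation of new master floating IPs entirely:

```ini
number_of_k8s_masters = 2
k8s_master_fips = ["203.0.113.10", "203.0.113.11"]
```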
contrib/terraform/openstack/modules/ips/outputs.tf

@@ -1,9 +1,11 @@
+# If k8s_master_fips is already defined as input, keep the same value since new FIPs have not been created.
output "k8s_master_fips" {
-  value = openstack_networking_floatingip_v2.k8s_master[*].address
+  value = length(var.k8s_master_fips) > 0 ? var.k8s_master_fips : openstack_networking_floatingip_v2.k8s_master[*].address
}

+# If k8s_master_fips is already defined as input, keep the same value since new FIPs have not been created.
output "k8s_master_no_etcd_fips" {
-  value = openstack_networking_floatingip_v2.k8s_master_no_etcd[*].address
+  value = length(var.k8s_master_fips) > 0 ? var.k8s_master_fips : openstack_networking_floatingip_v2.k8s_master_no_etcd[*].address
}

output "k8s_node_fips" {
contrib/terraform/openstack/modules/ips/variables.tf

@@ -17,3 +17,7 @@ variable "router_id" {
}

variable "k8s_nodes" {}

+variable "k8s_master_fips" {}
+
+variable "router_internal_port_id" {}
contrib/terraform/openstack/modules/ips/versions.tf (new file, 11 lines)

@@ -0,0 +1,11 @@
terraform {
  required_providers {
    null = {
      source = "hashicorp/null"
    }
    openstack = {
      source = "terraform-provider-openstack/openstack"
    }
  }
  required_version = ">= 0.12.26"
}
contrib/terraform/openstack/modules/network/versions.tf (new file, 8 lines)

@@ -0,0 +1,8 @@
terraform {
  required_providers {
    openstack = {
      source = "terraform-provider-openstack/openstack"
    }
  }
  required_version = ">= 0.12.26"
}
contrib/terraform/openstack/variables.tf

@@ -152,7 +152,13 @@ variable "subnet_cidr" {

variable "dns_nameservers" {
  description = "An array of DNS name server names used by hosts in this subnet."
-  type        = list
+  type        = list(string)
  default     = []
}

+variable "k8s_master_fips" {
+  description = "specific pre-existing floating IPs to use for master nodes"
+  type        = list(string)
+  default     = []
+}
+

@@ -171,12 +177,12 @@ variable "external_net" {
}

variable "supplementary_master_groups" {
-  description = "supplementary kubespray ansible groups for masters, such as kube-node"
+  description = "supplementary kubespray ansible groups for masters, such as kube_node"
  default     = ""
}

variable "supplementary_node_groups" {
-  description = "supplementary kubespray ansible groups for worker nodes, such as kube-ingress"
+  description = "supplementary kubespray ansible groups for worker nodes, such as kube_ingress"
  default     = ""
}

@@ -205,13 +211,13 @@ variable "k8s_allowed_egress_ips" {
}

variable "master_allowed_ports" {
-  type = list
+  type = list(any)

  default = []
}

variable "worker_allowed_ports" {
-  type = list
+  type = list(any)

  default = [
    {

@@ -236,7 +242,39 @@ variable "router_id" {
  default = null
}

+variable "router_internal_port_id" {
+  description = "uuid of the port connecting our router to our network"
+  default     = null
+}
+
variable "k8s_nodes" {
  default = {}
}

+variable "extra_sec_groups" {
+  default = false
+}
+
+variable "extra_sec_groups_name" {
+  default = "custom"
+}
+
+variable "image_uuid" {
+  description = "uuid of image inside openstack to use"
+  default     = ""
+}
+
+variable "image_gfs_uuid" {
+  description = "uuid of image to be used on gluster fs nodes. If empty defaults to image_uuid"
+  default     = ""
+}
+
+variable "image_master" {
+  description = "name of the image to be used on master nodes. If empty defaults to image"
+  default     = ""
+}
+
+variable "image_master_uuid" {
+  description = "uuid of image to be used on master nodes. If empty defaults to image_uuid"
+  default     = ""
+}
contrib/terraform/openstack/versions.tf (new file, 9 lines)

@@ -0,0 +1,9 @@
terraform {
  required_providers {
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = "~> 1.17"
    }
  }
  required_version = ">= 0.12.26"
}
contrib/terraform/packet/README.md

@@ -8,6 +8,7 @@ Provision a Kubernetes cluster with [Terraform](https://www.terraform.io) on

This will install a Kubernetes cluster on Packet bare metal. It should work in all locations and on most server types.

## Approach

The terraform configuration inspects variables found in
[variables.tf](variables.tf) to create resources in your Packet project.
There is a [python script](../terraform.py) that reads the generated `.tfstate`

@@ -15,8 +16,10 @@ file to generate a dynamic inventory that is consumed by [cluster.yml](../../../
to actually install Kubernetes with Kubespray.

### Kubernetes Nodes

You can create many different kubernetes topologies by setting the number of
different classes of hosts.

- Master nodes with etcd: `number_of_k8s_masters` variable
- Master nodes without etcd: `number_of_k8s_masters_no_etcd` variable
- Standalone etcd hosts: `number_of_etcd` variable

@@ -47,6 +50,7 @@ ssh-keygen -f ~/.ssh/id_rsa

## Terraform

Terraform will be used to provision all of the Packet resources with base software as appropriate.

### Configuration

@@ -56,9 +60,9 @@ Terraform will be used to provision all of the Packet resources with base software as appropriate.

Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state):

```ShellSession
-$ cp -LRp contrib/terraform/packet/sample-inventory inventory/$CLUSTER
-$ cd inventory/$CLUSTER
-$ ln -s ../../contrib/terraform/packet/hosts
+cp -LRp contrib/terraform/packet/sample-inventory inventory/$CLUSTER
+cd inventory/$CLUSTER
+ln -s ../../contrib/terraform/packet/hosts
```

This will be the base for subsequent Terraform commands.

@@ -69,22 +73,23 @@ Your Packet API key must be available in the `PACKET_AUTH_TOKEN` environment variable.
This key is typically stored outside of the code repo since it is considered secret.
If someone gets this key, they can start up/shut down hosts in your project!

-For more information on how to generate an API key or find your project ID, please see:
-https://support.packet.com/kb/articles/api-integrations
+For more information on how to generate an API key or find your project ID, please see
+[API Integrations](https://support.packet.com/kb/articles/api-integrations)

The Packet Project ID associated with the key will be set later in cluster.tfvars.

-For more information about the API, please see:
-https://www.packet.com/developers/api/
+For more information about the API, please see [Packet API](https://www.packet.com/developers/api/)

Example:

```ShellSession
-$ export PACKET_AUTH_TOKEN="Example-API-Token"
+export PACKET_AUTH_TOKEN="Example-API-Token"
```

Note that to deploy several clusters within the same project you need to use [terraform workspace](https://www.terraform.io/docs/state/workspaces.html#using-workspaces).
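For example, to keep two clusters' state separate within one project:

```ShellSession
terraform workspace new $CLUSTER
terraform workspace select $CLUSTER
```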
#### Cluster variables

The construction of the cluster is driven by values found in
[variables.tf](variables.tf).

@@ -95,14 +100,15 @@ This helps when identifying which hosts are associated with each cluster.

While the defaults in variables.tf will successfully deploy a cluster, it is recommended to set the following values:

-* cluster_name = the name of the inventory directory created above as $CLUSTER
-* packet_project_id = the Packet Project ID associated with the Packet API token above
+- cluster_name = the name of the inventory directory created above as $CLUSTER
+- packet_project_id = the Packet Project ID associated with the Packet API token above

#### Enable localhost access

Kubespray will pull down a Kubernetes configuration file to access this cluster by enabling the
`kubeconfig_localhost: true` in the Kubespray configuration.

-Edit `inventory/$CLUSTER/group_vars/k8s-cluster/k8s-cluster.yml` and comment back in the following line and change from `false` to `true`:
+Edit `inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml` and comment back in the following line and change from `false` to `true`:
`\# kubeconfig_localhost: false`
becomes:
`kubeconfig_localhost: true`

@@ -115,10 +121,10 @@ In the cluster's inventory folder, the following files might be created (either by Terraform
or manually), to prevent you from pushing them accidentally they are in a
`.gitignore` file in the `terraform/packet` directory:

-* `.terraform`
-* `.tfvars`
-* `.tfstate`
-* `.tfstate.backup`
+- `.terraform`
+- `.tfvars`
+- `.tfstate`
+- `.tfstate.backup`

You can still add them manually if you want to.

@@ -128,34 +134,38 @@ Before Terraform can operate on your cluster you need to install the required
plugins. This is accomplished as follows:

```ShellSession
-$ cd inventory/$CLUSTER
-$ terraform init ../../contrib/terraform/packet
+cd inventory/$CLUSTER
+terraform init ../../contrib/terraform/packet
```

This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules.

### Provisioning cluster

You can apply the Terraform configuration to your cluster with the following command
issued from your cluster's inventory directory (`inventory/$CLUSTER`):

```ShellSession
-$ terraform apply -var-file=cluster.tfvars ../../contrib/terraform/packet
-$ export ANSIBLE_HOST_KEY_CHECKING=False
-$ ansible-playbook -i hosts ../../cluster.yml
+terraform apply -var-file=cluster.tfvars ../../contrib/terraform/packet
+export ANSIBLE_HOST_KEY_CHECKING=False
+ansible-playbook -i hosts ../../cluster.yml
```

### Destroying cluster

You can destroy your new cluster with the following command issued from the cluster's inventory directory:

```ShellSession
-$ terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/packet
+terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/packet
```

If you've started the Ansible run, it may also be a good idea to do some manual cleanup:

-* remove SSH keys from the destroyed cluster from your `~/.ssh/known_hosts` file
-* clean up any temporary cache files: `rm /tmp/$CLUSTER-*`
+- Remove SSH keys from the destroyed cluster from your `~/.ssh/known_hosts` file
+- Clean up any temporary cache files: `rm /tmp/$CLUSTER-*`

### Debugging

You can enable debugging output from Terraform by setting `TF_LOG` to `DEBUG` before running the Terraform command.

## Ansible

@@ -167,9 +177,9 @@ You can enable debugging output from Terraform by setting `TF_LOG` to `DEBUG` before running the Terraform command.

Ensure your local ssh-agent is running and your ssh key has been added. This
step is required by the terraform provisioner:

-```
-$ eval $(ssh-agent -s)
-$ ssh-add ~/.ssh/id_rsa
+```ShellSession
+eval $(ssh-agent -s)
+ssh-add ~/.ssh/id_rsa
```

If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file (`~/.ssh/known_hosts`).

@@ -178,7 +188,7 @@ If you have deployed and destroyed a previous iteration of your cluster, you will

Make sure you can connect to the hosts. Note that Flatcar Container Linux by Kinvolk will have a state `FAILED` due to Python not being present. This is okay, because Python will be installed during bootstrapping, so long as the hosts are not `UNREACHABLE`.

-```
+```ShellSession
$ ansible -i inventory/$CLUSTER/hosts -m ping all
example-k8s_node-1 | SUCCESS => {
  "changed": false,

@@ -198,8 +208,8 @@ If it fails try to connect manually via SSH. It could be something as simple as

### Deploy Kubernetes

-```
-$ ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml
+```ShellSession
+ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml
```

This will take some time as there are many tasks to run.

@@ -208,20 +218,22 @@ This will take some time as there are many tasks to run.

### Set up kubectl

-* [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on the localhost.
-* Verify that Kubectl runs correctly
+- [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on the localhost.
+- Verify that Kubectl runs correctly

-```
+```ShellSession
kubectl version
```

-* Verify that the Kubernetes configuration file has been copied over
-```
+- Verify that the Kubernetes configuration file has been copied over
+
+```ShellSession
cat inventory/alpha/$CLUSTER/admin.conf
```

-* Verify that all the nodes are running correctly.
-```
+- Verify that all the nodes are running correctly.
+
+```ShellSession
kubectl version
kubectl --kubeconfig=inventory/$CLUSTER/artifacts/admin.conf get nodes
```
@@ -19,7 +19,7 @@ resource "packet_device" "k8s_master" {
   operating_system = var.operating_system
   billing_cycle    = var.billing_cycle
   project_id       = var.packet_project_id
-  tags             = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master", "etcd", "kube-node"]
+  tags             = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane", "etcd", "kube_node"]
 }

 resource "packet_device" "k8s_master_no_etcd" {
@@ -32,7 +32,7 @@ resource "packet_device" "k8s_master_no_etcd" {
   operating_system = var.operating_system
   billing_cycle    = var.billing_cycle
   project_id       = var.packet_project_id
-  tags             = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master"]
+  tags             = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane"]
 }

 resource "packet_device" "k8s_etcd" {
@@ -58,6 +58,6 @@ resource "packet_device" "k8s_node" {
   operating_system = var.operating_system
   billing_cycle    = var.billing_cycle
   project_id       = var.packet_project_id
-  tags             = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-node"]
+  tags             = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_node"]
 }
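These device tags become Ansible group names through the contrib dynamic inventory, so the rename tracks Kubespray's move from hyphenated groups to the underscore-style `kube_control_plane`, `kube_node` and `k8s_cluster` groups. After provisioning, you can check the resulting grouping; a sketch, assuming the generated hosts file from this contrib setup:

```ShellSession
ansible -i inventory/$CLUSTER/hosts kube_control_plane --list-hosts
ansible -i inventory/$CLUSTER/hosts kube_node --list-hosts
```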
@@ -1,4 +1,9 @@

terraform {
  required_version = ">= 0.12"
  required_providers {
    packet = {
      source = "terraform-providers/packet"
    }
  }
}
@@ -323,7 +323,7 @@ def openstack_host(resource, module_name):
    })

    # add groups based on attrs
-   groups.append('os_image=' + attrs['image']['name'])
+   groups.append('os_image=' + attrs['image']['id'])
    groups.append('os_flavor=' + attrs['flavor']['name'])
    groups.extend('os_metadata_%s=%s' % item
                  for item in list(attrs['metadata'].items()))
contrib/terraform/upcloud/README.md (new file, 106 lines)
@@ -0,0 +1,106 @@

# Kubernetes on UpCloud with Terraform

Provision a Kubernetes cluster on [UpCloud](https://upcloud.com/) using Terraform and Kubespray.

## Overview

The setup looks like the following:

```text
   Kubernetes cluster
+-----------------------+
|   +--------------+    |
|   | +--------------+  |
|   | |              |  |
|   | | Master/etcd  |  |
|   | | node(s)      |  |
|   +-+              |  |
|     +--------------+  |
|           ^           |
|           |           |
|           v           |
|   +--------------+    |
|   | +--------------+  |
|   | |              |  |
|   | | Worker       |  |
|   | | node(s)      |  |
|   +-+              |  |
|     +--------------+  |
+-----------------------+
```
## Requirements

* Terraform 0.13.0 or newer

## Quickstart

NOTE: Assumes you are at the root of the kubespray repo.

For authentication you can use environment variables:

```bash
export TF_VAR_UPCLOUD_USERNAME=username
export TF_VAR_UPCLOUD_PASSWORD=password
```
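Terraform maps any `TF_VAR_<name>` environment variable onto the root input variable of the same name, so the values above land in the `UPCLOUD_USERNAME` and `UPCLOUD_PASSWORD` variables consumed by `main.tf`. A minimal sketch of what those declarations look like (the actual `variables.tf` in this contrib directory may differ):

```hcl
# hypothetical declarations matching main.tf's var.UPCLOUD_USERNAME / var.UPCLOUD_PASSWORD
variable "UPCLOUD_USERNAME" {
  description = "UpCloud username with API access"
  type        = string
}

variable "UPCLOUD_PASSWORD" {
  description = "Password for the UpCloud API user"
  type        = string
}
```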
To allow API access to your UpCloud account, you need to allow API connections by visiting the [Account page](https://hub.upcloud.com/account) in your UpCloud Hub.

Copy the cluster configuration file:

```bash
CLUSTER=my-upcloud-cluster
cp -r inventory/sample inventory/$CLUSTER
cp contrib/terraform/upcloud/cluster-settings.tfvars inventory/$CLUSTER/
export ANSIBLE_CONFIG=ansible.cfg
cd inventory/$CLUSTER
```

Edit `cluster-settings.tfvars` to match your requirements.

Run Terraform to create the infrastructure:

```bash
terraform init ../../contrib/terraform/upcloud
terraform apply --var-file cluster-settings.tfvars \
    -state=tfstate-$CLUSTER.tfstate \
    ../../contrib/terraform/upcloud/
```
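The `-state` flag keeps a separate state file per cluster inside the inventory directory. If you want to review changes before applying, the same arguments work with `terraform plan`:

```bash
terraform plan --var-file cluster-settings.tfvars \
    -state=tfstate-$CLUSTER.tfstate \
    ../../contrib/terraform/upcloud/
```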
You should now have an inventory file named `inventory.ini` that you can use with Kubespray to set up a cluster.

It is a good idea to check that you have basic SSH connectivity to the nodes first:

```bash
ansible -i inventory.ini -m ping all
```

You can then set up Kubernetes with Kubespray using the generated inventory:

```bash
ansible-playbook -i inventory.ini ../../cluster.yml -b -v
```
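Once the playbook finishes, a quick way to confirm the control plane is up is to query the nodes from one of the masters; a sketch, assuming the admin kubeconfig that Kubespray leaves at `/etc/kubernetes/admin.conf` on control-plane nodes and the default `ubuntu` user from `cluster-settings.tfvars`:

```bash
ssh ubuntu@<master-ip> 'sudo kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes'
```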
## Teardown

You can tear down your infrastructure using the following Terraform command:

```bash
terraform destroy --var-file cluster-settings.tfvars \
    -state=tfstate-$CLUSTER.tfstate \
    ../../contrib/terraform/upcloud/
```
## Variables

* `hostname`: A valid domain name, e.g. example.com. The maximum length is 128 characters.
* `template_name`: The name or UUID of a base image
* `username`: A user to access the nodes
* `ssh_public_keys`: List of public SSH keys to install on all machines
* `zone`: The zone where to run the cluster
* `machines`: Machines to provision. Key of this object will be used as the name of the machine
  * `node_type`: The role of this node *(master|worker)*
  * `cpu`: Number of CPU cores
  * `mem`: Memory size in MB
  * `disk_size`: The size of the storage in GB
contrib/terraform/upcloud/cluster-settings.tfvars (new file, 58 lines)
@@ -0,0 +1,58 @@

# See: https://developers.upcloud.com/1.3/5-zones/
zone     = "fi-hel1"
username = "ubuntu"

inventory_file = "inventory.ini"

# A valid domain name, e.g. host.example.com. The maximum length is 128 characters.
hostname = "example.com"

# Set the operating system using UUID or exact name
template_name = "Ubuntu Server 20.04 LTS (Focal Fossa)"

ssh_public_keys = [
  # Put your public SSH key here
  "ssh-rsa public key 1",
  "ssh-rsa public key 2",
]

# Check the list of available plans: https://developers.upcloud.com/1.3/7-plans/
machines = {
  "master-0" : {
    "node_type" : "master",
    # number of CPU cores
    "cpu" : "2",
    # memory size in MB
    "mem" : "4096"
    # The size of the storage in GB
    "disk_size" : 250
  },
  "worker-0" : {
    "node_type" : "worker",
    # number of CPU cores
    "cpu" : "2",
    # memory size in MB
    "mem" : "4096"
    # The size of the storage in GB
    "disk_size" : 250
  },
  "worker-1" : {
    "node_type" : "worker",
    # number of CPU cores
    "cpu" : "2",
    # memory size in MB
    "mem" : "4096"
    # The size of the storage in GB
    "disk_size" : 250
  },
  "worker-2" : {
    "node_type" : "worker",
    # number of CPU cores
    "cpu" : "2",
    # memory size in MB
    "mem" : "4096"
    # The size of the storage in GB
    "disk_size" : 250
  }
}
contrib/terraform/upcloud/main.tf (new file, 55 lines)
@@ -0,0 +1,55 @@

terraform {
  required_version = ">= 0.13.0"
}

provider "upcloud" {
  # Your UpCloud credentials are read from environment variables:
  username = var.UPCLOUD_USERNAME
  password = var.UPCLOUD_PASSWORD
}

module "kubernetes" {
  source = "./modules/kubernetes-cluster"

  zone     = var.zone
  hostname = var.hostname

  template_name = var.template_name
  username      = var.username

  machines = var.machines

  ssh_public_keys = var.ssh_public_keys
}

#
# Generate ansible inventory
#

data "template_file" "inventory" {
  template = file("${path.module}/templates/inventory.tpl")

  vars = {
    connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s etcd_member_name=etcd%d",
      keys(module.kubernetes.master_ip),
      values(module.kubernetes.master_ip),
      range(1, length(module.kubernetes.master_ip) + 1)))
    connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s",
      keys(module.kubernetes.worker_ip),
      values(module.kubernetes.worker_ip)))
    list_master = join("\n", formatlist("%s",
      keys(module.kubernetes.master_ip)))
    list_worker = join("\n", formatlist("%s",
      keys(module.kubernetes.worker_ip)))
  }
}

resource "null_resource" "inventories" {
  provisioner "local-exec" {
    command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
  }

  triggers = {
    template = data.template_file.inventory.rendered
  }
}
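The `null_resource` with a `local-exec` provisioner rewrites `var.inventory_file` whenever the rendered content changes (the `triggers` map forces re-creation). On Terraform 0.12 and later, the same rendering can also be done without the external template provider, via the built-in `templatefile()` function; a hypothetical equivalent for the first variable:

```hcl
# illustrative only: templatefile() replaces the template_file data source
locals {
  inventory_rendered = templatefile("${path.module}/templates/inventory.tpl", {
    connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s etcd_member_name=etcd%d",
      keys(module.kubernetes.master_ip),
      values(module.kubernetes.master_ip),
      range(1, length(module.kubernetes.master_ip) + 1)))
    # ...remaining template vars as in the data source above
  })
}
```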
contrib/terraform/upcloud/modules/kubernetes-cluster/main.tf (new file, 66 lines)
@@ -0,0 +1,66 @@

resource "upcloud_server" "master" {
  for_each = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "master"
  }

  hostname = "${each.key}.${var.hostname}"
  cpu      = each.value.cpu
  mem      = each.value.mem
  zone     = var.zone

  template {
    storage = var.template_name
    size    = each.value.disk_size
  }

  # Network interfaces
  network_interface {
    type = "public"
  }

  network_interface {
    type = "utility"
  }

  # Include at least one public SSH key
  login {
    user            = var.username
    keys            = var.ssh_public_keys
    create_password = false
  }
}

resource "upcloud_server" "worker" {
  for_each = {
    for name, machine in var.machines :
    name => machine
    if machine.node_type == "worker"
  }

  hostname = "${each.key}.${var.hostname}"
  cpu      = each.value.cpu
  mem      = each.value.mem
  zone     = var.zone

  template {
    storage = var.template_name
    size    = each.value.disk_size
  }

  # Network interfaces
  network_interface {
    type = "public"
  }

  # Include at least one public SSH key
  login {
    user            = var.username
    keys            = var.ssh_public_keys
    create_password = false
  }
}