Compare commits
510 Commits
Author | SHA1 | Date | |
---|---|---|---|
453dbcef1d | |||
4a6600002f | |||
6eb313584e | |||
a270632466 | |||
00550ba832 | |||
b4951da405 | |||
cd93d10688 | |||
e6940d8a7b | |||
dca5cde493 | |||
1f65e6d3b5 | |||
9bf7aaf6cd | |||
5512465b34 | |||
2f30ab558a | |||
5c136ae3af | |||
c927da00e0 | |||
1600fd9082 | |||
14acd124bc | |||
e3cbbfb9ed | |||
5f21e0b58b | |||
d22204a59f | |||
90289b8502 | |||
78aacee21b | |||
f47aca3558 | |||
73fc70dbe8 | |||
dc2a18e436 | |||
82590eb087 | |||
4c97ce747c | |||
ebbc5ed0ce | |||
dc1af5a9c5 | |||
85bd1eea27 | |||
2b151c6aa2 | |||
93fe3e06ef | |||
9d3a894991 | |||
0e6b727e53 | |||
e42a01f203 | |||
a28b58dbd0 | |||
a26a9ee14f | |||
c09fcd4f92 | |||
593359ec77 | |||
34ec4d5d40 | |||
3d8f3bc0b7 | |||
eea7bb7692 | |||
3a89e31dee | |||
0c504e4984 | |||
0bf070c33b | |||
dc8ad78206 | |||
48e938660d | |||
632d457f78 | |||
569a319ff5 | |||
47812ec002 | |||
c27dee57ea | |||
b289f533b3 | |||
3eb0a4071a | |||
5684610a55 | |||
f26f544ff6 | |||
b9e5b0cb53 | |||
13443b05a6 | |||
e70c00a0fe | |||
bb67b654c5 | |||
aef25819bc | |||
1d96f465f4 | |||
8f618ab408 | |||
5296d7ef9c | |||
b715500b48 | |||
37a5271f5a | |||
42fc71fafa | |||
02b6e4833a | |||
323a111362 | |||
e7df4d3dd9 | |||
3e52a0db95 | |||
94484873d1 | |||
0d6ea85167 | |||
674ec92224 | |||
e7e5037a86 | |||
fbcf426240 | |||
2301554e98 | |||
5bc35002ba | |||
9143810a4d | |||
8f118fb619 | |||
1113460b68 | |||
74c7e009b7 | |||
c20ab7d987 | |||
fe66121287 | |||
9605bbaa67 | |||
b7ce6a9f79 | |||
c04a73c11a | |||
f184725c5f | |||
26a0b0f1e8 | |||
fa1d222eee | |||
56cf163a23 | |||
afcedf6d77 | |||
21fc197ee0 | |||
fcb4c8fb61 | |||
b6e2c56ae6 | |||
b005985d4e | |||
1294fd5730 | |||
835fd86a08 | |||
b7004d72c5 | |||
eb566ca626 | |||
aa12f1c56b | |||
6cc5b38a2e | |||
e6c4330e4e | |||
1e827f9807 | |||
a4f26dc8f3 | |||
3f065918d9 | |||
2c2d4513ac | |||
937e64d296 | |||
3261d26181 | |||
c98a0a448f | |||
7e7218f5ce | |||
45262da726 | |||
aef5f1e139 | |||
3d4baea01c | |||
30306d6ec7 | |||
d7254eead6 | |||
9dced7133c | |||
c2fb1a0747 | |||
00a4d2d3c4 | |||
424ef3b3f9 | |||
996ef98b87 | |||
19d5a1c7c3 | |||
0481dd946f | |||
29109575f5 | |||
3782573ede | |||
bba91a7524 | |||
b67cadf743 | |||
56dda4392c | |||
34fec09ff1 | |||
cefd1339fc | |||
b915376194 | |||
455cc6ff75 | |||
cc9c376d0f | |||
018611f829 | |||
1781eab21f | |||
78b05d0ffc | |||
1c0df78278 | |||
6cc9da6b0a | |||
6af9cae0a5 | |||
ef29455652 | |||
503ab0f722 | |||
90883e76af | |||
113de8381c | |||
652f2edbe1 | |||
a67e36703f | |||
73c6943402 | |||
d46817d690 | |||
97cb64c62d | |||
3f70241fb7 | |||
21b71b38a3 | |||
b2f9442aba | |||
fa9f85c7e9 | |||
ffa285c2e7 | |||
7b1dc600d5 | |||
5e67ebeb9e | |||
af7066d33c | |||
dd2d95ecdf | |||
a86d9bd8e8 | |||
21b1516d80 | |||
4c15038194 | |||
538f9df5cc | |||
efb0412b63 | |||
5a486a5cca | |||
394857b5ce | |||
5043517cfb | |||
307d122a84 | |||
d444a2fb83 | |||
fb7c56e3d3 | |||
2b79be68e7 | |||
512d5e3348 | |||
4b6892ece9 | |||
5a49ac52f9 | |||
db1e30e4fc | |||
b4a61370c8 | |||
58b2f39ce5 | |||
56d882abed | |||
39acb2b84d | |||
3ccba08983 | |||
632aa764e6 | |||
f6342b6cf4 | |||
471585dcd5 | |||
51821a811f | |||
299a9ae7ba | |||
bf7a506f79 | |||
2e925f82ef | |||
ddef7e1139 | |||
672e47a7eb | |||
3e8e64a3e5 | |||
b554246502 | |||
6d683c98a3 | |||
ee079f4740 | |||
a090038d02 | |||
4f1499bd23 | |||
36393d77d3 | |||
e053ee4272 | |||
1d46c07307 | |||
f9b5e448c1 | |||
3effb008c9 | |||
a088f492f4 | |||
e9c8913248 | |||
b9a27c91da | |||
d4f654275b | |||
f6eb4c749d | |||
418fc00718 | |||
2537177929 | |||
9af719bf99 | |||
9e020b252e | |||
cc45e365ae | |||
97c667f67c | |||
063fc525b1 | |||
0f73d87509 | |||
402e85ad6e | |||
1d635e04e4 | |||
98d5d0cdd5 | |||
31d4a38f09 | |||
1ebe456f2d | |||
c6e5314fab | |||
a6a79883b7 | |||
b02e68222f | |||
da8522af64 | |||
84b93090a8 | |||
5695c892d0 | |||
696101a910 | |||
54dfe73d24 | |||
87928baa31 | |||
6a4fd33a03 | |||
790448f48b | |||
7759494c85 | |||
aed187e56c | |||
eac799f589 | |||
5ecb07b59a | |||
ff621fb7f1 | |||
958bca8800 | |||
eacd55fbca | |||
0e2ab5c273 | |||
c47634290e | |||
92d612c3e0 | |||
2bbe5732b7 | |||
e6e7fbc25f | |||
7d4d554436 | |||
d31db847b7 | |||
3562d3378b | |||
ababcd5481 | |||
7caffde0b6 | |||
c40b43de01 | |||
b0eb5650da | |||
52f221f976 | |||
26a5948d2a | |||
d86a3b962c | |||
d64b341b38 | |||
d580014c66 | |||
be9a1f80c1 | |||
73ff3b0d3b | |||
9fce9ca42a | |||
f1adb734e3 | |||
575e0ca457 | |||
69f088bb82 | |||
ef34f5fe7d | |||
e88aa7c96b | |||
38d129a0b6 | |||
392815d97c | |||
6e2e61012a | |||
e791089466 | |||
418f12f62a | |||
caff539ccd | |||
c0d1bb1a5c | |||
ea44d64511 | |||
1a69f8c3ad | |||
ccd3180a69 | |||
01dcbc18ac | |||
7c67ec4976 | |||
43d128362f | |||
1337c9c244 | |||
86953b2ac4 | |||
135c9b29a7 | |||
e0d67367ed | |||
d007132655 | |||
cfd9873bbc | |||
b2b95cc8f9 | |||
73c889eb10 | |||
642725efe7 | |||
29aafff2ce | |||
df425ac143 | |||
57a1d18db3 | |||
aa4a3d7afd | |||
06ad5525b8 | |||
f80fd24a55 | |||
51bd9bee0d | |||
52266406f8 | |||
cd601c77c7 | |||
6abae713f7 | |||
1312f92a8d | |||
92abf26d29 | |||
c11e4ba9a7 | |||
7ae00947f5 | |||
59f62473c9 | |||
8fbd08d027 | |||
dda557ed23 | |||
cb54eb40ce | |||
3eab1129b9 | |||
24f1402a14 | |||
bf00550388 | |||
78c83a8f26 | |||
e72f8e0412 | |||
6136fa7c49 | |||
8d2b4ed4a9 | |||
9e9b177674 | |||
4c4c83f0a1 | |||
0e98814732 | |||
92f25bf267 | |||
63a53c79d0 | |||
2f9a8c04dc | |||
8c67f42689 | |||
783a51e9ac | |||
841c61aaa1 | |||
157942a462 | |||
e88a27790c | |||
ed3932b7d5 | |||
2b5c185826 | |||
996ecca78b | |||
c3c128352f | |||
02a89543d6 | |||
c1954ff918 | |||
b49ae8c21d | |||
1a7b4435f3 | |||
ff5ca5f7f8 | |||
db0e458217 | |||
f01f7c54aa | |||
c59407f105 | |||
fdc5d7458f | |||
6aafb9b2d4 | |||
aa9ad1ed60 | |||
aa9b8453a0 | |||
4daa824b3c | |||
4f2e4524b8 | |||
8ac510e4d6 | |||
4f27c763af | |||
0e969c0b72 | |||
b396801e28 | |||
682c8a59c2 | |||
5a25de37ef | |||
bdb923df4a | |||
4ef2cf4c28 | |||
990ca38d21 | |||
c7e430573f | |||
a328b64464 | |||
a16d427536 | |||
c98a07825b | |||
a98ca6fcf3 | |||
4550f8c50f | |||
9afca43807 | |||
27ab364df5 | |||
615216f397 | |||
46b1b7ab34 | |||
30d9882851 | |||
dfdebda0b6 | |||
9d8a83314b | |||
e19ce27352 | |||
4d711691d0 | |||
ee0f1e9d58 | |||
a24162f596 | |||
e82443241b | |||
9f052702e5 | |||
b38382a68f | |||
785324827c | |||
31c7b6747b | |||
dc767c14b9 | |||
30ec03259d | |||
38c12288f1 | |||
0e22a90579 | |||
0cdf75d41a | |||
3c6fa6e583 | |||
ee882fa462 | |||
3431ed9857 | |||
279808b44e | |||
2fd529a993 | |||
1f6f79c91e | |||
52ee5d0fff | |||
2f44b40d68 | |||
20157254c3 | |||
09c17ba581 | |||
a5f88e14d0 | |||
e78bda65fe | |||
3ea496013f | |||
7e1873d927 | |||
fe0810aff9 | |||
e35a87e3eb | |||
a6fcf2e066 | |||
25316825b1 | |||
c74e1c9db3 | |||
be9de6b9d9 | |||
fe8c843cc8 | |||
f48ae18630 | |||
83e0b786d4 | |||
acd5185ad4 | |||
0263c649f4 | |||
8176e9155b | |||
424163c7d3 | |||
2c87170ccf | |||
02322c46de | |||
28b5281c45 | |||
4d79a55904 | |||
027cbefb87 | |||
a08d82d94e | |||
5f1456337b | |||
6eeb4883af | |||
b5a5478a8a | |||
0d0468e127 | |||
b7ae4a2cfd | |||
039205560a | |||
801268d5c1 | |||
46c536d261 | |||
4a8757161e | |||
65540c5771 | |||
6c1ab24981 | |||
61c2ae5549 | |||
04711d3b00 | |||
cb7c30a4f1 | |||
8922c45556 | |||
58390c79d0 | |||
b7eb1cf936 | |||
6e5b9e0ebf | |||
c94291558d | |||
8d553f7e91 | |||
a0be7f0e26 | |||
1c3d082b8d | |||
2ed211ba15 | |||
1161326b54 | |||
d473a6d442 | |||
8d82033bff | |||
9d4cdb7b02 | |||
b353e062c7 | |||
d8f9b9b61f | |||
0b441ade2c | |||
6f6fad5a16 | |||
465ffa3c9f | |||
539c9e0d99 | |||
649f962ac6 | |||
16bdb3fe51 | |||
7c3369e1b9 | |||
9eacde212f | |||
331647f4ab | |||
c2d4822c38 | |||
3c30be1320 | |||
d8d01bf5aa | |||
d42b7228c2 | |||
4db057e9c2 | |||
ea8e2fc651 | |||
10c30ea5b1 | |||
84b56d23a4 | |||
19d07a4f2e | |||
6a5b87dda4 | |||
6aac59394e | |||
f147163b24 | |||
16bf3549c1 | |||
b912dafd7a | |||
8b3481f511 | |||
7019c2685d | |||
d18cc38586 | |||
cee481f63d | |||
e4c8c7188e | |||
6c004efd5f | |||
1a57780a75 | |||
ce25e4aa21 | |||
ef4044b62f | |||
9ffe5940fe | |||
c8d9afce1a | |||
285983a555 | |||
ab4356aa69 | |||
e87d4e9ce3 | |||
5fcf047191 | |||
c68fb81aa7 | |||
e707f78899 | |||
41e0ca3f85 | |||
c5c10067ed | |||
43958614e3 | |||
af04906b51 | |||
c7e17688b9 | |||
ac76840c5d | |||
f5885d05ea | |||
af949cd967 | |||
eee2eb11d8 | |||
8d3961edbe | |||
4c5328fd1f | |||
1472528f6d | |||
9416c9aa86 | |||
da92c7e215 | |||
d27cf375af | |||
432a312a35 | |||
3a6230af6b | |||
ecd267854b | |||
ac846667b7 | |||
33146b9481 | |||
4bace2491d | |||
469b3ec525 | |||
22017b7ff0 | |||
88c11b5946 | |||
843252c968 | |||
ddea79f0f0 | |||
c0e1211abe | |||
c8d7f000c9 | |||
598f178054 | |||
6f8b24f367 | |||
5d1b34bdcd | |||
8efde799e1 | |||
96b61a5f53 | |||
a517a8db01 | |||
2211504790 | |||
fb8662ec19 | |||
6f7911264f | |||
ae44aff330 |
9 .gitignore (vendored)
@@ -99,3 +99,12 @@ target/
# virtualenv
venv/
ENV/

# molecule
roles/**/molecule/**/__pycache__/

# macOS
.DS_Store

# Temp location used by our scripts
scripts/tmp/
@ -8,7 +8,7 @@ stages:
|
||||
- deploy-special
|
||||
|
||||
variables:
|
||||
KUBESPRAY_VERSION: v2.16.0
|
||||
KUBESPRAY_VERSION: v2.18.1
|
||||
FAILFASTCI_NAMESPACE: 'kargo-ci'
|
||||
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
|
||||
ANSIBLE_FORCE_COLOR: "true"
|
||||
@ -16,6 +16,7 @@ variables:
|
||||
TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
|
||||
CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
|
||||
CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
|
||||
CI_TEST_SETTING: "./tests/common/_kubespray_test_settings.yml"
|
||||
GS_ACCESS_KEY_ID: $GS_KEY
|
||||
GS_SECRET_ACCESS_KEY: $GS_SECRET
|
||||
CONTAINER_ENGINE: docker
|
||||
@ -26,19 +27,20 @@ variables:
|
||||
ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
|
||||
IDEMPOT_CHECK: "false"
|
||||
RESET_CHECK: "false"
|
||||
REMOVE_NODE_CHECK: "false"
|
||||
UPGRADE_TEST: "false"
|
||||
MITOGEN_ENABLE: "false"
|
||||
ANSIBLE_LOG_LEVEL: "-vv"
|
||||
RECOVER_CONTROL_PLANE_TEST: "false"
|
||||
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
|
||||
TERRAFORM_14_VERSION: 0.14.11
|
||||
TERRAFORM_15_VERSION: 0.15.5
|
||||
TERRAFORM_VERSION: 1.0.8
|
||||
ANSIBLE_MAJOR_VERSION: "2.10"
|
||||
|
||||
before_script:
|
||||
- ./tests/scripts/rebase.sh
|
||||
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
|
||||
- python -m pip uninstall -y ansible
|
||||
- python -m pip install -r tests/requirements.txt
|
||||
- python -m pip uninstall -y ansible ansible-base ansible-core
|
||||
- python -m pip install -r tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt
|
||||
- mkdir -p /.ssh
|
||||
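For reference, the per-Ansible-series requirements files used in the `before_script` above can be exercised locally with roughly the same commands (a sketch assuming `ANSIBLE_MAJOR_VERSION=2.10`):

```ShellSession
# Remove any previously installed Ansible packaging variants, then install the pinned test requirements
python -m pip uninstall -y ansible ansible-base ansible-core
python -m pip install -r tests/requirements-2.10.txt
```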
|
||||
.job: &job
|
||||
@ -79,3 +81,4 @@ include:
|
||||
- .gitlab-ci/terraform.yml
|
||||
- .gitlab-ci/packet.yml
|
||||
- .gitlab-ci/vagrant.yml
|
||||
- .gitlab-ci/molecule.yml
|
||||
|
@ -14,7 +14,7 @@ vagrant-validate:
|
||||
stage: unit-tests
|
||||
tags: [light]
|
||||
variables:
|
||||
VAGRANT_VERSION: 2.2.15
|
||||
VAGRANT_VERSION: 2.2.19
|
||||
script:
|
||||
- ./tests/scripts/vagrant-validate.sh
|
||||
except: ['triggers', 'master']
|
||||
@ -23,9 +23,8 @@ ansible-lint:
|
||||
extends: .job
|
||||
stage: unit-tests
|
||||
tags: [light]
|
||||
# lint every yml/yaml file that looks like it contains Ansible plays
|
||||
script: |-
|
||||
grep -Rl '^- hosts: \|^ hosts: ' --include \*.yml --include \*.yaml . | xargs -P 4 -n 25 ansible-lint -v
|
||||
script:
|
||||
- ansible-lint -v
|
||||
except: ['triggers', 'master']
|
||||
|
||||
syntax-check:
|
||||
@ -53,7 +52,7 @@ tox-inventory-builder:
|
||||
- ./tests/scripts/rebase.sh
|
||||
- apt-get update && apt-get install -y python3-pip
|
||||
- update-alternatives --install /usr/bin/python python /usr/bin/python3 10
|
||||
- python -m pip uninstall -y ansible
|
||||
- python -m pip uninstall -y ansible ansible-base ansible-core
|
||||
- python -m pip install -r tests/requirements.txt
|
||||
script:
|
||||
- pip3 install tox
|
||||
|
93 .gitlab-ci/molecule.yml (new file)
@ -0,0 +1,93 @@
|
||||
---
|
||||
|
||||
.molecule:
|
||||
tags: [c3.small.x86]
|
||||
only: [/^pr-.*$/]
|
||||
except: ['triggers']
|
||||
image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
|
||||
services: []
|
||||
stage: deploy-part1
|
||||
before_script:
|
||||
- tests/scripts/rebase.sh
|
||||
- apt-get update && apt-get install -y python3-pip
|
||||
- update-alternatives --install /usr/bin/python python /usr/bin/python3 10
|
||||
- python -m pip uninstall -y ansible ansible-base ansible-core
|
||||
- python -m pip install -r tests/requirements.txt
|
||||
- ./tests/scripts/vagrant_clean.sh
|
||||
script:
|
||||
- ./tests/scripts/molecule_run.sh
|
||||
after_script:
|
||||
- chronic ./tests/scripts/molecule_logs.sh
|
||||
artifacts:
|
||||
when: always
|
||||
paths:
|
||||
- molecule_logs/
|
||||
|
||||
# CI template for periodic CI jobs
|
||||
# Enabled when PERIODIC_CI_ENABLED var is set
|
||||
.molecule_periodic:
|
||||
only:
|
||||
variables:
|
||||
- $PERIODIC_CI_ENABLED
|
||||
allow_failure: true
|
||||
extends: .molecule
|
||||
|
||||
molecule_full:
|
||||
extends: .molecule_periodic
|
||||
|
||||
molecule_no_container_engines:
|
||||
extends: .molecule
|
||||
script:
|
||||
- ./tests/scripts/molecule_run.sh -e container-engine
|
||||
when: on_success
|
||||
|
||||
molecule_docker:
|
||||
extends: .molecule
|
||||
script:
|
||||
- ./tests/scripts/molecule_run.sh -i container-engine/docker
|
||||
when: on_success
|
||||
|
||||
molecule_containerd:
|
||||
extends: .molecule
|
||||
script:
|
||||
- ./tests/scripts/molecule_run.sh -i container-engine/containerd
|
||||
when: on_success
|
||||
|
||||
molecule_cri-o:
|
||||
extends: .molecule
|
||||
stage: deploy-part2
|
||||
script:
|
||||
- ./tests/scripts/molecule_run.sh -i container-engine/cri-o
|
||||
when: on_success
|
||||
|
||||
molecule_cri-dockerd:
|
||||
extends: .molecule
|
||||
stage: deploy-part2
|
||||
script:
|
||||
- ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
|
||||
when: on_success
|
||||
|
||||
# Stage 3 container engines don't get as much attention so allow them to fail
|
||||
molecule_kata:
|
||||
extends: .molecule
|
||||
stage: deploy-part3
|
||||
allow_failure: true
|
||||
script:
|
||||
- ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
|
||||
when: on_success
|
||||
|
||||
molecule_gvisor:
|
||||
extends: .molecule
|
||||
stage: deploy-part3
|
||||
allow_failure: true
|
||||
script:
|
||||
- ./tests/scripts/molecule_run.sh -i container-engine/gvisor
|
||||
when: on_success
|
||||
|
||||
molecule_youki:
|
||||
extends: .molecule
|
||||
stage: deploy-part3
|
||||
allow_failure: true
|
||||
script:
|
||||
- ./tests/scripts/molecule_run.sh -i container-engine/youki
|
||||
when: on_success
|
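The helper script used by the molecule jobs above can also be run locally to debug a single scenario, for example (assumes the Vagrant and pip prerequisites set up in the `before_script`):

```ShellSession
# Run only the containerd scenario, mirroring the molecule_containerd CI job
./tests/scripts/molecule_run.sh -i container-engine/containerd
```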
@ -2,6 +2,7 @@
|
||||
.packet:
|
||||
extends: .testcases
|
||||
variables:
|
||||
ANSIBLE_TIMEOUT: "120"
|
||||
CI_PLATFORM: packet
|
||||
SSH_USER: kubespray
|
||||
tags:
|
||||
@ -22,27 +23,62 @@
|
||||
allow_failure: true
|
||||
extends: .packet
|
||||
|
||||
packet_ubuntu18-calico-aio:
|
||||
stage: deploy-part1
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
# Future AIO job
|
||||
# The ubuntu20-calico-aio jobs are meant as early stages to prevent running the full CI if something is horribly broken
|
||||
packet_ubuntu20-calico-aio:
|
||||
stage: deploy-part1
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
variables:
|
||||
RESET_CHECK: "true"
|
||||
|
||||
# Exercise ansible variants during the nightly jobs
|
||||
packet_ubuntu20-calico-aio-ansible-2_9:
|
||||
stage: deploy-part1
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
variables:
|
||||
ANSIBLE_MAJOR_VERSION: "2.9"
|
||||
RESET_CHECK: "true"
|
||||
|
||||
packet_ubuntu20-calico-aio-ansible-2_10:
|
||||
stage: deploy-part1
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
variables:
|
||||
ANSIBLE_MAJOR_VERSION: "2.10"
|
||||
RESET_CHECK: "true"
|
||||
|
||||
packet_ubuntu20-calico-aio-ansible-2_11:
|
||||
stage: deploy-part1
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
variables:
|
||||
ANSIBLE_MAJOR_VERSION: "2.11"
|
||||
RESET_CHECK: "true"
|
||||
|
||||
# ### PR JOBS PART2
|
||||
|
||||
packet_centos7-flannel-containerd-addons-ha:
|
||||
packet_ubuntu18-aio-docker:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_ubuntu20-aio-docker:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_ubuntu18-calico-aio:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_centos7-flannel-addons-ha:
|
||||
extends: .packet_pr
|
||||
stage: deploy-part2
|
||||
when: on_success
|
||||
variables:
|
||||
MITOGEN_ENABLE: "true"
|
||||
|
||||
packet_centos8-crio:
|
||||
packet_almalinux8-crio:
|
||||
extends: .packet_pr
|
||||
stage: deploy-part2
|
||||
when: on_success
|
||||
@ -51,10 +87,13 @@ packet_ubuntu18-crio:
|
||||
extends: .packet_pr
|
||||
stage: deploy-part2
|
||||
when: manual
|
||||
variables:
|
||||
MITOGEN_ENABLE: "true"
|
||||
|
||||
packet_ubuntu16-canal-kubeadm-ha:
|
||||
packet_fedora35-crio:
|
||||
extends: .packet_pr
|
||||
stage: deploy-part2
|
||||
when: manual
|
||||
|
||||
packet_ubuntu16-canal-ha:
|
||||
stage: deploy-part2
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
@ -69,33 +108,31 @@ packet_ubuntu16-flannel-ha:
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_ubuntu16-kube-router-sep:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_ubuntu16-kube-router-svc-proxy:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_debian10-cilium-svc-proxy:
|
||||
stage: deploy-part2
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
|
||||
packet_debian10-containerd:
|
||||
packet_debian10-calico:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_debian10-docker:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
variables:
|
||||
MITOGEN_ENABLE: "true"
|
||||
|
||||
packet_debian11-calico:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_debian11-docker:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_centos7-calico-ha-once-localhost:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
@ -106,17 +143,22 @@ packet_centos7-calico-ha-once-localhost:
|
||||
services:
|
||||
- docker:19.03.9-dind
|
||||
|
||||
packet_centos8-kube-ovn:
|
||||
packet_almalinux8-kube-ovn:
|
||||
stage: deploy-part2
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
|
||||
packet_centos8-calico:
|
||||
packet_almalinux8-calico:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_fedora34-weave:
|
||||
packet_almalinux8-docker:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_fedora34-docker-weave:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
@ -126,14 +168,14 @@ packet_opensuse-canal:
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
|
||||
packet_ubuntu18-ovn4nfv:
|
||||
packet_opensuse-docker-cilium:
|
||||
stage: deploy-part2
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
# ### MANUAL JOBS
|
||||
|
||||
packet_ubuntu16-weave-sep:
|
||||
packet_ubuntu16-docker-weave-sep:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
@ -143,12 +185,18 @@ packet_ubuntu18-cilium-sep:
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_ubuntu18-flannel-containerd-ha:
|
||||
packet_ubuntu18-flannel-ha:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_ubuntu18-flannel-containerd-ha-once:
|
||||
packet_ubuntu18-flannel-ha-once:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
# Calico HA eBPF
|
||||
packet_almalinux8-calico-ha-ebpf:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
@ -163,11 +211,6 @@ packet_centos7-calico-ha:
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_centos7-kube-router:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_centos7-multus-calico:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
@ -178,22 +221,34 @@ packet_oracle7-canal-ha:
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_fedora33-calico:
|
||||
packet_fedora35-docker-calico:
|
||||
stage: deploy-part2
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
variables:
|
||||
RESET_CHECK: "true"
|
||||
|
||||
packet_fedora34-calico-selinux:
|
||||
stage: deploy-part2
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
|
||||
packet_fedora35-calico-swap-selinux:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_amazon-linux-2-aio:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_fedora34-kube-ovn-containerd:
|
||||
packet_almalinux8-calico-nodelocaldns-secondary:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_fedora34-kube-ovn:
|
||||
stage: deploy-part2
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
@ -201,37 +256,47 @@ packet_fedora34-kube-ovn-containerd:
|
||||
# ### PR JOBS PART3
|
||||
# Long jobs (45min+)
|
||||
|
||||
packet_centos7-weave-upgrade-ha:
|
||||
packet_centos7-docker-weave-upgrade-ha:
|
||||
stage: deploy-part3
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
variables:
|
||||
UPGRADE_TEST: basic
|
||||
|
||||
packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha:
|
||||
stage: deploy-part3
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
variables:
|
||||
UPGRADE_TEST: basic
|
||||
MITOGEN_ENABLE: "false"
|
||||
|
||||
# Calico HA Wireguard
|
||||
packet_ubuntu20-calico-ha-wireguard:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
variables:
|
||||
MITOGEN_ENABLE: "true"
|
||||
|
||||
packet_debian9-calico-upgrade:
|
||||
packet_debian10-calico-upgrade:
|
||||
stage: deploy-part3
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
variables:
|
||||
UPGRADE_TEST: graceful
|
||||
MITOGEN_ENABLE: "false"
|
||||
|
||||
packet_debian9-calico-upgrade-once:
|
||||
packet_almalinux8-calico-remove-node:
|
||||
stage: deploy-part3
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
variables:
|
||||
REMOVE_NODE_CHECK: "true"
|
||||
REMOVE_NODE_NAME: "instance-3"
|
||||
|
||||
packet_debian10-calico-upgrade-once:
|
||||
stage: deploy-part3
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
variables:
|
||||
UPGRADE_TEST: graceful
|
||||
MITOGEN_ENABLE: "false"
|
||||
|
||||
packet_ubuntu18-calico-ha-recover:
|
||||
stage: deploy-part3
|
||||
|
@ -53,92 +53,51 @@
|
||||
# Cleanup regardless of exit code
|
||||
- chronic ./tests/scripts/testcases_cleanup.sh
|
||||
|
||||
tf-0.15.x-validate-openstack:
|
||||
tf-validate-openstack:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: $TERRAFORM_15_VERSION
|
||||
TF_VERSION: $TERRAFORM_VERSION
|
||||
PROVIDER: openstack
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
tf-0.15.x-validate-packet:
|
||||
tf-validate-metal:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: $TERRAFORM_15_VERSION
|
||||
PROVIDER: packet
|
||||
TF_VERSION: $TERRAFORM_VERSION
|
||||
PROVIDER: metal
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
tf-0.15.x-validate-aws:
|
||||
tf-validate-aws:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: $TERRAFORM_15_VERSION
|
||||
TF_VERSION: $TERRAFORM_VERSION
|
||||
PROVIDER: aws
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
tf-0.15.x-validate-exoscale:
|
||||
tf-validate-exoscale:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: $TERRAFORM_15_VERSION
|
||||
TF_VERSION: $TERRAFORM_VERSION
|
||||
PROVIDER: exoscale
|
||||
|
||||
tf-0.15.x-validate-vsphere:
|
||||
tf-validate-vsphere:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: $TERRAFORM_15_VERSION
|
||||
TF_VERSION: $TERRAFORM_VERSION
|
||||
PROVIDER: vsphere
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
tf-0.15.x-validate-upcloud:
|
||||
tf-validate-upcloud:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: $TERRAFORM_15_VERSION
|
||||
PROVIDER: upcloud
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
tf-0.14.x-validate-openstack:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: $TERRAFORM_14_VERSION
|
||||
PROVIDER: openstack
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
tf-0.14.x-validate-packet:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: $TERRAFORM_14_VERSION
|
||||
PROVIDER: packet
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
tf-0.14.x-validate-aws:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: $TERRAFORM_14_VERSION
|
||||
PROVIDER: aws
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
tf-0.14.x-validate-exoscale:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: $TERRAFORM_14_VERSION
|
||||
PROVIDER: exoscale
|
||||
|
||||
tf-0.14.x-validate-vsphere:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: $TERRAFORM_14_VERSION
|
||||
PROVIDER: vsphere
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
tf-0.14.x-validate-upcloud:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: $TERRAFORM_14_VERSION
|
||||
TF_VERSION: $TERRAFORM_VERSION
|
||||
PROVIDER: upcloud
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
# tf-packet-ubuntu16-default:
|
||||
# extends: .terraform_apply
|
||||
# variables:
|
||||
# TF_VERSION: $TERRAFORM_14_VERSION
|
||||
# TF_VERSION: $TERRAFORM_VERSION
|
||||
# PROVIDER: packet
|
||||
# CLUSTER: $CI_COMMIT_REF_NAME
|
||||
# TF_VAR_number_of_k8s_masters: "1"
|
||||
@ -152,7 +111,7 @@ tf-0.14.x-validate-upcloud:
|
||||
# tf-packet-ubuntu18-default:
|
||||
# extends: .terraform_apply
|
||||
# variables:
|
||||
# TF_VERSION: $TERRAFORM_14_VERSION
|
||||
# TF_VERSION: $TERRAFORM_VERSION
|
||||
# PROVIDER: packet
|
||||
# CLUSTER: $CI_COMMIT_REF_NAME
|
||||
# TF_VAR_number_of_k8s_masters: "1"
|
||||
@ -187,10 +146,6 @@ tf-0.14.x-validate-upcloud:
|
||||
OS_INTERFACE: public
|
||||
OS_IDENTITY_API_VERSION: "3"
|
||||
TF_VAR_router_id: "ab95917c-41fb-4881-b507-3a6dfe9403df"
|
||||
# Since ELASTX is in Stockholm, Mitogen helps with latency
|
||||
MITOGEN_ENABLE: "false"
|
||||
# Mitogen doesn't support interpreter discovery yet
|
||||
ANSIBLE_PYTHON_INTERPRETER: "/usr/bin/python3"
|
||||
|
||||
tf-elastx_cleanup:
|
||||
stage: unit-tests
|
||||
@ -210,7 +165,7 @@ tf-elastx_ubuntu18-calico:
|
||||
allow_failure: true
|
||||
variables:
|
||||
<<: *elastx_variables
|
||||
TF_VERSION: $TERRAFORM_15_VERSION
|
||||
TF_VERSION: $TERRAFORM_VERSION
|
||||
PROVIDER: openstack
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
ANSIBLE_TIMEOUT: "60"
|
||||
@ -256,7 +211,7 @@ tf-elastx_ubuntu18-calico:
|
||||
# environment: ovh
|
||||
# variables:
|
||||
# <<: *ovh_variables
|
||||
# TF_VERSION: $TERRAFORM_14_VERSION
|
||||
# TF_VERSION: $TERRAFORM_VERSION
|
||||
# PROVIDER: openstack
|
||||
# CLUSTER: $CI_COMMIT_REF_NAME
|
||||
# ANSIBLE_TIMEOUT: "60"
|
||||
|
@ -1,22 +1,5 @@
|
||||
---
|
||||
|
||||
molecule_tests:
|
||||
tags: [c3.small.x86]
|
||||
only: [/^pr-.*$/]
|
||||
except: ['triggers']
|
||||
image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
|
||||
services: []
|
||||
stage: deploy-part1
|
||||
before_script:
|
||||
- tests/scripts/rebase.sh
|
||||
- apt-get update && apt-get install -y python3-pip
|
||||
- update-alternatives --install /usr/bin/python python /usr/bin/python3 10
|
||||
- python -m pip uninstall -y ansible
|
||||
- python -m pip install -r tests/requirements.txt
|
||||
- ./tests/scripts/vagrant_clean.sh
|
||||
script:
|
||||
- ./tests/scripts/molecule_run.sh
|
||||
|
||||
.vagrant:
|
||||
extends: .testcases
|
||||
variables:
|
||||
@ -32,13 +15,14 @@ molecule_tests:
|
||||
before_script:
|
||||
- apt-get update && apt-get install -y python3-pip
|
||||
- update-alternatives --install /usr/bin/python python /usr/bin/python3 10
|
||||
- python -m pip uninstall -y ansible
|
||||
- python -m pip uninstall -y ansible ansible-base ansible-core
|
||||
- python -m pip install -r tests/requirements.txt
|
||||
- ./tests/scripts/vagrant_clean.sh
|
||||
script:
|
||||
- ./tests/scripts/testcases_run.sh
|
||||
after_script:
|
||||
- chronic ./tests/scripts/testcases_cleanup.sh
|
||||
allow_failure: true
|
||||
|
||||
vagrant_ubuntu18-calico-dual-stack:
|
||||
stage: deploy-part2
|
||||
@ -59,3 +43,24 @@ vagrant_ubuntu20-flannel:
|
||||
stage: deploy-part2
|
||||
extends: .vagrant
|
||||
when: on_success
|
||||
|
||||
vagrant_ubuntu16-kube-router-sep:
|
||||
stage: deploy-part2
|
||||
extends: .vagrant
|
||||
when: manual
|
||||
|
||||
# Service proxy test fails connectivity testing
|
||||
vagrant_ubuntu16-kube-router-svc-proxy:
|
||||
stage: deploy-part2
|
||||
extends: .vagrant
|
||||
when: manual
|
||||
|
||||
vagrant_fedora35-kube-router:
|
||||
stage: deploy-part2
|
||||
extends: .vagrant
|
||||
when: on_success
|
||||
|
||||
vagrant_centos7-kube-router:
|
||||
stage: deploy-part2
|
||||
extends: .vagrant
|
||||
when: manual
|
||||
|
@ -6,11 +6,17 @@
|
||||
|
||||
It is recommended to use filters to manage GitHub email notifications; see [examples for setting filters to Kubernetes GitHub notifications](https://github.com/kubernetes/community/blob/master/communication/best-practices.md#examples-for-setting-filters-to-kubernetes-github-notifications)
|
||||
|
||||
To install development dependencies you can use `pip install -r tests/requirements.txt`
|
||||
To install development dependencies, you can set up a Python virtual environment with the necessary dependencies:
|
||||
|
||||
```ShellSession
|
||||
virtualenv venv
|
||||
source venv/bin/activate
|
||||
pip install -r tests/requirements.txt
|
||||
```
|
||||
|
||||
#### Linting
|
||||
|
||||
Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `ansible-lint`
Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `ansible-lint`. It is a good idea to call these tools from your pre-commit hook to avoid a lot of back and forth on fixing linting issues (<https://support.gitkraken.com/working-with-repositories/githooksexample/>).
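For example, a minimal pre-commit hook could look like this (an illustrative sketch, not shipped with the repository; remember to make `.git/hooks/pre-commit` executable):

```ShellSession
#!/bin/sh
# Hypothetical .git/hooks/pre-commit: run both linters and abort the commit on failure
set -e
yamllint .
ansible-lint -v
```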
#### Molecule
|
||||
|
||||
@ -29,3 +35,5 @@ Vagrant with VirtualBox or libvirt driver helps you to quickly spin test cluster
|
||||
3. Fork the desired repo, develop and test your code changes.
|
||||
4. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
|
||||
5. Submit a pull request.
|
||||
6. Work with the reviewers on their suggestions.
|
||||
7. Make sure to rebase onto the HEAD of your target branch and squash unnecessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before your contribution is finally merged; a sketch of the typical commands follows this list.
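A sketch of the rebase-and-squash workflow mentioned in step 7 (the `upstream` remote and the branch names are assumptions about your local setup):

```ShellSession
# Bring in the latest target branch, interactively squash local commits, then update the PR branch
git fetch upstream
git rebase -i upstream/master
git push --force-with-lease origin my-feature-branch
```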
14 Dockerfile
@ -1,14 +1,18 @@
|
||||
# Use immutable image tags rather than mutable tags (like ubuntu:18.04)
|
||||
FROM ubuntu:bionic-20200807
|
||||
# Use immutable image tags rather than mutable tags (like ubuntu:20.04)
|
||||
FROM ubuntu:focal-20220316
|
||||
|
||||
ARG ARCH=amd64
|
||||
ARG TZ=Etc/UTC
|
||||
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
||||
|
||||
RUN apt update -y \
|
||||
&& apt install -y \
|
||||
libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
|
||||
ca-certificates curl gnupg2 software-properties-common python3-pip rsync git \
|
||||
ca-certificates curl gnupg2 software-properties-common python3-pip unzip rsync git \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
|
||||
&& add-apt-repository \
|
||||
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
|
||||
"deb [arch=$ARCH] https://download.docker.com/linux/ubuntu \
|
||||
$(lsb_release -cs) \
|
||||
stable" \
|
||||
&& apt update -y && apt-get install --no-install-recommends -y docker-ce \
|
||||
@ -28,6 +32,6 @@ RUN /usr/bin/python3 -m pip install --no-cache-dir pip -U \
|
||||
&& update-alternatives --install /usr/bin/python python /usr/bin/python3 1
|
||||
|
||||
RUN KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
|
||||
&& curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/amd64/kubectl \
|
||||
&& curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$ARCH/kubectl \
|
||||
&& chmod a+x kubectl \
|
||||
&& mv kubectl /usr/local/bin/kubectl
|
||||
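The new `ARCH` build argument allows building the image for other CPU architectures; a hypothetical invocation (the image tag is illustrative, and cross-building would additionally require buildx/QEMU):

```ShellSession
docker build --build-arg ARCH=arm64 -t kubespray:local .
```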
|
4 Makefile
@ -1,5 +1,7 @@
|
||||
mitogen:
|
||||
ansible-playbook -c local mitogen.yml -vv
|
||||
@echo Mitogen support is deprecated.
|
||||
@echo Please run the following command manually:
|
||||
@echo ansible-playbook -c local mitogen.yml -vv
|
||||
clean:
|
||||
rm -rf dist/
|
||||
rm *.retry
|
||||
|
@ -4,15 +4,20 @@ aliases:
|
||||
- chadswen
|
||||
- mirwan
|
||||
- miouge1
|
||||
- woopstar
|
||||
- luckysb
|
||||
- floryut
|
||||
- oomichi
|
||||
- cristicalin
|
||||
kubespray-reviewers:
|
||||
- holmsten
|
||||
- bozzo
|
||||
- eppo
|
||||
- oomichi
|
||||
- jayonlau
|
||||
- cristicalin
|
||||
- liupeng0518
|
||||
kubespray-emeritus_approvers:
|
||||
- riverzhang
|
||||
- atoms
|
||||
- ant31
|
||||
- woopstar
|
||||
|
57 README.md
@ -19,10 +19,10 @@ To deploy the cluster you can use :
|
||||
|
||||
#### Usage
|
||||
|
||||
```ShellSession
|
||||
# Install dependencies from ``requirements.txt``
|
||||
sudo pip3 install -r requirements.txt
|
||||
Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
|
||||
then run the following steps:
|
||||
|
||||
```ShellSession
|
||||
# Copy ``inventory/sample`` as ``inventory/mycluster``
|
||||
cp -rfp inventory/sample inventory/mycluster
|
||||
|
||||
@ -57,10 +57,10 @@ A simple way to ensure you get all the correct version of Ansible is to use the
|
||||
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:
|
||||
|
||||
```ShellSession
|
||||
docker pull quay.io/kubespray/kubespray:v2.16.0
|
||||
docker pull quay.io/kubespray/kubespray:v2.19.0
|
||||
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
|
||||
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
|
||||
quay.io/kubespray/kubespray:v2.16.0 bash
|
||||
quay.io/kubespray/kubespray:v2.19.0 bash
|
||||
# Inside the container you may now run the kubespray playbooks:
|
||||
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
|
||||
```
|
||||
@ -75,10 +75,11 @@ python -V && pip -V
|
||||
```
|
||||
|
||||
If this returns the version of the software, you're good to go. If not, download and install Python from here <https://www.python.org/downloads/source/>
|
||||
Install the necessary requirements
|
||||
|
||||
Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
|
||||
then run the following step:
|
||||
|
||||
```ShellSession
|
||||
sudo pip install -r requirements.txt
|
||||
vagrant up
|
||||
```
|
||||
|
||||
@ -110,6 +111,7 @@ vagrant up
|
||||
- [Adding/replacing a node](docs/nodes.md)
|
||||
- [Upgrades basics](docs/upgrades.md)
|
||||
- [Air-Gap installation](docs/offline-environment.md)
|
||||
- [Hardening](docs/hardening.md)
|
||||
- [Roadmap](docs/roadmap.md)
|
||||
|
||||
## Supported Linux Distributions
|
||||
@ -118,11 +120,12 @@ vagrant up
|
||||
- **Debian** Bullseye, Buster, Jessie, Stretch
|
||||
- **Ubuntu** 16.04, 18.04, 20.04
|
||||
- **CentOS/RHEL** 7, [8](docs/centos8.md)
|
||||
- **Fedora** 33, 34
|
||||
- **Fedora** 34, 35
|
||||
- **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
|
||||
- **openSUSE** Leap 15.x/Tumbleweed
|
||||
- **Oracle Linux** 7, [8](docs/centos8.md)
|
||||
- **Alma Linux** [8](docs/centos8.md)
|
||||
- **Rocky Linux** [8](docs/centos8.md)
|
||||
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
|
||||
|
||||
Note: Upstart/SysV init based OS types are not supported.
|
||||
@ -130,29 +133,27 @@ Note: Upstart/SysV init based OS types are not supported.
|
||||
## Supported Components
|
||||
|
||||
- Core
|
||||
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.21.5
|
||||
- [etcd](https://github.com/coreos/etcd) v3.4.13
|
||||
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.23.7
|
||||
- [etcd](https://github.com/etcd-io/etcd) v3.5.3
|
||||
- [docker](https://www.docker.com/) v20.10 (see note)
|
||||
- [containerd](https://containerd.io/) v1.4.9
|
||||
- [cri-o](http://cri-o.io/) v1.21 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
|
||||
- [containerd](https://containerd.io/) v1.6.4
|
||||
- [cri-o](http://cri-o.io/) v1.22 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
|
||||
- Network Plugin
|
||||
- [cni-plugins](https://github.com/containernetworking/plugins) v0.9.1
|
||||
- [calico](https://github.com/projectcalico/calico) v3.19.2
|
||||
- [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
|
||||
- [calico](https://github.com/projectcalico/calico) v3.22.3
|
||||
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
|
||||
- [cilium](https://github.com/cilium/cilium) v1.9.10
|
||||
- [flanneld](https://github.com/flannel-io/flannel) v0.14.0
|
||||
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.7.2
|
||||
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.3.0
|
||||
- [multus](https://github.com/intel/multus-cni) v3.7.2
|
||||
- [ovn4nfv](https://github.com/opnfv/ovn4nfv-k8s-plugin) v1.1.0
|
||||
- [cilium](https://github.com/cilium/cilium) v1.11.3
|
||||
- [flanneld](https://github.com/flannel-io/flannel) v0.17.0
|
||||
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.9.2
|
||||
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.4.0
|
||||
- [multus](https://github.com/intel/multus-cni) v3.8
|
||||
- [weave](https://github.com/weaveworks/weave) v2.8.1
|
||||
- Application
|
||||
- [ambassador](https://github.com/datawire/ambassador): v1.5
|
||||
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
|
||||
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
|
||||
- [cert-manager](https://github.com/jetstack/cert-manager) v1.0.4
|
||||
- [coredns](https://github.com/coredns/coredns) v1.8.0
|
||||
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.0.0
|
||||
- [cert-manager](https://github.com/jetstack/cert-manager) v1.8.0
|
||||
- [coredns](https://github.com/coredns/coredns) v1.8.6
|
||||
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.2.1
|
||||
|
||||
## Container Runtime Notes
|
||||
|
||||
@ -161,8 +162,8 @@ Note: Upstart/SysV init based OS types are not supported.
|
||||
|
||||
## Requirements
|
||||
|
||||
- **Minimum required version of Kubernetes is v1.19**
|
||||
- **Ansible v2.9.x, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands, Ansible 2.10.x is experimentally supported for now**
|
||||
- **Minimum required version of Kubernetes is v1.21**
|
||||
- **Ansible v2.9.x, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
|
||||
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
|
||||
- The target servers are configured to allow **IPv4 forwarding**.
|
||||
- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
|
||||
@ -195,8 +196,6 @@ You can choose between 10 network plugins. (default: `calico`, except Vagrant us
|
||||
|
||||
- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
|
||||
|
||||
- [ovn4nfv](docs/ovn4nfv.md): [ovn4nfv-k8s-plugins](https://github.com/opnfv/ovn4nfv-k8s-plugin) is the network controller, OVS agent and CNI server to offer basic SFC and OVN overlay networking.
|
||||
|
||||
- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
|
||||
(Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).
|
||||
|
||||
@ -217,8 +216,6 @@ See also [Network checker](docs/netcheck.md).
|
||||
|
||||
## Ingress Plugins
|
||||
|
||||
- [ambassador](docs/ambassador.md): the Ambassador Ingress Controller and API gateway.
|
||||
|
||||
- [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller.
|
||||
|
||||
- [metallb](docs/metallb.md): the MetalLB bare-metal service LoadBalancer provider.
|
||||
|
33 RELEASE.md
@ -2,17 +2,18 @@
|
||||
|
||||
The Kubespray Project is released on an as-needed basis. The process is as follows:
|
||||
|
||||
1. An issue is proposing a new release with a changelog since the last release
|
||||
1. An issue proposing a new release with a changelog since the last release is created. Please see [a good sample issue](https://github.com/kubernetes-sigs/kubespray/issues/8325)
|
||||
2. At least one of the [approvers](OWNERS_ALIASES) must approve this release
|
||||
3. The `kube_version_min_required` variable is set to `n-1`
|
||||
4. Remove hashes for [EOL versions](https://github.com/kubernetes/sig-release/blob/master/releases/patch-releases.md) of kubernetes from `*_checksums` variables.
|
||||
5. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
|
||||
6. An approver creates a release branch in the form `release-X.Y`
|
||||
7. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) docker images are built and tagged
|
||||
8. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
|
||||
9. The release issue is closed
|
||||
10. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
|
||||
11. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
|
||||
4. Remove hashes for [EOL versions](https://github.com/kubernetes/website/blob/main/content/en/releases/patch-releases.md) of kubernetes from `*_checksums` variables.
|
||||
5. Create the release note with [Kubernetes Release Notes Generator](https://github.com/kubernetes/release/blob/master/cmd/release-notes/README.md). See the following `Release note creation` section for the details.
|
||||
6. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
|
||||
7. An approver creates a release branch in the form `release-X.Y`
|
||||
8. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) docker images are built and tagged
|
||||
9. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
|
||||
10. The release issue is closed
|
||||
11. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
|
||||
12. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
|
||||
|
||||
## Major/minor releases and milestones
|
||||
|
||||
@ -46,3 +47,17 @@ The Kubespray Project is released on an as-needed basis. The process is as follo
|
||||
then Kubespray v2.1.0 may be bound to only minor changes to `kube_version`, like v1.5.1
|
||||
and *any* changes to other components, like etcd v4, or calico 1.2.3.
|
||||
And Kubespray v3.x.x shall be bound to `kube_version: 2.x.x` respectively.
|
||||
|
||||
## Release note creation
|
||||
|
||||
You can create a release note with:
|
||||
|
||||
```shell
|
||||
export GITHUB_TOKEN=<your-github-token>
|
||||
export ORG=kubernetes-sigs
|
||||
export REPO=kubespray
|
||||
release-notes --start-sha <The start commit-id> --end-sha <The end commit-id> --dependencies=false --output=/tmp/kubespray-release-note --required-author=""
|
||||
```
|
||||
|
||||
If the release note file (/tmp/kubespray-release-note) contains "### Uncategorized" pull requests, those pull requests don't have a valid kind label (`kind/feature`, etc.).
It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note.
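One way to add the missing label, using the GitHub CLI purely as an illustration (the pull request number is hypothetical):

```ShellSession
gh pr edit 1234 --repo kubernetes-sigs/kubespray --add-label kind/feature
```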
|
23 Vagrantfile (vendored)
@ -26,9 +26,11 @@ SUPPORTED_OS = {
|
||||
"centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
|
||||
"centos8" => {box: "centos/8", user: "vagrant"},
|
||||
"centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
|
||||
"fedora33" => {box: "fedora/33-cloud-base", user: "vagrant"},
|
||||
"almalinux8" => {box: "almalinux/8", user: "vagrant"},
|
||||
"almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
|
||||
"fedora34" => {box: "fedora/34-cloud-base", user: "vagrant"},
|
||||
"opensuse" => {box: "bento/opensuse-leap-15.2", user: "vagrant"},
|
||||
"fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
|
||||
"opensuse" => {box: "opensuse/Leap-15.3.x86_64", user: "vagrant"},
|
||||
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
|
||||
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
|
||||
"oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
|
||||
@ -53,9 +55,9 @@ $subnet_ipv6 ||= "fd3c:b398:0698:0756"
|
||||
$os ||= "ubuntu1804"
|
||||
$network_plugin ||= "flannel"
|
||||
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
|
||||
$multi_networking ||= false
|
||||
$multi_networking ||= "False"
|
||||
$download_run_once ||= "True"
|
||||
$download_force_cache ||= "True"
|
||||
$download_force_cache ||= "False"
|
||||
# The first three nodes are etcd servers
|
||||
$etcd_instances ||= $num_instances
|
||||
# The first two nodes are kube masters
|
||||
@ -68,9 +70,12 @@ $kube_node_instances_with_disks_size ||= "20G"
|
||||
$kube_node_instances_with_disks_number ||= 2
|
||||
$override_disk_size ||= false
|
||||
$disk_size ||= "20GB"
|
||||
$local_path_provisioner_enabled ||= false
|
||||
$local_path_provisioner_enabled ||= "False"
|
||||
$local_path_provisioner_claim_root ||= "/opt/local-path-provisioner/"
|
||||
$libvirt_nested ||= false
|
||||
# boolean or string (e.g. "-vvv")
|
||||
$ansible_verbosity ||= false
|
||||
$ansible_tags ||= ENV['VAGRANT_ANSIBLE_TAGS'] || ""
|
||||
|
||||
$playbook ||= "cluster.yml"
|
||||
|
||||
@ -167,7 +172,7 @@ Vagrant.configure("2") do |config|
|
||||
# always make /dev/sd{a/b/c} so that CI can ensure that
|
||||
# virtualbox and libvirt will have the same devices to use for OSDs
|
||||
(1..$kube_node_instances_with_disks_number).each do |d|
|
||||
lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "ide"
|
||||
lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "scsi"
|
||||
end
|
||||
end
|
||||
end
|
||||
@ -238,9 +243,11 @@ Vagrant.configure("2") do |config|
|
||||
}
|
||||
|
||||
# Only execute the Ansible provisioner once, when all the machines are up and ready.
|
||||
# And limit the action to gathering facts, the full playbook is going to be run by testcases_run.sh
|
||||
if i == $num_instances
|
||||
node.vm.provision "ansible" do |ansible|
|
||||
ansible.playbook = $playbook
|
||||
ansible.verbose = $ansible_verbosity
|
||||
$ansible_inventory_path = File.join( $inventory, "hosts.ini")
|
||||
if File.exist?($ansible_inventory_path)
|
||||
ansible.inventory_path = $ansible_inventory_path
|
||||
@ -250,7 +257,9 @@ Vagrant.configure("2") do |config|
|
||||
ansible.host_key_checking = false
|
||||
ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"]
|
||||
ansible.host_vars = host_vars
|
||||
#ansible.tags = ['download']
|
||||
if $ansible_tags != ""
|
||||
ansible.tags = [$ansible_tags]
|
||||
end
|
||||
ansible.groups = {
|
||||
"etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
|
||||
"kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
|
||||
|
@ -3,7 +3,6 @@ pipelining=True
|
||||
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
|
||||
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
|
||||
[defaults]
|
||||
strategy_plugins = plugins/mitogen/ansible_mitogen/plugins/strategy
|
||||
# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .)
|
||||
force_valid_group_names = ignore
|
||||
|
||||
@ -15,7 +14,7 @@ fact_caching_timeout = 7200
|
||||
stdout_callback = default
|
||||
display_skipped_hosts = no
|
||||
library = ./library
|
||||
callback_whitelist = profile_tasks
|
||||
callback_whitelist = profile_tasks,ara_default
|
||||
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
|
||||
deprecation_warnings=False
|
||||
inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds, .gpg
|
||||
|
@ -5,7 +5,7 @@
|
||||
vars:
|
||||
minimal_ansible_version: 2.9.0
|
||||
minimal_ansible_version_2_10: 2.10.11
|
||||
maximal_ansible_version: 2.11.0
|
||||
maximal_ansible_version: 2.13.0
|
||||
ansible_connection: local
|
||||
tags: always
|
||||
tasks:
|
||||
|
@ -32,7 +32,7 @@
|
||||
roles:
|
||||
- { role: kubespray-defaults }
|
||||
- { role: kubernetes/preinstall, tags: preinstall }
|
||||
- { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) }
|
||||
- { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
|
||||
- { role: download, tags: download, when: "not skip_downloads" }
|
||||
|
||||
- hosts: etcd
|
||||
@ -46,7 +46,7 @@
|
||||
vars:
|
||||
etcd_cluster_setup: true
|
||||
etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
|
||||
when: not etcd_kubeadm_enabled| default(false)
|
||||
when: etcd_deployment_type != "kubeadm"
|
||||
|
||||
- hosts: k8s_cluster
|
||||
gather_facts: False
|
||||
@ -59,7 +59,7 @@
|
||||
vars:
|
||||
etcd_cluster_setup: false
|
||||
etcd_events_cluster_setup: false
|
||||
when: not etcd_kubeadm_enabled| default(false)
|
||||
when: etcd_deployment_type != "kubeadm"
|
||||
|
||||
- hosts: k8s_cluster
|
||||
gather_facts: False
|
||||
@ -118,7 +118,8 @@
|
||||
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
|
||||
- { role: kubernetes-apps, tags: apps }
|
||||
|
||||
- hosts: k8s_cluster
|
||||
- name: Apply resolv.conf changes now that cluster DNS is up
|
||||
hosts: k8s_cluster
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
|
@ -47,6 +47,10 @@ If you need to delete all resources from a resource group, simply call:
|
||||
|
||||
**WARNING** this really deletes everything from your resource group, including everything that was later created by you!
|
||||
|
||||
## Installing Ansible and the dependencies
|
||||
|
||||
Install Ansible according to [Ansible installation guide](/docs/ansible.md#installing-ansible)
|
||||
|
||||
## Generating an inventory for kubespray
|
||||
|
||||
After you have applied the templates, you can generate an inventory with this call:
|
||||
@ -59,6 +63,5 @@ It will create the file ./inventory which can then be used with kubespray, e.g.:
|
||||
|
||||
```shell
|
||||
cd kubespray-root-dir
|
||||
sudo pip3 install -r requirements.txt
|
||||
ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
|
||||
```
|
||||
|
@ -83,11 +83,15 @@ class KubesprayInventory(object):
|
||||
self.config_file = config_file
|
||||
self.yaml_config = {}
|
||||
loadPreviousConfig = False
|
||||
printHostnames = False
|
||||
# See whether there are any commands to process
|
||||
if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
|
||||
if changed_hosts[0] == "add":
|
||||
loadPreviousConfig = True
|
||||
changed_hosts = changed_hosts[1:]
|
||||
elif changed_hosts[0] == "print_hostnames":
|
||||
loadPreviousConfig = True
|
||||
printHostnames = True
|
||||
else:
|
||||
self.parse_command(changed_hosts[0], changed_hosts[1:])
|
||||
sys.exit(0)
|
||||
@ -105,6 +109,10 @@ class KubesprayInventory(object):
|
||||
print(e)
|
||||
sys.exit(1)
|
||||
|
||||
if printHostnames:
|
||||
self.print_hostnames()
|
||||
sys.exit(0)
|
||||
|
||||
self.ensure_required_groups(ROLES)
|
||||
|
||||
if changed_hosts:
|
||||
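The snippet above adds a `print_hostnames` command to the inventory builder; a hypothetical invocation (the inventory path is an assumption about your setup):

```ShellSession
CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py print_hostnames
```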
|
@ -13,6 +13,7 @@
|
||||
# under the License.
|
||||
|
||||
import inventory
|
||||
from test import support
|
||||
import unittest
|
||||
from unittest import mock
|
||||
|
||||
@ -26,6 +27,28 @@ if path not in sys.path:
|
||||
import inventory # noqa
|
||||
|
||||
|
||||
class TestInventoryPrintHostnames(unittest.TestCase):
|
||||
|
||||
@mock.patch('ruamel.yaml.YAML.load')
|
||||
def test_print_hostnames(self, load_mock):
|
||||
mock_io = mock.mock_open(read_data='')
|
||||
load_mock.return_value = OrderedDict({'all': {'hosts': {
|
||||
'node1': {'ansible_host': '10.90.0.2',
|
||||
'ip': '10.90.0.2',
|
||||
'access_ip': '10.90.0.2'},
|
||||
'node2': {'ansible_host': '10.90.0.3',
|
||||
'ip': '10.90.0.3',
|
||||
'access_ip': '10.90.0.3'}}}})
|
||||
with mock.patch('builtins.open', mock_io):
|
||||
with self.assertRaises(SystemExit) as cm:
|
||||
with support.captured_stdout() as stdout:
|
||||
inventory.KubesprayInventory(
|
||||
changed_hosts=["print_hostnames"],
|
||||
config_file="file")
|
||||
self.assertEqual("node1 node2\n", stdout.getvalue())
|
||||
self.assertEqual(cm.exception.code, 0)
|
||||
|
||||
|
||||
class TestInventory(unittest.TestCase):
|
||||
@mock.patch('inventory.sys')
|
||||
def setUp(self, sys_mock):
|
||||
|
@ -28,7 +28,7 @@
|
||||
sysctl:
|
||||
name: net.ipv4.ip_forward
|
||||
value: 1
|
||||
sysctl_file: /etc/sysctl.d/ipv4-ip_forward.conf
|
||||
sysctl_file: "{{ sysctl_file_path }}"
|
||||
state: present
|
||||
reload: yes
|
||||
|
||||
@ -37,7 +37,7 @@
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
value: 0
|
||||
sysctl_file: /etc/sysctl.d/bridge-nf-call.conf
|
||||
sysctl_file: "{{ sysctl_file_path }}"
|
||||
reload: yes
|
||||
with_items:
|
||||
- net.bridge.bridge-nf-call-arptables
|
||||
|
@@ -5,8 +5,8 @@
- hosts: localhost
strategy: linear
vars:
mitogen_version: 0.3.0rc1
mitogen_url: https://github.com/dw/mitogen/archive/v{{ mitogen_version }}.tar.gz
mitogen_version: 0.3.2
mitogen_url: https://github.com/mitogen-hq/mitogen/archive/refs/tags/v{{ mitogen_version }}.tar.gz
ansible_connection: local
tasks:
- name: Create mitogen plugin dir
@@ -38,7 +38,12 @@
- name: add strategy to ansible.cfg
ini_file:
path: ansible.cfg
section: defaults
option: strategy
value: mitogen_linear
mode: 0644
section: "{{ item.section | d('defaults') }}"
option: "{{ item.option }}"
value: "{{ item.value }}"
with_items:
- option: strategy
value: mitogen_linear
- option: strategy_plugins
value: plugins/mitogen/ansible_mitogen/plugins/strategy
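After the `ini_file` loop above runs, `ansible.cfg` should carry both options under `[defaults]`. A rough check, with the expected output shown as comments and the values taken from the loop, might be:

```bash
# Sketch: inspect the options written by the ini_file loop
grep -A3 '^\[defaults\]' ansible.cfg
# strategy = mitogen_linear
# strategy_plugins = plugins/mitogen/ansible_mitogen/plugins/strategy
```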
@@ -3,6 +3,7 @@
template:
src: "{{ item.file }}"
dest: "{{ kube_config_dir }}/{{ item.dest }}"
mode: 0644
with_items:
- { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}

@@ -2,6 +2,13 @@ all:
vars:
heketi_admin_key: "11elfeinhundertundelf"
heketi_user_key: "!!einseinseins"
glusterfs_daemonset:
readiness_probe:
timeout_seconds: 3
initial_delay_seconds: 3
liveness_probe:
timeout_seconds: 3
initial_delay_seconds: 10
children:
k8s_cluster:
vars:

@@ -5,7 +5,7 @@
changed_when: false

- name: "Kubernetes Apps | Deploy cluster role binding."
when: "clusterrolebinding_state.stdout | length > 0"
when: "clusterrolebinding_state.stdout | length == 0"
command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"

- name: Get clusterrolebindings again
@@ -31,7 +31,7 @@
mode: 0644

- name: "Deploy Heketi config secret"
when: "secret_state.stdout | length > 0"
when: "secret_state.stdout | length == 0"
command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"

- name: Get the heketi-config-secret secret again
@@ -41,5 +41,5 @@

- name: Make sure the heketi-config-secret secret exists now
assert:
that: "secret_state.stdout != \"\""
that: "secret_state.stdout | length > 0"
msg: "Heketi config secret is not present."
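The corrected `length == 0` conditions implement a create-if-missing pattern; expressed directly in shell it is roughly the sketch below (resource names are taken from the tasks above, the `kubectl` path is simplified).

```bash
# Sketch: create the clusterrolebinding only when it does not exist yet
if ! kubectl get clusterrolebinding heketi-gluster-admin >/dev/null 2>&1; then
  kubectl create clusterrolebinding heketi-gluster-admin \
    --clusterrole=edit --serviceaccount=default:heketi-service-account
fi
```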
@@ -73,8 +73,8 @@
"privileged": true
},
"readinessProbe": {
"timeoutSeconds": 3,
"initialDelaySeconds": 3,
"timeoutSeconds": {{ glusterfs_daemonset.readiness_probe.timeout_seconds }},
"initialDelaySeconds": {{ glusterfs_daemonset.readiness_probe.initial_delay_seconds }},
"exec": {
"command": [
"/bin/bash",
@@ -84,8 +84,8 @@
}
},
"livenessProbe": {
"timeoutSeconds": 3,
"initialDelaySeconds": 10,
"timeoutSeconds": {{ glusterfs_daemonset.liveness_probe.timeout_seconds }},
"initialDelaySeconds": {{ glusterfs_daemonset.liveness_probe.initial_delay_seconds }},
"exec": {
"command": [
"/bin/bash",

@@ -9,7 +9,8 @@ This script has two features:
(2) Deploy local container registry and register the container images to the registry.

Step(1) should be done online site as a preparation, then we bring the gotten images
to the target offline environment.
to the target offline environment. if images are from a private registry,
you need to set `PRIVATE_REGISTRY` environment variable.
Then we will run step(2) for registering the images to local registry.

Step(1) can be operated with:
@@ -28,16 +29,19 @@ manage-offline-container-images.sh register

This script generates the list of downloaded files and the list of container images by `roles/download/defaults/main.yml` file.

Run this script will generates three files, all downloaded files url in files.list, all container images in images.list, all component version in generate.sh.
Run this script will execute `generate_list.yml` playbook in kubespray root directory and generate four files,
all downloaded files url in files.list, all container images in images.list, jinja2 templates in *.template.

```shell
bash generate_list.sh
./generate_list.sh
tree temp
temp
├── files.list
├── generate.sh
└── images.list
0 directories, 3 files
├── files.list.template
├── images.list
└── images.list.template
0 directories, 5 files
```

In some cases you may want to update some component version, you can edit `generate.sh` file, then run `bash generate.sh | grep 'https' > files.list` to update file.list or run `bash generate.sh | grep -v 'https'> images.list` to update images.list.
In some cases you may want to update some component version, you can declare version variables in ansible inventory file or group_vars,
then run `./generate_list.sh -i [inventory_file]` to update file.list and images.list.
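As described above, component versions can now be overridden from the inventory rather than by editing a generated script. A minimal sketch, in which the variable name, value and paths are illustrative only, is:

```bash
# Sketch: pin one component version in group_vars, then regenerate the lists
echo 'etcd_version: v3.5.3' >> inventory/mycluster/group_vars/all/offline.yml
contrib/offline/generate_list.sh -i inventory/mycluster/hosts.yaml
grep etcd contrib/offline/temp/files.list contrib/offline/temp/images.list
```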
56 contrib/offline/generate_list.sh Normal file → Executable file
@@ -5,53 +5,29 @@ CURRENT_DIR=$(cd $(dirname $0); pwd)
TEMP_DIR="${CURRENT_DIR}/temp"
REPO_ROOT_DIR="${CURRENT_DIR%/contrib/offline}"

: ${IMAGE_ARCH:="amd64"}
: ${ANSIBLE_SYSTEM:="linux"}
: ${ANSIBLE_ARCHITECTURE:="x86_64"}
: ${DOWNLOAD_YML:="roles/download/defaults/main.yml"}
: ${KUBE_VERSION_YAML:="roles/kubespray-defaults/defaults/main.yaml"}

mkdir -p ${TEMP_DIR}

# ARCH used in convert {%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%} to {{arch}}
if [ "${IMAGE_ARCH}" != "amd64" ]; then ARCH="${IMAGE_ARCH}"; fi

cat > ${TEMP_DIR}/generate.sh << EOF
arch=${ARCH}
image_arch=${IMAGE_ARCH}
ansible_system=${ANSIBLE_SYSTEM}
ansible_architecture=${ANSIBLE_ARCHITECTURE}
EOF

# generate all component version by $DOWNLOAD_YML
grep 'kube_version:' ${REPO_ROOT_DIR}/${KUBE_VERSION_YAML} \
| sed 's/: /=/g' >> ${TEMP_DIR}/generate.sh
grep '_version:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
| sed 's/: /=/g;s/{{/${/g;s/}}/}/g' | tr -d ' ' >> ${TEMP_DIR}/generate.sh
sed -i 's/kube_major_version=.*/kube_major_version=${kube_version%.*}/g' ${TEMP_DIR}/generate.sh
sed -i 's/crictl_version=.*/crictl_version=${kube_version%.*}.0/g' ${TEMP_DIR}/generate.sh

# generate all download files url
# generate all download files url template
grep 'download_url:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
| sed 's/: /=/g;s/ //g;s/{{/${/g;s/}}/}/g;s/|lower//g;s/^.*_url=/echo /g' >> ${TEMP_DIR}/generate.sh
| sed 's/^.*_url: //g;s/\"//g' > ${TEMP_DIR}/files.list.template

# generate all images list
grep -E '_repo:|_tag:' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
| sed "s#{%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%}#{{arch}}#g" \
| sed 's/: /=/g;s/{{/${/g;s/}}/}/g' | tr -d ' ' >> ${TEMP_DIR}/generate.sh
# generate all images list template
sed -n '/^downloads:/,/download_defaults:/p' ${REPO_ROOT_DIR}/${DOWNLOAD_YML} \
| sed -n "s/repo: //p;s/tag: //p" | tr -d ' ' | sed 's/{{/${/g;s/}}/}/g' \
| sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/^/echo /g' >> ${TEMP_DIR}/generate.sh
| sed -n "s/repo: //p;s/tag: //p" | tr -d ' ' \
| sed 'N;s#\n# #g' | tr ' ' ':' | sed 's/\"//g' > ${TEMP_DIR}/images.list.template

# special handling for https://github.com/kubernetes-sigs/kubespray/pull/7570
sed -i 's#^coredns_image_repo=.*#coredns_image_repo=${kube_image_repo}$(if printf "%s\\n%s\\n" v1.21 ${kube_version%.*} | sort --check=quiet --version-sort; then echo -n /coredns/coredns;else echo -n /coredns; fi)#' ${TEMP_DIR}/generate.sh
sed -i 's#^coredns_image_tag=.*#coredns_image_tag=$(if printf "%s\\n%s\\n" v1.21 ${kube_version%.*} | sort --check=quiet --version-sort; then echo -n ${coredns_version};else echo -n ${coredns_version/v/}; fi)#' ${TEMP_DIR}/generate.sh

# add kube-* images to images list
# add kube-* images to images list template
# Those container images are downloaded by kubeadm, then roles/download/defaults/main.yml
# doesn't contain those images. That is reason why here needs to put those images into the
# list separately.
KUBE_IMAGES="kube-apiserver kube-controller-manager kube-scheduler kube-proxy"
echo "${KUBE_IMAGES}" | tr ' ' '\n' | xargs -L1 -I {} \
echo 'echo ${kube_image_repo}/{}:${kube_version}' >> ${TEMP_DIR}/generate.sh
for i in $KUBE_IMAGES; do
echo "{{ kube_image_repo }}/$i:{{ kube_version }}" >> ${TEMP_DIR}/images.list.template
done

# print files.list and images.list
bash ${TEMP_DIR}/generate.sh | grep 'https' | sort > ${TEMP_DIR}/files.list
bash ${TEMP_DIR}/generate.sh | grep -v 'https' | sort > ${TEMP_DIR}/images.list
# run ansible to expand templates
/bin/cp ${CURRENT_DIR}/generate_list.yml ${REPO_ROOT_DIR}

(cd ${REPO_ROOT_DIR} && ansible-playbook $* generate_list.yml && /bin/rm generate_list.yml) || exit 1

19 contrib/offline/generate_list.yml Normal file
@@ -0,0 +1,19 @@
---
- hosts: localhost
become: no

roles:
# Just load default variables from roles.
- role: kubespray-defaults
when: false
- role: download
when: false

tasks:
# Generate files.list and images.list files from templates.
- template:
src: ./contrib/offline/temp/{{ item }}.list.template
dest: ./contrib/offline/temp/{{ item }}.list
with_items:
- files
- images

@@ -54,7 +54,8 @@ function create_container_image_tar() {
if [ "${FIRST_PART}" = "k8s.gcr.io" ] ||
[ "${FIRST_PART}" = "gcr.io" ] ||
[ "${FIRST_PART}" = "docker.io" ] ||
[ "${FIRST_PART}" = "quay.io" ]; then
[ "${FIRST_PART}" = "quay.io" ] ||
[ "${FIRST_PART}" = "${PRIVATE_REGISTRY}" ]; then
image=$(echo ${image} | sed s@"${FIRST_PART}/"@@)
fi
echo "${FILE_NAME} ${image}" >> ${IMAGE_LIST}
@@ -152,7 +153,8 @@ else
echo "(2) Deploy local container registry and register the container images to the registry."
echo ""
echo "Step(1) should be done online site as a preparation, then we bring"
echo "the gotten images to the target offline environment."
echo "the gotten images to the target offline environment. if images are from"
echo "a private registry, you need to set PRIVATE_REGISTRY environment variable."
echo "Then we will run step(2) for registering the images to local registry."
echo ""
echo "${IMAGE_TAR_FILE} is created to contain your container images."
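Correspondingly, images pulled from a private registry can be included by exporting `PRIVATE_REGISTRY` before running the script. The registry host below is only an example, and the `create` subcommand for step (1) is assumed from the surrounding documentation.

```bash
# Sketch: strip a private registry prefix the same way as the known registries
PRIVATE_REGISTRY=registry.example.local:5000 \
  ./manage-offline-container-images.sh create
```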
@@ -20,4 +20,4 @@
"'ufw.service' in services"

when:
- disable_service_firewall
- disable_service_firewall is defined and disable_service_firewall

@@ -20,20 +20,20 @@ module "aws-vpc" {

aws_cluster_name = var.aws_cluster_name
aws_vpc_cidr_block = var.aws_vpc_cidr_block
aws_avail_zones = slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names))
aws_avail_zones = data.aws_availability_zones.available.names
aws_cidr_subnets_private = var.aws_cidr_subnets_private
aws_cidr_subnets_public = var.aws_cidr_subnets_public
default_tags = var.default_tags
}

module "aws-elb" {
source = "./modules/elb"
module "aws-nlb" {
source = "./modules/nlb"

aws_cluster_name = var.aws_cluster_name
aws_vpc_id = module.aws-vpc.aws_vpc_id
aws_avail_zones = slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names))
aws_avail_zones = data.aws_availability_zones.available.names
aws_subnet_ids_public = module.aws-vpc.aws_subnet_ids_public
aws_elb_api_port = var.aws_elb_api_port
aws_nlb_api_port = var.aws_nlb_api_port
k8s_secure_api_port = var.k8s_secure_api_port
default_tags = var.default_tags
}
@@ -54,7 +54,6 @@ resource "aws_instance" "bastion-server" {
instance_type = var.aws_bastion_size
count = var.aws_bastion_num
associate_public_ip_address = true
availability_zone = element(slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names)), count.index)
subnet_id = element(module.aws-vpc.aws_subnet_ids_public, count.index)

vpc_security_group_ids = module.aws-vpc.aws_security_group
@@ -79,8 +78,7 @@ resource "aws_instance" "k8s-master" {

count = var.aws_kube_master_num

availability_zone = element(slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names)), count.index)
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)

vpc_security_group_ids = module.aws-vpc.aws_security_group

@@ -98,10 +96,10 @@ resource "aws_instance" "k8s-master" {
}))
}

resource "aws_elb_attachment" "attach_master_nodes" {
count = var.aws_kube_master_num
elb = module.aws-elb.aws_elb_api_id
instance = element(aws_instance.k8s-master.*.id, count.index)
resource "aws_lb_target_group_attachment" "tg-attach_master_nodes" {
count = var.aws_kube_master_num
target_group_arn = module.aws-nlb.aws_nlb_api_tg_arn
target_id = element(aws_instance.k8s-master.*.private_ip, count.index)
}

resource "aws_instance" "k8s-etcd" {
@@ -110,8 +108,7 @@ resource "aws_instance" "k8s-etcd" {

count = var.aws_etcd_num

availability_zone = element(slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names)), count.index)
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)

vpc_security_group_ids = module.aws-vpc.aws_security_group

@@ -134,8 +131,7 @@ resource "aws_instance" "k8s-worker" {

count = var.aws_kube_worker_num

availability_zone = element(slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names)), count.index)
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)
subnet_id = element(module.aws-vpc.aws_subnet_ids_private, count.index)

vpc_security_group_ids = module.aws-vpc.aws_security_group

@@ -168,7 +164,7 @@ data "template_file" "inventory" {
list_node = join("\n", aws_instance.k8s-worker.*.private_dns)
connection_strings_etcd = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip))
list_etcd = join("\n", ((var.aws_etcd_num > 0) ? (aws_instance.k8s-etcd.*.private_dns) : (aws_instance.k8s-master.*.private_dns)))
elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
nlb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-nlb.aws_nlb_api_fqdn}\""
}
}

@@ -1,57 +0,0 @@
resource "aws_security_group" "aws-elb" {
name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
vpc_id = var.aws_vpc_id

tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
}))
}

resource "aws_security_group_rule" "aws-allow-api-access" {
type = "ingress"
from_port = var.aws_elb_api_port
to_port = var.k8s_secure_api_port
protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = aws_security_group.aws-elb.id
}

resource "aws_security_group_rule" "aws-allow-api-egress" {
type = "egress"
from_port = 0
to_port = 65535
protocol = "TCP"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = aws_security_group.aws-elb.id
}

# Create a new AWS ELB for K8S API
resource "aws_elb" "aws-elb-api" {
name = "kubernetes-elb-${var.aws_cluster_name}"
subnets = var.aws_subnet_ids_public
security_groups = [aws_security_group.aws-elb.id]

listener {
instance_port = var.k8s_secure_api_port
instance_protocol = "tcp"
lb_port = var.aws_elb_api_port
lb_protocol = "tcp"
}

health_check {
healthy_threshold = 2
unhealthy_threshold = 2
timeout = 3
target = "HTTPS:${var.k8s_secure_api_port}/healthz"
interval = 30
}

cross_zone_load_balancing = true
idle_timeout = 400
connection_draining = true
connection_draining_timeout = 400

tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-elb-api"
}))
}
@@ -1,7 +0,0 @@
output "aws_elb_api_id" {
value = aws_elb.aws-elb-api.id
}

output "aws_elb_api_fqdn" {
value = aws_elb.aws-elb-api.dns_name
}

41 contrib/terraform/aws/modules/nlb/main.tf Normal file
@@ -0,0 +1,41 @@
# Create a new AWS NLB for K8S API
resource "aws_lb" "aws-nlb-api" {
name = "kubernetes-nlb-${var.aws_cluster_name}"
load_balancer_type = "network"
subnets = length(var.aws_subnet_ids_public) <= length(var.aws_avail_zones) ? var.aws_subnet_ids_public : slice(var.aws_subnet_ids_public, 0, length(var.aws_avail_zones))
idle_timeout = 400
enable_cross_zone_load_balancing = true

tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-nlb-api"
}))
}

# Create a new AWS NLB Instance Target Group
resource "aws_lb_target_group" "aws-nlb-api-tg" {
name = "kubernetes-nlb-tg-${var.aws_cluster_name}"
port = var.k8s_secure_api_port
protocol = "TCP"
target_type = "ip"
vpc_id = var.aws_vpc_id

health_check {
healthy_threshold = 2
unhealthy_threshold = 2
interval = 30
protocol = "HTTPS"
path = "/healthz"
}
}

# Create a new AWS NLB Listener listen to target group
resource "aws_lb_listener" "aws-nlb-api-listener" {
load_balancer_arn = aws_lb.aws-nlb-api.arn
port = var.aws_nlb_api_port
protocol = "TCP"

default_action {
type = "forward"
target_group_arn = aws_lb_target_group.aws-nlb-api-tg.arn
}
}

11 contrib/terraform/aws/modules/nlb/outputs.tf Normal file
@@ -0,0 +1,11 @@
output "aws_nlb_api_id" {
value = aws_lb.aws-nlb-api.id
}

output "aws_nlb_api_fqdn" {
value = aws_lb.aws-nlb-api.dns_name
}

output "aws_nlb_api_tg_arn" {
value = aws_lb_target_group.aws-nlb-api-tg.arn
}

@@ -6,8 +6,8 @@ variable "aws_vpc_id" {
description = "AWS VPC ID"
}

variable "aws_elb_api_port" {
description = "Port for AWS ELB"
variable "aws_nlb_api_port" {
description = "Port for AWS NLB"
}

variable "k8s_secure_api_port" {
@@ -25,13 +25,14 @@ resource "aws_internet_gateway" "cluster-vpc-internetgw" {

resource "aws_subnet" "cluster-vpc-subnets-public" {
vpc_id = aws_vpc.cluster-vpc.id
count = length(var.aws_avail_zones)
availability_zone = element(var.aws_avail_zones, count.index)
count = length(var.aws_cidr_subnets_public)
availability_zone = element(var.aws_avail_zones, count.index % length(var.aws_avail_zones))
cidr_block = element(var.aws_cidr_subnets_public, count.index)

tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public"
"kubernetes.io/cluster/${var.aws_cluster_name}" = "member"
"kubernetes.io/cluster/${var.aws_cluster_name}" = "shared"
"kubernetes.io/role/elb" = "1"
}))
}

@@ -43,12 +44,14 @@ resource "aws_nat_gateway" "cluster-nat-gateway" {

resource "aws_subnet" "cluster-vpc-subnets-private" {
vpc_id = aws_vpc.cluster-vpc.id
count = length(var.aws_avail_zones)
availability_zone = element(var.aws_avail_zones, count.index)
count = length(var.aws_cidr_subnets_private)
availability_zone = element(var.aws_avail_zones, count.index % length(var.aws_avail_zones))
cidr_block = element(var.aws_cidr_subnets_private, count.index)

tags = merge(var.default_tags, tomap({
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
"kubernetes.io/cluster/${var.aws_cluster_name}" = "shared"
"kubernetes.io/role/internal-elb" = "1"
}))
}

@@ -14,8 +14,8 @@ output "etcd" {
value = join("\n", ((var.aws_etcd_num > 0) ? (aws_instance.k8s-etcd.*.private_ip) : (aws_instance.k8s-master.*.private_ip)))
}

output "aws_elb_api_fqdn" {
value = "${module.aws-elb.aws_elb_api_fqdn}:${var.aws_elb_api_port}"
output "aws_nlb_api_fqdn" {
value = "${module.aws-nlb.aws_nlb_api_fqdn}:${var.aws_nlb_api_port}"
}

output "inventory" {

@@ -33,9 +33,9 @@ aws_kube_worker_size = "t2.medium"

aws_kube_worker_disk_size = 50

#Settings AWS ELB
#Settings AWS NLB

aws_elb_api_port = 6443
aws_nlb_api_port = 6443

k8s_secure_api_port = 6443

@@ -24,4 +24,4 @@ kube_control_plane
calico_rr

[k8s_cluster:vars]
${elb_api_fqdn}
${nlb_api_fqdn}

@@ -32,7 +32,7 @@ aws_kube_worker_size = "t3.medium"
aws_kube_worker_disk_size = 50

#Settings AWS ELB
aws_elb_api_port = 6443
aws_nlb_api_port = 6443
k8s_secure_api_port = 6443

default_tags = {

@@ -25,7 +25,7 @@ aws_kube_worker_size = "t3.medium"
aws_kube_worker_disk_size = 50

#Settings AWS ELB
aws_elb_api_port = 6443
aws_nlb_api_port = 6443
k8s_secure_api_port = 6443

default_tags = { }

@@ -104,11 +104,11 @@ variable "aws_kube_worker_size" {
}

/*
* AWS ELB Settings
* AWS NLB Settings
*
*/
variable "aws_elb_api_port" {
description = "Port for AWS ELB"
variable "aws_nlb_api_port" {
description = "Port for AWS NLB"
}

variable "k8s_secure_api_port" {
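For an existing AWS deployment, the switch from the ELB module to the NLB module shows up in the plan before anything is applied. The tfvars file name below is an assumption about how the cluster was originally configured.

```bash
# Sketch: preview the ELB -> NLB replacement from the cluster inventory dir
cd inventory/$CLUSTER
terraform plan -var-file=terraform.tfvars ../../contrib/terraform/aws
# expect aws_elb / aws_elb_attachment to be destroyed and aws_lb,
# aws_lb_target_group, aws_lb_listener resources to be created
```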
@@ -74,14 +74,23 @@ ansible-playbook -i contrib/terraform/gcs/inventory.ini cluster.yml -b -v
* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to ssh to the nodes
* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports)
* `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to ingress on ports 80 and 443

### Optional

* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project *(Defaults to `default`)*
* `master_sa_email`: Service account email to use for the master nodes *(Defaults to `""`, auto generate one)*
* `master_sa_scopes`: Service account email to use for the master nodes *(Defaults to `["https://www.googleapis.com/auth/cloud-platform"]`)*
* `master_sa_email`: Service account email to use for the control plane nodes *(Defaults to `""`, auto generate one)*
* `master_sa_scopes`: Service account email to use for the control plane nodes *(Defaults to `["https://www.googleapis.com/auth/cloud-platform"]`)*
* `master_preemptible`: Enable [preemptible](https://cloud.google.com/compute/docs/instances/preemptible)
for the control plane nodes *(Defaults to `false`)*
* `master_additional_disk_type`: [Disk type](https://cloud.google.com/compute/docs/disks/#disk-types)
for extra disks added on the control plane nodes *(Defaults to `"pd-ssd"`)*
* `worker_sa_email`: Service account email to use for the worker nodes *(Defaults to `""`, auto generate one)*
* `worker_sa_scopes`: Service account email to use for the worker nodes *(Defaults to `["https://www.googleapis.com/auth/cloud-platform"]`)*
* `worker_preemptible`: Enable [preemptible](https://cloud.google.com/compute/docs/instances/preemptible)
for the worker nodes *(Defaults to `false`)*
* `worker_additional_disk_type`: [Disk type](https://cloud.google.com/compute/docs/disks/#disk-types)
for extra disks added on the worker nodes *(Defaults to `"pd-ssd"`)*

An example variables file can be found `tfvars.json`
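The new optional knobs can be added to an existing `tfvars.json` in one step; the `jq` edit below is only a sketch and the values are placeholders, not recommendations from this change.

```bash
# Sketch: enable preemptible workers and a cheaper extra-disk type
jq '. + {"master_preemptible": false,
         "worker_preemptible": true,
         "worker_additional_disk_type": "pd-standard"}' \
   tfvars.json > tfvars.json.new && mv tfvars.json.new tfvars.json
```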
@@ -1,8 +1,16 @@
terraform {
required_providers {
google = {
source = "hashicorp/google"
version = "~> 4.0"
}
}
}

provider "google" {
credentials = file(var.keyfile_location)
region = var.region
project = var.gcp_project_id
version = "~> 3.48"
}

module "kubernetes" {
@@ -13,12 +21,17 @@ module "kubernetes" {
machines = var.machines
ssh_pub_key = var.ssh_pub_key

master_sa_email = var.master_sa_email
master_sa_scopes = var.master_sa_scopes
worker_sa_email = var.worker_sa_email
worker_sa_scopes = var.worker_sa_scopes
master_sa_email = var.master_sa_email
master_sa_scopes = var.master_sa_scopes
master_preemptible = var.master_preemptible
master_additional_disk_type = var.master_additional_disk_type
worker_sa_email = var.worker_sa_email
worker_sa_scopes = var.worker_sa_scopes
worker_preemptible = var.worker_preemptible
worker_additional_disk_type = var.worker_additional_disk_type

ssh_whitelist = var.ssh_whitelist
api_server_whitelist = var.api_server_whitelist
nodeport_whitelist = var.nodeport_whitelist
ingress_whitelist = var.ingress_whitelist
}
@@ -5,6 +5,8 @@

resource "google_compute_network" "main" {
name = "${var.prefix}-network"

auto_create_subnetworks = false
}

resource "google_compute_subnetwork" "main" {
@@ -20,6 +22,8 @@ resource "google_compute_firewall" "deny_all" {

priority = 1000

source_ranges = ["0.0.0.0/0"]

deny {
protocol = "all"
}
@@ -39,6 +43,8 @@ resource "google_compute_firewall" "allow_internal" {
}

resource "google_compute_firewall" "ssh" {
count = length(var.ssh_whitelist) > 0 ? 1 : 0

name = "${var.prefix}-ssh-firewall"
network = google_compute_network.main.name

@@ -53,6 +59,8 @@ resource "google_compute_firewall" "ssh" {
}

resource "google_compute_firewall" "api_server" {
count = length(var.api_server_whitelist) > 0 ? 1 : 0

name = "${var.prefix}-api-server-firewall"
network = google_compute_network.main.name

@@ -67,6 +75,8 @@ resource "google_compute_firewall" "api_server" {
}

resource "google_compute_firewall" "nodeport" {
count = length(var.nodeport_whitelist) > 0 ? 1 : 0

name = "${var.prefix}-nodeport-firewall"
network = google_compute_network.main.name

@@ -81,11 +91,15 @@ resource "google_compute_firewall" "nodeport" {
}

resource "google_compute_firewall" "ingress_http" {
count = length(var.ingress_whitelist) > 0 ? 1 : 0

name = "${var.prefix}-http-ingress-firewall"
network = google_compute_network.main.name

priority = 100

source_ranges = var.ingress_whitelist

allow {
protocol = "tcp"
ports = ["80"]
@@ -93,11 +107,15 @@ resource "google_compute_firewall" "ingress_http" {
}

resource "google_compute_firewall" "ingress_https" {
count = length(var.ingress_whitelist) > 0 ? 1 : 0

name = "${var.prefix}-https-ingress-firewall"
network = google_compute_network.main.name

priority = 100

source_ranges = var.ingress_whitelist

allow {
protocol = "tcp"
ports = ["443"]
@@ -173,7 +191,7 @@ resource "google_compute_disk" "master" {
}

name = "${var.prefix}-${each.key}"
type = "pd-ssd"
type = var.master_additional_disk_type
zone = each.value.machine.zone
size = each.value.disk_size

@@ -229,19 +247,28 @@ resource "google_compute_instance" "master" {

# Since we use google_compute_attached_disk we need to ignore this
lifecycle {
ignore_changes = ["attached_disk"]
ignore_changes = [attached_disk]
}

scheduling {
preemptible = var.master_preemptible
automatic_restart = !var.master_preemptible
}
}

resource "google_compute_forwarding_rule" "master_lb" {
count = length(var.api_server_whitelist) > 0 ? 1 : 0

name = "${var.prefix}-master-lb-forward-rule"

port_range = "6443"

target = google_compute_target_pool.master_lb.id
target = google_compute_target_pool.master_lb[count.index].id
}

resource "google_compute_target_pool" "master_lb" {
count = length(var.api_server_whitelist) > 0 ? 1 : 0

name = "${var.prefix}-master-lb-pool"
instances = local.master_target_list
}
@@ -258,7 +285,7 @@ resource "google_compute_disk" "worker" {
}

name = "${var.prefix}-${each.key}"
type = "pd-ssd"
type = var.worker_additional_disk_type
zone = each.value.machine.zone
size = each.value.disk_size

@@ -326,35 +353,48 @@ resource "google_compute_instance" "worker" {

# Since we use google_compute_attached_disk we need to ignore this
lifecycle {
ignore_changes = ["attached_disk"]
ignore_changes = [attached_disk]
}

scheduling {
preemptible = var.worker_preemptible
automatic_restart = !var.worker_preemptible
}
}

resource "google_compute_address" "worker_lb" {
count = length(var.ingress_whitelist) > 0 ? 1 : 0

name = "${var.prefix}-worker-lb-address"
address_type = "EXTERNAL"
region = var.region
}

resource "google_compute_forwarding_rule" "worker_http_lb" {
count = length(var.ingress_whitelist) > 0 ? 1 : 0

name = "${var.prefix}-worker-http-lb-forward-rule"

ip_address = google_compute_address.worker_lb.address
ip_address = google_compute_address.worker_lb[count.index].address
port_range = "80"

target = google_compute_target_pool.worker_lb.id
target = google_compute_target_pool.worker_lb[count.index].id
}

resource "google_compute_forwarding_rule" "worker_https_lb" {
count = length(var.ingress_whitelist) > 0 ? 1 : 0

name = "${var.prefix}-worker-https-lb-forward-rule"

ip_address = google_compute_address.worker_lb.address
ip_address = google_compute_address.worker_lb[count.index].address
port_range = "443"

target = google_compute_target_pool.worker_lb.id
target = google_compute_target_pool.worker_lb[count.index].id
}

resource "google_compute_target_pool" "worker_lb" {
count = length(var.ingress_whitelist) > 0 ? 1 : 0

name = "${var.prefix}-worker-lb-pool"
instances = local.worker_target_list
}

@@ -19,9 +19,9 @@ output "worker_ip_addresses" {
}

output "ingress_controller_lb_ip_address" {
value = google_compute_address.worker_lb.address
value = length(var.ingress_whitelist) > 0 ? google_compute_address.worker_lb.0.address : ""
}

output "control_plane_lb_ip_address" {
value = google_compute_forwarding_rule.master_lb.ip_address
value = length(var.api_server_whitelist) > 0 ? google_compute_forwarding_rule.master_lb.0.ip_address : ""
}
@@ -27,6 +27,14 @@ variable "master_sa_scopes" {
type = list(string)
}

variable "master_preemptible" {
type = bool
}

variable "master_additional_disk_type" {
type = string
}

variable "worker_sa_email" {
type = string
}
@@ -35,6 +43,14 @@ variable "worker_sa_scopes" {
type = list(string)
}

variable "worker_preemptible" {
type = bool
}

variable "worker_additional_disk_type" {
type = string
}

variable "ssh_pub_key" {}

variable "ssh_whitelist" {
@@ -49,6 +65,11 @@ variable "nodeport_whitelist" {
type = list(string)
}

variable "ingress_whitelist" {
type = list(string)
default = ["0.0.0.0/0"]
}

variable "private_network_cidr" {
default = "10.0.10.0/24"
}

@@ -16,6 +16,9 @@
"nodeport_whitelist": [
"1.2.3.4/32"
],
"ingress_whitelist": [
"0.0.0.0/0"
],

"machines": {
"master-0": {
@@ -24,7 +27,7 @@
"zone": "us-central1-a",
"additional_disks": {},
"boot_disk": {
"image_name": "ubuntu-os-cloud/ubuntu-1804-bionic-v20201116",
"image_name": "ubuntu-os-cloud/ubuntu-2004-focal-v20220118",
"size": 50
}
},
@@ -38,7 +41,7 @@
}
},
"boot_disk": {
"image_name": "ubuntu-os-cloud/ubuntu-1804-bionic-v20201116",
"image_name": "ubuntu-os-cloud/ubuntu-2004-focal-v20220118",
"size": 50
}
},
@@ -52,7 +55,7 @@
}
},
"boot_disk": {
"image_name": "ubuntu-os-cloud/ubuntu-1804-bionic-v20201116",
"image_name": "ubuntu-os-cloud/ubuntu-2004-focal-v20220118",
"size": 50
}
}

@@ -44,6 +44,16 @@ variable "master_sa_scopes" {
default = ["https://www.googleapis.com/auth/cloud-platform"]
}

variable "master_preemptible" {
type = bool
default = false
}

variable "master_additional_disk_type" {
type = string
default = "pd-ssd"
}

variable "worker_sa_email" {
type = string
default = ""
@@ -54,6 +64,16 @@ variable "worker_sa_scopes" {
default = ["https://www.googleapis.com/auth/cloud-platform"]
}

variable "worker_preemptible" {
type = bool
default = false
}

variable "worker_additional_disk_type" {
type = string
default = "pd-ssd"
}

variable ssh_pub_key {
description = "Path to public SSH key file which is injected into the VMs."
type = string
@@ -70,3 +90,8 @@ variable api_server_whitelist {
variable nodeport_whitelist {
type = list(string)
}

variable "ingress_whitelist" {
type = list(string)
default = ["0.0.0.0/0"]
}
108 contrib/terraform/hetzner/README.md Normal file
@@ -0,0 +1,108 @@
# Kubernetes on Hetzner with Terraform

Provision a Kubernetes cluster on [Hetzner](https://www.hetzner.com/cloud) using Terraform and Kubespray

## Overview

The setup looks like following

```text
Kubernetes cluster
+--------------------------+
|      +--------------+    |
|      | +--------------+  |
|  --> | |              |  |
|      | | Master/etcd  |  |
|      | | node(s)      |  |
|      +-+              |  |
|        +--------------+  |
|              ^           |
|              |           |
|              v           |
|      +--------------+    |
|      | +--------------+  |
|  --> | |              |  |
|      | | Worker       |  |
|      | | node(s)      |  |
|      +-+              |  |
|        +--------------+  |
+--------------------------+
```

The nodes uses a private network for node to node communication and a public interface for all external communication.

## Requirements

* Terraform 0.14.0 or newer

## Quickstart

NOTE: Assumes you are at the root of the kubespray repo.

For authentication in your cluster you can use the environment variables.

```bash
export HCLOUD_TOKEN=api-token
```

Copy the cluster configuration file.

```bash
CLUSTER=my-hetzner-cluster
cp -r inventory/sample inventory/$CLUSTER
cp contrib/terraform/hetzner/default.tfvars inventory/$CLUSTER/
cd inventory/$CLUSTER
```

Edit `default.tfvars` to match your requirement.

Run Terraform to create the infrastructure.

```bash
terraform init ../../contrib/terraform/hetzner
terraform apply --var-file default.tfvars ../../contrib/terraform/hetzner/
```

You should now have a inventory file named `inventory.ini` that you can use with kubespray.
You can use the inventory file with kubespray to set up a cluster.

It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by:

```bash
ansible -i inventory.ini -m ping all
```

You can setup Kubernetes with kubespray using the generated inventory:

```bash
ansible-playbook -i inventory.ini ../../cluster.yml -b -v
```

## Cloud controller

For better support with the cloud you can install the [hcloud cloud controller](https://github.com/hetznercloud/hcloud-cloud-controller-manager) and [CSI driver](https://github.com/hetznercloud/csi-driver).

Please read the instructions in both repos on how to install it.

## Teardown

You can teardown your infrastructure using the following Terraform command:

```bash
terraform destroy --var-file default.tfvars ../../contrib/terraform/hetzner
```

## Variables

* `prefix`: Prefix to add to all resources, if set to "" don't set any prefix
* `ssh_public_keys`: List of public SSH keys to install on all machines
* `zone`: The zone where to run the cluster
* `network_zone`: the network zone where the cluster is running
* `machines`: Machines to provision. Key of this object will be used as the name of the machine
  * `node_type`: The role of this node *(master|worker)*
  * `size`: Size of the VM
  * `image`: The image to use for the VM
* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to ssh to the nodes
* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports)
* `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to kubernetes workers on port 80 and 443

44 contrib/terraform/hetzner/default.tfvars Normal file
@@ -0,0 +1,44 @@
prefix = "default"
zone = "hel1"
network_zone = "eu-central"
inventory_file = "inventory.ini"

ssh_public_keys = [
# Put your public SSH key here
"ssh-rsa I-did-not-read-the-docs",
"ssh-rsa I-did-not-read-the-docs 2",
]

machines = {
"master-0" : {
"node_type" : "master",
"size" : "cx21",
"image" : "ubuntu-20.04",
},
"worker-0" : {
"node_type" : "worker",
"size" : "cx21",
"image" : "ubuntu-20.04",
},
"worker-1" : {
"node_type" : "worker",
"size" : "cx21",
"image" : "ubuntu-20.04",
}
}

nodeport_whitelist = [
"0.0.0.0/0"
]

ingress_whitelist = [
"0.0.0.0/0"
]

ssh_whitelist = [
"0.0.0.0/0"
]

api_server_whitelist = [
"0.0.0.0/0"
]
52 contrib/terraform/hetzner/main.tf Normal file
@@ -0,0 +1,52 @@
provider "hcloud" {}

module "kubernetes" {
source = "./modules/kubernetes-cluster"

prefix = var.prefix

zone = var.zone

machines = var.machines

ssh_public_keys = var.ssh_public_keys
network_zone = var.network_zone

ssh_whitelist = var.ssh_whitelist
api_server_whitelist = var.api_server_whitelist
nodeport_whitelist = var.nodeport_whitelist
ingress_whitelist = var.ingress_whitelist
}

#
# Generate ansible inventory
#

data "template_file" "inventory" {
template = file("${path.module}/templates/inventory.tpl")

vars = {
connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d",
keys(module.kubernetes.master_ip_addresses),
values(module.kubernetes.master_ip_addresses).*.public_ip,
values(module.kubernetes.master_ip_addresses).*.private_ip,
range(1, length(module.kubernetes.master_ip_addresses) + 1)))
connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s",
keys(module.kubernetes.worker_ip_addresses),
values(module.kubernetes.worker_ip_addresses).*.public_ip,
values(module.kubernetes.worker_ip_addresses).*.private_ip))
list_master = join("\n", keys(module.kubernetes.master_ip_addresses))
list_worker = join("\n", keys(module.kubernetes.worker_ip_addresses))
network_id = module.kubernetes.network_id
}
}

resource "null_resource" "inventories" {
provisioner "local-exec" {
command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
}

triggers = {
template = data.template_file.inventory.rendered
}
}

122 contrib/terraform/hetzner/modules/kubernetes-cluster/main.tf Normal file
@@ -0,0 +1,122 @@
resource "hcloud_network" "kubernetes" {
name = "${var.prefix}-network"
ip_range = var.private_network_cidr
}

resource "hcloud_network_subnet" "kubernetes" {
type = "cloud"
network_id = hcloud_network.kubernetes.id
network_zone = var.network_zone
ip_range = var.private_subnet_cidr
}

resource "hcloud_server" "master" {
for_each = {
for name, machine in var.machines :
name => machine
if machine.node_type == "master"
}

name = "${var.prefix}-${each.key}"
image = each.value.image
server_type = each.value.size
location = var.zone

user_data = templatefile(
"${path.module}/templates/cloud-init.tmpl",
{
ssh_public_keys = var.ssh_public_keys
}
)

firewall_ids = [hcloud_firewall.master.id]
}

resource "hcloud_server_network" "master" {
for_each = hcloud_server.master

server_id = each.value.id

subnet_id = hcloud_network_subnet.kubernetes.id
}

resource "hcloud_server" "worker" {
for_each = {
for name, machine in var.machines :
name => machine
if machine.node_type == "worker"
}

name = "${var.prefix}-${each.key}"
image = each.value.image
server_type = each.value.size
location = var.zone

user_data = templatefile(
"${path.module}/templates/cloud-init.tmpl",
{
ssh_public_keys = var.ssh_public_keys
}
)

firewall_ids = [hcloud_firewall.worker.id]

}

resource "hcloud_server_network" "worker" {
for_each = hcloud_server.worker

server_id = each.value.id

subnet_id = hcloud_network_subnet.kubernetes.id
}

resource "hcloud_firewall" "master" {
name = "${var.prefix}-master-firewall"

rule {
direction = "in"
protocol = "tcp"
port = "22"
source_ips = var.ssh_whitelist
}

rule {
direction = "in"
protocol = "tcp"
port = "6443"
source_ips = var.api_server_whitelist
}
}

resource "hcloud_firewall" "worker" {
name = "${var.prefix}-worker-firewall"

rule {
direction = "in"
protocol = "tcp"
port = "22"
source_ips = var.ssh_whitelist
}

rule {
direction = "in"
protocol = "tcp"
port = "80"
source_ips = var.ingress_whitelist
}

rule {
direction = "in"
protocol = "tcp"
port = "443"
source_ips = var.ingress_whitelist
}

rule {
direction = "in"
protocol = "tcp"
port = "30000-32767"
source_ips = var.nodeport_whitelist
}
}
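Once the module has been applied, the per-role firewalls it creates can be inspected with the optional `hcloud` CLI; the tool is not required by the module, and the firewall name below assumes the default `prefix`.

```bash
# Sketch: list the firewalls and check the master rules
hcloud firewall list
hcloud firewall describe default-master-firewall
```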
@@ -0,0 +1,27 @@
output "master_ip_addresses" {
value = {
for key, instance in hcloud_server.master :
instance.name => {
"private_ip" = hcloud_server_network.master[key].ip
"public_ip" = hcloud_server.master[key].ipv4_address
}
}
}

output "worker_ip_addresses" {
value = {
for key, instance in hcloud_server.worker :
instance.name => {
"private_ip" = hcloud_server_network.worker[key].ip
"public_ip" = hcloud_server.worker[key].ipv4_address
}
}
}

output "cluster_private_network_cidr" {
value = var.private_subnet_cidr
}

output "network_id" {
value = hcloud_network.kubernetes.id
}

@@ -0,0 +1,17 @@
#cloud-config

users:
- default
- name: ubuntu
shell: /bin/bash
sudo: "ALL=(ALL) NOPASSWD:ALL"
ssh_authorized_keys:
%{ for ssh_public_key in ssh_public_keys ~}
- ${ssh_public_key}
%{ endfor ~}

ssh_authorized_keys:
%{ for ssh_public_key in ssh_public_keys ~}
- ${ssh_public_key}
%{ endfor ~}

@@ -0,0 +1,44 @@
variable "zone" {
type = string
}

variable "prefix" {}

variable "machines" {
type = map(object({
node_type = string
size = string
image = string
}))
}

variable "ssh_public_keys" {
type = list(string)
}

variable "ssh_whitelist" {
type = list(string)
}

variable "api_server_whitelist" {
type = list(string)
}

variable "nodeport_whitelist" {
type = list(string)
}

variable "ingress_whitelist" {
type = list(string)
}

variable "private_network_cidr" {
default = "10.0.0.0/16"
}

variable "private_subnet_cidr" {
default = "10.0.10.0/24"
}
variable "network_zone" {
default = "eu-central"
}

@@ -0,0 +1,9 @@
terraform {
required_providers {
hcloud = {
source = "hetznercloud/hcloud"
version = "1.31.1"
}
}
required_version = ">= 0.14"
}
7 contrib/terraform/hetzner/output.tf Normal file
@@ -0,0 +1,7 @@
output "master_ips" {
value = module.kubernetes.master_ip_addresses
}

output "worker_ips" {
value = module.kubernetes.worker_ip_addresses
}

19 contrib/terraform/hetzner/templates/inventory.tpl Normal file
@@ -0,0 +1,19 @@
[all]
${connection_strings_master}
${connection_strings_worker}

[kube-master]
${list_master}

[etcd]
${list_master}

[kube-node]
${list_worker}

[k8s-cluster:children]
kube-master
kube-node

[k8s-cluster:vars]
network_id=${network_id}

50 contrib/terraform/hetzner/variables.tf Normal file
@@ -0,0 +1,50 @@
variable "zone" {
description = "The zone where to run the cluster"
}
variable "network_zone" {
description = "The network zone where the cluster is running"
default = "eu-central"
}

variable "prefix" {
description = "Prefix for resource names"
default = "default"
}

variable "machines" {
description = "Cluster machines"
type = map(object({
node_type = string
size = string
image = string
}))
}

variable "ssh_public_keys" {
description = "Public SSH key which are injected into the VMs."
type = list(string)
}

variable "ssh_whitelist" {
description = "List of IP ranges (CIDR) to whitelist for ssh"
type = list(string)
}

variable "api_server_whitelist" {
description = "List of IP ranges (CIDR) to whitelist for kubernetes api server"
type = list(string)
}

variable "nodeport_whitelist" {
description = "List of IP ranges (CIDR) to whitelist for kubernetes nodeports"
type = list(string)
}

variable "ingress_whitelist" {
description = "List of IP ranges (CIDR) to whitelist for HTTP"
type = list(string)
}

variable "inventory_file" {
description = "Where to store the generated inventory file"
}

15 contrib/terraform/hetzner/versions.tf Normal file
@@ -0,0 +1,15 @@
terraform {
required_providers {
hcloud = {
source = "hetznercloud/hcloud"
version = "1.31.1"
}
null = {
source = "hashicorp/null"
}
template = {
source = "hashicorp/template"
}
}
required_version = ">= 0.14"
}
@@ -35,7 +35,7 @@ now six total etcd replicas.
## Requirements

- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
- Install dependencies: `sudo pip install -r requirements.txt`
- [Install Ansible dependencies](/docs/ansible.md#installing-ansible)
- Account with Equinix Metal
- An SSH key pair

@@ -60,9 +60,9 @@ Terraform will be used to provision all of the Equinix Metal resources with base
Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state):

```ShellSession
cp -LRp contrib/terraform/packet/sample-inventory inventory/$CLUSTER
cp -LRp contrib/terraform/metal/sample-inventory inventory/$CLUSTER
cd inventory/$CLUSTER
ln -s ../../contrib/terraform/packet/hosts
ln -s ../../contrib/terraform/metal/hosts
```

This will be the base for subsequent Terraform commands.
@@ -101,7 +101,7 @@ This helps when identifying which hosts are associated with each cluster.
While the defaults in variables.tf will successfully deploy a cluster, it is recommended to set the following values:

- cluster_name = the name of the inventory directory created above as $CLUSTER
- packet_project_id = the Equinix Metal Project ID associated with the Equinix Metal API token above
- metal_project_id = the Equinix Metal Project ID associated with the Equinix Metal API token above

#### Enable localhost access

@@ -119,7 +119,7 @@ Once the Kubespray playbooks are run, a Kubernetes configuration file will be wr

In the cluster's inventory folder, the following files might be created (either by Terraform
or manually), to prevent you from pushing them accidentally they are in a
`.gitignore` file in the `terraform/packet` directory :
`.gitignore` file in the `terraform/metal` directory :

- `.terraform`
- `.tfvars`
@@ -135,7 +135,7 @@ plugins. This is accomplished as follows:

```ShellSession
cd inventory/$CLUSTER
terraform init ../../contrib/terraform/packet
terraform init ../../contrib/terraform/metal
```

This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules.
@@ -146,7 +146,7 @@ You can apply the Terraform configuration to your cluster with the following com
issued from your cluster's inventory directory (`inventory/$CLUSTER`):

```ShellSession
terraform apply -var-file=cluster.tfvars ../../contrib/terraform/packet
terraform apply -var-file=cluster.tfvars ../../contrib/terraform/metal
export ANSIBLE_HOST_KEY_CHECKING=False
ansible-playbook -i hosts ../../cluster.yml
```
@@ -156,7 +156,7 @@ ansible-playbook -i hosts ../../cluster.yml
You can destroy your new cluster with the following command issued from the cluster's inventory directory:

```ShellSession
terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/packet
terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/metal
```

If you've started the Ansible run, it may also be a good idea to do some manual cleanup:
@ -1,16 +1,15 @@
|
||||
# Configure the Equinix Metal Provider
|
||||
provider "packet" {
|
||||
version = "~> 2.0"
|
||||
provider "metal" {
|
||||
}
|
||||
|
||||
resource "packet_ssh_key" "k8s" {
|
||||
resource "metal_ssh_key" "k8s" {
|
||||
count = var.public_key_path != "" ? 1 : 0
|
||||
name = "kubernetes-${var.cluster_name}"
|
||||
public_key = chomp(file(var.public_key_path))
|
||||
}
|
||||
|
||||
resource "packet_device" "k8s_master" {
|
||||
depends_on = [packet_ssh_key.k8s]
|
||||
resource "metal_device" "k8s_master" {
|
||||
depends_on = [metal_ssh_key.k8s]
|
||||
|
||||
count = var.number_of_k8s_masters
|
||||
hostname = "${var.cluster_name}-k8s-master-${count.index + 1}"
|
||||
@ -18,12 +17,12 @@ resource "packet_device" "k8s_master" {
|
||||
facilities = [var.facility]
|
||||
operating_system = var.operating_system
|
||||
billing_cycle = var.billing_cycle
|
||||
project_id = var.packet_project_id
|
||||
project_id = var.metal_project_id
|
||||
tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane", "etcd", "kube_node"]
|
||||
}
|
||||
|
||||
resource "packet_device" "k8s_master_no_etcd" {
|
||||
depends_on = [packet_ssh_key.k8s]
|
||||
resource "metal_device" "k8s_master_no_etcd" {
|
||||
depends_on = [metal_ssh_key.k8s]
|
||||
|
||||
count = var.number_of_k8s_masters_no_etcd
|
||||
hostname = "${var.cluster_name}-k8s-master-${count.index + 1}"
|
||||
@ -31,12 +30,12 @@ resource "packet_device" "k8s_master_no_etcd" {
|
||||
facilities = [var.facility]
|
||||
operating_system = var.operating_system
|
||||
billing_cycle = var.billing_cycle
|
||||
project_id = var.packet_project_id
|
||||
project_id = var.metal_project_id
|
||||
tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane"]
|
||||
}
|
||||
|
||||
resource "packet_device" "k8s_etcd" {
|
||||
depends_on = [packet_ssh_key.k8s]
|
||||
resource "metal_device" "k8s_etcd" {
|
||||
depends_on = [metal_ssh_key.k8s]
|
||||
|
||||
count = var.number_of_etcd
|
||||
hostname = "${var.cluster_name}-etcd-${count.index + 1}"
|
||||
@ -44,12 +43,12 @@ resource "packet_device" "k8s_etcd" {
|
||||
facilities = [var.facility]
|
||||
operating_system = var.operating_system
|
||||
billing_cycle = var.billing_cycle
|
||||
project_id = var.packet_project_id
|
||||
project_id = var.metal_project_id
|
||||
tags = ["cluster-${var.cluster_name}", "etcd"]
|
||||
}
|
||||
|
||||
resource "packet_device" "k8s_node" {
|
||||
depends_on = [packet_ssh_key.k8s]
|
||||
resource "metal_device" "k8s_node" {
|
||||
depends_on = [metal_ssh_key.k8s]
|
||||
|
||||
count = var.number_of_k8s_nodes
|
||||
hostname = "${var.cluster_name}-k8s-node-${count.index + 1}"
|
||||
@ -57,7 +56,7 @@ resource "packet_device" "k8s_node" {
|
||||
facilities = [var.facility]
|
||||
operating_system = var.operating_system
|
||||
billing_cycle = var.billing_cycle
|
||||
project_id = var.packet_project_id
|
||||
project_id = var.metal_project_id
|
||||
tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_node"]
|
||||
}
|
||||
|
contrib/terraform/metal/output.tf (new file, 16 lines)

@@ -0,0 +1,16 @@

output "k8s_masters" {
  value = metal_device.k8s_master.*.access_public_ipv4
}

output "k8s_masters_no_etc" {
  value = metal_device.k8s_master_no_etcd.*.access_public_ipv4
}

output "k8s_etcds" {
  value = metal_device.k8s_etcd.*.access_public_ipv4
}

output "k8s_nodes" {
  value = metal_device.k8s_node.*.access_public_ipv4
}
@@ -2,7 +2,7 @@

cluster_name = "mycluster"

# Your Equinix Metal project ID. See https://metal.equinix.com/developers/docs/accounts/
packet_project_id = "Example-API-Token"
metal_project_id = "Example-API-Token"

# The public SSH key to be uploaded into authorized_keys on the bare metal Equinix Metal nodes provisioned
# leave this value blank if the public key is already set up in the Equinix Metal project
@ -2,12 +2,12 @@ variable "cluster_name" {
|
||||
default = "kubespray"
|
||||
}
|
||||
|
||||
variable "packet_project_id" {
|
||||
variable "metal_project_id" {
|
||||
description = "Your Equinix Metal project ID. See https://metal.equinix.com/developers/docs/accounts/"
|
||||
}
|
||||
|
||||
variable "operating_system" {
|
||||
default = "ubuntu_16_04"
|
||||
default = "ubuntu_20_04"
|
||||
}
|
||||
|
||||
variable "public_key_path" {
|
||||
@ -24,23 +24,23 @@ variable "facility" {
|
||||
}
|
||||
|
||||
variable "plan_k8s_masters" {
|
||||
default = "c2.medium.x86"
|
||||
default = "c3.small.x86"
|
||||
}
|
||||
|
||||
variable "plan_k8s_masters_no_etcd" {
|
||||
default = "c2.medium.x86"
|
||||
default = "c3.small.x86"
|
||||
}
|
||||
|
||||
variable "plan_etcd" {
|
||||
default = "c2.medium.x86"
|
||||
default = "c3.small.x86"
|
||||
}
|
||||
|
||||
variable "plan_k8s_nodes" {
|
||||
default = "c2.medium.x86"
|
||||
default = "c3.medium.x86"
|
||||
}
|
||||
|
||||
variable "number_of_k8s_masters" {
|
||||
default = 0
|
||||
default = 1
|
||||
}
|
||||
|
||||
variable "number_of_k8s_masters_no_etcd" {
|
||||
@ -52,6 +52,6 @@ variable "number_of_etcd" {
|
||||
}
|
||||
|
||||
variable "number_of_k8s_nodes" {
|
||||
default = 0
|
||||
default = 1
|
||||
}
|
||||

@@ -2,8 +2,8 @@

terraform {
  required_version = ">= 0.12"
  required_providers {
    packet = {
      source = "terraform-providers/packet"
    metal = {
      source = "equinix/metal"
    }
  }
}
@@ -17,9 +17,10 @@ most modern installs of OpenStack that support the basic services.

- [ELASTX](https://elastx.se/)
- [EnterCloudSuite](https://www.entercloudsuite.com/)
- [FugaCloud](https://fuga.cloud/)
- [Open Telekom Cloud](https://cloud.telekom.de/) : requires to set the variable `wait_for_floatingip = "true"` in your cluster.tfvars
- [Open Telekom Cloud](https://cloud.telekom.de/)
- [OVH](https://www.ovh.com/)
- [Rackspace](https://www.rackspace.com/)
- [Safespring](https://www.safespring.com)
- [Ultimum](https://ultimum.io/)
- [VexxHost](https://vexxhost.com/)
- [Zetta](https://www.zetta.io/)

@@ -247,10 +248,12 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.

|`cluster_name` | All OpenStack resources will use the Terraform variable `cluster_name` (default `example`) in their name to make it easier to track. For example the first compute resource will be named `example-kubernetes-1`. |
|`az_list` | List of Availability Zones available in your OpenStack cluster. |
|`network_name` | The name to be given to the internal network that will be generated |
|`use_existing_network`| Use an existing network with the name of `network_name`. `false` by default |
|`network_dns_domain` | (Optional) The dns_domain for the internal network that will be generated |
|`dns_nameservers`| An array of DNS name server names to be used by hosts in the internal subnet. |
|`floatingip_pool` | Name of the pool from which floating IPs will be allocated |
|`k8s_master_fips` | A list of floating IPs that you have already pre-allocated; they will be attached to master nodes instead of creating new random floating IPs. |
|`bastion_fips` | A list of floating IPs that you have already pre-allocated; they will be attached to the bastion node instead of creating new random floating IPs. |
|`external_net` | UUID of the external network that will be routed to |
|`flavor_k8s_master`,`flavor_k8s_node`,`flavor_etcd`, `flavor_bastion`,`flavor_gfs_node` | Flavor depends on your openstack installation, you can get available flavor IDs through `openstack flavor list` |
|`image`,`image_gfs` | Name of the image to use in provisioning the compute resources. Should already be loaded into glance. |

@@ -270,16 +273,21 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.

|`k8s_allowed_remote_ips` | List of CIDRs allowed to initiate an SSH connection, empty by default |
|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
|`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |
|`wait_for_floatingip` | Let Terraform poll the instance until the floating IP has been associated, `false` by default. |
|`node_root_volume_size_in_gb` | Size of the root volume for nodes, 0 to use ephemeral storage |
|`master_root_volume_size_in_gb` | Size of the root volume for masters, 0 to use ephemeral storage |
|`master_volume_type` | Volume type of the root volume for control_plane, 'Default' by default |
|`node_volume_type` | Volume type of the root volume for nodes, 'Default' by default |
|`gfs_root_volume_size_in_gb` | Size of the root volume for gluster, 0 to use ephemeral storage |
|`etcd_root_volume_size_in_gb` | Size of the root volume for etcd nodes, 0 to use ephemeral storage |
|`bastion_root_volume_size_in_gb` | Size of the root volume for bastions, 0 to use ephemeral storage |
|`use_server_group` | Create and use openstack nova servergroups, default: false |
|`master_server_group_policy` | Enable and use openstack nova servergroups for masters with set policy, default: "" (disabled) |
|`node_server_group_policy` | Enable and use openstack nova servergroups for nodes with set policy, default: "" (disabled) |
|`etcd_server_group_policy` | Enable and use openstack nova servergroups for etcd with set policy, default: "" (disabled) |
|`use_access_ip` | If 1, nodes with floating IPs will transmit internal cluster traffic via floating IPs; if 0 private IPs will be used instead. Default value is 1. |
|`port_security_enabled` | Allow to disable port security by setting this to `false`. `true` by default |
|`force_null_port_security` | Set `null` instead of `true` or `false` for `port_security`. `false` by default |
|`k8s_nodes` | Map containing worker node definition, see explanation below |
|`k8s_masters` | Map containing master node definition, see explanation for k8s_nodes and `sample-inventory/cluster.tfvars` |
##### k8s_nodes
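
The exact structure of this map is defined by the Terraform variables in `contrib/terraform/openstack`; as a rough sketch, mirroring the commented `k8s_masters` example in the sample `cluster.tfvars` (availability zone and flavor values are placeholders you must replace), an entry might look like:

```HCL
# Illustrative only -- replace az/flavor with values valid for your cloud
k8s_nodes = {
  "1" = {
    "az"          = "nova"
    "flavor"      = "<flavor UUID>"
    "floating_ip" = true
  },
  "2" = {
    "az"          = "nova"
    "flavor"      = "<flavor UUID>"
    "floating_ip" = false
  }
}
```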
@@ -407,18 +415,39 @@ plugins. This is accomplished as follows:

```ShellSession
cd inventory/$CLUSTER
terraform init ../../contrib/terraform/openstack
terraform -chdir="../../contrib/terraform/openstack" init
```

This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules.

### Customizing with cloud-init

You can apply cloud-init based customization for the openstack instances before provisioning your cluster.
One common template is used for all instances. Adjust the file shown below:
`contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml`
For example, to enable openstack novnc access and ansible_user=root SSH access:

```ShellSession
#cloud-config
## in some cases novnc console access is required
## it requires ssh password to be set
ssh_pwauth: yes
chpasswd:
  list: |
    root:secret
  expire: False

## in some cases direct root ssh access via ssh key is required
disable_root: false
```

### Provisioning cluster

You can apply the Terraform configuration to your cluster with the following command
issued from your cluster's inventory directory (`inventory/$CLUSTER`):

```ShellSession
terraform apply -var-file=cluster.tfvars ../../contrib/terraform/openstack
terraform -chdir="../../contrib/terraform/openstack" apply -var-file=cluster.tfvars
```

if you chose to create a bastion host, this script will create

@@ -433,7 +462,7 @@ pick it up automatically.

You can destroy your new cluster with the following command issued from the cluster's inventory directory:

```ShellSession
terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/openstack
terraform -chdir="../../contrib/terraform/openstack" destroy -var-file=cluster.tfvars
```
If you've started the Ansible run, it may also be a good idea to do some manual cleanup:
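
As with the bare-metal guide, the exact files depend on your run; a rough sketch, assuming the inventory layout used above (paths are illustrative):

```ShellSession
# remove artifacts left by the Terraform/Ansible run (illustrative paths)
rm -f inventory/$CLUSTER/group_vars/no_floating.yml inventory/$CLUSTER/*.tfstate*
# forget the destroyed nodes' SSH host keys
ssh-keygen -R <node-floating-ip>
```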
@ -1,14 +1,15 @@
|
||||
module "network" {
|
||||
source = "./modules/network"
|
||||
|
||||
external_net = var.external_net
|
||||
network_name = var.network_name
|
||||
subnet_cidr = var.subnet_cidr
|
||||
cluster_name = var.cluster_name
|
||||
dns_nameservers = var.dns_nameservers
|
||||
network_dns_domain = var.network_dns_domain
|
||||
use_neutron = var.use_neutron
|
||||
router_id = var.router_id
|
||||
external_net = var.external_net
|
||||
network_name = var.network_name
|
||||
subnet_cidr = var.subnet_cidr
|
||||
cluster_name = var.cluster_name
|
||||
dns_nameservers = var.dns_nameservers
|
||||
network_dns_domain = var.network_dns_domain
|
||||
use_neutron = var.use_neutron
|
||||
port_security_enabled = var.port_security_enabled
|
||||
router_id = var.router_id
|
||||
}
|
||||
|
||||
module "ips" {
|
||||
@ -23,7 +24,9 @@ module "ips" {
|
||||
network_name = var.network_name
|
||||
router_id = module.network.router_id
|
||||
k8s_nodes = var.k8s_nodes
|
||||
k8s_masters = var.k8s_masters
|
||||
k8s_master_fips = var.k8s_master_fips
|
||||
bastion_fips = var.bastion_fips
|
||||
router_internal_port_id = module.network.router_internal_port_id
|
||||
}
|
||||
|
||||
@ -42,6 +45,7 @@ module "compute" {
|
||||
number_of_bastions = var.number_of_bastions
|
||||
number_of_k8s_nodes_no_floating_ip = var.number_of_k8s_nodes_no_floating_ip
|
||||
number_of_gfs_nodes_no_floating_ip = var.number_of_gfs_nodes_no_floating_ip
|
||||
k8s_masters = var.k8s_masters
|
||||
k8s_nodes = var.k8s_nodes
|
||||
bastion_root_volume_size_in_gb = var.bastion_root_volume_size_in_gb
|
||||
etcd_root_volume_size_in_gb = var.etcd_root_volume_size_in_gb
|
||||
@ -50,6 +54,7 @@ module "compute" {
|
||||
gfs_root_volume_size_in_gb = var.gfs_root_volume_size_in_gb
|
||||
gfs_volume_size_in_gb = var.gfs_volume_size_in_gb
|
||||
master_volume_type = var.master_volume_type
|
||||
node_volume_type = var.node_volume_type
|
||||
public_key_path = var.public_key_path
|
||||
image = var.image
|
||||
image_uuid = var.image_uuid
|
||||
@ -67,6 +72,7 @@ module "compute" {
|
||||
flavor_bastion = var.flavor_bastion
|
||||
k8s_master_fips = module.ips.k8s_master_fips
|
||||
k8s_master_no_etcd_fips = module.ips.k8s_master_no_etcd_fips
|
||||
k8s_masters_fips = module.ips.k8s_masters_fips
|
||||
k8s_node_fips = module.ips.k8s_node_fips
|
||||
k8s_nodes_fips = module.ips.k8s_nodes_fips
|
||||
bastion_fips = module.ips.bastion_fips
|
||||
@ -78,14 +84,18 @@ module "compute" {
|
||||
supplementary_node_groups = var.supplementary_node_groups
|
||||
master_allowed_ports = var.master_allowed_ports
|
||||
worker_allowed_ports = var.worker_allowed_ports
|
||||
wait_for_floatingip = var.wait_for_floatingip
|
||||
use_access_ip = var.use_access_ip
|
||||
use_server_groups = var.use_server_groups
|
||||
master_server_group_policy = var.master_server_group_policy
|
||||
node_server_group_policy = var.node_server_group_policy
|
||||
etcd_server_group_policy = var.etcd_server_group_policy
|
||||
extra_sec_groups = var.extra_sec_groups
|
||||
extra_sec_groups_name = var.extra_sec_groups_name
|
||||
group_vars_path = var.group_vars_path
|
||||
|
||||
network_id = module.network.router_id
|
||||
port_security_enabled = var.port_security_enabled
|
||||
force_null_port_security = var.force_null_port_security
|
||||
network_router_id = module.network.router_id
|
||||
network_id = module.network.network_id
|
||||
use_existing_network = var.use_existing_network
|
||||
}
|
||||
|
||||
output "private_subnet_id" {
|
||||
|
@ -15,6 +15,15 @@ data "openstack_images_image_v2" "image_master" {
|
||||
name = var.image_master == "" ? var.image : var.image_master
|
||||
}
|
||||
|
||||
data "template_file" "cloudinit" {
|
||||
template = file("${path.module}/templates/cloudinit.yaml")
|
||||
}
|
||||
|
||||
data "openstack_networking_network_v2" "k8s_network" {
|
||||
count = var.use_existing_network ? 1 : 0
|
||||
name = var.network_name
|
||||
}
|
||||
|
||||
resource "openstack_compute_keypair_v2" "k8s" {
|
||||
name = "kubernetes-${var.cluster_name}"
|
||||
public_key = chomp(file(var.public_key_path))
|
||||
@ -130,36 +139,45 @@ resource "openstack_networking_secgroup_rule_v2" "worker" {
|
||||
}
|
||||
|
||||
resource "openstack_compute_servergroup_v2" "k8s_master" {
|
||||
count = "%{if var.use_server_groups}1%{else}0%{endif}"
|
||||
count = var.master_server_group_policy != "" ? 1 : 0
|
||||
name = "k8s-master-srvgrp"
|
||||
policies = ["anti-affinity"]
|
||||
policies = [var.master_server_group_policy]
|
||||
}
|
||||
|
||||
resource "openstack_compute_servergroup_v2" "k8s_node" {
|
||||
count = "%{if var.use_server_groups}1%{else}0%{endif}"
|
||||
count = var.node_server_group_policy != "" ? 1 : 0
|
||||
name = "k8s-node-srvgrp"
|
||||
policies = ["anti-affinity"]
|
||||
policies = [var.node_server_group_policy]
|
||||
}
|
||||
|
||||
resource "openstack_compute_servergroup_v2" "k8s_etcd" {
|
||||
count = "%{if var.use_server_groups}1%{else}0%{endif}"
|
||||
count = var.etcd_server_group_policy != "" ? 1 : 0
|
||||
name = "k8s-etcd-srvgrp"
|
||||
policies = ["anti-affinity"]
|
||||
policies = [var.etcd_server_group_policy]
|
||||
}
|
||||
|
||||
locals {
|
||||
# master groups
|
||||
master_sec_groups = compact([
|
||||
openstack_networking_secgroup_v2.k8s_master.name,
|
||||
openstack_networking_secgroup_v2.k8s.name,
|
||||
var.extra_sec_groups ?openstack_networking_secgroup_v2.k8s_master_extra[0].name : "",
|
||||
openstack_networking_secgroup_v2.k8s_master.id,
|
||||
openstack_networking_secgroup_v2.k8s.id,
|
||||
var.extra_sec_groups ?openstack_networking_secgroup_v2.k8s_master_extra[0].id : "",
|
||||
])
|
||||
# worker groups
|
||||
worker_sec_groups = compact([
|
||||
openstack_networking_secgroup_v2.k8s.name,
|
||||
openstack_networking_secgroup_v2.worker.name,
|
||||
var.extra_sec_groups ? openstack_networking_secgroup_v2.worker_extra[0].name : "",
|
||||
openstack_networking_secgroup_v2.k8s.id,
|
||||
openstack_networking_secgroup_v2.worker.id,
|
||||
var.extra_sec_groups ? openstack_networking_secgroup_v2.worker_extra[0].id : "",
|
||||
])
|
||||
# bastion groups
|
||||
bastion_sec_groups = compact(concat([
|
||||
openstack_networking_secgroup_v2.k8s.id,
|
||||
openstack_networking_secgroup_v2.bastion[0].id,
|
||||
]))
|
||||
# etcd groups
|
||||
etcd_sec_groups = compact([openstack_networking_secgroup_v2.k8s.id])
|
||||
# glusterfs groups
|
||||
gfs_sec_groups = compact([openstack_networking_secgroup_v2.k8s.id])
|
||||
|
||||
# Image uuid
|
||||
image_to_use_node = var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.vm_image[0].id
|
||||
@ -169,12 +187,27 @@ locals {
|
||||
image_to_use_master = var.image_master_uuid != "" ? var.image_master_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.image_master[0].id
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "bastion_port" {
|
||||
count = var.number_of_bastions
|
||||
name = "${var.cluster_name}-bastion-${count.index + 1}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.bastion_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "bastion" {
|
||||
name = "${var.cluster_name}-bastion-${count.index + 1}"
|
||||
count = var.number_of_bastions
|
||||
image_id = var.bastion_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
||||
flavor_id = var.flavor_bastion
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
user_data = data.template_file.cloudinit.rendered
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.bastion_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
||||
@ -189,25 +222,35 @@ resource "openstack_compute_instance_v2" "bastion" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = element(openstack_networking_port_v2.bastion_port.*.id, count.index)
|
||||
}
|
||||
|
||||
security_groups = [openstack_networking_secgroup_v2.k8s.name,
|
||||
element(openstack_networking_secgroup_v2.bastion.*.name, count.index),
|
||||
]
|
||||
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "bastion"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ ${path.root}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > ${var.group_vars_path}/no_floating.yml"
|
||||
command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${var.bastion_fips[0]}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "k8s_master_port" {
|
||||
count = var.number_of_k8s_masters
|
||||
name = "${var.cluster_name}-k8s-master-${count.index + 1}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master" {
|
||||
name = "${var.cluster_name}-k8s-master-${count.index + 1}"
|
||||
count = var.number_of_k8s_masters
|
||||
@ -215,6 +258,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
|
||||
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
||||
flavor_id = var.flavor_k8s_master
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
user_data = data.template_file.cloudinit.rendered
|
||||
|
||||
|
||||
dynamic "block_device" {
|
||||
@ -231,13 +275,11 @@ resource "openstack_compute_instance_v2" "k8s_master" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = element(openstack_networking_port_v2.k8s_master_port.*.id, count.index)
|
||||
}
|
||||
|
||||
security_groups = local.master_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
content {
|
||||
group = openstack_compute_servergroup_v2.k8s_master[0].id
|
||||
}
|
||||
@ -246,15 +288,87 @@ resource "openstack_compute_instance_v2" "k8s_master" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ ${path.root}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > ${var.group_vars_path}/no_floating.yml"
|
||||
command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "k8s_masters_port" {
|
||||
for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {}
|
||||
name = "${var.cluster_name}-k8s-${each.key}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_masters" {
|
||||
for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {}
|
||||
name = "${var.cluster_name}-k8s-${each.key}"
|
||||
availability_zone = each.value.az
|
||||
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
||||
flavor_id = each.value.flavor
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
|
||||
content {
|
||||
uuid = local.image_to_use_master
|
||||
source_type = "image"
|
||||
volume_size = var.master_root_volume_size_in_gb
|
||||
volume_type = var.master_volume_type
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
}
|
||||
}
|
||||
|
||||
network {
|
||||
port = openstack_networking_port_v2.k8s_masters_port[each.key].id
|
||||
}
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
content {
|
||||
group = openstack_compute_servergroup_v2.k8s_master[0].id
|
||||
}
|
||||
}
|
||||
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "%{if each.value.etcd == true}etcd,%{endif}kube_control_plane,${var.supplementary_master_groups},k8s_cluster%{if each.value.floating_ip == false},no_floating%{endif}"
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ${path.root}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_masters_fips : value.address]), 0)}/ > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "k8s_master_no_etcd_port" {
|
||||
count = var.number_of_k8s_masters_no_etcd
|
||||
name = "${var.cluster_name}-k8s-master-ne-${count.index + 1}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
||||
name = "${var.cluster_name}-k8s-master-ne-${count.index + 1}"
|
||||
count = var.number_of_k8s_masters_no_etcd
|
||||
@ -262,6 +376,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
||||
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
||||
flavor_id = var.flavor_k8s_master
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
user_data = data.template_file.cloudinit.rendered
|
||||
|
||||
|
||||
dynamic "block_device" {
|
||||
@ -278,13 +393,11 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = element(openstack_networking_port_v2.k8s_master_no_etcd_port.*.id, count.index)
|
||||
}
|
||||
|
||||
security_groups = local.master_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
content {
|
||||
group = openstack_compute_servergroup_v2.k8s_master[0].id
|
||||
}
|
||||
@ -293,15 +406,29 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ ${path.root}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > ${var.group_vars_path}/no_floating.yml"
|
||||
command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "etcd_port" {
|
||||
count = var.number_of_etcd
|
||||
name = "${var.cluster_name}-etcd-${count.index + 1}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.etcd_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "etcd" {
|
||||
name = "${var.cluster_name}-etcd-${count.index + 1}"
|
||||
count = var.number_of_etcd
|
||||
@ -309,6 +436,7 @@ resource "openstack_compute_instance_v2" "etcd" {
|
||||
image_id = var.etcd_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
||||
flavor_id = var.flavor_etcd
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
user_data = data.template_file.cloudinit.rendered
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.etcd_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
|
||||
@ -323,13 +451,11 @@ resource "openstack_compute_instance_v2" "etcd" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = element(openstack_networking_port_v2.etcd_port.*.id, count.index)
|
||||
}
|
||||
|
||||
security_groups = [openstack_networking_secgroup_v2.k8s.name]
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
|
||||
for_each = var.etcd_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
|
||||
content {
|
||||
group = openstack_compute_servergroup_v2.k8s_etcd[0].id
|
||||
}
|
||||
@ -338,11 +464,25 @@ resource "openstack_compute_instance_v2" "etcd" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "etcd,no_floating"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_port" {
|
||||
count = var.number_of_k8s_masters_no_floating_ip
|
||||
name = "${var.cluster_name}-k8s-master-nf-${count.index + 1}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-master-nf-${count.index + 1}"
|
||||
count = var.number_of_k8s_masters_no_floating_ip
|
||||
@ -365,13 +505,11 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = element(openstack_networking_port_v2.k8s_master_no_floating_ip_port.*.id, count.index)
|
||||
}
|
||||
|
||||
security_groups = local.master_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
content {
|
||||
group = openstack_compute_servergroup_v2.k8s_master[0].id
|
||||
}
|
||||
@ -380,11 +518,25 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_no_etcd_port" {
|
||||
count = var.number_of_k8s_masters_no_floating_ip_no_etcd
|
||||
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.master_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
||||
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}"
|
||||
count = var.number_of_k8s_masters_no_floating_ip_no_etcd
|
||||
@ -392,6 +544,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
||||
image_id = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
|
||||
flavor_id = var.flavor_k8s_master
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
user_data = data.template_file.cloudinit.rendered
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
|
||||
@ -407,13 +560,11 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = element(openstack_networking_port_v2.k8s_master_no_floating_ip_no_etcd_port.*.id, count.index)
|
||||
}
|
||||
|
||||
security_groups = local.master_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
content {
|
||||
group = openstack_compute_servergroup_v2.k8s_master[0].id
|
||||
}
|
||||
@ -422,11 +573,25 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "k8s_node_port" {
|
||||
count = var.number_of_k8s_nodes
|
||||
name = "${var.cluster_name}-k8s-node-${count.index + 1}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
name = "${var.cluster_name}-k8s-node-${count.index + 1}"
|
||||
count = var.number_of_k8s_nodes
|
||||
@ -434,6 +599,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
||||
flavor_id = var.flavor_k8s_node
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
user_data = data.template_file.cloudinit.rendered
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
||||
@ -441,6 +607,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
uuid = local.image_to_use_node
|
||||
source_type = "image"
|
||||
volume_size = var.node_root_volume_size_in_gb
|
||||
volume_type = var.node_volume_type
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
@ -448,13 +615,12 @@ resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = element(openstack_networking_port_v2.k8s_node_port.*.id, count.index)
|
||||
}
|
||||
|
||||
security_groups = local.worker_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
content {
|
||||
group = openstack_compute_servergroup_v2.k8s_node[0].id
|
||||
}
|
||||
@ -463,15 +629,29 @@ resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube_node,k8s_cluster,${var.supplementary_node_groups}"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ ${path.root}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > ${var.group_vars_path}/no_floating.yml"
|
||||
command = "sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "k8s_node_no_floating_ip_port" {
|
||||
count = var.number_of_k8s_nodes_no_floating_ip
|
||||
name = "${var.cluster_name}-k8s-node-nf-${count.index + 1}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-node-nf-${count.index + 1}"
|
||||
count = var.number_of_k8s_nodes_no_floating_ip
|
||||
@ -479,6 +659,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
||||
flavor_id = var.flavor_k8s_node
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
user_data = data.template_file.cloudinit.rendered
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
||||
@ -486,6 +667,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
uuid = local.image_to_use_node
|
||||
source_type = "image"
|
||||
volume_size = var.node_root_volume_size_in_gb
|
||||
volume_type = var.node_volume_type
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
@ -493,13 +675,11 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = element(openstack_networking_port_v2.k8s_node_no_floating_ip_port.*.id, count.index)
|
||||
}
|
||||
|
||||
security_groups = local.worker_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
content {
|
||||
group = openstack_compute_servergroup_v2.k8s_node[0].id
|
||||
}
|
||||
@ -508,11 +688,25 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube_node,k8s_cluster,no_floating,${var.supplementary_node_groups}"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "k8s_nodes_port" {
|
||||
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
|
||||
name = "${var.cluster_name}-k8s-node-${each.key}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.worker_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
|
||||
name = "${var.cluster_name}-k8s-node-${each.key}"
|
||||
@ -520,6 +714,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||
image_id = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
|
||||
flavor_id = each.value.flavor
|
||||
key_pair = openstack_compute_keypair_v2.k8s.name
|
||||
user_data = data.template_file.cloudinit.rendered
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
|
||||
@ -527,6 +722,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||
uuid = local.image_to_use_node
|
||||
source_type = "image"
|
||||
volume_size = var.node_root_volume_size_in_gb
|
||||
volume_type = var.node_volume_type
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
@ -534,13 +730,11 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = openstack_networking_port_v2.k8s_nodes_port[each.key].id
|
||||
}
|
||||
|
||||
security_groups = local.worker_sec_groups
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
content {
|
||||
group = openstack_compute_servergroup_v2.k8s_node[0].id
|
||||
}
|
||||
@ -549,15 +743,29 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user
|
||||
kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ${path.root}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}"
|
||||
command = "%{if each.value.floating_ip}sed -e s/USER/${var.ssh_user}/ -e s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ ${path.module}/ansible_bastion_template.txt > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_networking_port_v2" "glusterfs_node_no_floating_ip_port" {
|
||||
count = var.number_of_gfs_nodes_no_floating_ip
|
||||
name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}"
|
||||
network_id = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
|
||||
security_group_ids = var.port_security_enabled ? local.gfs_sec_groups : null
|
||||
no_security_groups = var.port_security_enabled ? null : false
|
||||
|
||||
depends_on = [
|
||||
var.network_router_id
|
||||
]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}"
|
||||
count = var.number_of_gfs_nodes_no_floating_ip
|
||||
@ -579,13 +787,11 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
}
|
||||
|
||||
network {
|
||||
name = var.network_name
|
||||
port = element(openstack_networking_port_v2.glusterfs_node_no_floating_ip_port.*.id, count.index)
|
||||
}
|
||||
|
||||
security_groups = [openstack_networking_secgroup_v2.k8s.name]
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
content {
|
||||
group = openstack_compute_servergroup_v2.k8s_node[0].id
|
||||
}
|
||||
@ -594,44 +800,46 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
metadata = {
|
||||
ssh_user = var.ssh_user_gfs
|
||||
kubespray_groups = "gfs-cluster,network-storage,no_floating"
|
||||
depends_on = var.network_id
|
||||
depends_on = var.network_router_id
|
||||
use_access_ip = var.use_access_ip
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "bastion" {
|
||||
resource "openstack_networking_floatingip_associate_v2" "bastion" {
|
||||
count = var.number_of_bastions
|
||||
floating_ip = var.bastion_fips[count.index]
|
||||
instance_id = element(openstack_compute_instance_v2.bastion.*.id, count.index)
|
||||
wait_until_associated = var.wait_for_floatingip
|
||||
port_id = element(openstack_networking_port_v2.bastion_port.*.id, count.index)
|
||||
}
|
||||
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_master" {
|
||||
resource "openstack_networking_floatingip_associate_v2" "k8s_master" {
|
||||
count = var.number_of_k8s_masters
|
||||
instance_id = element(openstack_compute_instance_v2.k8s_master.*.id, count.index)
|
||||
floating_ip = var.k8s_master_fips[count.index]
|
||||
wait_until_associated = var.wait_for_floatingip
|
||||
port_id = element(openstack_networking_port_v2.k8s_master_port.*.id, count.index)
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_master_no_etcd" {
|
||||
count = var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0
|
||||
instance_id = element(openstack_compute_instance_v2.k8s_master_no_etcd.*.id, count.index)
|
||||
floating_ip = var.k8s_master_no_etcd_fips[count.index]
|
||||
resource "openstack_networking_floatingip_associate_v2" "k8s_masters" {
|
||||
for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip } : {}
|
||||
floating_ip = var.k8s_masters_fips[each.key].address
|
||||
port_id = openstack_networking_port_v2.k8s_masters_port[each.key].id
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_node" {
|
||||
resource "openstack_networking_floatingip_associate_v2" "k8s_master_no_etcd" {
|
||||
count = var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0
|
||||
floating_ip = var.k8s_master_no_etcd_fips[count.index]
|
||||
port_id = element(openstack_networking_port_v2.k8s_master_no_etcd_port.*.id, count.index)
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_associate_v2" "k8s_node" {
|
||||
count = var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes : 0
|
||||
floating_ip = var.k8s_node_fips[count.index]
|
||||
instance_id = element(openstack_compute_instance_v2.k8s_node[*].id, count.index)
|
||||
wait_until_associated = var.wait_for_floatingip
|
||||
port_id = element(openstack_networking_port_v2.k8s_node_port.*.id, count.index)
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_nodes" {
|
||||
resource "openstack_networking_floatingip_associate_v2" "k8s_nodes" {
|
||||
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? { for key, value in var.k8s_nodes : key => value if value.floating_ip } : {}
|
||||
floating_ip = var.k8s_nodes_fips[each.key].address
|
||||
instance_id = openstack_compute_instance_v2.k8s_nodes[each.key].id
|
||||
wait_until_associated = var.wait_for_floatingip
|
||||
port_id = openstack_networking_port_v2.k8s_nodes_port[each.key].id
|
||||
}
|
||||
|
||||
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
|
||||
|
@ -0,0 +1,17 @@
|
||||
# yamllint disable rule:comments
|
||||
#cloud-config
|
||||
## in some cases novnc console access is required
|
||||
## it requires ssh password to be set
|
||||
#ssh_pwauth: yes
|
||||
#chpasswd:
|
||||
# list: |
|
||||
# root:secret
|
||||
# expire: False
|
||||
|
||||
## in some cases direct root ssh access via ssh key is required
|
||||
#disable_root: false
|
||||
|
||||
## in some cases additional CA certs are required
|
||||
#ca-certs:
|
||||
# trusted: |
|
||||
# -----BEGIN CERTIFICATE-----
|
@ -40,6 +40,8 @@ variable "gfs_volume_size_in_gb" {}
|
||||
|
||||
variable "master_volume_type" {}
|
||||
|
||||
variable "node_volume_type" {}
|
||||
|
||||
variable "public_key_path" {}
|
||||
|
||||
variable "image" {}
|
||||
@ -66,6 +68,14 @@ variable "network_id" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "use_existing_network" {
|
||||
type = bool
|
||||
}
|
||||
|
||||
variable "network_router_id" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "k8s_master_fips" {
|
||||
type = list
|
||||
}
|
||||
@ -78,6 +88,10 @@ variable "k8s_node_fips" {
|
||||
type = list
|
||||
}
|
||||
|
||||
variable "k8s_masters_fips" {
|
||||
type = map
|
||||
}
|
||||
|
||||
variable "k8s_nodes_fips" {
|
||||
type = map
|
||||
}
|
||||
@ -102,9 +116,9 @@ variable "k8s_allowed_egress_ips" {
|
||||
type = list
|
||||
}
|
||||
|
||||
variable "k8s_nodes" {}
|
||||
variable "k8s_masters" {}
|
||||
|
||||
variable "wait_for_floatingip" {}
|
||||
variable "k8s_nodes" {}
|
||||
|
||||
variable "supplementary_master_groups" {
|
||||
default = ""
|
||||
@ -124,8 +138,16 @@ variable "worker_allowed_ports" {
|
||||
|
||||
variable "use_access_ip" {}
|
||||
|
||||
variable "use_server_groups" {
|
||||
type = bool
|
||||
variable "master_server_group_policy" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "node_server_group_policy" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "etcd_server_group_policy" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "extra_sec_groups" {
|
||||
@ -155,3 +177,11 @@ variable "image_master_uuid" {
|
||||
variable "group_vars_path" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "port_security_enabled" {
|
||||
type = bool
|
||||
}
|
||||
|
||||
variable "force_null_port_security" {
|
||||
type = bool
|
||||
}
|
||||
|
@ -14,6 +14,12 @@ resource "openstack_networking_floatingip_v2" "k8s_master" {
|
||||
depends_on = [null_resource.dummy_dependency]
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "k8s_masters" {
|
||||
for_each = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 ? { for key, value in var.k8s_masters : key => value if value.floating_ip } : {}
|
||||
pool = var.floatingip_pool
|
||||
depends_on = [null_resource.dummy_dependency]
|
||||
}
|
||||
|
||||
# If user specifies pre-existing IPs to use in k8s_master_fips, do not create new ones.
|
||||
resource "openstack_networking_floatingip_v2" "k8s_master_no_etcd" {
|
||||
count = length(var.k8s_master_fips) > 0 ? 0 : var.number_of_k8s_masters_no_etcd
|
||||
@ -28,7 +34,7 @@ resource "openstack_networking_floatingip_v2" "k8s_node" {
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "bastion" {
|
||||
count = var.number_of_bastions
|
||||
count = length(var.bastion_fips) > 0 ? 0 : var.number_of_bastions
|
||||
pool = var.floatingip_pool
|
||||
depends_on = [null_resource.dummy_dependency]
|
||||
}
|
||||
|
@ -3,6 +3,10 @@ output "k8s_master_fips" {
|
||||
value = length(var.k8s_master_fips) > 0 ? var.k8s_master_fips : openstack_networking_floatingip_v2.k8s_master[*].address
|
||||
}
|
||||
|
||||
output "k8s_masters_fips" {
|
||||
value = openstack_networking_floatingip_v2.k8s_masters
|
||||
}
|
||||
|
||||
# If k8s_master_fips is already defined as input, keep the same value since new FIPs have not been created.
|
||||
output "k8s_master_no_etcd_fips" {
|
||||
value = length(var.k8s_master_fips) > 0 ? var.k8s_master_fips : openstack_networking_floatingip_v2.k8s_master_no_etcd[*].address
|
||||
@ -17,5 +21,5 @@ output "k8s_nodes_fips" {
|
||||
}
|
||||
|
||||
output "bastion_fips" {
|
||||
value = openstack_networking_floatingip_v2.bastion[*].address
|
||||
value = length(var.bastion_fips) > 0 ? var.bastion_fips : openstack_networking_floatingip_v2.bastion[*].address
|
||||
}
|
||||
|
@ -16,8 +16,12 @@ variable "router_id" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "k8s_masters" {}
|
||||
|
||||
variable "k8s_nodes" {}
|
||||
|
||||
variable "k8s_master_fips" {}
|
||||
|
||||
variable "bastion_fips" {}
|
||||
|
||||
variable "router_internal_port_id" {}
|
||||
|
@ -11,10 +11,11 @@ data "openstack_networking_router_v2" "k8s" {
|
||||
}
|
||||
|
||||
resource "openstack_networking_network_v2" "k8s" {
|
||||
name = var.network_name
|
||||
count = var.use_neutron
|
||||
dns_domain = var.network_dns_domain != null ? var.network_dns_domain : null
|
||||
admin_state_up = "true"
|
||||
name = var.network_name
|
||||
count = var.use_neutron
|
||||
dns_domain = var.network_dns_domain != null ? var.network_dns_domain : null
|
||||
admin_state_up = "true"
|
||||
port_security_enabled = var.port_security_enabled
|
||||
}
|
||||
|
||||
resource "openstack_networking_subnet_v2" "k8s" {
|
||||
|
@ -2,6 +2,10 @@ output "router_id" {
|
||||
value = "%{if var.use_neutron == 1} ${var.router_id == null ? element(concat(openstack_networking_router_v2.k8s.*.id, [""]), 0) : var.router_id} %{else} %{endif}"
|
||||
}
|
||||
|
||||
output "network_id" {
|
||||
value = element(concat(openstack_networking_network_v2.k8s.*.id, [""]),0)
|
||||
}
|
||||
|
||||
output "router_internal_port_id" {
|
||||
value = element(concat(openstack_networking_router_interface_v2.k8s.*.id, [""]), 0)
|
||||
}
|
||||
|
@ -10,6 +10,10 @@ variable "dns_nameservers" {
|
||||
type = list
|
||||
}
|
||||
|
||||
variable "port_security_enabled" {
|
||||
type = bool
|
||||
}
|
||||
|
||||
variable "subnet_cidr" {}
|
||||
|
||||
variable "use_neutron" {}
|
||||
|
@@ -32,6 +32,28 @@ number_of_k8s_masters_no_floating_ip_no_etcd = 0

flavor_k8s_master = "<UUID>"

k8s_masters = {
  # "master-1" = {
  #   "az" = "nova"
  #   "flavor" = "<UUID>"
  #   "floating_ip" = true
  #   "etcd" = true
  # },
  # "master-2" = {
  #   "az" = "nova"
  #   "flavor" = "<UUID>"
  #   "floating_ip" = false
  #   "etcd" = true
  # },
  # "master-3" = {
  #   "az" = "nova"
  #   "flavor" = "<UUID>"
  #   "floating_ip" = true
  #   "etcd" = true
  # },
}

# nodes
number_of_k8s_nodes = 2

@@ -52,6 +74,9 @@ number_of_k8s_nodes_no_floating_ip = 4
# networking
network_name = "<network>"

# Use an existing network with the name of network_name. Set to false to create a network with the name of network_name.
# use_existing_network = true

external_net = "<UUID>"

subnet_cidr = "<cidr>"

@@ -59,3 +84,6 @@ subnet_cidr = "<cidr>"
floatingip_pool = "<pool>"

bastion_allowed_remote_ips = ["0.0.0.0/0"]

# Force port security to be null. Some cloud providers do not allow setting port security.
# force_null_port_security = false
@@ -78,6 +78,10 @@ variable "master_volume_type" {
  default = "Default"
}

variable "node_volume_type" {
  default = "Default"
}

variable "public_key_path" {
  description = "The path of the ssh pub key"
  default = "~/.ssh/id_rsa.pub"

@@ -133,6 +137,12 @@ variable "network_name" {
  default = "internal"
}

variable "use_existing_network" {
  description = "Use an existing network"
  type = bool
  default = "false"
}

variable "network_dns_domain" {
  description = "dns_domain for the internal network"
  type = string

@@ -144,6 +154,18 @@ variable "use_neutron" {
  default = 1
}

variable "port_security_enabled" {
  description = "Enable port security on the internal network"
  type = bool
  default = "true"
}

variable "force_null_port_security" {
  description = "Force port security to be null. Some providers do not allow setting port security"
  type = bool
  default = "false"
}
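As an illustration of how the two new toggles are intended to be combined (an assumption based on their descriptions, not content from the diff), a cluster.tfvars fragment for a cloud that rejects the port security attribute might look like:

# Disable port security and force the attribute to null for providers that reject it
port_security_enabled    = false
force_null_port_security = true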
variable "subnet_cidr" {
|
||||
description = "Subnet CIDR block."
|
||||
type = string
|
||||
@ -162,6 +184,12 @@ variable "k8s_master_fips" {
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "bastion_fips" {
|
||||
description = "specific pre-existing floating IPs to use for bastion node"
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "floatingip_pool" {
|
||||
description = "name of the floating ip pool to use"
|
||||
default = "external"
|
||||
@ -233,8 +261,19 @@ variable "use_access_ip" {
|
||||
default = 1
|
||||
}
|
||||
|
||||
variable "use_server_groups" {
|
||||
default = false
|
||||
variable "master_server_group_policy" {
|
||||
description = "desired server group policy, e.g. anti-affinity"
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "node_server_group_policy" {
|
||||
description = "desired server group policy, e.g. anti-affinity"
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "etcd_server_group_policy" {
|
||||
description = "desired server group policy, e.g. anti-affinity"
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "router_id" {
|
||||
@ -247,6 +286,10 @@ variable "router_internal_port_id" {
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "k8s_masters" {
|
||||
default = {}
|
||||
}
|
||||
|
||||
variable "k8s_nodes" {
|
||||
default = {}
|
||||
}
|
||||
|
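Judging from this hunk, the old `use_server_groups` flag appears to give way to per-role policy strings; a hypothetical cluster.tfvars fragment (values assumed, not taken from the diff) could be:

# Request an anti-affinity server group for control-plane nodes only
master_server_group_policy = "anti-affinity"
node_server_group_policy   = ""
etcd_server_group_policy   = ""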
@@ -1,16 +0,0 @@
output "k8s_masters" {
  value = packet_device.k8s_master.*.access_public_ipv4
}

output "k8s_masters_no_etc" {
  value = packet_device.k8s_master_no_etcd.*.access_public_ipv4
}

output "k8s_etcds" {
  value = packet_device.k8s_etcd.*.access_public_ipv4
}

output "k8s_nodes" {
  value = packet_device.k8s_node.*.access_public_ipv4
}
@@ -114,10 +114,10 @@ def iterhosts(resources):


def iterips(resources):
    '''yield ip tuples of (instance_id, ip)'''
    '''yield ip tuples of (port_id, ip)'''
    for module_name, key, resource in resources:
        resource_type, name = key.split('.', 1)
        if resource_type == 'openstack_compute_floatingip_associate_v2':
        if resource_type == 'openstack_networking_floatingip_associate_v2':
            yield openstack_floating_ips(resource)


@@ -195,8 +195,8 @@ def parse_bool(string_form):
    raise ValueError('could not convert %r to a bool' % string_form)


@parses('packet_device')
def packet_device(resource, tfvars=None):
@parses('metal_device')
def metal_device(resource, tfvars=None):
    raw_attrs = resource['primary']['attributes']
    name = raw_attrs['hostname']
    groups = []

@@ -213,14 +213,14 @@ def packet_device(resource, tfvars=None):
        'state': raw_attrs['state'],
        # ansible
        'ansible_ssh_host': raw_attrs['network.0.address'],
        'ansible_ssh_user': 'root',  # Use root by default in packet
        'ansible_ssh_user': 'root',  # Use root by default in metal
        # generic
        'ipv4_address': raw_attrs['network.0.address'],
        'public_ipv4': raw_attrs['network.0.address'],
        'ipv6_address': raw_attrs['network.1.address'],
        'public_ipv6': raw_attrs['network.1.address'],
        'private_ipv4': raw_attrs['network.2.address'],
        'provider': 'packet',
        'provider': 'metal',
    }

    if raw_attrs['operating_system'] == 'flatcar_stable':

@@ -228,10 +228,10 @@ def packet_device(resource, tfvars=None):
        attrs.update({'ansible_ssh_user': 'core'})

    # add groups based on attrs
    groups.append('packet_operating_system=' + attrs['operating_system'])
    groups.append('packet_locked=%s' % attrs['locked'])
    groups.append('packet_state=' + attrs['state'])
    groups.append('packet_plan=' + attrs['plan'])
    groups.append('metal_operating_system=' + attrs['operating_system'])
    groups.append('metal_locked=%s' % attrs['locked'])
    groups.append('metal_state=' + attrs['state'])
    groups.append('metal_plan=' + attrs['plan'])

    # groups specific to kubespray
    groups = groups + attrs['tags']

@@ -243,13 +243,13 @@ def openstack_floating_ips(resource):
    raw_attrs = resource['primary']['attributes']
    attrs = {
        'ip': raw_attrs['floating_ip'],
        'instance_id': raw_attrs['instance_id'],
        'port_id': raw_attrs['port_id'],
    }
    return attrs

def openstack_floating_ips(resource):
    raw_attrs = resource['primary']['attributes']
    return raw_attrs['instance_id'], raw_attrs['floating_ip']
    return raw_attrs['port_id'], raw_attrs['floating_ip']

@parses('openstack_compute_instance_v2')
@calculate_mantl_vars

@@ -282,6 +282,7 @@ def openstack_host(resource, module_name):
        # generic
        'public_ipv4': raw_attrs['access_ip_v4'],
        'private_ipv4': raw_attrs['access_ip_v4'],
        'port_id' : raw_attrs['network.0.port'],
        'provider': 'openstack',
    }

@@ -339,10 +340,10 @@ def openstack_host(resource, module_name):
def iter_host_ips(hosts, ips):
    '''Update hosts that have an entry in the floating IP list'''
    for host in hosts:
        host_id = host[1]['id']
        port_id = host[1]['port_id']

        if host_id in ips:
            ip = ips[host_id]
        if port_id in ips:
            ip = ips[port_id]

            host[1].update({
                'access_ip_v4': ip,
@@ -104,9 +104,22 @@ terraform destroy --var-file cluster-settings.tfvars \
* `zone`: The zone where to run the cluster
* `machines`: Machines to provision. Key of this object will be used as the name of the machine
  * `node_type`: The role of this node *(master|worker)*
  * `plan`: Preconfigured cpu/mem plan to use (disables `cpu` and `mem` attributes below)
  * `cpu`: number of cpu cores
  * `mem`: memory size in MB
  * `disk_size`: The size of the storage in GB
  * `additional_disks`: Additional disks to attach to the node.
    * `size`: The size of the additional disk in GB
    * `tier`: The tier of disk to use (`maxiops` is the only one you can choose at the moment)
* `firewall_enabled`: Enable firewall rules
* `master_allowed_remote_ips`: List of IP ranges that should be allowed to access the API of masters
  * `start_address`: Start of address range to allow
  * `end_address`: End of address range to allow
* `k8s_allowed_remote_ips`: List of IP ranges that should be allowed SSH access to all nodes
  * `start_address`: Start of address range to allow
  * `end_address`: End of address range to allow
* `loadbalancer_enabled`: Enable managed load balancer
* `loadbalancer_plan`: Plan to use for load balancer *(development|production-small)*
* `loadbalancers`: Ports to load balance and which machines to forward to. Key of this object will be used as the name of the load balancer frontends/backends (see the sketch below for how these settings fit together)
  * `port`: Port to load balance.
  * `backend_servers`: List of servers that traffic to the port should be forwarded to.
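A minimal sketch of these settings in cluster-settings.tfvars, assuming a single HTTP load balancer backed by `worker-0` and SSH restricted to one range (placeholder values, not part of the diff):

firewall_enabled = true

k8s_allowed_remote_ips = [
  {
    "start_address" : "192.0.2.0"
    "end_address" : "192.0.2.255"
  }
]

loadbalancer_enabled = true
loadbalancer_plan = "development"

loadbalancers = {
  "http" : {
    "port" : 80,
    "backend_servers" : [
      "worker-0"
    ]
  }
}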
@@ -20,6 +20,8 @@ ssh_public_keys = [
machines = {
  "master-0" : {
    "node_type" : "master",
    # plan to use instead of custom cpu/mem
    "plan" : null,
    #number of cpu cores
    "cpu" : "2",
    #memory size in MB

@@ -30,6 +32,8 @@ machines = {
  },
  "worker-0" : {
    "node_type" : "worker",
    # plan to use instead of custom cpu/mem
    "plan" : null,
    #number of cpu cores
    "cpu" : "2",
    #memory size in MB

@@ -49,6 +53,8 @@ machines = {
  },
  "worker-1" : {
    "node_type" : "worker",
    # plan to use instead of custom cpu/mem
    "plan" : null,
    #number of cpu cores
    "cpu" : "2",
    #memory size in MB

@@ -68,6 +74,8 @@ machines = {
  },
  "worker-2" : {
    "node_type" : "worker",
    # plan to use instead of custom cpu/mem
    "plan" : null,
    #number of cpu cores
    "cpu" : "2",
    #memory size in MB

@@ -86,3 +94,32 @@ machines = {
    }
  }
}

firewall_enabled = false

master_allowed_remote_ips = [
  {
    "start_address" : "0.0.0.0"
    "end_address" : "255.255.255.255"
  }
]

k8s_allowed_remote_ips = [
  {
    "start_address" : "0.0.0.0"
    "end_address" : "255.255.255.255"
  }
]

loadbalancer_enabled = false
loadbalancer_plan = "development"
loadbalancers = {
  # "http" : {
  #   "port" : 80,
  #   "backend_servers" : [
  #     "worker-0",
  #     "worker-1",
  #     "worker-2"
  #   ]
  # }
}
@@ -22,6 +22,14 @@ module "kubernetes" {
  machines = var.machines

  ssh_public_keys = var.ssh_public_keys

  firewall_enabled = var.firewall_enabled
  master_allowed_remote_ips = var.master_allowed_remote_ips
  k8s_allowed_remote_ips = var.k8s_allowed_remote_ips

  loadbalancer_enabled = var.loadbalancer_enabled
  loadbalancer_plan = var.loadbalancer_plan
  loadbalancers = var.loadbalancers
}

#
@@ -10,6 +10,16 @@ locals {
    ]
  ])

  lb_backend_servers = flatten([
    for lb_name, loadbalancer in var.loadbalancers : [
      for backend_server in loadbalancer.backend_servers : {
        port = loadbalancer.port
        lb_name = lb_name
        server_name = backend_server
      }
    ]
  ])

  # If prefix is set, all resources will be prefixed with "${var.prefix}-"
  # Else don't prefix with anything
  resource-prefix = "%{ if var.prefix != ""}${var.prefix}-%{ endif }"

@@ -45,8 +55,9 @@ resource "upcloud_server" "master" {
  }

  hostname = "${local.resource-prefix}${each.key}"
  cpu = each.value.cpu
  mem = each.value.mem
  plan = each.value.plan
  cpu = each.value.plan == null ? each.value.cpu : null
  mem = each.value.plan == null ? each.value.mem : null
  zone = var.zone

  template {

@@ -65,6 +76,13 @@ resource "upcloud_server" "master" {
    network = upcloud_network.private.id
  }

  # Ignore volumes created by csi-driver
  lifecycle {
    ignore_changes = [storage_devices]
  }

  firewall = var.firewall_enabled

  dynamic "storage_devices" {
    for_each = {
      for disk_key_name, disk in upcloud_storage.additional_disks :

@@ -94,8 +112,9 @@ resource "upcloud_server" "worker" {
  }

  hostname = "${local.resource-prefix}${each.key}"
  cpu = each.value.cpu
  mem = each.value.mem
  plan = each.value.plan
  cpu = each.value.plan == null ? each.value.cpu : null
  mem = each.value.plan == null ? each.value.mem : null
  zone = var.zone

  template {

@@ -114,6 +133,13 @@ resource "upcloud_server" "worker" {
    network = upcloud_network.private.id
  }

  # Ignore volumes created by csi-driver
  lifecycle {
    ignore_changes = [storage_devices]
  }

  firewall = var.firewall_enabled

  dynamic "storage_devices" {
    for_each = {
      for disk_key_name, disk in upcloud_storage.additional_disks :

@@ -134,3 +160,151 @@ resource "upcloud_server" "worker" {
    create_password = false
  }
}

resource "upcloud_firewall_rules" "master" {
  for_each = upcloud_server.master
  server_id = each.value.id

  dynamic firewall_rule {
    for_each = var.master_allowed_remote_ips

    content {
      action = "accept"
      comment = "Allow master API access from this network"
      destination_port_end = "6443"
      destination_port_start = "6443"
      direction = "in"
      family = "IPv4"
      protocol = "tcp"
      source_address_end = firewall_rule.value.end_address
      source_address_start = firewall_rule.value.start_address
    }
  }

  dynamic firewall_rule {
    for_each = length(var.master_allowed_remote_ips) > 0 ? [1] : []

    content {
      action = "drop"
      comment = "Deny master API access from other networks"
      destination_port_end = "6443"
      destination_port_start = "6443"
      direction = "in"
      family = "IPv4"
      protocol = "tcp"
      source_address_end = "255.255.255.255"
      source_address_start = "0.0.0.0"
    }
  }

  dynamic firewall_rule {
    for_each = var.k8s_allowed_remote_ips

    content {
      action = "accept"
      comment = "Allow SSH from this network"
      destination_port_end = "22"
      destination_port_start = "22"
      direction = "in"
      family = "IPv4"
      protocol = "tcp"
      source_address_end = firewall_rule.value.end_address
      source_address_start = firewall_rule.value.start_address
    }
  }

  dynamic firewall_rule {
    for_each = length(var.k8s_allowed_remote_ips) > 0 ? [1] : []

    content {
      action = "drop"
      comment = "Deny SSH from other networks"
      destination_port_end = "22"
      destination_port_start = "22"
      direction = "in"
      family = "IPv4"
      protocol = "tcp"
      source_address_end = "255.255.255.255"
      source_address_start = "0.0.0.0"
    }
  }
}

resource "upcloud_firewall_rules" "k8s" {
  for_each = upcloud_server.worker
  server_id = each.value.id

  dynamic firewall_rule {
    for_each = var.k8s_allowed_remote_ips

    content {
      action = "accept"
      comment = "Allow SSH from this network"
      destination_port_end = "22"
      destination_port_start = "22"
      direction = "in"
      family = "IPv4"
      protocol = "tcp"
      source_address_end = firewall_rule.value.end_address
      source_address_start = firewall_rule.value.start_address
    }
  }

  dynamic firewall_rule {
    for_each = length(var.k8s_allowed_remote_ips) > 0 ? [1] : []

    content {
      action = "drop"
      comment = "Deny SSH from other networks"
      destination_port_end = "22"
      destination_port_start = "22"
      direction = "in"
      family = "IPv4"
      protocol = "tcp"
      source_address_end = "255.255.255.255"
      source_address_start = "0.0.0.0"
    }
  }
}

resource "upcloud_loadbalancer" "lb" {
  count = var.loadbalancer_enabled ? 1 : 0
  configured_status = "started"
  name = "${local.resource-prefix}lb"
  plan = var.loadbalancer_plan
  zone = var.zone
  network = upcloud_network.private.id
}

resource "upcloud_loadbalancer_backend" "lb_backend" {
  for_each = var.loadbalancer_enabled ? var.loadbalancers : {}

  loadbalancer = upcloud_loadbalancer.lb[0].id
  name = "lb-backend-${each.key}"
}

resource "upcloud_loadbalancer_frontend" "lb_frontend" {
  for_each = var.loadbalancer_enabled ? var.loadbalancers : {}

  loadbalancer = upcloud_loadbalancer.lb[0].id
  name = "lb-frontend-${each.key}"
  mode = "tcp"
  port = each.value.port
  default_backend_name = upcloud_loadbalancer_backend.lb_backend[each.key].name
}

resource "upcloud_loadbalancer_static_backend_member" "lb_backend_member" {
  for_each = {
    for be_server in local.lb_backend_servers:
    "${be_server.server_name}-lb-backend-${be_server.lb_name}" => be_server
    if var.loadbalancer_enabled
  }

  backend = upcloud_loadbalancer_backend.lb_backend[each.value.lb_name].id
  name = "${local.resource-prefix}${each.key}"
  ip = merge(upcloud_server.master, upcloud_server.worker)[each.value.server_name].network_interface[1].ip_address
  port = each.value.port
  weight = 100
  max_sessions = var.loadbalancer_plan == "production-small" ? 50000 : 1000
  enabled = true
}
@@ -18,3 +18,7 @@ output "worker_ip" {
    }
  }
}

output "loadbalancer_domain" {
  value = var.loadbalancer_enabled ? upcloud_loadbalancer.lb[0].dns_name : null
}
@@ -16,6 +16,7 @@ variable "machines" {
  description = "Cluster machines"
  type = map(object({
    node_type = string
    plan = string
    cpu = string
    mem = string
    disk_size = number

@@ -29,3 +30,38 @@ variable "machines" {
variable "ssh_public_keys" {
  type = list(string)
}

variable "firewall_enabled" {
  type = bool
}

variable "master_allowed_remote_ips" {
  type = list(object({
    start_address = string
    end_address = string
  }))
}

variable "k8s_allowed_remote_ips" {
  type = list(object({
    start_address = string
    end_address = string
  }))
}

variable "loadbalancer_enabled" {
  type = bool
}

variable "loadbalancer_plan" {
  type = string
}

variable "loadbalancers" {
  description = "Load balancers"

  type = map(object({
    port = number
    backend_servers = list(string)
  }))
}
@@ -3,7 +3,7 @@ terraform {
  required_providers {
    upcloud = {
      source = "UpCloudLtd/upcloud"
      version = "~>2.0.0"
      version = "~>2.4.0"
    }
  }
  required_version = ">= 0.13"

@@ -6,3 +6,7 @@ output "master_ip" {
output "worker_ip" {
  value = module.kubernetes.worker_ip
}

output "loadbalancer_domain" {
  value = module.kubernetes.loadbalancer_domain
}
@@ -20,6 +20,8 @@ ssh_public_keys = [
machines = {
  "master-0" : {
    "node_type" : "master",
    # plan to use instead of custom cpu/mem
    "plan" : null,
    #number of cpu cores
    "cpu" : "2",
    #memory size in MB

@@ -30,6 +32,8 @@ machines = {
  },
  "worker-0" : {
    "node_type" : "worker",
    # plan to use instead of custom cpu/mem
    "plan" : null,
    #number of cpu cores
    "cpu" : "2",
    #memory size in MB

@@ -49,6 +53,8 @@ machines = {
  },
  "worker-1" : {
    "node_type" : "worker",
    # plan to use instead of custom cpu/mem
    "plan" : null,
    #number of cpu cores
    "cpu" : "2",
    #memory size in MB

@@ -68,6 +74,8 @@ machines = {
  },
  "worker-2" : {
    "node_type" : "worker",
    # plan to use instead of custom cpu/mem
    "plan" : null,
    #number of cpu cores
    "cpu" : "2",
    #memory size in MB

@@ -86,3 +94,32 @@ machines = {
    }
  }
}

firewall_enabled = false

master_allowed_remote_ips = [
  {
    "start_address" : "0.0.0.0"
    "end_address" : "255.255.255.255"
  }
]

k8s_allowed_remote_ips = [
  {
    "start_address" : "0.0.0.0"
    "end_address" : "255.255.255.255"
  }
]

loadbalancer_enabled = false
loadbalancer_plan = "development"
loadbalancers = {
  # "http" : {
  #   "port" : 80,
  #   "backend_servers" : [
  #     "worker-0",
  #     "worker-1",
  #     "worker-2"
  #   ]
  # }
}
@@ -28,6 +28,7 @@ variable "machines" {

  type = map(object({
    node_type = string
    plan = string
    cpu = string
    mem = string
    disk_size = number

@@ -54,3 +55,46 @@ variable "UPCLOUD_USERNAME" {
variable "UPCLOUD_PASSWORD" {
  description = "Password for UpCloud API user"
}

variable "firewall_enabled" {
  description = "Enable firewall rules"
  default = false
}

variable "master_allowed_remote_ips" {
  description = "List of IP start/end addresses allowed to access API of masters"
  type = list(object({
    start_address = string
    end_address = string
  }))
  default = []
}

variable "k8s_allowed_remote_ips" {
  description = "List of IP start/end addresses allowed to SSH to hosts"
  type = list(object({
    start_address = string
    end_address = string
  }))
  default = []
}

variable "loadbalancer_enabled" {
  description = "Enable load balancer"
  default = false
}

variable "loadbalancer_plan" {
  description = "Load balancer plan (development/production-small)"
  default = "development"
}

variable "loadbalancers" {
  description = "Load balancers"

  type = map(object({
    port = number
    backend_servers = list(string)
  }))
  default = {}
}
@@ -3,7 +3,7 @@ terraform {
  required_providers {
    upcloud = {
      source = "UpCloudLtd/upcloud"
      version = "~>2.0.0"
      version = "~>2.4.0"
    }
  }
  required_version = ">= 0.13"
@@ -1,6 +1,6 @@
# Kubernetes on Exoscale with Terraform
# Kubernetes on vSphere with Terraform

Provision a Kubernetes cluster on [vSphere](https://www.vmware.com/se/products/vsphere.html) using Terraform and Kubespray.
Provision a Kubernetes cluster on [vSphere](https://www.vmware.com/products/vsphere.html) using Terraform and Kubespray.

## Overview

@@ -98,20 +98,33 @@ ansible-playbook -i inventory.ini ../../cluster.yml -b -v

* `machines`: Machines to provision. Key of this object will be used as the name of the machine
  * `node_type`: The role of this node *(master|worker)*
  * `ip`: The IP address with the netmask (CIDR notation)
  * `ip`: The IP address of the machine
  * `netmask`: The netmask to use (to be used on the right-hand side in CIDR notation, e.g., `24`)
* `network`: The name of the network to attach the machines to
* `gateway`: The IP address of the network gateway
* `ssh_public_keys`: List of public SSH keys to install on all machines
* `vsphere_datacenter`: The identifier of vSphere data center
* `vsphere_compute_cluster`: The identifier of vSphere compute cluster
* `vsphere_datastore`: The identifier of vSphere data store
* `vsphere_server`: The address of vSphere server
* `vsphere_hostname`: The IP address of vSphere hostname
* `template_name`: The name of a base image (the image has to be uploaded to vSphere beforehand)
* `vsphere_server`: This is the vCenter server name or address for vSphere API operations.
* `ssh_public_keys`: List of public SSH keys to install on all machines
* `template_name`: The name of a base image (the OVF template must be defined in vSphere beforehand)

### Optional

* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project *(Defaults to `default`)*
* `dns_primary`: The IP address of primary DNS server *(Defaults to `8.8.4.4`)*
* `dns_secondary`: The IP address of secondary DNS server *(Defaults to `8.8.8.8`)*
* `folder`: Name of the folder to put all machines in (default: `""`)
* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project (default: `"k8s"`)
* `inventory_file`: Name of the generated inventory file for Kubespray to use in the Ansible step (default: `inventory.ini`)
* `dns_primary`: The IP address of primary DNS server (default: `8.8.4.4`)
* `dns_secondary`: The IP address of secondary DNS server (default: `8.8.8.8`)
* `firmware`: Firmware to use (default: `bios`)
* `hardware_version`: The version of the hardware (default: `15`)
* `master_cores`: The number of CPU cores for the master nodes (default: 4)
* `master_memory`: The amount of RAM for the master nodes in MB (default: 4096)
* `master_disk_size`: The amount of disk space for the master nodes in GB (default: 20)
* `worker_cores`: The number of CPU cores for the worker nodes (default: 16)
* `worker_memory`: The amount of RAM for the worker nodes in MB (default: 8192)
* `worker_disk_size`: The amount of disk space for the worker nodes in GB (default: 100)
* `vapp`: Boolean to set the template type to vapp. (Default: false)
* `interface_name`: Name of the interface to configure. (Default: ens192)

An example variables file can be found in `default.tfvars`.
@@ -1,23 +1,28 @@
prefix = "default"
prefix = "k8s"

inventory_file = "inventory.ini"

network = "VM Network"

machines = {
  "master-0" : {
    "node_type" : "master",
    "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24
    "ip" : "i-did-not-read-the-docs", # e.g. 192.168.0.10
    "netmask" : "24"
  },
  "worker-0" : {
    "node_type" : "worker",
    "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24
    "ip" : "i-did-not-read-the-docs", # e.g. 192.168.0.20
    "netmask" : "24"
  },
  "worker-1" : {
    "node_type" : "worker",
    "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24
    "ip" : "i-did-not-read-the-docs", # e.g. 192.168.0.21
    "netmask" : "24"
  }
}

gateway = "i-did-not-read-the-docs" # e.g. 192.168.0.2
gateway = "i-did-not-read-the-docs" # e.g. 192.168.0.1

ssh_public_keys = [
  # Put your public SSH key here

@@ -29,6 +34,5 @@ vsphere_datacenter = "i-did-not-read-the-docs"
vsphere_compute_cluster = "i-did-not-read-the-docs" # e.g. Cluster
vsphere_datastore = "i-did-not-read-the-docs" # e.g. ssd-000000
vsphere_server = "i-did-not-read-the-docs" # e.g. vsphere.server.com
vsphere_hostname = "i-did-not-read-the-docs" # e.g. 192.168.0.2

template_name = "i-did-not-read-the-docs" # e.g. ubuntu-bionic-18.04-cloudimg
Some files were not shown because too many files have changed in this diff.