Compare commits: pre-commit...v2.22.1 (513 commits)
Commit list: 2cf23e3104..8585134db4
@@ -24,7 +24,17 @@ skip_list:
  # (Disabled in June 2021)
  - 'role-name'

  - 'experimental'
  # [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
  # In Kubespray we use variables that use camelCase to match their k8s counterparts
  # (Disabled in June 2021)
  - 'var-naming'
  - 'var-spacing'

  # [fqcn-builtins]
  # Roles in kubespray don't need fully qualified collection names
  # (Disabled in Feb 2023)
  - 'fqcn-builtins'
exclude_paths:
  # Generated files
  - tests/files/custom_cni/cilium.yaml

.gitignore (vendored): 6 changes

@@ -12,6 +12,7 @@ contrib/offline/offline-files.tar.gz
*.bak
*.tfstate
*.tfstate.backup
*.lock.hcl
.terraform/
contrib/terraform/aws/credentials.tfvars
.terraform.lock.hcl
@@ -112,3 +113,8 @@ roles/**/molecule/**/__pycache__/

# Temp location used by our scripts
scripts/tmp/
tmp.md

# Ansible collection files
kubernetes_sigs-kubespray*tar.gz
ansible_collections

@@ -1,5 +1,6 @@
---
stages:
  - build
  - unit-tests
  - deploy-part1
  - moderator
@@ -8,12 +9,12 @@ stages:
  - deploy-special

variables:
  KUBESPRAY_VERSION: v2.19.0
  KUBESPRAY_VERSION: v2.21.0
  FAILFASTCI_NAMESPACE: 'kargo-ci'
  GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
  ANSIBLE_FORCE_COLOR: "true"
  MAGIC: "ci check this"
  TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
  TEST_ID: "$CI_PIPELINE_ID-$CI_JOB_ID"
  CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
  CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
  CI_TEST_SETTING: "./tests/common/_kubespray_test_settings.yml"
@@ -33,8 +34,9 @@ variables:
  ANSIBLE_LOG_LEVEL: "-vv"
  RECOVER_CONTROL_PLANE_TEST: "false"
  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
  TERRAFORM_VERSION: 1.0.8
  TERRAFORM_VERSION: 1.3.7
  ANSIBLE_MAJOR_VERSION: "2.11"
  PIPELINE_IMAGE: "$CI_REGISTRY_IMAGE/pipeline:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"

before_script:
  - ./tests/scripts/rebase.sh
@@ -46,7 +48,7 @@ before_script:
.job: &job
  tags:
    - packet
  image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION
  image: $PIPELINE_IMAGE
  artifacts:
    when: always
    paths:
@@ -76,6 +78,7 @@ ci-authorized:
  only: []

include:
  - .gitlab-ci/build.yml
  - .gitlab-ci/lint.yml
  - .gitlab-ci/shellcheck.yml
  - .gitlab-ci/terraform.yml

.gitlab-ci/build.yml (new file): 40 lines

@@ -0,0 +1,40 @@
---
.build:
  stage: build
  image:
    name: moby/buildkit:rootless
    entrypoint: [""]
  variables:
    BUILDKITD_FLAGS: --oci-worker-no-process-sandbox
  before_script:
    - mkdir ~/.docker
    - echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > ~/.docker/config.json

pipeline image:
  extends: .build
  script:
    - |
      buildctl-daemonless.sh build \
        --frontend=dockerfile.v0 \
        --local context=. \
        --local dockerfile=. \
        --opt filename=./pipeline.Dockerfile \
        --output type=image,name=$PIPELINE_IMAGE,push=true \
        --import-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache
  rules:
    - if: '$CI_COMMIT_REF_NAME != $CI_DEFAULT_BRANCH'

pipeline image and build cache:
  extends: .build
  script:
    - |
      buildctl-daemonless.sh build \
        --frontend=dockerfile.v0 \
        --local context=. \
        --local dockerfile=. \
        --opt filename=./pipeline.Dockerfile \
        --output type=image,name=$PIPELINE_IMAGE,push=true \
        --import-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache \
        --export-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache,mode=max
  rules:
    - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH'

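For local experimentation, roughly the same image these jobs push can be built with plain Docker from the same Dockerfile; a minimal sketch (the local tag is illustrative, not part of the CI setup):

```ShellSession
# Build the CI pipeline image locally from the repo root
docker build -f pipeline.Dockerfile -t kubespray-pipeline:local .
```
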
@@ -14,7 +14,7 @@ vagrant-validate:
  stage: unit-tests
  tags: [light]
  variables:
    VAGRANT_VERSION: 2.2.19
    VAGRANT_VERSION: 2.3.4
  script:
    - ./tests/scripts/vagrant-validate.sh
  except: ['triggers', 'master']
@@ -39,11 +39,28 @@ syntax-check:
    ANSIBLE_VERBOSITY: "3"
  script:
    - ansible-playbook --syntax-check cluster.yml
    - ansible-playbook --syntax-check playbooks/cluster.yml
    - ansible-playbook --syntax-check upgrade-cluster.yml
    - ansible-playbook --syntax-check playbooks/upgrade_cluster.yml
    - ansible-playbook --syntax-check reset.yml
    - ansible-playbook --syntax-check playbooks/reset.yml
    - ansible-playbook --syntax-check extra_playbooks/upgrade-only-k8s.yml
  except: ['triggers', 'master']

collection-build-install-sanity-check:
  extends: .job
  stage: unit-tests
  tags: [light]
  variables:
    ANSIBLE_COLLECTIONS_PATH: "./ansible_collections"
  script:
    - ansible-galaxy collection build
    - ansible-galaxy collection install kubernetes_sigs-kubespray-$(grep "^version:" galaxy.yml | awk '{print $2}').tar.gz
    - ansible-galaxy collection list $(egrep -i '(name:\s+|namespace:\s+)' galaxy.yml | awk '{print $2}' | tr '\n' '.' | sed 's|\.$||g') | grep "^kubernetes_sigs.kubespray"
    - test -f ansible_collections/kubernetes_sigs/kubespray/playbooks/cluster.yml
    - test -f ansible_collections/kubernetes_sigs/kubespray/playbooks/reset.yml
  except: ['triggers', 'master']

tox-inventory-builder:
  stage: unit-tests
  tags: [light]
@@ -75,6 +92,20 @@ check-readme-versions:
  script:
    - tests/scripts/check_readme_versions.sh

check-galaxy-version:
  stage: unit-tests
  tags: [light]
  image: python:3
  script:
    - tests/scripts/check_galaxy_version.sh

check-typo:
  stage: unit-tests
  tags: [light]
  image: python:3
  script:
    - tests/scripts/check_typo.sh

ci-matrix:
  stage: unit-tests
  tags: [light]

@@ -4,7 +4,7 @@
  tags: [c3.small.x86]
  only: [/^pr-.*$/]
  except: ['triggers']
  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
  image: $PIPELINE_IMAGE
  services: []
  stage: deploy-part1
  before_script:

@@ -51,6 +51,11 @@ packet_ubuntu20-aio-docker:
  extends: .packet_pr
  when: on_success

packet_ubuntu20-calico-aio-hardening:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_ubuntu18-calico-aio:
  stage: deploy-part2
  extends: .packet_pr
@@ -86,16 +91,6 @@ packet_fedora35-crio:
  stage: deploy-part2
  when: manual

packet_ubuntu16-canal-ha:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

packet_ubuntu16-canal-sep:
  stage: deploy-special
  extends: .packet_pr
  when: manual

packet_ubuntu16-flannel-ha:
  stage: deploy-part2
  extends: .packet_pr
@@ -151,6 +146,18 @@ packet_rockylinux8-calico:
  extends: .packet_pr
  when: on_success

packet_rockylinux9-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

packet_rockylinux9-cilium:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success
  variables:
    RESET_CHECK: "true"

packet_almalinux8-docker:
  stage: deploy-part2
  extends: .packet_pr
@@ -161,11 +168,6 @@ packet_fedora36-docker-weave:
  extends: .packet_pr
  when: on_success

packet_opensuse-canal:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success

packet_opensuse-docker-cilium:
  stage: deploy-part2
  extends: .packet_pr
@@ -199,7 +201,7 @@ packet_almalinux8-calico-ha-ebpf:
  extends: .packet_pr
  when: manual

packet_debian9-macvlan:
packet_debian10-macvlan:
  stage: deploy-part2
  extends: .packet_pr
  when: manual
@@ -214,11 +216,6 @@ packet_centos7-multus-calico:
  extends: .packet_pr
  when: manual

packet_centos7-canal-ha:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_fedora36-docker-calico:
  stage: deploy-part2
  extends: .packet_periodic
@@ -251,6 +248,16 @@ packet_fedora36-kube-ovn:
  extends: .packet_periodic
  when: on_success

packet_debian11-custom-cni:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

packet_debian11-kubelet-csr-approver:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

# ### PR JOBS PART3
# Long jobs (45min+)

@@ -60,11 +60,11 @@ tf-validate-openstack:
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-metal:
tf-validate-equinix:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: metal
    PROVIDER: equinix
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-aws:
@@ -80,6 +80,12 @@ tf-validate-exoscale:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: exoscale

tf-validate-hetzner:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: hetzner

tf-validate-vsphere:
  extends: .terraform_validate
  variables:
@@ -104,7 +110,7 @@ tf-validate-upcloud:
#  TF_VAR_number_of_k8s_nodes: "1"
#  TF_VAR_plan_k8s_masters: t1.small.x86
#  TF_VAR_plan_k8s_nodes: t1.small.x86
#  TF_VAR_facility: ewr1
#  TF_VAR_metro: ny
#  TF_VAR_public_key_path: ""
#  TF_VAR_operating_system: ubuntu_16_04
#
@@ -118,7 +124,7 @@ tf-validate-upcloud:
#  TF_VAR_number_of_k8s_nodes: "1"
#  TF_VAR_plan_k8s_masters: t1.small.x86
#  TF_VAR_plan_k8s_nodes: t1.small.x86
#  TF_VAR_facility: ams1
#  TF_VAR_metro: am
#  TF_VAR_public_key_path: ""
#  TF_VAR_operating_system: ubuntu_18_04

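The `.terraform_validate` jobs amount to running Terraform's built-in validation per provider directory; a rough local equivalent for one provider, assuming Terraform is installed (directory and flags are illustrative):

```ShellSession
cd contrib/terraform/equinix
terraform init -backend=false   # fetch providers without touching any remote state
terraform validate
```
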
@@ -10,7 +10,7 @@
  tags: [c3.small.x86]
  only: [/^pr-.*$/]
  except: ['triggers']
  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
  image: $PIPELINE_IMAGE
  services: []
  before_script:
    - apt-get update && apt-get install -y python3-pip
@@ -43,6 +43,12 @@ vagrant_ubuntu20-flannel:
  stage: deploy-part2
  extends: .vagrant
  when: on_success
  allow_failure: false

vagrant_ubuntu20-flannel-collection:
  stage: deploy-part2
  extends: .vagrant
  when: on_success

vagrant_ubuntu16-kube-router-sep:
  stage: deploy-part2

@@ -1,2 +1,3 @@
---
MD013: false
MD029: false

.pre-commit-config.yaml (new file): 71 lines
@@ -0,0 +1,71 @@
---
repos:

  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v3.4.0
    hooks:
      - id: check-added-large-files
      - id: check-case-conflict
      - id: check-executables-have-shebangs
      - id: check-xml
      - id: check-merge-conflict
      - id: detect-private-key
      - id: end-of-file-fixer
      - id: forbid-new-submodules
      - id: requirements-txt-fixer
      - id: trailing-whitespace

  - repo: https://github.com/adrienverge/yamllint.git
    rev: v1.27.1
    hooks:
      - id: yamllint
        args: [--strict]

  - repo: https://github.com/markdownlint/markdownlint
    rev: v0.11.0
    hooks:
      - id: markdownlint
        args: [ -r, "~MD013,~MD029" ]
        exclude: "^.git"

  - repo: https://github.com/jumanjihouse/pre-commit-hooks
    rev: 3.0.0
    hooks:
      - id: shellcheck
        args: [ --severity, "error" ]
        exclude: "^.git"
        files: "\\.sh$"

  - repo: local
    hooks:
      - id: ansible-lint
        name: ansible-lint
        entry: ansible-lint -v
        language: python
        pass_filenames: false
        additional_dependencies:
          - .[community]

      - id: ansible-syntax-check
        name: ansible-syntax-check
        entry: env ANSIBLE_INVENTORY=inventory/local-tests.cfg ANSIBLE_REMOTE_USER=root ANSIBLE_BECOME="true" ANSIBLE_BECOME_USER=root ANSIBLE_VERBOSITY="3" ansible-playbook --syntax-check
        language: python
        files: "^cluster.yml|^upgrade-cluster.yml|^reset.yml|^extra_playbooks/upgrade-only-k8s.yml"

      - id: tox-inventory-builder
        name: tox-inventory-builder
        entry: bash -c "cd contrib/inventory_builder && tox"
        language: python
        pass_filenames: false

      - id: check-readme-versions
        name: check-readme-versions
        entry: tests/scripts/check_readme_versions.sh
        language: script
        pass_filenames: false

      - id: ci-matrix
        name: ci-matrix
        entry: tests/scripts/md-table/test.sh
        language: script
        pass_filenames: false

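A quick way to exercise this new configuration locally, assuming pre-commit is installed (e.g. via `pip install pre-commit`):

```ShellSession
pre-commit install          # register the hooks with git
pre-commit run --all-files  # run every hook defined above against the whole tree
```
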
@@ -3,6 +3,8 @@ extends: default

ignore: |
  .git/
  # Generated file
  tests/files/custom_cni/cilium.yaml

rules:
  braces:

@@ -16,7 +16,12 @@ pip install -r tests/requirements.txt

#### Linting

Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `ansible-lint`. It is a good idea to add call these tools as part of your pre-commit hook and avoid a lot of back end forth on fixing linting issues (<https://support.gitkraken.com/working-with-repositories/githooksexample/>).
Kubespray uses [pre-commit](https://pre-commit.com) hook configuration to run several linters, please install this tool and use it to run validation tests before submitting a PR.

```ShellSession
pre-commit install
pre-commit run -a # To run pre-commit hook on all files in the repository, even if they were not modified
```

#### Molecule

@@ -33,7 +38,9 @@ Vagrant with VirtualBox or libvirt driver helps you to quickly spin test cluster
1. Submit an issue describing your proposed change to the repo in question.
2. The [repo owners](OWNERS) will respond to your issue promptly.
3. Fork the desired repo, develop and test your code changes.
4. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
5. Submit a pull request.
6. Work with the reviewers on their suggestions.
7. Ensure to rebase to the HEAD of your target branch and squash un-necessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before final merger of your contribution.
4. Install [pre-commit](https://pre-commit.com) and install it in your development repo.
5. Addess any pre-commit validation failures.
6. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
7. Submit a pull request.
8. Work with the reviewers on their suggestions.
9. Ensure to rebase to the HEAD of your target branch and squash un-necessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before final merger of your contribution.

Dockerfile: 69 changes

@@ -1,37 +1,44 @@
# Use imutable image tags rather than mutable tags (like ubuntu:20.04)
FROM ubuntu:focal-20220531

ARG ARCH=amd64
ARG TZ=Etc/UTC
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

RUN apt update -y \
    && apt install -y \
    libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
    ca-certificates curl gnupg2 software-properties-common python3-pip unzip rsync git \
    && rm -rf /var/lib/apt/lists/*
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
    && add-apt-repository \
    "deb [arch=$ARCH] https://download.docker.com/linux/ubuntu \
    $(lsb_release -cs) \
    stable" \
    && apt update -y && apt-get install --no-install-recommends -y docker-ce \
    && rm -rf /var/lib/apt/lists/*

# Use imutable image tags rather than mutable tags (like ubuntu:22.04)
FROM ubuntu:jammy-20230308
# Some tools like yamllint need this
# Pip needs this as well at the moment to install ansible
# (and potentially other packages)
# See: https://github.com/pypa/pip/issues/10219
ENV LANG=C.UTF-8

ENV LANG=C.UTF-8 \
    DEBIAN_FRONTEND=noninteractive \
    PYTHONDONTWRITEBYTECODE=1
WORKDIR /kubespray
COPY . .
RUN /usr/bin/python3 -m pip install --no-cache-dir pip -U \
    && /usr/bin/python3 -m pip install --no-cache-dir -r tests/requirements.txt \
    && python3 -m pip install --no-cache-dir -r requirements.txt \
    && update-alternatives --install /usr/bin/python python /usr/bin/python3 1
COPY *.yml ./
COPY *.cfg ./
COPY roles ./roles
COPY contrib ./contrib
COPY inventory ./inventory
COPY library ./library
COPY extra_playbooks ./extra_playbooks
COPY playbooks ./playbooks
COPY plugins ./plugins

RUN KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
    && curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$ARCH/kubectl \
    && chmod a+x kubectl \
    && mv kubectl /usr/local/bin/kubectl
RUN apt update -q \
    && apt install -yq --no-install-recommends \
    curl \
    python3 \
    python3-pip \
    sshpass \
    vim \
    rsync \
    openssh-client \
    && pip install --no-compile --no-cache-dir \
    ansible==5.7.1 \
    ansible-core==2.12.5 \
    cryptography==3.4.8 \
    jinja2==3.1.2 \
    netaddr==0.8.0 \
    jmespath==1.0.1 \
    MarkupSafe==2.1.2 \
    ruamel.yaml==0.17.21 \
    && KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
    && curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
    && echo $(curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \
    && chmod a+x /usr/local/bin/kubectl \
    && rm -rf /var/lib/apt/lists/* /var/log/* \
    && find /usr -type d -name '*__pycache__' -prune -exec rm -rf {} \;

@@ -8,6 +8,9 @@ aliases:
    - floryut
    - oomichi
    - cristicalin
    - liupeng0518
    - yankay
    - mzaian
  kubespray-reviewers:
    - holmsten
    - bozzo
@@ -16,6 +19,10 @@ aliases:
    - jayonlau
    - cristicalin
    - liupeng0518
    - yankay
    - cyclinder
    - mzaian
    - mrfreezeex
  kubespray-emeritus_approvers:
    - riverzhang
    - atoms

README.md: 117 changes
@@ -13,7 +13,7 @@ You can get your invite [here](http://slack.k8s.io/)

## Quick Start

To deploy the cluster you can use :
Below are several ways to use Kubespray to deploy a Kubernetes cluster.

### Ansible

@@ -34,6 +34,13 @@ CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inv
cat inventory/mycluster/group_vars/all/all.yml
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml

# Clean up old Kubernete cluster with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example cleaning up SSL keys in /etc/,
# uninstalling old packages and interacting with various systemd daemons.
# Without --become the playbook will fail to run!
# And be mind it will remove the current kubernetes cluster (if it's running)!
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root reset.yml

# Deploy Kubespray with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example writing SSL keys in /etc/,
# installing packages and interacting with various systemd daemons.
@@ -41,34 +48,50 @@ cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```

Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu).
As a consequence, `ansible-playbook` command will fail with:
Note: When Ansible is already installed via system packages on the control node,
Python packages installed via `sudo pip install -r requirements.txt` will go to
a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on
Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on
Ubuntu). As a consequence, the `ansible-playbook` command will fail with:

```raw
ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
```

probably pointing on a task depending on a module present in requirements.txt.
This likely indicates that a task depends on a module present in ``requirements.txt``.

One way of solving this would be to uninstall the Ansible package and then, to install it via pip but it is not always possible.
A workaround consists of setting `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` environment variables respectively to the `ansible/modules` and `ansible/module_utils` subdirectories of pip packages installation location, which can be found in the Location field of the output of `pip show [package]` before executing `ansible-playbook`.
One way of addressing this is to uninstall the system Ansible package then
reinstall Ansible via ``pip``, but this not always possible and one must
take care regarding package versions.
A workaround consists of setting the `ANSIBLE_LIBRARY`
and `ANSIBLE_MODULE_UTILS` environment variables respectively to
the `ansible/modules` and `ansible/module_utils` subdirectories of the ``pip``
installation location, which is the ``Location`` shown by running
`pip show [package]` before executing `ansible-playbook`.

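As an illustration of the workaround described in that paragraph, a sketch with hypothetical paths (substitute the `Location` that pip reports on your system):

```ShellSession
pip show ansible   # note the "Location:" line, e.g. /usr/local/lib/python3.10/dist-packages
export ANSIBLE_LIBRARY=/usr/local/lib/python3.10/dist-packages/ansible/modules
export ANSIBLE_MODULE_UTILS=/usr/local/lib/python3.10/dist-packages/ansible/module_utils
ansible-playbook -i inventory/mycluster/hosts.yaml --become cluster.yml
```
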
A simple way to ensure you get all the correct version of Ansible is to use the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:
A simple way to ensure you get all the correct version of Ansible is to use
the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/)
to access the inventory and SSH key in the container, like this:

```ShellSession
docker pull quay.io/kubespray/kubespray:v2.19.0
git checkout v2.22.0
docker pull quay.io/kubespray/kubespray:v2.22.0
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
  --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
  quay.io/kubespray/kubespray:v2.19.0 bash
  quay.io/kubespray/kubespray:v2.22.0 bash
# Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```

#### Collection

See [here](docs/ansible_collection.md) if you wish to use this repository as an Ansible collection

### Vagrant

For Vagrant we need to install python dependencies for provisioning tasks.
Check if Python and pip are installed:
For Vagrant we need to install Python dependencies for provisioning tasks.
Check that ``Python`` and ``pip`` are installed:

```ShellSession
python -V && pip -V
@@ -113,52 +136,54 @@ vagrant up
- [Air-Gap installation](docs/offline-environment.md)
- [NTP](docs/ntp.md)
- [Hardening](docs/hardening.md)
- [Mirror](docs/mirror.md)
- [Roadmap](docs/roadmap.md)

## Supported Linux Distributions

- **Flatcar Container Linux by Kinvolk**
- **Debian** Bullseye, Buster, Jessie, Stretch
- **Debian** Bullseye, Buster
- **Ubuntu** 16.04, 18.04, 20.04, 22.04
- **CentOS/RHEL** 7, [8](docs/centos.md#centos-8)
- **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
- **Fedora** 35, 36
- **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
- **openSUSE** Leap 15.x/Tumbleweed
- **Oracle Linux** 7, [8](docs/centos.md#centos-8)
- **Alma Linux** [8](docs/centos.md#centos-8)
- **Rocky Linux** [8](docs/centos.md#centos-8)
- **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8)
- **Alma Linux** [8, 9](docs/centos.md#centos-8)
- **Rocky Linux** [8, 9](docs/centos.md#centos-8)
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md))
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
- **UOS Linux** (experimental: see [uos linux notes](docs/uoslinux.md))
- **openEuler** (experimental: see [openEuler notes](docs/openeuler.md))

Note: Upstart/SysV init based OS types are not supported.

## Supported Components

- Core
  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.24.3
  - [etcd](https://github.com/etcd-io/etcd) v3.5.4
  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.26.5
  - [etcd](https://github.com/etcd-io/etcd) v3.5.6
  - [docker](https://www.docker.com/) v20.10 (see note)
  - [containerd](https://containerd.io/) v1.6.6
  - [containerd](https://containerd.io/) v1.7.1
  - [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
  - [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
  - [calico](https://github.com/projectcalico/calico) v3.23.3
  - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
  - [cilium](https://github.com/cilium/cilium) v1.11.7
  - [flannel](https://github.com/flannel-io/flannel) v0.18.1
  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.9.7
  - [cni-plugins](https://github.com/containernetworking/plugins) v1.2.0
  - [calico](https://github.com/projectcalico/calico) v3.25.1
  - [cilium](https://github.com/cilium/cilium) v1.13.0
  - [flannel](https://github.com/flannel-io/flannel) v0.21.4
  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.10.7
  - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
  - [multus](https://github.com/intel/multus-cni) v3.8
  - [multus](https://github.com/k8snetworkplumbingwg/multus-cni) v3.8
  - [weave](https://github.com/weaveworks/weave) v2.8.1
  - [kube-vip](https://github.com/kube-vip/kube-vip) v0.4.2
  - [kube-vip](https://github.com/kube-vip/kube-vip) v0.5.12
- Application
  - [cert-manager](https://github.com/jetstack/cert-manager) v1.9.0
  - [coredns](https://github.com/coredns/coredns) v1.8.6
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.3.0
  - [cert-manager](https://github.com/jetstack/cert-manager) v1.11.1
  - [coredns](https://github.com/coredns/coredns) v1.9.3
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.7.1
  - [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
  - [argocd](https://argoproj.github.io/) v2.4.7
  - [helm](https://helm.sh/) v3.9.2
  - [metallb](https://metallb.universe.tf/) v0.12.1
  - [argocd](https://argoproj.github.io/) v2.7.2
  - [helm](https://helm.sh/) v3.12.0
  - [metallb](https://metallb.universe.tf/) v0.13.9
  - [registry](https://github.com/distribution/distribution) v2.8.1
- Storage Plugin
  - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
@@ -167,29 +192,29 @@ Note: Upstart/SysV init based OS types are not supported.
  - [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
  - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0
  - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0
  - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.22
  - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.4.0
  - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.23
  - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0

## Container Runtime Notes

- The list of available docker version is 18.09, 19.03 and 20.10. The recommended docker version is 20.10. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
- Supported Docker versions are 18.09, 19.03 and 20.10. The *recommended* Docker version is 20.10. `Kubelet` might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. the YUM ``versionlock`` plugin or ``apt pin``).
- The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)

## Requirements

- **Minimum required version of Kubernetes is v1.22**
- **Minimum required version of Kubernetes is v1.24**
- **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
- The target servers are configured to allow **IPv4 forwarding**.
- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
- The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
  in order to avoid any issue during deployment you should disable your firewall.
- If kubespray is ran from non-root user account, correct privilege escalation method
- If kubespray is run from non-root user account, correct privilege escalation method
  should be configured in the target servers. Then the `ansible_become` flag
  or command parameters `--become or -b` should be specified.

Hardware:
These limits are safe guarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.
These limits are safeguarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.

- Master
  - Memory: 1500 MB
@@ -198,7 +223,7 @@ These limits are safe guarded by Kubespray. Actual requirements for your workloa

## Network Plugins

You can choose between 10 network plugins. (default: `calico`, except Vagrant uses `flannel`)
You can choose among ten network plugins. (default: `calico`, except Vagrant uses `flannel`)

- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.

@@ -207,8 +232,6 @@ You can choose between 10 network plugins. (default: `calico`, except Vagrant us
  and overlay networks, with or without BGP. Calico uses the same engine to enforce network policy for hosts,
  pods, and (if using Istio and Envoy) applications at the service mesh layer.

- [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.

- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.

- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
@@ -225,7 +248,10 @@ You can choose between 10 network plugins. (default: `calico`, except Vagrant us

- [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.

The choice is defined with the variable `kube_network_plugin`. There is also an
- [custom_cni](roles/network-plugin/custom_cni/) : You can specify some manifests that will be applied to the clusters to bring you own CNI and use non-supported ones by Kubespray.
  See `tests/files/custom_cni/README.md` and `tests/files/custom_cni/values.yaml`for an example with a CNI provided by a Helm Chart.

The network plugin to use is defined by the variable `kube_network_plugin`. There is also an
option to leverage built-in cloud provider networking instead.
See also [Network checker](docs/netcheck.md).

@@ -246,10 +272,11 @@ See also [Network checker](docs/netcheck.md).

- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst)
- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
- [Kubean](https://github.com/kubean-io/kubean)

## CI Tests

[](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines)
[](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/-/pipelines)

CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Equinix Metal](https://metal.equinix.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/).

@@ -60,7 +60,7 @@ release-notes --start-sha <The start commit-id> --end-sha <The end commit-id> --
```

If the release note file(/tmp/kubespray-release-note) contains "### Uncategorized" pull requests, those pull requests don't have a valid kind label(`kind/feature`, etc.).
It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note)
It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note

## Container image creation

@@ -9,5 +9,7 @@
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/
atoms
mattymo
floryut
oomichi
cristicalin

Vagrantfile (vendored): 18 changes

@@ -10,6 +10,7 @@ Vagrant.require_version ">= 2.0.0"
CONFIG = File.join(File.dirname(__FILE__), ENV['KUBESPRAY_VAGRANT_CONFIG'] || 'vagrant/config.rb')

FLATCAR_URL_TEMPLATE = "https://%s.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.json"
FEDORA35_MIRROR = "https://download.fedoraproject.org/pub/fedora/linux/releases/35/Cloud/x86_64/images/Fedora-Cloud-Base-Vagrant-35-1.2.x86_64.vagrant-libvirt.box"

# Uniq disk UUID for libvirt
DISK_UUID = Time.now.utc.to_i
@@ -29,9 +30,9 @@ SUPPORTED_OS = {
  "almalinux8" => {box: "almalinux/8", user: "vagrant"},
  "almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
  "rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
  "fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
  "fedora35" => {box: "fedora/35-cloud-base", user: "vagrant", box_url: FEDORA35_MIRROR},
  "fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"},
  "opensuse" => {box: "opensuse/Leap-15.3.x86_64", user: "vagrant"},
  "opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
  "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
  "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
  "oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
@@ -55,14 +56,14 @@ $subnet ||= "172.18.8"
$subnet_ipv6 ||= "fd3c:b398:0698:0756"
$os ||= "ubuntu1804"
$network_plugin ||= "flannel"
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
# Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni
$multi_networking ||= "False"
$download_run_once ||= "True"
$download_force_cache ||= "False"
# The first three nodes are etcd servers
$etcd_instances ||= $num_instances
$etcd_instances ||= [$num_instances, 3].min
# The first two nodes are kube masters
$kube_master_instances ||= $num_instances == 1 ? $num_instances : ($num_instances - 1)
$kube_master_instances ||= [$num_instances, 2].min
# All nodes are kube nodes
$kube_node_instances ||= $num_instances
# The following only works when using the libvirt provider
@@ -82,6 +83,13 @@ $playbook ||= "cluster.yml"

host_vars = {}

# throw error if os is not supported
if ! SUPPORTED_OS.key?($os)
  puts "Unsupported OS: #{$os}"
  puts "Supported OS are: #{SUPPORTED_OS.keys.join(', ')}"
  exit 1
end

$box = SUPPORTED_OS[$os][:box]
# if $inventory is not set, try to use example
$inventory = "inventory/sample" if ! $inventory

cluster.yml: 129 changes

@@ -1,128 +1,3 @@
---
- name: Check ansible version
  import_playbook: ansible_version.yml

- name: Ensure compatibility with old groups
  import_playbook: legacy_groups.yml

- hosts: bastion[0]
  gather_facts: False
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

- hosts: k8s_cluster:etcd
  strategy: linear
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  gather_facts: false
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bootstrap-os, tags: bootstrap-os}

- name: Gather facts
  tags: always
  import_playbook: facts.yml

- hosts: k8s_cluster:etcd
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
    - { role: download, tags: download, when: "not skip_downloads" }

- hosts: etcd
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: true
        etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
      when: etcd_deployment_type != "kubeadm"

- hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: false
        etcd_events_cluster_setup: false
      when: etcd_deployment_type != "kubeadm"

- hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/node, tags: node }

- hosts: kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/control-plane, tags: master }
    - { role: kubernetes/client, tags: client }
    - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }

- hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/kubeadm, tags: kubeadm}
    - { role: kubernetes/node-label, tags: node-label }
    - { role: network_plugin, tags: network }

- hosts: calico_rr
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }

- hosts: kube_control_plane[0]
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }

- hosts: kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
    - { role: kubernetes-apps/network_plugin, tags: network }
    - { role: kubernetes-apps/policy_controller, tags: policy-controller }
    - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
    - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
    - { role: kubernetes-apps, tags: apps }

- name: Apply resolv.conf changes now that cluster DNS is up
  hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
- name: Install Kubernetes
  ansible.builtin.import_playbook: playbooks/cluster.yml

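Since the top-level playbook now only imports `playbooks/cluster.yml`, existing invocations should keep working unchanged; for example (inventory path assumed from the sample layout):

```ShellSession
ansible-playbook -i inventory/mycluster/hosts.yaml --become cluster.yml
# or call the relocated playbook directly:
ansible-playbook -i inventory/mycluster/hosts.yaml --become playbooks/cluster.yml
```
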
@@ -31,4 +31,3 @@
[k8s_cluster:children]
kube_node
kube_control_plane

@@ -43,7 +43,7 @@
  package:
    name: "{{ item }}"
    state: present
  with_items: "{{ distro_extra_packages }} + [ 'rsyslog', 'openssh-server' ]"
  with_items: "{{ distro_extra_packages + [ 'rsyslog', 'openssh-server' ] }}"

- name: Start needed services
  service:

@@ -1,3 +1,3 @@
configparser>=3.3.0
ruamel.yaml>=0.15.88
ipaddress
ruamel.yaml>=0.15.88

@@ -1,3 +1,3 @@
hacking>=0.10.2
pytest>=2.8.0
mock>=1.3.0
pytest>=2.8.0

@@ -13,7 +13,7 @@
# under the License.

import inventory
from test import support
from io import StringIO
import unittest
from unittest import mock

@@ -41,7 +41,7 @@ class TestInventoryPrintHostnames(unittest.TestCase):
             'access_ip': '10.90.0.3'}}}})
        with mock.patch('builtins.open', mock_io):
            with self.assertRaises(SystemExit) as cm:
                with support.captured_stdout() as stdout:
                with mock.patch('sys.stdout', new_callable=StringIO) as stdout:
                    inventory.KubesprayInventory(
                        changed_hosts=["print_hostnames"],
                        config_file="file")

@@ -1,3 +1,2 @@
#k8s_deployment_user: kubespray
#k8s_deployment_user_pkey_path: /tmp/ssh_rsa

@@ -41,4 +41,3 @@

# [network-storage:children]
# gfs-cluster

@@ -14,12 +14,16 @@ This role performs basic installation and setup of Gluster, but it does not conf

Available variables are listed below, along with default values (see `defaults/main.yml`):

    glusterfs_default_release: ""
```yaml
glusterfs_default_release: ""
```

You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).

    glusterfs_ppa_use: yes
    glusterfs_ppa_version: "3.5"
```yaml
glusterfs_ppa_use: yes
glusterfs_ppa_version: "3.5"
```

For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info.

@@ -29,9 +33,11 @@ None.

## Example Playbook

```yaml
- hosts: server
  roles:
    - geerlingguy.glusterfs
```

For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/).

@@ -21,4 +21,3 @@
{% endfor %}
]
}

@@ -36,8 +36,7 @@ terraform apply -var-file=credentials.tfvars
```

- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using bastion host use generated ssh-bastion.conf.
  Ansible automatically detects bastion and changes ssh_args
- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using bastion host use generated `ssh-bastion.conf`. Ansible automatically detects bastion and changes `ssh_args`

```commandline
ssh -F ./ssh-bastion.conf user@$ip

@@ -12,7 +12,7 @@ This will install a Kubernetes cluster on Equinix Metal. It should work in all l
The terraform configuration inspects variables found in
[variables.tf](variables.tf) to create resources in your Equinix Metal project.
There is a [python script](../terraform.py) that reads the generated `.tfstate`
file to generate a dynamic inventory that is consumed by [cluster.yml](../../..//cluster.yml)
file to generate a dynamic inventory that is consumed by [cluster.yml](../../../cluster.yml)
to actually install Kubernetes with Kubespray.
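If you want to sanity-check what that dynamic inventory will produce, `ansible-inventory` can render it (a sketch; it assumes the `hosts` link described below has already been created and Ansible is installed):

```ShellSession
ansible-inventory -i hosts --list
```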
### Kubernetes Nodes
@@ -60,16 +60,16 @@ Terraform will be used to provision all of the Equinix Metal resources with base
Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state):

```ShellSession
cp -LRp contrib/terraform/metal/sample-inventory inventory/$CLUSTER
cp -LRp contrib/terraform/equinix/sample-inventory inventory/$CLUSTER
cd inventory/$CLUSTER
ln -s ../../contrib/terraform/metal/hosts
ln -s ../../contrib/terraform/equinix/hosts
```

This will be the base for subsequent Terraform commands.

#### Equinix Metal API access

Your Equinix Metal API key must be available in the `PACKET_AUTH_TOKEN` environment variable.
Your Equinix Metal API key must be available in the `METAL_AUTH_TOKEN` environment variable.
This key is typically stored outside of the code repo since it is considered secret.
If someone gets this key, they can start up or shut down hosts in your project!
@@ -80,10 +80,12 @@ The Equinix Metal Project ID associated with the key will be set later in `clust

For more information about the API, please see [Equinix Metal API](https://metal.equinix.com/developers/api/).

For more information about terraform provider authentication, please see [the equinix provider documentation](https://registry.terraform.io/providers/equinix/equinix/latest/docs).

Example:

```ShellSession
export PACKET_AUTH_TOKEN="Example-API-Token"
export METAL_AUTH_TOKEN="Example-API-Token"
```

Note that to deploy several clusters within the same project you need to use [terraform workspace](https://www.terraform.io/docs/state/workspaces.html#using-workspaces).
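A minimal sketch of that flow, assuming `mycluster` stands in for your cluster name:

```ShellSession
terraform workspace new mycluster
terraform workspace select mycluster
```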
@@ -101,7 +103,7 @@ This helps when identifying which hosts are associated with each cluster.

While the defaults in variables.tf will successfully deploy a cluster, it is recommended to set the following values (a quick check is sketched after the list):

- cluster_name = the name of the inventory directory created above as $CLUSTER
- metal_project_id = the Equinix Metal Project ID associated with the Equinix Metal API token above
- equinix_metal_project_id = the Equinix Metal Project ID associated with the Equinix Metal API token above
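One hypothetical way to confirm both values are present before running Terraform, from the inventory directory (it should print the two assignments):

```ShellSession
grep -E '^(cluster_name|equinix_metal_project_id)' cluster.tfvars
```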
#### Enable localhost access

@@ -119,12 +121,13 @@ Once the Kubespray playbooks are run, a Kubernetes configuration file will be wr

In the cluster's inventory folder, the following files might be created (either by Terraform
or manually); to prevent you from pushing them accidentally, they are listed in a
`.gitignore` file in the `terraform/metal` directory :
`.gitignore` file in the `contrib/terraform/equinix` directory:

- `.terraform`
- `.tfvars`
- `.tfstate`
- `.tfstate.backup`
- `.lock.hcl`

You can still add them manually if you want to.

@@ -135,7 +138,7 @@ plugins. This is accomplished as follows:

```ShellSession
cd inventory/$CLUSTER
terraform init ../../contrib/terraform/metal
terraform -chdir=../../contrib/terraform/equinix init -var-file=cluster.tfvars
```

This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules.
@@ -146,7 +149,7 @@ You can apply the Terraform configuration to your cluster with the following com
issued from your cluster's inventory directory (`inventory/$CLUSTER`):

```ShellSession
terraform apply -var-file=cluster.tfvars ../../contrib/terraform/metal
terraform -chdir=../../contrib/terraform/equinix apply -var-file=cluster.tfvars
export ANSIBLE_HOST_KEY_CHECKING=False
ansible-playbook -i hosts ../../cluster.yml
```

@@ -156,7 +159,7 @@ ansible-playbook -i hosts ../../cluster.yml

You can destroy your new cluster with the following command issued from the cluster's inventory directory:

```ShellSession
terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/metal
terraform -chdir=../../contrib/terraform/equinix destroy -var-file=cluster.tfvars
```

If you've started the Ansible run, it may also be a good idea to do some manual cleanup:
@@ -1,62 +1,57 @@
# Configure the Equinix Metal Provider
provider "metal" {
}

resource "metal_ssh_key" "k8s" {
resource "equinix_metal_ssh_key" "k8s" {
  count      = var.public_key_path != "" ? 1 : 0
  name       = "kubernetes-${var.cluster_name}"
  public_key = chomp(file(var.public_key_path))
}

resource "metal_device" "k8s_master" {
  depends_on = [metal_ssh_key.k8s]
resource "equinix_metal_device" "k8s_master" {
  depends_on = [equinix_metal_ssh_key.k8s]

  count            = var.number_of_k8s_masters
  hostname         = "${var.cluster_name}-k8s-master-${count.index + 1}"
  plan             = var.plan_k8s_masters
  facilities       = [var.facility]
  metro            = var.metro
  operating_system = var.operating_system
  billing_cycle    = var.billing_cycle
  project_id       = var.metal_project_id
  project_id       = var.equinix_metal_project_id
  tags             = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane", "etcd", "kube_node"]
}

resource "metal_device" "k8s_master_no_etcd" {
  depends_on = [metal_ssh_key.k8s]
resource "equinix_metal_device" "k8s_master_no_etcd" {
  depends_on = [equinix_metal_ssh_key.k8s]

  count            = var.number_of_k8s_masters_no_etcd
  hostname         = "${var.cluster_name}-k8s-master-${count.index + 1}"
  plan             = var.plan_k8s_masters_no_etcd
  facilities       = [var.facility]
  metro            = var.metro
  operating_system = var.operating_system
  billing_cycle    = var.billing_cycle
  project_id       = var.metal_project_id
  project_id       = var.equinix_metal_project_id
  tags             = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane"]
}

resource "metal_device" "k8s_etcd" {
  depends_on = [metal_ssh_key.k8s]
resource "equinix_metal_device" "k8s_etcd" {
  depends_on = [equinix_metal_ssh_key.k8s]

  count            = var.number_of_etcd
  hostname         = "${var.cluster_name}-etcd-${count.index + 1}"
  plan             = var.plan_etcd
  facilities       = [var.facility]
  metro            = var.metro
  operating_system = var.operating_system
  billing_cycle    = var.billing_cycle
  project_id       = var.metal_project_id
  project_id       = var.equinix_metal_project_id
  tags             = ["cluster-${var.cluster_name}", "etcd"]
}

resource "metal_device" "k8s_node" {
  depends_on = [metal_ssh_key.k8s]
resource "equinix_metal_device" "k8s_node" {
  depends_on = [equinix_metal_ssh_key.k8s]

  count            = var.number_of_k8s_nodes
  hostname         = "${var.cluster_name}-k8s-node-${count.index + 1}"
  plan             = var.plan_k8s_nodes
  facilities       = [var.facility]
  metro            = var.metro
  operating_system = var.operating_system
  billing_cycle    = var.billing_cycle
  project_id       = var.metal_project_id
  project_id       = var.equinix_metal_project_id
  tags             = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_node"]
}
contrib/terraform/equinix/output.tf
Normal file
@@ -0,0 +1,15 @@
output "k8s_masters" {
  value = equinix_metal_device.k8s_master.*.access_public_ipv4
}

output "k8s_masters_no_etc" {
  value = equinix_metal_device.k8s_master_no_etcd.*.access_public_ipv4
}

output "k8s_etcds" {
  value = equinix_metal_device.k8s_etcd.*.access_public_ipv4
}

output "k8s_nodes" {
  value = equinix_metal_device.k8s_node.*.access_public_ipv4
}
contrib/terraform/equinix/provider.tf
Normal file
@@ -0,0 +1,17 @@
terraform {
  required_version = ">= 1.0.0"

  provider_meta "equinix" {
    module_name = "kubespray"
  }
  required_providers {
    equinix = {
      source  = "equinix/equinix"
      version = "~> 1.14"
    }
  }
}

# Configure the Equinix Metal Provider
provider "equinix" {
}
@@ -1,16 +1,19 @@
# your Kubernetes cluster name here
cluster_name = "mycluster"

# Your Equinix Metal project ID. See hhttps://metal.equinix.com/developers/docs/accounts/
metal_project_id = "Example-API-Token"
# Your Equinix Metal project ID. See https://metal.equinix.com/developers/docs/accounts/
equinix_metal_project_id = "Example-Project-Id"

# The public SSH key to be uploaded into authorized_keys in bare metal Equinix Metal nodes provisioned
# leave this value blank if the public key is already setup in the Equinix Metal project
# Terraform will complain if the public key is setup in Equinix Metal
public_key_path = "~/.ssh/id_rsa.pub"

# cluster location
facility = "ewr1"
# Equinix interconnected bare metal across our global metros.
metro = "da"

# operating_system
operating_system = "ubuntu_22_04"

# standalone etcds
number_of_etcd = 0
@@ -2,12 +2,12 @@ variable "cluster_name" {
  default = "kubespray"
}

variable "metal_project_id" {
variable "equinix_metal_project_id" {
  description = "Your Equinix Metal project ID. See https://metal.equinix.com/developers/docs/accounts/"
}

variable "operating_system" {
  default = "ubuntu_20_04"
  default = "ubuntu_22_04"
}

variable "public_key_path" {

@@ -19,8 +19,8 @@ variable "billing_cycle" {
  default = "hourly"
}

variable "facility" {
  default = "dfw2"
variable "metro" {
  default = "da"
}

variable "plan_k8s_masters" {

@@ -54,4 +54,3 @@ variable "number_of_etcd" {
variable "number_of_k8s_nodes" {
  default = 1
}
@@ -31,9 +31,7 @@ The setup looks like following

## Requirements

* Terraform 0.13.0 or newer

*0.12 also works if you modify the provider block to include version and remove all `versions.tf` files*
* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include version and remove all `versions.tf` files)

## Quickstart
@@ -4,7 +4,7 @@ module "kubernetes" {
  source = "./modules/kubernetes-cluster"

  prefix = var.prefix

  zone     = var.zone
  machines = var.machines

  ssh_public_keys = var.ssh_public_keys
@@ -75,6 +75,11 @@ ansible-playbook -i contrib/terraform/gcs/inventory.ini cluster.yml -b -v

* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports)
* `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to ingress on ports 80 and 443
* `extra_ingress_firewalls`: Additional ingress firewall rules. The key will be used as the name of the rule
  * `source_ranges`: List of IP ranges (CIDR). Example: `["8.8.8.8"]`
  * `protocol`: Protocol. Example `"tcp"`
  * `ports`: List of ports, as string. Example `["53"]`
  * `target_tags`: List of target tags (either the machine name or `control-plane` or `worker`). Example: `["control-plane", "worker-0"]`

### Optional
@@ -34,4 +34,6 @@ module "kubernetes" {
  api_server_whitelist = var.api_server_whitelist
  nodeport_whitelist   = var.nodeport_whitelist
  ingress_whitelist    = var.ingress_whitelist

  extra_ingress_firewalls = var.extra_ingress_firewalls
}
@@ -219,7 +219,7 @@ resource "google_compute_instance" "master" {
  machine_type = each.value.size
  zone         = each.value.zone

  tags = ["master"]
  tags = ["control-plane", "master", each.key]

  boot_disk {
    initialize_params {

@@ -325,7 +325,7 @@ resource "google_compute_instance" "worker" {
  machine_type = each.value.size
  zone         = each.value.zone

  tags = ["worker"]
  tags = ["worker", each.key]

  boot_disk {
    initialize_params {
@@ -398,3 +398,24 @@ resource "google_compute_target_pool" "worker_lb" {
  name      = "${var.prefix}-worker-lb-pool"
  instances = local.worker_target_list
}

resource "google_compute_firewall" "extra_ingress_firewall" {
  for_each = {
    for name, firewall in var.extra_ingress_firewalls :
    name => firewall
  }

  name    = "${var.prefix}-${each.key}-ingress"
  network = google_compute_network.main.name

  priority = 100

  source_ranges = each.value.source_ranges

  target_tags = each.value.target_tags

  allow {
    protocol = each.value.protocol
    ports    = each.value.ports
  }
}
@@ -73,3 +73,14 @@ variable "ingress_whitelist" {
variable "private_network_cidr" {
  default = "10.0.10.0/24"
}

variable "extra_ingress_firewalls" {
  type = map(object({
    source_ranges = set(string)
    protocol      = string
    ports         = list(string)
    target_tags   = set(string)
  }))

  default = {}
}

@@ -95,3 +95,14 @@ variable "ingress_whitelist" {
  type    = list(string)
  default = ["0.0.0.0/0"]
}

variable "extra_ingress_firewalls" {
  type = map(object({
    source_ranges = set(string)
    protocol      = string
    ports         = list(string)
    target_tags   = set(string)
  }))

  default = {}
}
@@ -56,11 +56,24 @@ cd inventory/$CLUSTER

Edit `default.tfvars` to match your requirements.

To use Flatcar Container Linux instead of the basic Hetzner images:

```bash
cd ../../contrib/terraform/hetzner
```

Edit `main.tf`, reactivate the module `source = "./modules/kubernetes-cluster-flatcar"` and
comment out `source = "./modules/kubernetes-cluster"`.

Also activate `ssh_private_key_path = var.ssh_private_key_path`. The VM boots into
rescue mode with the image selected in `var.machines`, but installs Flatcar instead.

Run Terraform to create the infrastructure.

```bash
terraform init ../../contrib/terraform/hetzner
terraform apply --var-file default.tfvars ../../contrib/terraform/hetzner/
cd ./kubespray
terraform -chdir=./contrib/terraform/hetzner/ init
terraform -chdir=./contrib/terraform/hetzner/ apply --var-file=../../../inventory/$CLUSTER/default.tfvars
```

You should now have an inventory file named `inventory.ini` that you can use with kubespray.
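For example (a sketch, not taken from the repo docs: it assumes you run from the repository root and that Terraform wrote `inventory.ini` next to the Hetzner configuration):

```bash
ansible-playbook -i contrib/terraform/hetzner/inventory.ini cluster.yml -b
```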
@@ -9,21 +9,23 @@ ssh_public_keys = [
  "ssh-rsa I-did-not-read-the-docs 2",
]

ssh_private_key_path = "~/.ssh/id_rsa"

machines = {
  "master-0" : {
    "node_type" : "master",
    "size" : "cx21",
    "image" : "ubuntu-20.04",
    "image" : "ubuntu-22.04",
  },
  "worker-0" : {
    "node_type" : "worker",
    "size" : "cx21",
    "image" : "ubuntu-20.04",
    "image" : "ubuntu-22.04",
  },
  "worker-1" : {
    "node_type" : "worker",
    "size" : "cx21",
    "image" : "ubuntu-20.04",
    "image" : "ubuntu-22.04",
  }
}
@@ -2,6 +2,7 @@ provider "hcloud" {}

module "kubernetes" {
  source = "./modules/kubernetes-cluster"
  # source = "./modules/kubernetes-cluster-flatcar"

  prefix = var.prefix

@@ -9,6 +10,9 @@ module "kubernetes" {

  machines = var.machines

  #only for flatcar
  #ssh_private_key_path = var.ssh_private_key_path

  ssh_public_keys = var.ssh_public_keys
  network_zone    = var.network_zone

@@ -22,10 +26,10 @@ module "kubernetes" {
# Generate ansible inventory
#

data "template_file" "inventory" {
  template = file("${path.module}/templates/inventory.tpl")

  vars = {
locals {
  inventory = templatefile(
    "${path.module}/templates/inventory.tpl",
    {
      connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d",
        keys(module.kubernetes.master_ip_addresses),
        values(module.kubernetes.master_ip_addresses).*.public_ip,

@@ -39,14 +43,15 @@ data "template_file" "inventory" {
      list_worker = join("\n", keys(module.kubernetes.worker_ip_addresses))
      network_id  = module.kubernetes.network_id
    }
  )
}

resource "null_resource" "inventories" {
  provisioner "local-exec" {
    command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
    command = "echo '${local.inventory}' > ${var.inventory_file}"
  }

  triggers = {
    template = data.template_file.inventory.rendered
    template = local.inventory
  }
}
@@ -0,0 +1,144 @@
resource "hcloud_network" "kubernetes" {
  name     = "${var.prefix}-network"
  ip_range = var.private_network_cidr
}

resource "hcloud_network_subnet" "kubernetes" {
  type         = "cloud"
  network_id   = hcloud_network.kubernetes.id
  network_zone = var.network_zone
  ip_range     = var.private_subnet_cidr
}

resource "hcloud_ssh_key" "first" {
  name       = var.prefix
  public_key = var.ssh_public_keys.0
}

resource "hcloud_server" "machine" {
  for_each = {
    for name, machine in var.machines :
    name => machine
  }

  name     = "${var.prefix}-${each.key}"
  ssh_keys = [hcloud_ssh_key.first.id]
  # boot into rescue OS
  rescue = "linux64"
  # dummy value for the OS because Flatcar is not available
  image       = each.value.image
  server_type = each.value.size
  location    = var.zone
  connection {
    host        = self.ipv4_address
    timeout     = "5m"
    private_key = file(var.ssh_private_key_path)
  }
  firewall_ids = each.value.node_type == "master" ? [hcloud_firewall.master.id] : [hcloud_firewall.worker.id]
  provisioner "file" {
    content     = data.ct_config.machine-ignitions[each.key].rendered
    destination = "/root/ignition.json"
  }

  provisioner "remote-exec" {
    inline = [
      "set -ex",
      "apt update",
      "apt install -y gawk",
      "curl -fsSLO --retry-delay 1 --retry 60 --retry-connrefused --retry-max-time 60 --connect-timeout 20 https://raw.githubusercontent.com/flatcar/init/flatcar-master/bin/flatcar-install",
      "chmod +x flatcar-install",
      "./flatcar-install -s -i /root/ignition.json -C stable",
      "shutdown -r +1",
    ]
  }

  # optional:
  provisioner "remote-exec" {
    connection {
      host        = self.ipv4_address
      private_key = file(var.ssh_private_key_path)
      timeout     = "3m"
      user        = var.user_flatcar
    }

    inline = [
      "sudo hostnamectl set-hostname ${self.name}",
    ]
  }
}

resource "hcloud_server_network" "machine" {
  for_each = {
    for name, machine in var.machines :
    name => hcloud_server.machine[name]
  }
  server_id = each.value.id
  subnet_id = hcloud_network_subnet.kubernetes.id
}

data "ct_config" "machine-ignitions" {
  for_each = {
    for name, machine in var.machines :
    name => machine
  }

  strict = false
  content = templatefile(
    "${path.module}/templates/machine.yaml.tmpl",
    {
      ssh_keys     = jsonencode(var.ssh_public_keys)
      user_flatcar = var.user_flatcar
      name         = each.key
    }
  )
}

resource "hcloud_firewall" "master" {
  name = "${var.prefix}-master-firewall"

  rule {
    direction  = "in"
    protocol   = "tcp"
    port       = "22"
    source_ips = var.ssh_whitelist
  }

  rule {
    direction  = "in"
    protocol   = "tcp"
    port       = "6443"
    source_ips = var.api_server_whitelist
  }
}

resource "hcloud_firewall" "worker" {
  name = "${var.prefix}-worker-firewall"

  rule {
    direction  = "in"
    protocol   = "tcp"
    port       = "22"
    source_ips = var.ssh_whitelist
  }

  rule {
    direction  = "in"
    protocol   = "tcp"
    port       = "80"
    source_ips = var.ingress_whitelist
  }

  rule {
    direction  = "in"
    protocol   = "tcp"
    port       = "443"
    source_ips = var.ingress_whitelist
  }

  rule {
    direction  = "in"
    protocol   = "tcp"
    port       = "30000-32767"
    source_ips = var.nodeport_whitelist
  }
}
@@ -0,0 +1,29 @@
output "master_ip_addresses" {
  value = {
    for name, machine in var.machines :
    name => {
      "private_ip" = hcloud_server_network.machine[name].ip
      "public_ip"  = hcloud_server.machine[name].ipv4_address
    }
    if machine.node_type == "master"
  }
}

output "worker_ip_addresses" {
  value = {
    for name, machine in var.machines :
    name => {
      "private_ip" = hcloud_server_network.machine[name].ip
      "public_ip"  = hcloud_server.machine[name].ipv4_address
    }
    if machine.node_type == "worker"
  }
}

output "cluster_private_network_cidr" {
  value = var.private_subnet_cidr
}

output "network_id" {
  value = hcloud_network.kubernetes.id
}
@@ -0,0 +1,19 @@
variant: flatcar
version: 1.0.0

passwd:
  users:
    - name: ${user_flatcar}
      ssh_authorized_keys: ${ssh_keys}

storage:
  files:
    - path: /home/core/works
      filesystem: root
      mode: 0755
      contents:
        inline: |
          #!/bin/bash
          set -euo pipefail
          hostname="$(hostname)"
          echo My name is ${name} and the hostname is $${hostname}
@@ -0,0 +1,60 @@
variable "zone" {
  type    = string
  default = "fsn1"
}

variable "prefix" {
  default = "k8s"
}

variable "user_flatcar" {
  type    = string
  default = "core"
}

variable "machines" {
  type = map(object({
    node_type = string
    size      = string
    image     = string
  }))
}

variable "ssh_public_keys" {
  type = list(string)
}

variable "ssh_private_key_path" {
  type    = string
  default = "~/.ssh/id_rsa"
}

variable "ssh_whitelist" {
  type = list(string)
}

variable "api_server_whitelist" {
  type = list(string)
}

variable "nodeport_whitelist" {
  type = list(string)
}

variable "ingress_whitelist" {
  type = list(string)
}

variable "private_network_cidr" {
  default = "10.0.0.0/16"
}

variable "private_subnet_cidr" {
  default = "10.0.10.0/24"
}

variable "network_zone" {
  default = "eu-central"
}
@@ -0,0 +1,14 @@
terraform {
  required_providers {
    hcloud = {
      source = "hetznercloud/hcloud"
    }
    ct = {
      source  = "poseidon/ct"
      version = "0.11.0"
    }
    null = {
      source = "hashicorp/null"
    }
  }
}
@@ -14,4 +14,3 @@ ssh_authorized_keys:
%{ for ssh_public_key in ssh_public_keys ~}
  - ${ssh_public_key}
%{ endfor ~}

@@ -2,7 +2,7 @@ terraform {
  required_providers {
    hcloud = {
      source  = "hetznercloud/hcloud"
      version = "1.31.1"
      version = "1.38.2"
    }
  }
  required_version = ">= 0.14"
contrib/terraform/hetzner/sample-inventory/cluster.tfvars
Normal file
@@ -0,0 +1,46 @@
prefix = "default"
zone = "hel1"
network_zone = "eu-central"
inventory_file = "inventory.ini"

ssh_public_keys = [
  # Put your public SSH key here
  "ssh-rsa I-did-not-read-the-docs",
  "ssh-rsa I-did-not-read-the-docs 2",
]

ssh_private_key_path = "~/.ssh/id_rsa"

machines = {
  "master-0" : {
    "node_type" : "master",
    "size" : "cx21",
    "image" : "ubuntu-22.04",
  },
  "worker-0" : {
    "node_type" : "worker",
    "size" : "cx21",
    "image" : "ubuntu-22.04",
  },
  "worker-1" : {
    "node_type" : "worker",
    "size" : "cx21",
    "image" : "ubuntu-22.04",
  }
}

nodeport_whitelist = [
  "0.0.0.0/0"
]

ingress_whitelist = [
  "0.0.0.0/0"
]

ssh_whitelist = [
  "0.0.0.0/0"
]

api_server_whitelist = [
  "0.0.0.0/0"
]
contrib/terraform/hetzner/sample-inventory/group_vars
Symbolic link
@@ -0,0 +1 @@
../../../../inventory/sample/group_vars
@@ -2,18 +2,18 @@
${connection_strings_master}
${connection_strings_worker}

[kube-master]
[kube_control_plane]
${list_master}

[etcd]
${list_master}

[kube-node]
[kube_node]
${list_worker}

[k8s-cluster:children]
[k8s_cluster:children]
kube-master
kube-node

[k8s-cluster:vars]
[k8s_cluster:vars]
network_id=${network_id}
@@ -25,6 +25,12 @@ variable "ssh_public_keys" {
  type = list(string)
}

variable "ssh_private_key_path" {
  description = "Private SSH key used to connect to the VMs."
  type        = string
  default     = "~/.ssh/id_rsa"
}

variable "ssh_whitelist" {
  description = "List of IP ranges (CIDR) to whitelist for ssh"
  type        = list(string)
@@ -2,14 +2,11 @@ terraform {
  required_providers {
    hcloud = {
      source  = "hetznercloud/hcloud"
      version = "1.31.1"
      version = "1.38.2"
    }
    null = {
      source = "hashicorp/null"
    }
    template = {
      source = "hashicorp/template"
    }
  }
  required_version = ">= 0.14"
}
@@ -1,16 +0,0 @@
output "k8s_masters" {
  value = metal_device.k8s_master.*.access_public_ipv4
}

output "k8s_masters_no_etc" {
  value = metal_device.k8s_master_no_etcd.*.access_public_ipv4
}

output "k8s_etcds" {
  value = metal_device.k8s_etcd.*.access_public_ipv4
}

output "k8s_nodes" {
  value = metal_device.k8s_node.*.access_public_ipv4
}
@@ -1,9 +0,0 @@

terraform {
  required_version = ">= 0.12"
  required_providers {
    metal = {
      source = "equinix/metal"
    }
  }
}
@@ -88,7 +88,7 @@ binaries available on hyperkube v1.4.3_coreos.0 or higher.

## Requirements

- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html) 0.12 or later
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html) 0.14 or later
- [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html)
- you already have a suitable OS image in Glance
- you already have a floating IP pool created
@@ -270,6 +270,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.

|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube_ingress` for running ingress controller pods, empty by default. |
|`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default |
|`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default |
|`bastion_allowed_ports` | List of ports to open on bastion node, `[]` by default |
|`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |
|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
|`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |

@@ -283,6 +284,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.

|`master_server_group_policy` | Enable and use openstack nova servergroups for masters with set policy, default: "" (disabled) |
|`node_server_group_policy` | Enable and use openstack nova servergroups for nodes with set policy, default: "" (disabled) |
|`etcd_server_group_policy` | Enable and use openstack nova servergroups for etcd with set policy, default: "" (disabled) |
|`additional_server_groups` | Extra server groups to create. Set "policy" to the policy for the group, expected format is `{"new-server-group" = {"policy" = "anti-affinity"}}`, default: {} (to not create any extra groups) |
|`use_access_ip` | If 1, nodes with floating IPs will transmit internal cluster traffic via floating IPs; if 0 private IPs will be used instead. Default value is 1. |
|`port_security_enabled` | Allow to disable port security by setting this to `false`. `true` by default |
|`force_null_port_security` | Set `null` instead of `true` or `false` for `port_security`. `false` by default |
@@ -291,10 +293,32 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.

##### k8s_nodes

Allows a custom definition of worker nodes giving the operator full control over individual node flavor and
availability zone placement. To enable the use of this mode set the `number_of_k8s_nodes` and
`number_of_k8s_nodes_no_floating_ip` variables to 0. Then define your desired worker node configuration
using the `k8s_nodes` variable.
Allows a custom definition of worker nodes giving the operator full control over individual node flavor and availability zone placement.
To enable the use of this mode set the `number_of_k8s_nodes` and `number_of_k8s_nodes_no_floating_ip` variables to 0.
Then define your desired worker node configuration using the `k8s_nodes` variable.
The `az`, `flavor` and `floating_ip` parameters are mandatory.
The optional parameter `extra_groups` (a comma-delimited string) can be used to define extra inventory group memberships for specific nodes.

```yaml
k8s_nodes:
  node-name:
    az: string # Name of the AZ
    flavor: string # Flavor ID to use
    floating_ip: bool # If floating IPs should be created or not
    extra_groups: string # (optional) Additional groups to add for kubespray, defaults to no groups
    image_id: string # (optional) Image ID to use, defaults to var.image_id or var.image
    root_volume_size_in_gb: number # (optional) Size of the block storage to use as root disk, defaults to var.node_root_volume_size_in_gb or to use volume from flavor otherwise
    volume_type: string # (optional) Volume type to use, defaults to var.node_volume_type
    network_id: string # (optional) Use this network_id for the node, defaults to either var.network_id or ID of var.network_name
    server_group: string # (optional) Server group to add this node to. If set, this has to be one specified in additional_server_groups, defaults to use the server group specified in node_server_group_policy
    cloudinit: # (optional) Options for cloud-init
      extra_partitions: # List of extra partitions (other than the root partition) to setup during creation
        volume_path: string # Path to the volume to create partition for (e.g. /dev/vda )
        partition_path: string # Path to the partition (e.g. /dev/vda2 )
        mount_path: string # Path to where the partition should be mounted
        partition_start: string # Where the partition should start (e.g. 10GB ). Note, if you set the partition_start to 0 there will be no space left for the root partition
        partition_end: string # Where the partition should end (e.g. 10GB or -1 for end of volume)
```

For example:

@@ -314,6 +338,7 @@ k8s_nodes = {
    "az"           = "sto3"
    "flavor"       = "83d8b44a-26a0-4f02-a981-079446926445"
    "floating_ip"  = true
    "extra_groups" = "calico_rr"
  }
}
```
@@ -424,7 +449,7 @@ This should finish fairly quickly telling you Terraform has successfully initial

You can apply cloud-init based customization for the openstack instances before provisioning your cluster.
One common template is used for all instances. Adjust the file shown below:
`contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml`
`contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml.tmpl`
For example, to enable openstack novnc access and ansible_user=root SSH access:

```ShellSession
@@ -84,6 +84,7 @@ module "compute" {
  supplementary_node_groups  = var.supplementary_node_groups
  master_allowed_ports       = var.master_allowed_ports
  worker_allowed_ports       = var.worker_allowed_ports
  bastion_allowed_ports      = var.bastion_allowed_ports
  use_access_ip              = var.use_access_ip
  master_server_group_policy = var.master_server_group_policy
  node_server_group_policy   = var.node_server_group_policy

@@ -96,6 +97,12 @@ module "compute" {
  network_router_id        = module.network.router_id
  network_id               = module.network.network_id
  use_existing_network     = var.use_existing_network
  private_subnet_id        = module.network.subnet_id
  additional_server_groups = var.additional_server_groups

  depends_on = [
    module.network.subnet_id
  ]
}

output "private_subnet_id" {

@@ -111,7 +118,7 @@ output "router_id" {
}

output "k8s_master_fips" {
  value = concat(module.ips.k8s_master_fips, module.ips.k8s_master_no_etcd_fips)
  value = var.number_of_k8s_masters + var.number_of_k8s_masters_no_etcd > 0 ? concat(module.ips.k8s_master_fips, module.ips.k8s_master_no_etcd_fips) : [for key, value in module.ips.k8s_masters_fips : value.address]
}

output "k8s_node_fips" {
@@ -15,8 +15,14 @@ data "openstack_images_image_v2" "image_master" {
  name = var.image_master == "" ? var.image : var.image_master
}

data "template_file" "cloudinit" {
  template = file("${path.module}/templates/cloudinit.yaml")
data "cloudinit_config" "cloudinit" {
  part {
    content_type = "text/cloud-config"
    content = templatefile("${path.module}/templates/cloudinit.yaml.tmpl", {
      # template_file doesn't support lists
      extra_partitions = ""
    })
  }
}

data "openstack_networking_network_v2" "k8s_network" {

@@ -82,6 +88,17 @@ resource "openstack_networking_secgroup_rule_v2" "bastion" {
  security_group_id = openstack_networking_secgroup_v2.bastion[0].id
}

resource "openstack_networking_secgroup_rule_v2" "k8s_bastion_ports" {
  count             = length(var.bastion_allowed_ports)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = lookup(var.bastion_allowed_ports[count.index], "protocol", "tcp")
  port_range_min    = lookup(var.bastion_allowed_ports[count.index], "port_range_min")
  port_range_max    = lookup(var.bastion_allowed_ports[count.index], "port_range_max")
  remote_ip_prefix  = lookup(var.bastion_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0")
  security_group_id = openstack_networking_secgroup_v2.bastion[0].id
}

resource "openstack_networking_secgroup_v2" "k8s" {
  name        = "${var.cluster_name}-k8s"
  description = "${var.cluster_name} - Kubernetes"

@@ -156,6 +173,12 @@ resource "openstack_compute_servergroup_v2" "k8s_etcd" {
  policies = [var.etcd_server_group_policy]
}

resource "openstack_compute_servergroup_v2" "k8s_node_additional" {
  for_each = var.additional_server_groups
  name     = "k8s-${each.key}-srvgrp"
  policies = [each.value.policy]
}

locals {
  # master groups
  master_sec_groups = compact([

@@ -185,6 +208,29 @@ locals {
  image_to_use_gfs = var.image_gfs_uuid != "" ? var.image_gfs_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.gfs_image[0].id
  # image_master uuidimage_gfs_uuid
  image_to_use_master = var.image_master_uuid != "" ? var.image_master_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.image_master[0].id

  k8s_nodes_settings = {
    for name, node in var.k8s_nodes :
    name => {
      "use_local_disk" = (node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.node_root_volume_size_in_gb) == 0,
      "image_id"       = node.image_id != null ? node.image_id : local.image_to_use_node,
      "volume_size"    = node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.node_root_volume_size_in_gb,
      "volume_type"    = node.volume_type != null ? node.volume_type : var.node_volume_type,
      "network_id"     = node.network_id != null ? node.network_id : (var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id)
      "server_group"   = node.server_group != null ? [openstack_compute_servergroup_v2.k8s_node_additional[node.server_group].id] : (var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0].id] : [])
    }
  }

  k8s_masters_settings = {
    for name, node in var.k8s_masters :
    name => {
      "use_local_disk" = (node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.master_root_volume_size_in_gb) == 0,
      "image_id"       = node.image_id != null ? node.image_id : local.image_to_use_master,
      "volume_size"    = node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.master_root_volume_size_in_gb,
      "volume_type"    = node.volume_type != null ? node.volume_type : var.master_volume_type,
      "network_id"     = node.network_id != null ? node.network_id : (var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id)
    }
  }
}

resource "openstack_networking_port_v2" "bastion_port" {

@@ -195,6 +241,12 @@ resource "openstack_networking_port_v2" "bastion_port" {
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.bastion_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id

@@ -207,7 +259,7 @@ resource "openstack_compute_instance_v2" "bastion" {
  image_id  = var.bastion_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
  flavor_id = var.flavor_bastion
  key_pair  = openstack_compute_keypair_v2.k8s.name
  user_data = data.template_file.cloudinit.rendered
  user_data = data.cloudinit_config.cloudinit.rendered

  dynamic "block_device" {
    for_each = var.bastion_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []

@@ -245,6 +297,12 @@ resource "openstack_networking_port_v2" "k8s_master_port" {
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id

@@ -258,7 +316,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
  image_id  = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
  flavor_id = var.flavor_k8s_master
  key_pair  = openstack_compute_keypair_v2.k8s.name
  user_data = data.template_file.cloudinit.rendered
  user_data = data.cloudinit_config.cloudinit.rendered

  dynamic "block_device" {

@@ -300,11 +358,17 @@ resource "openstack_compute_instance_v2" "k8s_master" {
resource "openstack_networking_port_v2" "k8s_masters_port" {
  for_each              = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {}
  name                  = "${var.cluster_name}-k8s-${each.key}"
  network_id            = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
  network_id            = local.k8s_masters_settings[each.key].network_id
  admin_state_up        = "true"
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id

@@ -315,17 +379,17 @@ resource "openstack_compute_instance_v2" "k8s_masters" {
  for_each          = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {}
  name              = "${var.cluster_name}-k8s-${each.key}"
  availability_zone = each.value.az
  image_id          = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
  image_id          = local.k8s_masters_settings[each.key].use_local_disk ? local.k8s_masters_settings[each.key].image_id : null
  flavor_id         = each.value.flavor
  key_pair          = openstack_compute_keypair_v2.k8s.name

  dynamic "block_device" {
    for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
    for_each = !local.k8s_masters_settings[each.key].use_local_disk ? [local.k8s_masters_settings[each.key].image_id] : []
    content {
      uuid                  = local.image_to_use_master
      uuid                  = block_device.value
      source_type           = "image"
      volume_size           = var.master_root_volume_size_in_gb
      volume_type           = var.master_volume_type
      volume_size           = local.k8s_masters_settings[each.key].volume_size
      volume_type           = local.k8s_masters_settings[each.key].volume_type
      boot_index            = 0
      destination_type      = "volume"
      delete_on_termination = true

@@ -351,7 +415,7 @@ resource "openstack_compute_instance_v2" "k8s_masters" {
  }

  provisioner "local-exec" {
    command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ${path.root}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_masters_fips : value.address]), 0)}/ > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}"
    command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ${path.module}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_masters_fips : value.address]), 0)}/ > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}"
  }
}

@@ -363,6 +427,12 @@ resource "openstack_networking_port_v2" "k8s_master_no_etcd_port" {
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id

@@ -376,7 +446,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
  image_id  = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
  flavor_id = var.flavor_k8s_master
  key_pair  = openstack_compute_keypair_v2.k8s.name
  user_data = data.template_file.cloudinit.rendered
  user_data = data.cloudinit_config.cloudinit.rendered

  dynamic "block_device" {

@@ -423,6 +493,12 @@ resource "openstack_networking_port_v2" "etcd_port" {
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.etcd_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id

@@ -436,7 +512,7 @@ resource "openstack_compute_instance_v2" "etcd" {
  image_id  = var.etcd_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
  flavor_id = var.flavor_etcd
  key_pair  = openstack_compute_keypair_v2.k8s.name
  user_data = data.template_file.cloudinit.rendered
  user_data = data.cloudinit_config.cloudinit.rendered

  dynamic "block_device" {
    for_each = var.etcd_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []

@@ -477,6 +553,12 @@ resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_port" {
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id

@@ -531,6 +613,12 @@ resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_no_etcd_port"
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id

@@ -544,7 +632,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
  image_id  = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
  flavor_id = var.flavor_k8s_master
  key_pair  = openstack_compute_keypair_v2.k8s.name
  user_data = data.template_file.cloudinit.rendered
  user_data = data.cloudinit_config.cloudinit.rendered

  dynamic "block_device" {
    for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []

@@ -586,6 +674,12 @@ resource "openstack_networking_port_v2" "k8s_node_port" {
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.worker_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id

@@ -599,7 +693,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
  image_id  = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
  flavor_id = var.flavor_k8s_node
  key_pair  = openstack_compute_keypair_v2.k8s.name
  user_data = data.template_file.cloudinit.rendered
  user_data = data.cloudinit_config.cloudinit.rendered

  dynamic "block_device" {
    for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []

@@ -646,6 +740,12 @@ resource "openstack_networking_port_v2" "k8s_node_no_floating_ip_port" {
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.worker_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id

@@ -659,7 +759,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
  image_id  = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
  flavor_id = var.flavor_k8s_node
  key_pair  = openstack_compute_keypair_v2.k8s.name
  user_data = data.template_file.cloudinit.rendered
  user_data = data.cloudinit_config.cloudinit.rendered

  dynamic "block_device" {
    for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []

@@ -679,9 +779,9 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
  }

  dynamic "scheduler_hints" {
    for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
    for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0].id] : []
    content {
      group = openstack_compute_servergroup_v2.k8s_node[0].id
      group = scheduler_hints.value
    }
  }

@@ -696,11 +796,17 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
resource "openstack_networking_port_v2" "k8s_nodes_port" {
  for_each              = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
  name                  = "${var.cluster_name}-k8s-node-${each.key}"
  network_id            = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
  network_id            = local.k8s_nodes_settings[each.key].network_id
  admin_state_up        = "true"
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.worker_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id

@@ -711,18 +817,20 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
  for_each          = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
  name              = "${var.cluster_name}-k8s-node-${each.key}"
  availability_zone = each.value.az
  image_id          = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
  image_id          = local.k8s_nodes_settings[each.key].use_local_disk ? local.k8s_nodes_settings[each.key].image_id : null
  flavor_id         = each.value.flavor
  key_pair          = openstack_compute_keypair_v2.k8s.name
  user_data         = data.template_file.cloudinit.rendered
  user_data         = each.value.cloudinit != null ? templatefile("${path.module}/templates/cloudinit.yaml.tmpl", {
    extra_partitions = each.value.cloudinit.extra_partitions
  }) : data.cloudinit_config.cloudinit.rendered

  dynamic "block_device" {
    for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
    for_each = !local.k8s_nodes_settings[each.key].use_local_disk ? [local.k8s_nodes_settings[each.key].image_id] : []
    content {
      uuid                  = local.image_to_use_node
      uuid                  = block_device.value
      source_type           = "image"
      volume_size           = var.node_root_volume_size_in_gb
      volume_type           = var.node_volume_type
      volume_size           = local.k8s_nodes_settings[each.key].volume_size
      volume_type           = local.k8s_nodes_settings[each.key].volume_type
      boot_index            = 0
      destination_type      = "volume"
      delete_on_termination = true

@@ -734,15 +842,15 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
  }

  dynamic "scheduler_hints" {
    for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
    for_each = local.k8s_nodes_settings[each.key].server_group
    content {
      group = openstack_compute_servergroup_v2.k8s_node[0].id
      group = scheduler_hints.value
    }
  }

  metadata = {
    ssh_user         = var.ssh_user
    kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}"
    kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}${each.value.extra_groups != null ? ",${each.value.extra_groups}" : ""}"
    depends_on       = var.network_router_id
    use_access_ip    = var.use_access_ip
  }

@@ -760,6 +868,12 @@ resource "openstack_networking_port_v2" "glusterfs_node_no_floating_ip_port" {
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.gfs_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id
@@ -1,17 +0,0 @@
# yamllint disable rule:comments
#cloud-config
## in some cases novnc console access is required
## it requires ssh password to be set
#ssh_pwauth: yes
#chpasswd:
#  list: |
#    root:secret
#  expire: False

## in some cases direct root ssh access via ssh key is required
#disable_root: false

## in some cases additional CA certs are required
#ca-certs:
#  trusted: |
#    -----BEGIN CERTIFICATE-----
@@ -0,0 +1,39 @@
%{~ if length(extra_partitions) > 0 }
#cloud-config
bootcmd:
%{~ for idx, partition in extra_partitions }
  - [ cloud-init-per, once, move-second-header, sgdisk, --move-second-header, ${partition.volume_path} ]
  - [ cloud-init-per, once, create-part-${idx}, parted, --script, ${partition.volume_path}, 'mkpart extended ext4 ${partition.partition_start} ${partition.partition_end}' ]
  - [ cloud-init-per, once, create-fs-part-${idx}, mkfs.ext4, ${partition.partition_path} ]
%{~ endfor }

runcmd:
%{~ for idx, partition in extra_partitions }
  - mkdir -p ${partition.mount_path}
  - chown nobody:nogroup ${partition.mount_path}
  - mount ${partition.partition_path} ${partition.mount_path}
%{~ endfor }

mounts:
%{~ for idx, partition in extra_partitions }
  - [ ${partition.partition_path}, ${partition.mount_path} ]
%{~ endfor }
%{~ else ~}
# yamllint disable rule:comments
#cloud-config
## in some cases novnc console access is required
## it requires ssh password to be set
#ssh_pwauth: yes
#chpasswd:
#  list: |
#    root:secret
#  expire: False

## in some cases direct root ssh access via ssh key is required
#disable_root: false

## in some cases additional CA certs are required
#ca-certs:
#  trusted: |
#    -----BEGIN CERTIFICATE-----
%{~ endif }
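For orientation, here is a sketch of the kind of value this new template consumes. Everything in it is illustrative, not taken from the diff: the node key, device paths, bounds and mount point are invented.

```hcl
# Hypothetical tfvars fragment: one extra disk on the node gets a single
# ext4 partition, which the template's runcmd then mounts at /var/lib/data.
k8s_nodes = {
  "1" = {
    az          = "nova"
    flavor      = "<flavor-uuid>"
    floating_ip = false
    cloudinit = {
      extra_partitions = [
        {
          volume_path     = "/dev/vdb"      # disk sgdisk/parted operate on (bootcmd)
          partition_path  = "/dev/vdb1"     # resulting partition, formatted with mkfs.ext4
          partition_start = "0%"
          partition_end   = "100%"
          mount_path      = "/var/lib/data" # created, chowned and mounted by runcmd
        }
      ]
    }
  }
}
```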
@@ -116,9 +116,48 @@ variable "k8s_allowed_egress_ips" {
  type = list
}

-variable "k8s_masters" {}
+variable "k8s_masters" {
+  type = map(object({
+    az                     = string
+    flavor                 = string
+    floating_ip            = bool
+    etcd                   = bool
+    image_id               = optional(string)
+    root_volume_size_in_gb = optional(number)
+    volume_type            = optional(string)
+    network_id             = optional(string)
+  }))
+}

-variable "k8s_nodes" {}
+variable "k8s_nodes" {
+  type = map(object({
+    az                       = string
+    flavor                   = string
+    floating_ip              = bool
+    extra_groups             = optional(string)
+    image_id                 = optional(string)
+    root_volume_size_in_gb   = optional(number)
+    volume_type              = optional(string)
+    network_id               = optional(string)
+    additional_server_groups = optional(list(string))
+    server_group             = optional(string)
+    cloudinit = optional(object({
+      extra_partitions = list(object({
+        volume_path     = string
+        partition_path  = string
+        partition_start = string
+        partition_end   = string
+        mount_path      = string
+      }))
+    }))
+  }))
+}

+variable "additional_server_groups" {
+  type = map(object({
+    policy = string
+  }))
+}

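To make the new object types concrete, here is a hedged tfvars sketch. All names and values are invented, and the wiring of `additional_server_groups` on a node to keys of the `additional_server_groups` map is my assumption, not something the diff states.

```hcl
# Hypothetical tfvars exercising the optional attributes above.
additional_server_groups = {
  "node-anti-affinity" = {
    policy = "soft-anti-affinity"
  }
}

k8s_nodes = {
  "1" = {
    az                       = "nova"
    flavor                   = "<flavor-uuid>"
    floating_ip              = false
    extra_groups             = "calico_rr"            # appended to kubespray_groups
    root_volume_size_in_gb   = 50                     # boot from volume instead of local disk
    volume_type              = "ssd"
    additional_server_groups = ["node-anti-affinity"] # assumed: keys of the map above
  }
}
```

Assuming `supplementary_node_groups` is empty, a node declared like this would end up with `kubespray_groups = "kube_node,k8s_cluster,no_floating,calico_rr"`, matching the expression in the instance resource earlier in the diff.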
variable "supplementary_master_groups" {
|
||||
default = ""
|
||||
@ -136,6 +175,10 @@ variable "worker_allowed_ports" {
|
||||
type = list
|
||||
}
|
||||
|
||||
variable "bastion_allowed_ports" {
|
||||
type = list
|
||||
}
|
||||
|
||||
variable "use_access_ip" {}
|
||||
|
||||
variable "master_server_group_policy" {
|
||||
@ -185,3 +228,7 @@ variable "port_security_enabled" {
|
||||
variable "force_null_port_security" {
|
||||
type = bool
|
||||
}
|
||||
|
||||
variable "private_subnet_id" {
|
||||
type = string
|
||||
}
|
||||
|
@@ -4,5 +4,5 @@ terraform {
      source = "terraform-provider-openstack/openstack"
    }
  }
-  required_version = ">= 0.12.26"
+  required_version = ">= 1.3.0"
}
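The version floor is presumably tied to the variable changes above: `optional()` attributes in object type constraints only became stable syntax in Terraform 1.3, which is why 0.12 can no longer parse this module. A minimal illustration, with a hypothetical variable name:

```hcl
# Requires Terraform >= 1.3: optional() inside an object type constraint.
variable "example" {
  type = object({
    required_field = string
    optional_field = optional(string)     # null when omitted
    with_default   = optional(number, 42) # 42 when omitted
  })
}
```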
@@ -44,4 +44,3 @@ resource "openstack_networking_floatingip_v2" "k8s_nodes" {
  pool       = var.floatingip_pool
-  depends_on = [null_resource.dummy_dependency]
}

@@ -257,6 +257,12 @@ variable "worker_allowed_ports" {
  ]
}

+variable "bastion_allowed_ports" {
+  type = list(any)
+
+  default = []
+}

variable "use_access_ip" {
  default = 1
}

@@ -294,6 +300,13 @@ variable "k8s_nodes" {
  default = {}
}

+variable "additional_server_groups" {
+  default = {}
+  type = map(object({
+    policy = string
+  }))
+}

variable "extra_sec_groups" {
  default = false
}
@@ -5,5 +5,5 @@ terraform {
      version = "~> 1.17"
    }
  }
-  required_version = ">= 0.12.26"
+  required_version = ">= 1.3.0"
}
@@ -194,9 +194,19 @@ def parse_bool(string_form):
    else:
        raise ValueError('could not convert %r to a bool' % string_form)

+def sanitize_groups(groups):
+    _groups = []
+    chars_to_replace = ['+', '-', '=', '.', '/', ' ']
+    for i in groups:
+        _i = i
+        for char in chars_to_replace:
+            _i = _i.replace(char, '_')
+        _groups.append(_i)
+    groups.clear()
+    groups.extend(_groups)

-@parses('metal_device')
-def metal_device(resource, tfvars=None):
+@parses('equinix_metal_device')
+def equinix_metal_device(resource, tfvars=None):
    raw_attrs = resource['primary']['attributes']
    name = raw_attrs['hostname']
    groups = []

@@ -220,7 +230,7 @@ def metal_device(resource, tfvars=None):
        'ipv6_address': raw_attrs['network.1.address'],
        'public_ipv6': raw_attrs['network.1.address'],
        'private_ipv4': raw_attrs['network.2.address'],
-        'provider': 'metal',
+        'provider': 'equinix',
    }

    if raw_attrs['operating_system'] == 'flatcar_stable':

@@ -228,13 +238,14 @@
        attrs.update({'ansible_ssh_user': 'core'})

    # add groups based on attrs
-    groups.append('metal_operating_system=' + attrs['operating_system'])
-    groups.append('metal_locked=%s' % attrs['locked'])
-    groups.append('metal_state=' + attrs['state'])
-    groups.append('metal_plan=' + attrs['plan'])
+    groups.append('equinix_metal_operating_system_%s' % attrs['operating_system'])
+    groups.append('equinix_metal_locked_%s' % attrs['locked'])
+    groups.append('equinix_metal_state_%s' % attrs['state'])
+    groups.append('equinix_metal_plan_%s' % attrs['plan'])

    # groups specific to kubespray
    groups = groups + attrs['tags']
+    sanitize_groups(groups)

    return name, attrs, groups

@@ -273,8 +284,6 @@ def openstack_host(resource, module_name):
        'network': parse_attr_list(raw_attrs, 'network'),
        'region': raw_attrs.get('region', ''),
        'security_groups': parse_list(raw_attrs, 'security_groups'),
-        # ansible
-        'ansible_ssh_port': 22,
        # workaround for an OpenStack bug where hosts have a different domain
        # after they're restarted
        'host_domain': 'novalocal',

@@ -289,6 +298,9 @@ def openstack_host(resource, module_name):
    if 'floating_ip' in raw_attrs:
        attrs['private_ipv4'] = raw_attrs['network.0.fixed_ip_v4']

+    if 'metadata.use_access_ip' in raw_attrs and raw_attrs['metadata.use_access_ip'] == "0":
+        attrs.pop('access_ip')

    try:
        if 'metadata.prefer_ipv6' in raw_attrs and raw_attrs['metadata.prefer_ipv6'] == "1":
            attrs.update({

@@ -307,7 +319,9 @@ def openstack_host(resource, module_name):

    # attrs specific to Ansible
    if 'metadata.ssh_user' in raw_attrs:
-        attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
+        attrs['ansible_user'] = raw_attrs['metadata.ssh_user']
+    if 'metadata.ssh_port' in raw_attrs:
+        attrs['ansible_port'] = raw_attrs['metadata.ssh_port']

    if 'volume.#' in list(raw_attrs.keys()) and int(raw_attrs['volume.#']) > 0:
        device_index = 1

@@ -334,6 +348,8 @@ def openstack_host(resource, module_name):
    for group in attrs['metadata'].get('kubespray_groups', "").split(","):
        groups.append(group)

+    sanitize_groups(groups)
+
    return name, attrs, groups

@@ -136,4 +136,8 @@ terraform destroy --var-file cluster-settings.tfvars \
* `loadbalancer_plan`: Plan to use for load balancer *(development|production-small)*
* `loadbalancers`: Ports to load balance and which machines to forward to. Key of this object will be used as the name of the load balancer frontends/backends
  * `port`: Port to load balance.
+  * `target_port`: Port to the backend servers.
  * `backend_servers`: List of servers that traffic to the port should be forwarded to.
+* `server_groups`: Group servers together
+  * `servers`: The servers that should be included in the group.
+  * `anti_affinity`: If anti-affinity should be enabled, try to spread the VMs out on separate nodes.
@@ -121,6 +121,7 @@ loadbalancer_plan = "development"
loadbalancers = {
  # "http" : {
  #   "port" : 80,
+  #   "target_port" : 80,
  #   "backend_servers" : [
  #     "worker-0",
  #     "worker-1",

@@ -128,3 +129,20 @@ loadbalancers = {
  #   ]
  # }
}

+server_groups = {
+  # "control-plane" = {
+  #   servers = [
+  #     "master-0"
+  #   ]
+  #   anti_affinity = true
+  # },
+  # "workers" = {
+  #   servers = [
+  #     "worker-0",
+  #     "worker-1",
+  #     "worker-2"
+  #   ]
+  #   anti_affinity = true
+  # }
+}
@@ -34,6 +34,8 @@ module "kubernetes" {
  loadbalancer_enabled = var.loadbalancer_enabled
  loadbalancer_plan    = var.loadbalancer_plan
  loadbalancers        = var.loadbalancers
+
+  server_groups = var.server_groups
}

#

@@ -13,7 +13,7 @@ locals {
  lb_backend_servers = flatten([
    for lb_name, loadbalancer in var.loadbalancers : [
      for backend_server in loadbalancer.backend_servers : {
-        port        = loadbalancer.port
+        port        = loadbalancer.target_port
        lb_name     = lb_name
        server_name = backend_server
      }
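The one-word change above matters because this flattened list is what the backend members are built from. A sketch of how it evaluates, with invented server names and ports:

```hcl
# Given (hypothetical):
#   loadbalancers = {
#     http = { port = 80, target_port = 30080, backend_servers = ["worker-0", "worker-1"] }
#   }
# local.lb_backend_servers evaluates to:
#   [
#     { port = 30080, lb_name = "http", server_name = "worker-0" },
#     { port = 30080, lb_name = "http", server_name = "worker-1" },
#   ]
# i.e. members are now registered against the backend target_port (30080)
# rather than the frontend port (80).
```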
@@ -251,8 +251,8 @@ resource "upcloud_firewall_rules" "master" {
    content {
      action                 = "accept"
      comment                = "UpCloud DNS"
-      destination_port_end   = "53"
-      destination_port_start = "53"
+      source_port_end        = "53"
+      source_port_start      = "53"
      direction              = "in"
      family                 = "IPv4"
      protocol               = firewall_rule.value

@@ -267,8 +267,8 @@ resource "upcloud_firewall_rules" "master" {
    content {
      action                 = "accept"
      comment                = "UpCloud DNS"
-      destination_port_end   = "53"
-      destination_port_start = "53"
+      source_port_end        = "53"
+      source_port_start      = "53"
      direction              = "in"
      family                 = "IPv4"
      protocol               = firewall_rule.value

@@ -283,8 +283,8 @@ resource "upcloud_firewall_rules" "master" {
    content {
      action                 = "accept"
      comment                = "UpCloud DNS"
-      destination_port_end   = "53"
-      destination_port_start = "53"
+      source_port_end        = "53"
+      source_port_start      = "53"
      direction              = "in"
      family                 = "IPv6"
      protocol               = firewall_rule.value

@@ -299,8 +299,8 @@ resource "upcloud_firewall_rules" "master" {
    content {
      action                 = "accept"
      comment                = "UpCloud DNS"
-      destination_port_end   = "53"
-      destination_port_start = "53"
+      source_port_end        = "53"
+      source_port_start      = "53"
      direction              = "in"
      family                 = "IPv6"
      protocol               = firewall_rule.value

@@ -315,8 +315,8 @@ resource "upcloud_firewall_rules" "master" {
    content {
      action                 = "accept"
      comment                = "NTP Port"
-      destination_port_end   = "123"
-      destination_port_start = "123"
+      source_port_end        = "123"
+      source_port_start      = "123"
      direction              = "in"
      family                 = "IPv4"
      protocol               = firewall_rule.value

@@ -325,6 +325,20 @@
    }
  }

+  dynamic firewall_rule {
+    for_each = var.firewall_default_deny_in ? ["udp"] : []
+
+    content {
+      action            = "accept"
+      comment           = "NTP Port"
+      source_port_end   = "123"
+      source_port_start = "123"
+      direction         = "in"
+      family            = "IPv6"
+      protocol          = firewall_rule.value
+    }
+  }

  firewall_rule {
    action    = var.firewall_default_deny_in ? "drop" : "accept"
    direction = "in"

@@ -394,8 +408,8 @@ resource "upcloud_firewall_rules" "k8s" {
    content {
      action                 = "accept"
      comment                = "UpCloud DNS"
-      destination_port_end   = "53"
-      destination_port_start = "53"
+      source_port_end        = "53"
+      source_port_start      = "53"
      direction              = "in"
      family                 = "IPv4"
      protocol               = firewall_rule.value

@@ -410,8 +424,8 @@ resource "upcloud_firewall_rules" "k8s" {
    content {
      action                 = "accept"
      comment                = "UpCloud DNS"
-      destination_port_end   = "53"
-      destination_port_start = "53"
+      source_port_end        = "53"
+      source_port_start      = "53"
      direction              = "in"
      family                 = "IPv4"
      protocol               = firewall_rule.value

@@ -426,8 +440,8 @@ resource "upcloud_firewall_rules" "k8s" {
    content {
      action                 = "accept"
      comment                = "UpCloud DNS"
-      destination_port_end   = "53"
-      destination_port_start = "53"
+      source_port_end        = "53"
+      source_port_start      = "53"
      direction              = "in"
      family                 = "IPv6"
      protocol               = firewall_rule.value

@@ -442,8 +456,8 @@ resource "upcloud_firewall_rules" "k8s" {
    content {
      action                 = "accept"
      comment                = "UpCloud DNS"
-      destination_port_end   = "53"
-      destination_port_start = "53"
+      source_port_end        = "53"
+      source_port_start      = "53"
      direction              = "in"
      family                 = "IPv6"
      protocol               = firewall_rule.value

@@ -458,8 +472,8 @@ resource "upcloud_firewall_rules" "k8s" {
    content {
      action                 = "accept"
      comment                = "NTP Port"
-      destination_port_end   = "123"
-      destination_port_start = "123"
+      source_port_end        = "123"
+      source_port_start      = "123"
      direction              = "in"
      family                 = "IPv4"
      protocol               = firewall_rule.value

@@ -468,6 +482,20 @@
    }
  }

+  dynamic firewall_rule {
+    for_each = var.firewall_default_deny_in ? ["udp"] : []
+
+    content {
+      action            = "accept"
+      comment           = "NTP Port"
+      source_port_end   = "123"
+      source_port_start = "123"
+      direction         = "in"
+      family            = "IPv6"
+      protocol          = firewall_rule.value
+    }
+  }

  firewall_rule {
    action    = var.firewall_default_deny_in ? "drop" : "accept"
    direction = "in"

@@ -520,3 +548,11 @@ resource "upcloud_loadbalancer_static_backend_member" "lb_backend_member" {
  max_sessions = var.loadbalancer_plan == "production-small" ? 50000 : 1000
  enabled      = true
}

+resource "upcloud_server_group" "server_groups" {
+  for_each      = var.server_groups
+  title         = each.key
+  anti_affinity = each.value.anti_affinity
+  labels        = {}
+  members       = [for server in each.value.servers : merge(upcloud_server.master, upcloud_server.worker)[server].id]
+}
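A note on the `members` expression in the new resource above: merging the two server maps lets one group mix masters and workers, looked up by machine name. A sketch with assumed names:

```hcl
# Hypothetical input:
#   server_groups = {
#     workers = { anti_affinity = true, servers = ["worker-0", "worker-1"] }
#   }
# merge(upcloud_server.master, upcloud_server.worker) yields a single map
# keyed by machine name, so each entry in servers resolves to an instance
# ID regardless of whether it was created as a master or a worker.
```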
@@ -90,6 +90,16 @@ variable "loadbalancers" {

  type = map(object({
    port            = number
+    target_port     = number
    backend_servers = list(string)
  }))
}

+variable "server_groups" {
+  description = "Server groups"
+
+  type = map(object({
+    anti_affinity = bool
+    servers       = list(string)
+  }))
+}
@@ -3,7 +3,7 @@ terraform {
  required_providers {
    upcloud = {
      source  = "UpCloudLtd/upcloud"
-      version = "~>2.5.0"
+      version = "~>2.7.1"
    }
  }
  required_version = ">= 0.13"

@@ -122,6 +122,7 @@ loadbalancer_plan = "development"
loadbalancers = {
  # "http" : {
  #   "port" : 80,
+  #   "target_port" : 80,
  #   "backend_servers" : [
  #     "worker-0",
  #     "worker-1",

@@ -129,3 +130,20 @@ loadbalancers = {
  #   ]
  # }
}

+server_groups = {
+  # "control-plane" = {
+  #   servers = [
+  #     "master-0"
+  #   ]
+  #   anti_affinity = true
+  # },
+  # "workers" = {
+  #   servers = [
+  #     "worker-0",
+  #     "worker-1",
+  #     "worker-2"
+  #   ]
+  #   anti_affinity = true
+  # }
+}
@@ -126,7 +126,19 @@ variable "loadbalancers" {

  type = map(object({
    port            = number
+    target_port     = number
    backend_servers = list(string)
  }))
  default = {}
}

+variable "server_groups" {
+  description = "Server groups"
+
+  type = map(object({
+    anti_affinity = bool
+    servers       = list(string)
+  }))
+
+  default = {}
+}

@@ -3,7 +3,7 @@ terraform {
  required_providers {
    upcloud = {
      source  = "UpCloudLtd/upcloud"
-      version = "~>2.5.0"
+      version = "~>2.7.1"
    }
  }
  required_version = ">= 0.13"

@@ -35,9 +35,7 @@ This setup assumes that the DHCP is disabled in the vSphere cluster and IP addre

## Requirements

-* Terraform 0.13.0 or newer
-
-*0.12 also works if you modify the provider block to include version and remove all `versions.tf` files*
+* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include version and remove all `versions.tf` files)

## Quickstart

@@ -23,7 +23,9 @@ variable "vsphere_datastore" {}

variable "vsphere_user" {}

-variable "vsphere_password" {}
+variable "vsphere_password" {
+  sensitive = true
+}

variable "vsphere_server" {}

@@ -4,12 +4,6 @@ terraform {
      source  = "hashicorp/vsphere"
      version = ">= 1.24.3"
    }
-    null = {
-      source = "hashicorp/null"
-    }
-    template = {
-      source = "hashicorp/template"
-    }
  }
  required_version = ">= 0.13"
}
@@ -37,6 +37,8 @@
  * [CentOS/OracleLinux/AlmaLinux/Rocky Linux](docs/centos.md)
  * [Kylin Linux Advanced Server V10](docs/kylinlinux.md)
  * [Amazon Linux 2](docs/amazonlinux.md)
+  * [UOS Linux](docs/uoslinux.md)
+  * [openEuler notes](docs/openeuler.md)
* CRI
  * [Containerd](docs/containerd.md)
  * [Docker](docs/docker.md)
@@ -5,7 +5,7 @@ Amazon Linux is supported with docker,containerd and cri-o runtimes.
**Note:** that Amazon Linux is not currently covered in kubespray CI and
support for it is currently considered experimental.

-Amazon Linux 2, while derrived from the Redhat OS family, does not keep in
+Amazon Linux 2, while derived from the Redhat OS family, does not keep in
sync with RHEL upstream like CentOS/AlmaLinux/Oracle Linux. In order to use
Amazon Linux as the ansible host for your kubespray deployments you need to
manually install `python3` and deploy ansible and kubespray dependencies in
docs/ansible.md
@@ -3,7 +3,7 @@
## Installing Ansible

Kubespray supports multiple ansible versions and ships different `requirements.txt` files for them.
-Depending on your available python version you may be limited in chooding which ansible version to use.
+Depending on your available python version you may be limited in choosing which ansible version to use.

It is recommended to deploy the ansible version used by kubespray into a python virtual environment.

@@ -15,9 +15,6 @@ virtualenv --python=$(which python3) $VENVDIR
source $VENVDIR/bin/activate
cd $KUBESPRAYDIR
pip install -U -r requirements-$ANSIBLE_VERSION.txt
-test -f requirements-$ANSIBLE_VERSION.yml && \
-  ansible-galaxy role install -r requirements-$ANSIBLE_VERSION.yml && \
-  ansible-galaxy collection -r requirements-$ANSIBLE_VERSION.yml
```

### Ansible Python Compatibility
@@ -25,7 +22,7 @@ test -f requirements-$ANSIBLE_VERSION.yml && \
Based on the table below and the available python version for your ansible host you should choose the appropriate ansible version to use with kubespray.

| Ansible Version | Python Version |
-| --------------- | -------------- |
+|-----------------|----------------|
| 2.11            | 2.7,3.5-3.9    |
| 2.12            | 3.8-3.10       |

@@ -104,135 +101,134 @@ the `-e` runtime flags (most simple way) or other layers described in the docs.
Kubespray uses only a few layers to override things (or expect them to
be overridden for roles):

| Layer                                  | Comment                                                                      |
|----------------------------------------|------------------------------------------------------------------------------|
| **role defaults**                      | provides best UX to override things for Kubespray deployments               |
| inventory vars                         | Unused                                                                       |
| **inventory group_vars**               | Expects users to use ``all.yml``,``k8s_cluster.yml`` etc. to override things |
| inventory host_vars                    | Unused                                                                       |
| playbook group_vars                    | Unused                                                                       |
| playbook host_vars                     | Unused                                                                       |
| **host facts**                         | Kubespray overrides for internal roles' logic, like state flags              |
| play vars                              | Unused                                                                       |
| play vars_prompt                       | Unused                                                                       |
| play vars_files                        | Unused                                                                       |
| registered vars                        | Unused                                                                       |
| set_facts                              | Kubespray overrides those, for some places                                  |
| **role and include vars**              | Provides bad UX to override things! Use extra vars to enforce                |
| block vars (only for tasks in block)   | Kubespray overrides for internal roles' logic                                |
| task vars (only for the task)          | Unused for roles, but only for helper scripts                                |
| **extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml``                               |

## Ansible tags

The following tags are defined in playbooks:

| Tag name                        | Used for                                               |
|---------------------------------|--------------------------------------------------------|
| annotate                        | Create kube-router annotation                          |
| apps                            | K8s apps definitions                                   |
| asserts                         | Check tasks for download role                          |
| aws-ebs-csi-driver              | Configuring csi driver: aws-ebs                        |
| azure-csi-driver                | Configuring csi driver: azure                          |
| bastion                         | Setup ssh config for bastion                           |
| bootstrap-os                    | Anything related to host OS configuration              |
| calico                          | Network plugin Calico                                  |
| calico_rr                       | Configuring Calico route reflector                     |
-| canal                          | Network plugin Canal                                   |
| cephfs-provisioner              | Configuring CephFS                                     |
| cert-manager                    | Configuring certificate manager for K8s                |
| cilium                          | Network plugin Cilium                                  |
| cinder-csi-driver               | Configuring csi driver: cinder                         |
| client                          | Kubernetes clients role                                |
| cloud-provider                  | Cloud-provider related tasks                           |
| cluster-roles                   | Configuring cluster wide application (psp ...)         |
| cni                             | CNI plugins for Network Plugins                        |
| containerd                      | Configuring containerd engine runtime for hosts        |
| container_engine_accelerator    | Enable nvidia accelerator for runtimes                 |
| container-engine                | Configuring container engines                          |
| container-runtimes              | Configuring container runtimes                         |
| coredns                         | Configuring coredns deployment                         |
| crio                            | Configuring crio container engine for hosts            |
| crun                            | Configuring crun runtime                               |
| csi-driver                      | Configuring csi driver                                 |
| dashboard                       | Installing and configuring the Kubernetes Dashboard    |
| dns                             | Remove dns entries when resetting                      |
| docker                          | Configuring docker engine runtime for hosts            |
| download                        | Fetching container images to a delegate host           |
| etcd                            | Configuring etcd cluster                               |
| etcd-secrets                    | Configuring etcd certs/keys                            |
| etchosts                        | Configuring /etc/hosts entries for hosts               |
| external-cloud-controller       | Configure cloud controllers                            |
| external-openstack              | Cloud controller : openstack                           |
| external-provisioner            | Configure external provisioners                        |
| external-vsphere                | Cloud controller : vsphere                             |
| facts                           | Gathering facts and misc check results                 |
| files                           | Remove files when resetting                            |
| flannel                         | Network plugin flannel                                 |
| gce                             | Cloud-provider GCP                                     |
| gcp-pd-csi-driver               | Configuring csi driver: gcp-pd                         |
| gvisor                          | Configuring gvisor runtime                             |
| helm                            | Installing and configuring Helm                        |
| ingress-controller              | Configure ingress controllers                          |
| ingress_alb                     | AWS ALB Ingress Controller                             |
| init                            | Windows kubernetes init nodes                          |
| iptables                        | Flush and clear iptable when resetting                 |
| k8s-pre-upgrade                 | Upgrading K8s cluster                                  |
| k8s-secrets                     | Configuring K8s certs/keys                             |
| k8s-gen-tokens                  | Configuring K8s tokens                                 |
| kata-containers                 | Configuring kata-containers runtime                    |
| krew                            | Install and manage krew                                |
| kubeadm                         | Roles linked to kubeadm tasks                          |
| kube-apiserver                  | Configuring static pod kube-apiserver                  |
| kube-controller-manager         | Configuring static pod kube-controller-manager         |
| kube-vip                        | Installing and configuring kube-vip                    |
| kubectl                         | Installing kubectl and bash completion                 |
| kubelet                         | Configuring kubelet service                            |
| kube-ovn                        | Network plugin kube-ovn                                |
| kube-router                     | Network plugin kube-router                             |
| kube-proxy                      | Configuring static pod kube-proxy                      |
| localhost                       | Special steps for the localhost (ansible runner)       |
| local-path-provisioner          | Configure External provisioner: local-path             |
| local-volume-provisioner        | Configure External provisioner: local-volume           |
| macvlan                         | Network plugin macvlan                                 |
| master                          | Configuring K8s master node role                       |
| metallb                         | Installing and configuring metallb                     |
| metrics_server                  | Configuring metrics_server                             |
| netchecker                      | Installing netchecker K8s app                          |
| network                         | Configuring networking plugins for K8s                 |
| mounts                          | Umount kubelet dirs when resetting                     |
| multus                          | Network plugin multus                                  |
| nginx                           | Configuring LB for kube-apiserver instances            |
| node                            | Configuring K8s minion (compute) node role             |
| nodelocaldns                    | Configuring nodelocaldns daemonset                     |
| node-label                      | Tasks linked to labeling of nodes                      |
| node-webhook                    | Tasks linked to webhook (granting access to resources) |
| nvidia_gpu                      | Enable nvidia accelerator for runtimes                 |
| oci                             | Cloud provider: oci                                    |
| persistent_volumes              | Configure csi volumes                                  |
| persistent_volumes_aws_ebs_csi  | Configuring csi driver: aws-ebs                        |
| persistent_volumes_cinder_csi   | Configuring csi driver: cinder                         |
| persistent_volumes_gcp_pd_csi   | Configuring csi driver: gcp-pd                         |
| persistent_volumes_openstack    | Configuring csi driver: openstack                      |
| policy-controller               | Configuring Calico policy controller                   |
| post-remove                     | Tasks running post-remove operation                    |
| post-upgrade                    | Tasks running post-upgrade operation                   |
| pre-remove                      | Tasks running pre-remove operation                     |
| pre-upgrade                     | Tasks running pre-upgrade operation                    |
| preinstall                      | Preliminary configuration steps                        |
| registry                        | Configuring local docker registry                      |
| reset                           | Tasks running doing the node reset                     |
| resolvconf                      | Configuring /etc/resolv.conf for hosts/apps            |
| rbd-provisioner                 | Configure External provisioner: rdb                    |
| services                        | Remove services (etcd, kubelet etc...) when resetting  |
| snapshot                        | Enabling csi snapshot                                  |
| snapshot-controller             | Configuring csi snapshot controller                    |
| upgrade                         | Upgrading, f.e. container images/binaries              |
| upload                          | Distributing images/binaries across hosts              |
| vsphere-csi-driver              | Configuring csi driver: vsphere                        |
| weave                           | Network plugin Weave                                   |
| win_nodes                       | Running windows specific tasks                         |
| youki                           | Configuring youki runtime                              |

Note: Use the ``bash scripts/gen_tags.sh`` command to generate a list of all
tags found in the codebase. New tags will be listed with the empty "Used for"
@@ -267,7 +263,7 @@ Note: use `--tags` and `--skip-tags` wise and only if you're 100% sure what you'
## Bastion host

If you prefer to not make your nodes publicly accessible (nodes with private IPs only),
-you can use a so called *bastion* host to connect to your nodes. To specify and use a bastion,
+you can use a so-called _bastion_ host to connect to your nodes. To specify and use a bastion,
simply add a line to your inventory, where you have to replace x.x.x.x with the public IP of the
bastion host.

@@ -281,7 +277,7 @@ For more information about Ansible and bastion hosts, read

## Mitogen

-Mitogen support is deprecated, please see [mitogen related docs](/docs/mitogen.md) for useage and reasons for deprecation.
+Mitogen support is deprecated, please see [mitogen related docs](/docs/mitogen.md) for usage and reasons for deprecation.

## Beyond ansible 2.9

@@ -290,7 +286,7 @@ two projects which are now joined under the Ansible umbrella.

Ansible-base (2.10.x branch) will contain just the ansible language implementation while
ansible modules that were previously bundled into a single repository will be part of the
-ansible 3.x package. Pleasee see [this blog post](https://blog.while-true-do.io/ansible-release-3-0-0/)
+ansible 3.x package. Please see [this blog post](https://blog.while-true-do.io/ansible-release-3-0-0/)
that explains in detail the need and the evolution plan.

**Note:** this change means that ansible virtual envs cannot be upgraded with `pip install -U`.
docs/ansible_collection.md (new file)
@@ -0,0 +1,38 @@
# Ansible collection

Kubespray can be installed as an [Ansible collection](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html).

## Requirements

- An inventory file with the appropriate host groups. See the [README](../README.md#usage).
- A `group_vars` directory. These group variables **need** to match the appropriate variable names under `inventory/local/group_vars`. See the [README](../README.md#usage).

## Usage

1. Add Kubespray to your requirements.yml file

   ```yaml
   collections:
     - name: https://github.com/kubernetes-sigs/kubespray
       type: git
       version: v2.21.0
   ```

2. Install your collection

   ```ShellSession
   ansible-galaxy install -r requirements.yml
   ```

3. Create a playbook to install your Kubernetes cluster

   ```yaml
   - name: Install Kubernetes
     ansible.builtin.import_playbook: kubernetes_sigs.kubespray.cluster
   ```

4. Update INVENTORY and PLAYBOOK so that they point to your inventory file and the playbook you created above, and then install Kubespray

   ```ShellSession
   ansible-playbook -i INVENTORY --become --become-user=root PLAYBOOK
   ```
@@ -7,7 +7,7 @@ The following table shows the impact of the CPU architecture on compatible featu
- amd64 + arm64: Cluster with a mix of x86/amd64 and arm64 CPUs

| kube_network_plugin | amd64 | arm64 | amd64 + arm64 |
-| ------------------- | ----- | ----- | ------------- |
+|---------------------|-------|-------|---------------|
| Calico              | Y     | Y     | Y             |
| Weave               | Y     | Y     | Y             |
| Flannel             | Y     | N     | N             |
docs/aws.md
@@ -67,15 +67,15 @@ export REGION="us-east-2"

Declare the cloud config variables for the `aws` provider as follows. Setting these variables are optional and depend on your use case.

| Variable | Type | Comment |
|---|---|---|
| aws_zone | string | Force set the AWS zone. Recommended to leave blank. |
| aws_vpc | string | The AWS VPC flag enables the possibility to run the master components on a different aws account, on a different cloud provider or on-premise. If the flag is set also the KubernetesClusterTag must be provided |
| aws_subnet_id | string | SubnetID enables using a specific subnet to use for ELB's |
| aws_route_table_id | string | RouteTableID enables using a specific RouteTable |
| aws_role_arn | string | RoleARN is the IAM role to assume when interaction with AWS APIs |
| aws_kubernetes_cluster_tag | string | KubernetesClusterTag is the legacy cluster id we'll use to identify our cluster resources |
| aws_kubernetes_cluster_id | string | KubernetesClusterID is the cluster id we'll use to identify our cluster resources |
| aws_disable_security_group_ingress | bool | The aws provider creates an inbound rule per load balancer on the node security group. However, this can run into the AWS security group rule limit of 50 if many LoadBalancers are created. This flag disables the automatic ingress creation. It requires that the user has setup a rule that allows inbound traffic on kubelet ports from the local VPC subnet (so load balancers can access it). E.g. 10.82.0.0/16 30000-32000. |
| aws_elb_security_group | string | Only in Kubelet version >= 1.7 : AWS has a hard limit of 500 security groups. For large clusters creating a security group for each ELB can cause the max number of security groups to be reached. If this is set instead of creating a new Security group for each ELB this security group will be used instead. |
| aws_disable_strict_zone_check | bool | During the instantiation of an new AWS cloud provider, the detected region is validated against a known set of regions. In a non-standard, AWS like environment (e.g. Eucalyptus), this check may be undesirable. Setting this to true will disable the check and provide a warning that the check was skipped. Please note that this is an experimental feature and work-in-progress for the moment. |
@@ -14,7 +14,7 @@ If you want to deploy the Azure Disk storage class to provision volumes dynamica

Before creating the instances you must first set the `azure_csi_` variables in the `group_vars/all.yml` file.

-All of the values can be retrieved using the azure cli tool which can be downloaded here: <https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest>
+All values can be retrieved using the azure cli tool which can be downloaded here: <https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest>

After installation you have to run `az login` to get access to your account.

@@ -34,7 +34,7 @@ The name of the resource group your instances are in, a list of your resource gr

Or you can do `az vm list | grep resourceGroup` and get the resource group corresponding to the VMs of your cluster.

-The resource group name is not case sensitive.
+The resource group name is not case-sensitive.

### azure\_csi\_vnet\_name

@@ -57,19 +57,28 @@ The name of the network security group your instances are in, can be retrieved v
These will have to be generated first:

- Create an Azure AD Application with:
-  `az ad app create --display-name kubespray --identifier-uris http://kubespray --homepage http://kubespray.com --password CLIENT_SECRET`
+
+  ```ShellSession
+  az ad app create --display-name kubespray --identifier-uris http://kubespray --homepage http://kubespray.com --password CLIENT_SECRET
+  ```

  Display name, identifier-uri, homepage and the password can be chosen

  Note the AppId in the output.

- Create Service principal for the application with:
-  `az ad sp create --id AppId`
+
+  ```ShellSession
+  az ad sp create --id AppId
+  ```

  This is the AppId from the last command

- Create the role assignment with:
-  `az role assignment create --role "Owner" --assignee http://kubespray --subscription SUBSCRIPTION_ID`
+
+  ```ShellSession
+  az role assignment create --role "Owner" --assignee http://kubespray --subscription SUBSCRIPTION_ID
+  ```

azure\_csi\_aad\_client\_id must be set to the AppId, azure\_csi\_aad\_client\_secret is your chosen secret.

@@ -10,7 +10,7 @@ Not all features are supported yet though, for a list of the current status have

Before creating the instances you must first set the `azure_` variables in the `group_vars/all/all.yml` file.

-All of the values can be retrieved using the Azure CLI tool which can be downloaded here: <https://docs.microsoft.com/en-gb/cli/azure/install-azure-cli>
+All values can be retrieved using the Azure CLI tool which can be downloaded here: <https://docs.microsoft.com/en-gb/cli/azure/install-azure-cli>
After installation you have to run `az login` to get access to your account.

### azure_cloud
@@ -71,14 +71,27 @@ The name of the resource group that contains the route table. Defaults to `azur
These will have to be generated first:

- Create an Azure AD Application with:
-  `az ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET`
+
+  ```ShellSession
+  az ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET
+  ```

  display name, identifier-uri, homepage and the password can be chosen
  Note the AppId in the output.

- Create Service principal for the application with:
-  `az ad sp create --id AppId`
+
+  ```ShellSession
+  az ad sp create --id AppId
+  ```

  This is the AppId from the last command

- Create the role assignment with:
-  `az role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID`
+
+  ```ShellSession
+  az role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID
+  ```

azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.

@@ -48,11 +48,13 @@ The `kubespray-defaults` role is expected to be run before this role.

Remember to disable fact gathering since Python might not be present on hosts.

-    - hosts: all
+```yaml
+- hosts: all
  gather_facts: false # not all hosts might be able to run modules yet
  roles:
    - kubespray-defaults
    - bootstrap-os
+```

## License

@@ -72,9 +72,14 @@ calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112

In some cases you may want to route the pods subnet and so NAT is not needed on the nodes.
For instance if you have a cluster spread on different locations and you want your pods to talk each other no matter where they are located.
-The following variables need to be set:
-`peer_with_router` to enable the peering with the datacenter's border router (default value: false).
-you'll need to edit the inventory and add a hostvar `local_as` by node.
+The following variables need to be set as follow:
+
+```yml
+peer_with_router: true # enable the peering with the datacenter's border router (default value: false).
+nat_outgoing: false # (optional) NAT outgoing (default value: true).
+```
+
+And you'll need to edit the inventory and add a hostvar `local_as` by node.

```ShellSession
node1 ansible_ssh_host=95.54.0.12 local_as=xxxxxx
@@ -124,8 +129,7 @@ You need to edit your inventory and add:
* `calico_rr` group with nodes in it. `calico_rr` can be combined with
  `kube_node` and/or `kube_control_plane`. `calico_rr` group also must be a child
  group of `k8s_cluster` group.
-* `cluster_id` by route reflector node/group (see details
-  [here](https://hub.docker.com/r/calico/routereflector/))
+* `cluster_id` by route reflector node/group (see details [here](https://hub.docker.com/r/calico/routereflector/))

Here's an example of Kubespray inventory with standalone route reflectors:

||||
@ -172,6 +176,8 @@ node5
|
||||
|
||||
[rack0:vars]
|
||||
cluster_id="1.0.0.1"
|
||||
calico_rr_id=rr1
|
||||
calico_group_id=rr1
|
||||
```
|
||||
|
||||
The inventory above will deploy the following topology assuming that calico's
|
||||
@@ -181,7 +187,7 @@ The inventory above will deploy the following topology assuming that calico's

### Optional : Define default endpoint to host action

-By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in kubernetes the action has to be changed to RETURN (default in kubespray) or ACCEPT (see <https://github.com/projectcalico/felix/issues/660> and <https://github.com/projectcalico/calicoctl/issues/1389).> Otherwise all network packets from pods (with hostNetwork=False) to services endpoints (with hostNetwork=True) within the same node are dropped.
+By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in kubernetes the action has to be changed to RETURN (default in kubespray) or ACCEPT (see <https://docs.tigera.io/calico/latest/network-policy/hosts/protect-hosts#control-default-behavior-of-workload-endpoint-to-host-traffic> ) Otherwise all network packets from pods (with hostNetwork=False) to services endpoints (with hostNetwork=True) within the same node are dropped.

To re-define default action please set the following variable in your inventory:

||||
@ -199,6 +205,14 @@ To re-define health host please set the following variable in your inventory:
|
||||
calico_healthhost: "0.0.0.0"
|
||||
```
|
||||
|
||||
### Optional : Configure VXLAN hardware Offload
|
||||
|
||||
The VXLAN Offload is disable by default. It can be configured like this to enabled it:
|
||||
|
||||
```yml
|
||||
calico_feature_detect_override: "ChecksumOffloadBroken=false" # The vxlan offload will enabled (It may cause problem on buggy NIC driver)
|
||||
```
|
||||
|
||||
### Optional : Configure Calico Node probe timeouts
|
||||
|
||||
Under certain conditions a deployer may need to tune the Calico liveness and readiness probes timeout settings. These can be configured like this:
|
||||
@@ -212,7 +226,7 @@ calico_node_readinessprobe_timeout: 10

Calico supports two types of encapsulation: [VXLAN and IP in IP](https://docs.projectcalico.org/v3.11/networking/vxlan-ipip). VXLAN is the more mature implementation and enabled by default, please check your environment if you need *IP in IP* encapsulation.

-*IP in IP* and *VXLAN* is mutualy exclusive modes.
+*IP in IP* and *VXLAN* are mutually exclusive modes.

Kubespray defaults have changed after version 2.18 from auto-enabling `ipip` mode to auto-enabling `vxlan`. This was done to facilitate wider deployment scenarios including those where vxlan acceleration is provided by the underlying network devices.

@@ -221,6 +235,8 @@ If you are running your cluster with the default calico settings and are upgradi
* perform a manual migration to vxlan before upgrading kubespray (see migrating from IP in IP to VXLAN below)
* pin the pre-2.19 settings in your ansible inventory (see IP in IP mode settings below)

+**Note:** VXLAN in IPv6 is only supported when kernel >= 3.12. If your kernel version is < 3.12, please don't set `calico_vxlan_mode_ipv6: vxlanAlways`. For more details see [issue 6877](https://github.com/projectcalico/calico/issues/6877).
+
### IP in IP mode

To configure Ip in Ip mode you need to use the bird network backend.
@@ -245,14 +261,14 @@ calico_network_backend: 'bird'

If you would like to migrate from the old IP in IP with `bird` network backends default to the new VXLAN based encapsulation you need to perform this change before running an upgrade of your cluster; the `cluster.yml` and `upgrade-cluster.yml` playbooks will refuse to continue if they detect incompatible settings.

-Execute the following sters on one of the control plane nodes, ensure the cluster in healthy before proceeding.
+Execute the following steps on one of the control plane nodes, ensure the cluster in healthy before proceeding.

```shell
calicoctl.sh patch felixconfig default -p '{"spec":{"vxlanEnabled":true}}'
calicoctl.sh patch ippool default-pool -p '{"spec":{"ipipMode":"Never", "vxlanMode":"Always"}}'
```

-**Note:** if you created multiple ippools you will need to patch all of them individually to change their encapsulation. The kubespray playbooks only handle the default ippool creaded by kubespray.
+**Note:** if you created multiple ippools you will need to patch all of them individually to change their encapsulation. The kubespray playbooks only handle the default ippool created by kubespray.

Wait for the `vxlan.calico` interfaces to be created on all cluster nodes and traffic to be routed through it then you can disable `ipip`.

@ -360,16 +376,11 @@ Calico node, typha and kube-controllers need to be able to talk to the kubernete
|
||||
|
||||
Kubespray sets up the `kubernetes-services-endpoint` configmap based on the contents of the `loadbalancer_apiserver` inventory variable documented in [HA Mode](/docs/ha-mode.md).

If no external loadbalancer is used, Calico eBPF can also use the localhost loadbalancer option. This works only if you use the same port for the localhost apiserver loadbalancer and the kube-apiserver. In this case Calico Automatic Host Endpoints need to be enabled to allow services like `coredns` and `metrics-server` to communicate with the kubernetes host endpoint. See [this blog post](https://www.projectcalico.org/securing-kubernetes-nodes-with-calico-automatic-host-endpoints/) on enabling automatic host endpoints.

```yaml
loadbalancer_apiserver_localhost: true
use_localhost_as_kubeapi_loadbalancer: true
```

### Tunneled versus Direct Server Return

By default Calico uses Tunneled service mode, but it can use direct server return (DSR) to optimize the return path for a service.

To configure DSR:
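
The snippet that follows is cut off in this diff view. As a sketch, assuming `calico_bpf_service_mode` is the relevant kubespray inventory variable (the name is an assumption, not confirmed by this diff) and a hypothetical inventory path:

```shell
# Both the variable name and the inventory path are assumptions
cat >> inventory/mycluster/group_vars/k8s_cluster/k8s-net-calico.yml <<EOF
calico_bpf_service_mode: "DSR"
EOF
```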

@@ -395,7 +406,7 @@ Please see [Calico eBPF troubleshooting guide](https://docs.projectcalico.org/ma

## Wireguard Encryption

Calico supports using Wireguard for encryption. Please see the docs on [encrypting cluster pod traffic](https://docs.projectcalico.org/security/encrypt-cluster-pod-traffic).

To enable wireguard support:
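
The variable that follows is cut off in this diff view, but `calico_wireguard_enabled` appears in the CI settings later in this document; a minimal sketch with a hypothetical inventory path:

```shell
# The inventory path is a placeholder
cat >> inventory/mycluster/group_vars/k8s_cluster/k8s-net-calico.yml <<EOF
calico_wireguard_enabled: true
EOF
```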

@@ -2,12 +2,12 @@

## CentOS 7

The maximum python version officially supported in CentOS is 3.6. Ansible as of version 5 (ansible core 2.12.x) increased their python requirement to python 3.8 and above.
Kubespray supports multiple ansible versions, but only the default (5.x) gets wide testing coverage. If your deployment host is CentOS 7, it is recommended to use one of the earlier ansible versions that is still supported.

## CentOS 8

CentOS 8 / Oracle Linux 8,9 / AlmaLinux 8,9 / Rocky Linux 8,9 ship only with iptables-nft (i.e. without iptables-legacy, similar to RHEL 8).
The only tested configuration for now is using the Calico CNI.
You need to add `calico_iptables_backend: "NFT"` to your configuration.
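
You can confirm that a host is on the nft backend before applying this; a quick check:

```shell
# On these distributions iptables reports the nf_tables backend, e.g. "iptables v1.8.4 (nf_tables)"
iptables --version
```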

@@ -114,7 +114,7 @@ sudo apt-get install -y golang-cfssl

#### Create Root Certificate Authority (CA) Configuration File

The default TLS certificate expiry time period is `8760h`, which is 1 year from the date the certificate is created.

```shell
$ cat > ca-config.json <<EOF
```

docs/cgroups.md (new file)

@@ -0,0 +1,72 @@

# cgroups

To avoid containers competing with each other for resources, or impacting the host, the kubelet component in Kubernetes relies on cgroups to limit a container's resource usage.

## Enforcing Node Allocatable

You can use `kubelet_enforce_node_allocatable` to set node allocatable enforcement.

```yaml
# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
kubelet_enforce_node_allocatable: "pods"
# kubelet_enforce_node_allocatable: "pods,kube-reserved"
# kubelet_enforce_node_allocatable: "pods,kube-reserved,system-reserved"
```

Note that to enforce kube-reserved or system-reserved, `kube_reserved_cgroups` or `system_reserved_cgroups` needs to be specified respectively.

Here is an example:

```yaml
kubelet_enforce_node_allocatable: "pods,kube-reserved,system-reserved"

# Reserve this space for kube resources
# Set to true to reserve resources for kube daemons
kube_reserved: true
kube_reserved_cgroups_for_service_slice: kube.slice
kube_reserved_cgroups: "/{{ kube_reserved_cgroups_for_service_slice }}"
kube_memory_reserved: 256Mi
kube_cpu_reserved: 100m
# kube_ephemeral_storage_reserved: 2Gi
# kube_pid_reserved: "1000"
# Reservation for master hosts
kube_master_memory_reserved: 512Mi
kube_master_cpu_reserved: 200m
# kube_master_ephemeral_storage_reserved: 2Gi
# kube_master_pid_reserved: "1000"

# Set to true to reserve resources for system daemons
system_reserved: true
system_reserved_cgroups_for_service_slice: system.slice
system_reserved_cgroups: "/{{ system_reserved_cgroups_for_service_slice }}"
system_memory_reserved: 512Mi
system_cpu_reserved: 500m
# system_ephemeral_storage_reserved: 2Gi
# system_pid_reserved: "1000"
# Reservation for master hosts
system_master_memory_reserved: 256Mi
system_master_cpu_reserved: 250m
# system_master_ephemeral_storage_reserved: 2Gi
# system_master_pid_reserved: "1000"
```

After the setup, the cgroups hierarchy is as follows:

```bash
/ (Cgroups Root)
├── kubepods.slice
│   ├── ...
│   ├── kubepods-besteffort.slice
│   ├── kubepods-burstable.slice
│   └── ...
├── kube.slice
│   ├── ...
│   ├── {{container_manager}}.service
│   ├── kubelet.service
│   └── ...
├── system.slice
│   └── ...
└── ...
```

You can learn more in the [official kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/).

docs/ci-setup.md

@@ -2,18 +2,19 @@

## Pipeline

1. build: build a docker image to be used in the pipeline
2. unit-tests: fast jobs for fast feedback (linting, etc...)
3. deploy-part1: small number of jobs to test if the PR works with default settings
4. deploy-part2: slow jobs testing different platforms, OS, settings, CNI, etc...
5. deploy-part3: very slow jobs (upgrades, etc...)

## Runners

Kubespray has 3 types of GitLab runners:

- packet runners: used for E2E jobs (usually long), running on Equinix Metal (ex-packet), on kubevirt managed VMs
- light runners: used for short lived jobs, running on Equinix Metal (ex-packet), as managed pods
- auto scaling runners (managed via docker-machine on Equinix Metal): used for on-demand resources, see [GitLab docs](https://docs.gitlab.com/runner/configuration/autoscale.html) for more info

## Vagrant

@@ -25,3 +26,181 @@ In CI we have a set of overrides we use to ensure greater success of our CI jobs

- [Docker mirrors](/tests/common/_docker_hub_registry_mirror.yml)
- [Test settings](/tests/common/_kubespray_test_settings.yml)

## CI Environment

The CI packet and light runners are deployed on a kubernetes cluster on Equinix Metal. The cluster is deployed with kubespray itself and maintained by the kubespray maintainers.

The following files are used for that inventory:

### cluster.tfvars

```ini
# your Kubernetes cluster name here
cluster_name = "ci"

# Your Equinix Metal project ID. See https://metal.equinix.com/developers/docs/accounts/
equinix_metal_project_id = "_redacted_"

# The public SSH key to be uploaded into authorized_keys in bare metal Equinix Metal nodes provisioned
# leave this value blank if the public key is already setup in the Equinix Metal project
# Terraform will complain if the public key is setup in Equinix Metal
public_key_path = "~/.ssh/id_rsa.pub"

# cluster location
metro = "da"

# standalone etcds
number_of_etcd = 0

plan_etcd = "t1.small.x86"

# masters
number_of_k8s_masters = 1

number_of_k8s_masters_no_etcd = 0

plan_k8s_masters = "c3.small.x86"

plan_k8s_masters_no_etcd = "t1.small.x86"

# nodes
number_of_k8s_nodes = 1

plan_k8s_nodes = "c3.medium.x86"
```

### group_vars/all/mirrors.yml

```yaml
---
docker_registry_mirrors:
  - "https://mirror.gcr.io"

containerd_grpc_max_recv_message_size: 16777216
containerd_grpc_max_send_message_size: 16777216

containerd_registries:
  "docker.io":
    - "https://mirror.gcr.io"
    - "https://registry-1.docker.io"

containerd_max_container_log_line_size: -1

crio_registries_mirrors:
  - prefix: docker.io
    insecure: false
    blocked: false
    location: registry-1.docker.io
    mirrors:
      - location: mirror.gcr.io
        insecure: false

netcheck_agent_image_repo: "{{ quay_image_repo }}/kubespray/k8s-netchecker-agent"
netcheck_server_image_repo: "{{ quay_image_repo }}/kubespray/k8s-netchecker-server"

nginx_image_repo: "{{ quay_image_repo }}/kubespray/nginx"
```

### group_vars/all/settings.yml

```yaml
---
# Networking setting
kube_service_addresses: 172.30.0.0/18
kube_pods_subnet: 172.30.64.0/18
kube_network_plugin: calico
# avoid overlap with CI jobs deploying nodelocaldns
nodelocaldns_ip: 169.254.255.100

# ipip: False
calico_ipip_mode: "Never"
calico_vxlan_mode: "Never"
calico_network_backend: "bird"
calico_wireguard_enabled: True

# Cluster settings
upgrade_cluster_setup: True
force_certificate_regeneration: True

# Etcd settings
etcd_deployment_type: "host"

# Kubernetes settings
kube_controller_terminated_pod_gc_threshold: 100
kubelet_enforce_node_allocatable: pods
kubelet_preferred_address_types: 'InternalIP,ExternalIP,Hostname'
kubelet_custom_flags:
  - "--serialize-image-pulls=true"
  - "--eviction-hard=memory.available<1Gi"
  - "--eviction-soft-grace-period=memory.available=30s"
  - "--eviction-soft=memory.available<2Gi"
  - "--system-reserved cpu=100m,memory=4Gi"
  - "--eviction-minimum-reclaim=memory.available=2Gi"

# DNS settings
resolvconf_mode: none
dns_min_replicas: 1
upstream_dns_servers:
  - 1.1.1.1
  - 1.0.0.1

# Extensions
ingress_nginx_enabled: True
helm_enabled: True
cert_manager_enabled: True
metrics_server_enabled: True

# Enable ZSWAP
kubelet_fail_swap_on: False
kube_feature_gates:
  - "NodeSwap=True"
```

## Additional files

This section documents additional files used to complete a deployment of the kubespray CI; these files sit on the control-plane node and assume a working kubernetes cluster.

### /root/nscleanup.sh

```bash
#!/bin/bash

kubectl=/usr/local/bin/kubectl

# Remove CI namespaces (names containing digits and a dash) whose AGE is reported in days
$kubectl get ns | grep -P "(\d.+-\d.+)" | awk 'match($3,/[0-9]+d/) {print $1}' | xargs -r $kubectl delete ns
# Remove CI namespaces that are a few hours old (AGE matching the /[3-9]+h/ pattern)
$kubectl get ns | grep -P "(\d.+-\d.+)" | awk 'match($3,/[3-9]+h/) {print $1}' | xargs -r $kubectl delete ns
# Force-delete the kubevirt VM instances of namespaces stuck in Terminating
$kubectl get ns | grep Terminating | awk '{print $1}' | xargs -i $kubectl delete vmi/instance-1 vmi/instance-0 vmi/instance-2 -n {} --force --grace-period=0 &
```

### /root/path-calico.sh

```bash
#!/bin/bash

# Allow IPIP and VXLAN packets originating from workload endpoints
calicoctl patch felixconfig default -p '{"spec":{"allowIPIPPacketsFromWorkloads":true, "allowVXLANPacketsFromWorkloads": true}}'
```

### /root/kubevirt/kubevirt.sh

```bash
#!/bin/bash

# Pick the most recent non-rc release tag from the GitHub API
export VERSION=$(curl -s https://api.github.com/repos/kubevirt/kubevirt/releases | grep tag_name | grep -v -- '-rc' | sort -r | head -1 | awk -F': ' '{print $2}' | sed 's/,//' | xargs)
echo $VERSION
# Deploy the kubevirt operator and its custom resource
kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${VERSION}/kubevirt-operator.yaml
kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${VERSION}/kubevirt-cr.yaml
```

### /root/kubevirt/virtctl.sh

```bash
#!/bin/bash

# Read the deployed kubevirt version from the cluster
VERSION=$(kubectl get kubevirt.kubevirt.io/kubevirt -n kubevirt -o=jsonpath="{.status.observedKubeVirtVersion}")
# Compose the release asset suffix, e.g. "linux-amd64"
ARCH=$(uname -s | tr A-Z a-z)-$(uname -m | sed 's/x86_64/amd64/') || windows-amd64.exe
echo ${ARCH}
# Download the matching virtctl binary and install it
curl -L -o virtctl https://github.com/kubevirt/kubevirt/releases/download/${VERSION}/virtctl-${VERSION}-${ARCH}
chmod +x virtctl
sudo install virtctl /usr/local/bin
```

docs/ci.md

@@ -4,37 +4,37 @@ To generate this Matrix run `./tests/scripts/md-table/main.py`

## containerd

| OS / CNI | calico | cilium | custom_cni | flannel | kube-ovn | kube-router | macvlan | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- |
almalinux8 | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: |
debian10 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
debian11 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
fedora36 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux9 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: |
ubuntu18 | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: |
ubuntu20 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |

## crio

| OS / CNI | calico | cilium | custom_cni | flannel | kube-ovn | kube-router | macvlan | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- |
almalinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian11 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora36 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu22 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |

## docker

| OS / CNI | calico | cilium | custom_cni | flannel | kube-ovn | kube-router | macvlan | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- |
almalinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora35 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora36 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |

Some files were not shown because too many files have changed in this diff.