Compare commits
195 Commits
v2.17.0 ... floryut-pa
| SHA1 |
| --- |
| 3314f95be4 |
| ed3932b7d5 |
| 2b5c185826 |
| 996ecca78b |
| c3c128352f |
| 02a89543d6 |
| c1954ff918 |
| b49ae8c21d |
| 1a7b4435f3 |
| ff5ca5f7f8 |
| db0e458217 |
| f01f7c54aa |
| c59407f105 |
| fdc5d7458f |
| 6aafb9b2d4 |
| aa9ad1ed60 |
| aa9b8453a0 |
| 4daa824b3c |
| 4f2e4524b8 |
| 8ac510e4d6 |
| 4f27c763af |
| 0e969c0b72 |
| b396801e28 |
| 682c8a59c2 |
| 5a25de37ef |
| bdb923df4a |
| 4ef2cf4c28 |
| 990ca38d21 |
| c7e430573f |
| a328b64464 |
| a16d427536 |
| c98a07825b |
| a98ca6fcf3 |
| 4550f8c50f |
| 9afca43807 |
| 27ab364df5 |
| 615216f397 |
| 46b1b7ab34 |
| 30d9882851 |
| dfdebda0b6 |
| 9d8a83314b |
| e19ce27352 |
| 4d711691d0 |
| ee0f1e9d58 |
| a24162f596 |
| e82443241b |
| 9f052702e5 |
| b38382a68f |
| 785324827c |
| 31c7b6747b |
| dc767c14b9 |
| 30ec03259d |
| 38c12288f1 |
| 0e22a90579 |
| 0cdf75d41a |
| 3c6fa6e583 |
| ee882fa462 |
| 3431ed9857 |
| 279808b44e |
| 2fd529a993 |
| 1f6f79c91e |
| 52ee5d0fff |
| 2f44b40d68 |
| 20157254c3 |
| 09c17ba581 |
| a5f88e14d0 |
| e78bda65fe |
| 3ea496013f |
| 7e1873d927 |
| fe0810aff9 |
| e35a87e3eb |
| a6fcf2e066 |
| 25316825b1 |
| c74e1c9db3 |
| be9de6b9d9 |
| fe8c843cc8 |
| f48ae18630 |
| 83e0b786d4 |
| acd5185ad4 |
| 0263c649f4 |
| 8176e9155b |
| 424163c7d3 |
| 2c87170ccf |
| 02322c46de |
| 28b5281c45 |
| 4d79a55904 |
| 027cbefb87 |
| a08d82d94e |
| 5f1456337b |
| 6eeb4883af |
| b5a5478a8a |
| 0d0468e127 |
| b7ae4a2cfd |
| 039205560a |
| 801268d5c1 |
| 46c536d261 |
| 4a8757161e |
| 65540c5771 |
| 6c1ab24981 |
| 61c2ae5549 |
| 04711d3b00 |
| cb7c30a4f1 |
| 8922c45556 |
| 58390c79d0 |
| b7eb1cf936 |
| 6e5b9e0ebf |
| c94291558d |
| 8d553f7e91 |
| a0be7f0e26 |
| 1c3d082b8d |
| 2ed211ba15 |
| 1161326b54 |
| d473a6d442 |
| 8d82033bff |
| 9d4cdb7b02 |
| b353e062c7 |
| d8f9b9b61f |
| 0b441ade2c |
| 6f6fad5a16 |
| 465ffa3c9f |
| 539c9e0d99 |
| 649f962ac6 |
| 16bdb3fe51 |
| 7c3369e1b9 |
| 9eacde212f |
| 331647f4ab |
| c2d4822c38 |
| 3c30be1320 |
| d8d01bf5aa |
| d42b7228c2 |
| 4db057e9c2 |
| ea8e2fc651 |
| 10c30ea5b1 |
| 84b56d23a4 |
| 19d07a4f2e |
| 6a5b87dda4 |
| 6aac59394e |
| f147163b24 |
| 16bf3549c1 |
| b912dafd7a |
| 8b3481f511 |
| 7019c2685d |
| d18cc38586 |
| cee481f63d |
| e4c8c7188e |
| 6c004efd5f |
| 1a57780a75 |
| ce25e4aa21 |
| ef4044b62f |
| 9ffe5940fe |
| c8d9afce1a |
| 285983a555 |
| ab4356aa69 |
| e87d4e9ce3 |
| 5fcf047191 |
| c68fb81aa7 |
| e707f78899 |
| 41e0ca3f85 |
| c5c10067ed |
| 43958614e3 |
| af04906b51 |
| c7e17688b9 |
| ac76840c5d |
| f5885d05ea |
| af949cd967 |
| eee2eb11d8 |
| 8d3961edbe |
| 4c5328fd1f |
| 1472528f6d |
| 9416c9aa86 |
| da92c7e215 |
| d27cf375af |
| 432a312a35 |
| 3a6230af6b |
| ecd267854b |
| ac846667b7 |
| 33146b9481 |
| 4bace2491d |
| 469b3ec525 |
| 22017b7ff0 |
| 88c11b5946 |
| 843252c968 |
| ddea79f0f0 |
| c0e1211abe |
| c8d7f000c9 |
| 598f178054 |
| 6f8b24f367 |
| 5d1b34bdcd |
| 8efde799e1 |
| 96b61a5f53 |
| a517a8db01 |
| 2211504790 |
| fb8662ec19 |
| 6f7911264f |
| ae44aff330 |
10 changes: .gitignore (vendored)
@@ -99,3 +99,13 @@ target/
 # virtualenv
 venv/
 ENV/
+
+# molecule
+roles/**/molecule/**/__pycache__/
+roles/**/molecule/**/*.conf
+
+# macOS
+.DS_Store
+
+# Temp location used by our scripts
+scripts/tmp/
@@ -8,7 +8,7 @@ stages:
  - deploy-special

variables:
-  KUBESPRAY_VERSION: v2.16.0
+  KUBESPRAY_VERSION: v2.17.1
  FAILFASTCI_NAMESPACE: 'kargo-ci'
  GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
  ANSIBLE_FORCE_COLOR: "true"
@@ -16,6 +16,7 @@ variables:
  TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
  CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
  CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
+  CI_TEST_SETTING: "./tests/common/_kubespray_test_settings.yml"
  GS_ACCESS_KEY_ID: $GS_KEY
  GS_SECRET_ACCESS_KEY: $GS_SECRET
  CONTAINER_ENGINE: docker
@@ -31,14 +32,14 @@ variables:
  ANSIBLE_LOG_LEVEL: "-vv"
  RECOVER_CONTROL_PLANE_TEST: "false"
  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
-  TERRAFORM_14_VERSION: 0.14.11
-  TERRAFORM_15_VERSION: 0.15.5
+  TERRAFORM_VERSION: 1.0.8
+  ANSIBLE_MAJOR_VERSION: "2.10"

before_script:
  - ./tests/scripts/rebase.sh
  - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
-  - python -m pip uninstall -y ansible
-  - python -m pip install -r tests/requirements.txt
+  - python -m pip uninstall -y ansible ansible-base ansible-core
+  - python -m pip install -r tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt
  - mkdir -p /.ssh

.job: &job
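Since `ansible`, `ansible-base`, and `ansible-core` are distributed as distinct packages across these major versions, the job now uninstalls all three before installing the pinned requirements file. A minimal local sketch of the same flow (run from the repository root; `2.10` is one of the majors the CI exercises):

```ShellSession
# Sketch: reproduce the CI Ansible setup for one supported major version.
ANSIBLE_MAJOR_VERSION="2.10"
python3 -m pip uninstall -y ansible ansible-base ansible-core
python3 -m pip install -r "tests/requirements-${ANSIBLE_MAJOR_VERSION}.txt"
ansible --version   # confirm the expected version is now active
```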
@@ -14,7 +14,7 @@ vagrant-validate:
  stage: unit-tests
  tags: [light]
  variables:
-    VAGRANT_VERSION: 2.2.15
+    VAGRANT_VERSION: 2.2.19
  script:
    - ./tests/scripts/vagrant-validate.sh
  except: ['triggers', 'master']
@@ -2,6 +2,7 @@
.packet:
  extends: .testcases
  variables:
+    ANSIBLE_TIMEOUT: "120"
    CI_PLATFORM: packet
    SSH_USER: kubespray
  tags:
@@ -22,25 +23,52 @@
  allow_failure: true
  extends: .packet

-packet_ubuntu18-calico-aio:
-  stage: deploy-part1
-  extends: .packet_pr
-  when: on_success
-
-# Future AIO job
+# The ubuntu20-calico-aio jobs are meant as early stages to prevent running the full CI if something is horribly broken
packet_ubuntu20-calico-aio:
  stage: deploy-part1
  extends: .packet_pr
  when: on_success
+  variables:
+    RESET_CHECK: "true"
+
+# Exercise ansible variants
+packet_ubuntu20-calico-aio-ansible-2_9:
+  stage: deploy-part1
+  extends: .packet_pr
+  when: on_success
+  variables:
+    ANSIBLE_MAJOR_VERSION: "2.9"
+    RESET_CHECK: "true"
+
+packet_ubuntu20-calico-aio-ansible-2_11:
+  stage: deploy-part1
+  extends: .packet_pr
+  when: on_success
+  variables:
+    ANSIBLE_MAJOR_VERSION: "2.11"
+    RESET_CHECK: "true"

# ### PR JOBS PART2

-packet_centos7-flannel-containerd-addons-ha:
+packet_ubuntu18-aio-docker:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success
+
+packet_ubuntu20-aio-docker:
+  stage: deploy-part2
+  extends: .packet_pr
+  when: on_success
+
+packet_ubuntu18-calico-aio:
+  stage: deploy-part2
+  extends: .packet_pr
+  when: on_success
+
+packet_centos7-flannel-addons-ha:
+  extends: .packet_pr
+  stage: deploy-part2
+  when: on_success
  variables:
    MITOGEN_ENABLE: "true"

packet_centos8-crio:
  extends: .packet_pr
@@ -51,10 +79,13 @@ packet_ubuntu18-crio:
  extends: .packet_pr
  stage: deploy-part2
  when: manual
  variables:
    MITOGEN_ENABLE: "true"

-packet_ubuntu16-canal-kubeadm-ha:
+packet_fedora35-crio:
+  extends: .packet_pr
+  stage: deploy-part2
+  when: manual
+
+packet_ubuntu16-canal-ha:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success
@@ -84,18 +115,26 @@ packet_debian10-cilium-svc-proxy:
  extends: .packet_periodic
  when: on_success

-packet_debian10-containerd:
+packet_debian10-calico:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success
+
+packet_debian10-docker:
+  stage: deploy-part2
+  extends: .packet_pr
+  when: on_success
  variables:
    MITOGEN_ENABLE: "true"
+
+packet_debian11-calico:
+  stage: deploy-part2
+  extends: .packet_pr
+  when: on_success
+
+packet_debian11-docker:
+  stage: deploy-part2
+  extends: .packet_pr
+  when: on_success

packet_centos7-calico-ha-once-localhost:
  stage: deploy-part2
  extends: .packet_pr
@@ -116,7 +155,17 @@ packet_centos8-calico:
  extends: .packet_pr
  when: on_success

-packet_fedora34-weave:
+packet_centos8-docker:
+  stage: deploy-part2
+  extends: .packet_pr
+  when: on_success
+
+packet_fedora34-docker-weave:
  stage: deploy-part2
  extends: .packet_pr
  when: on_success

+packet_fedora35-kube-router:
+  stage: deploy-part2
+  extends: .packet_pr
+  when: on_success
@@ -126,14 +175,14 @@ packet_opensuse-canal:
  extends: .packet_periodic
  when: on_success

-packet_ubuntu18-ovn4nfv:
+packet_opensuse-docker-cilium:
  stage: deploy-part2
-  extends: .packet_periodic
-  when: on_success
+  extends: .packet_pr
+  when: manual

# ### MANUAL JOBS

-packet_ubuntu16-weave-sep:
+packet_ubuntu16-docker-weave-sep:
  stage: deploy-part2
  extends: .packet_pr
  when: manual
@@ -143,12 +192,18 @@ packet_ubuntu18-cilium-sep:
  extends: .packet_pr
  when: manual

-packet_ubuntu18-flannel-containerd-ha:
+packet_ubuntu18-flannel-ha:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

-packet_ubuntu18-flannel-containerd-ha-once:
+packet_ubuntu18-flannel-ha-once:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

+# Calico HA eBPF
+packet_centos8-calico-ha-ebpf:
+  stage: deploy-part2
+  extends: .packet_pr
+  when: manual
@@ -178,22 +233,34 @@ packet_oracle7-canal-ha:
  extends: .packet_pr
  when: manual

-packet_fedora33-calico:
+packet_fedora35-docker-calico:
  stage: deploy-part2
  extends: .packet_periodic
  when: on_success
+  variables:
+    RESET_CHECK: "true"
+
+packet_fedora34-calico-selinux:
+  stage: deploy-part2
+  extends: .packet_periodic
+  when: on_success
+
+packet_fedora35-calico-swap-selinux:
+  stage: deploy-part2
+  extends: .packet_pr
+  when: manual

packet_amazon-linux-2-aio:
  stage: deploy-part2
  extends: .packet_pr
  when: manual

-packet_fedora34-kube-ovn-containerd:
+packet_centos8-calico-nodelocaldns-secondary:
  stage: deploy-part2
  extends: .packet_pr
  when: manual
+
+packet_fedora34-kube-ovn:
+  stage: deploy-part2
+  extends: .packet_periodic
+  when: on_success
@@ -201,37 +268,32 @@ packet_fedora34-kube-ovn-containerd:
# ### PR JOBS PART3
# Long jobs (45min+)

-packet_centos7-weave-upgrade-ha:
+packet_centos7-docker-weave-upgrade-ha:
  stage: deploy-part3
  extends: .packet_periodic
  when: on_success
  variables:
    UPGRADE_TEST: basic
-    MITOGEN_ENABLE: "false"

# Calico HA Wireguard
packet_ubuntu20-calico-ha-wireguard:
  stage: deploy-part2
  extends: .packet_pr
  when: manual
-  variables:
-    MITOGEN_ENABLE: "true"

-packet_debian9-calico-upgrade:
+packet_debian10-calico-upgrade:
  stage: deploy-part3
  extends: .packet_pr
  when: on_success
  variables:
    UPGRADE_TEST: graceful
-    MITOGEN_ENABLE: "false"

-packet_debian9-calico-upgrade-once:
+packet_debian10-calico-upgrade-once:
  stage: deploy-part3
  extends: .packet_periodic
  when: on_success
  variables:
    UPGRADE_TEST: graceful
-    MITOGEN_ENABLE: "false"

packet_ubuntu18-calico-ha-recover:
  stage: deploy-part3
@@ -53,92 +53,51 @@
    # Cleanup regardless of exit code
    - chronic ./tests/scripts/testcases_cleanup.sh

-tf-0.15.x-validate-openstack:
+tf-validate-openstack:
  extends: .terraform_validate
  variables:
-    TF_VERSION: $TERRAFORM_15_VERSION
+    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME

-tf-0.15.x-validate-packet:
+tf-validate-packet:
  extends: .terraform_validate
  variables:
-    TF_VERSION: $TERRAFORM_15_VERSION
+    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: packet
    CLUSTER: $CI_COMMIT_REF_NAME

-tf-0.15.x-validate-aws:
+tf-validate-aws:
  extends: .terraform_validate
  variables:
-    TF_VERSION: $TERRAFORM_15_VERSION
+    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: aws
    CLUSTER: $CI_COMMIT_REF_NAME

-tf-0.15.x-validate-exoscale:
+tf-validate-exoscale:
  extends: .terraform_validate
  variables:
-    TF_VERSION: $TERRAFORM_15_VERSION
+    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: exoscale

-tf-0.15.x-validate-vsphere:
+tf-validate-vsphere:
  extends: .terraform_validate
  variables:
-    TF_VERSION: $TERRAFORM_15_VERSION
+    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: vsphere
    CLUSTER: $CI_COMMIT_REF_NAME

-tf-0.15.x-validate-upcloud:
+tf-validate-upcloud:
  extends: .terraform_validate
  variables:
-    TF_VERSION: $TERRAFORM_15_VERSION
    PROVIDER: upcloud
    CLUSTER: $CI_COMMIT_REF_NAME

-tf-0.14.x-validate-openstack:
-  extends: .terraform_validate
-  variables:
-    TF_VERSION: $TERRAFORM_14_VERSION
-    PROVIDER: openstack
-    CLUSTER: $CI_COMMIT_REF_NAME
-
-tf-0.14.x-validate-packet:
-  extends: .terraform_validate
-  variables:
-    TF_VERSION: $TERRAFORM_14_VERSION
-    PROVIDER: packet
-    CLUSTER: $CI_COMMIT_REF_NAME
-
-tf-0.14.x-validate-aws:
-  extends: .terraform_validate
-  variables:
-    TF_VERSION: $TERRAFORM_14_VERSION
-    PROVIDER: aws
-    CLUSTER: $CI_COMMIT_REF_NAME
-
-tf-0.14.x-validate-exoscale:
-  extends: .terraform_validate
-  variables:
-    TF_VERSION: $TERRAFORM_14_VERSION
-    PROVIDER: exoscale
-
-tf-0.14.x-validate-vsphere:
-  extends: .terraform_validate
-  variables:
-    TF_VERSION: $TERRAFORM_14_VERSION
-    PROVIDER: vsphere
-    CLUSTER: $CI_COMMIT_REF_NAME
-
-tf-0.14.x-validate-upcloud:
-  extends: .terraform_validate
-  variables:
-    TF_VERSION: $TERRAFORM_14_VERSION
+    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: upcloud
    CLUSTER: $CI_COMMIT_REF_NAME

# tf-packet-ubuntu16-default:
#   extends: .terraform_apply
#   variables:
-#     TF_VERSION: $TERRAFORM_14_VERSION
+#     TF_VERSION: $TERRAFORM_VERSION
#     PROVIDER: packet
#     CLUSTER: $CI_COMMIT_REF_NAME
#     TF_VAR_number_of_k8s_masters: "1"
@@ -152,7 +111,7 @@ tf-0.14.x-validate-upcloud:
# tf-packet-ubuntu18-default:
#   extends: .terraform_apply
#   variables:
-#     TF_VERSION: $TERRAFORM_14_VERSION
+#     TF_VERSION: $TERRAFORM_VERSION
#     PROVIDER: packet
#     CLUSTER: $CI_COMMIT_REF_NAME
#     TF_VAR_number_of_k8s_masters: "1"
@@ -187,10 +146,6 @@ tf-0.14.x-validate-upcloud:
  OS_INTERFACE: public
  OS_IDENTITY_API_VERSION: "3"
  TF_VAR_router_id: "ab95917c-41fb-4881-b507-3a6dfe9403df"
-  # Since ELASTX is in Stockholm, Mitogen helps with latency
-  MITOGEN_ENABLE: "false"
-  # Mitogen doesn't support interpreter discovery yet
-  ANSIBLE_PYTHON_INTERPRETER: "/usr/bin/python3"

tf-elastx_cleanup:
  stage: unit-tests
@@ -210,7 +165,7 @@ tf-elastx_ubuntu18-calico:
  allow_failure: true
  variables:
    <<: *elastx_variables
-    TF_VERSION: $TERRAFORM_15_VERSION
+    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME
    ANSIBLE_TIMEOUT: "60"
@@ -256,7 +211,7 @@ tf-elastx_ubuntu18-calico:
#   environment: ovh
#   variables:
#     <<: *ovh_variables
-#     TF_VERSION: $TERRAFORM_14_VERSION
+#     TF_VERSION: $TERRAFORM_VERSION
#     PROVIDER: openstack
#     CLUSTER: $CI_COMMIT_REF_NAME
#     ANSIBLE_TIMEOUT: "60"
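All validate jobs now share the single `TERRAFORM_VERSION` pin (1.0.8) instead of duplicated 0.14.x/0.15.x job matrices. A local sketch of validating one provider directory against that pin (tfenv is an assumption here; any way of obtaining Terraform 1.0.8 works the same):

```ShellSession
# Sketch: validate one provider directory with the pinned Terraform version.
tfenv install 1.0.8
tfenv use 1.0.8
cd contrib/terraform/openstack
terraform init -backend=false
terraform validate
```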
@@ -16,6 +16,12 @@ molecule_tests:
    - ./tests/scripts/vagrant_clean.sh
  script:
    - ./tests/scripts/molecule_run.sh
+  after_script:
+    - chronic ./tests/scripts/molecule_logs.sh
+  artifacts:
+    when: always
+    paths:
+      - molecule_logs/

.vagrant:
  extends: .testcases
@@ -39,6 +45,7 @@ molecule_tests:
    - ./tests/scripts/testcases_run.sh
  after_script:
    - chronic ./tests/scripts/testcases_cleanup.sh
+  allow_failure: true

vagrant_ubuntu18-calico-dual-stack:
  stage: deploy-part2
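The molecule job now always archives its logs as artifacts; the two scripts can be chained the same way locally. A sketch (assuming a working Vagrant environment, as the job's `before_script` prepares in CI):

```ShellSession
# Sketch: run the molecule tests and collect logs the way CI does.
./tests/scripts/molecule_run.sh
./tests/scripts/molecule_logs.sh   # gathers logs under molecule_logs/
```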
@@ -6,11 +6,17 @@

It is recommended to use filters to manage GitHub email notifications; see [examples for setting filters to Kubernetes GitHub notifications](https://github.com/kubernetes/community/blob/master/communication/best-practices.md#examples-for-setting-filters-to-kubernetes-github-notifications)

-To install development dependencies you can use `pip install -r tests/requirements.txt`
+To install development dependencies you can set up a python virtual env with the necessary dependencies:

```ShellSession
virtualenv venv
source venv/bin/activate
pip install -r tests/requirements.txt
```

#### Linting

-Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `ansible-lint`
+Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `ansible-lint`. It is a good idea to call these tools as part of your pre-commit hook to avoid a lot of back and forth on fixing linting issues (<https://support.gitkraken.com/working-with-repositories/githooksexample/>).
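A minimal sketch of such a hook; `.git/hooks/pre-commit` is the standard local hook path, and failing the commit on any lint error is a policy choice rather than something the repo mandates:

```ShellSession
# Sketch: install a pre-commit hook that runs both linters.
cat > .git/hooks/pre-commit <<'EOF'
#!/bin/sh
yamllint . || exit 1
ansible-lint || exit 1
EOF
chmod +x .git/hooks/pre-commit
```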
#### Molecule

@@ -29,3 +35,5 @@ Vagrant with VirtualBox or libvirt driver helps you to quickly spin test cluster
3. Fork the desired repo, develop and test your code changes.
4. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
5. Submit a pull request.
+6. Work with the reviewers on their suggestions.
+7. Make sure to rebase onto the HEAD of your target branch and squash unnecessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before your contribution is finally merged.
@@ -4,7 +4,7 @@ FROM ubuntu:bionic-20200807
RUN apt update -y \
    && apt install -y \
    libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
-    ca-certificates curl gnupg2 software-properties-common python3-pip rsync git \
+    ca-certificates curl gnupg2 software-properties-common python3-pip unzip rsync git \
    && rm -rf /var/lib/apt/lists/*
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
    && add-apt-repository \
4 changes: Makefile

@@ -1,5 +1,7 @@
mitogen:
-	ansible-playbook -c local mitogen.yml -vv
+	@echo Mitogen support is deprecated.
+	@echo Please run the following command manually:
+	@echo ansible-playbook -c local mitogen.yml -vv
clean:
	rm -rf dist/
	rm *.retry
@@ -7,11 +7,14 @@ aliases:
    - woopstar
    - luckysb
    - floryut
+    - oomichi
  kubespray-reviewers:
    - holmsten
    - bozzo
    - eppo
    - oomichi
+    - jayonlau
+    - cristicalin
  kubespray-emeritus_approvers:
    - riverzhang
    - atoms
41 changes: README.md

@@ -57,10 +57,10 @@ A simple way to ensure you get all the correct version of Ansible is to use the
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:

```ShellSession
-docker pull quay.io/kubespray/kubespray:v2.16.0
+docker pull quay.io/kubespray/kubespray:v2.17.1
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
  --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
-  quay.io/kubespray/kubespray:v2.16.0 bash
+  quay.io/kubespray/kubespray:v2.17.1 bash
# Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```
@@ -118,11 +118,12 @@ vagrant up
- **Debian** Bullseye, Buster, Jessie, Stretch
- **Ubuntu** 16.04, 18.04, 20.04
- **CentOS/RHEL** 7, [8](docs/centos8.md)
-- **Fedora** 33, 34
+- **Fedora** 34, 35
- **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
- **openSUSE** Leap 15.x/Tumbleweed
- **Oracle Linux** 7, [8](docs/centos8.md)
- **Alma Linux** [8](docs/centos8.md)
+- **Rocky Linux** [8](docs/centos8.md)
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))

Note: Upstart/SysV init based OS types are not supported.
@@ -130,29 +131,27 @@ Note: Upstart/SysV init based OS types are not supported.
## Supported Components

- Core
-  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.21.5
-  - [etcd](https://github.com/coreos/etcd) v3.4.13
+  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.22.5
+  - [etcd](https://github.com/coreos/etcd) v3.5.0
  - [docker](https://www.docker.com/) v20.10 (see note)
-  - [containerd](https://containerd.io/) v1.4.9
-  - [cri-o](http://cri-o.io/) v1.21 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
+  - [containerd](https://containerd.io/) v1.5.8
+  - [cri-o](http://cri-o.io/) v1.22 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
-  - [cni-plugins](https://github.com/containernetworking/plugins) v0.9.1
-  - [calico](https://github.com/projectcalico/calico) v3.19.2
+  - [cni-plugins](https://github.com/containernetworking/plugins) v1.0.1
+  - [calico](https://github.com/projectcalico/calico) v3.20.3
  - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
-  - [cilium](https://github.com/cilium/cilium) v1.9.10
-  - [flanneld](https://github.com/flannel-io/flannel) v0.14.0
-  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.7.2
-  - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.3.0
-  - [multus](https://github.com/intel/multus-cni) v3.7.2
-  - [ovn4nfv](https://github.com/opnfv/ovn4nfv-k8s-plugin) v1.1.0
+  - [cilium](https://github.com/cilium/cilium) v1.9.11
+  - [flanneld](https://github.com/flannel-io/flannel) v0.15.1
+  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.8.1
+  - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.3.2
+  - [multus](https://github.com/intel/multus-cni) v3.8
  - [weave](https://github.com/weaveworks/weave) v2.8.1
- Application
  - [ambassador](https://github.com/datawire/ambassador): v1.5
  - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
  - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
-  - [cert-manager](https://github.com/jetstack/cert-manager) v1.0.4
+  - [cert-manager](https://github.com/jetstack/cert-manager) v1.5.4
  - [coredns](https://github.com/coredns/coredns) v1.8.0
-  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.0.0
+  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.0.4
@@ -161,7 +160,7 @@ Note: Upstart/SysV init based OS types are not supported.

## Requirements

-- **Minimum required version of Kubernetes is v1.19**
+- **Minimum required version of Kubernetes is v1.20**
- **Ansible v2.9.x, Jinja 2.11+ and python-netaddr installed on the machine that will run Ansible commands, Ansible 2.10.x is experimentally supported for now**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
- The target servers are configured to allow **IPv4 forwarding**.
@@ -195,8 +194,6 @@ You can choose between 10 network plugins. (default: `calico`, except Vagrant us

- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.

-- [ovn4nfv](docs/ovn4nfv.md): [ovn4nfv-k8s-plugins](https://github.com/opnfv/ovn4nfv-k8s-plugin) is the network controller, OVS agent and CNI server to offer basic SFC and OVN overlay networking.
-
- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
  (Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).

@@ -217,8 +214,6 @@ See also [Network checker](docs/netcheck.md).

## Ingress Plugins

-- [ambassador](docs/ambassador.md): the Ambassador Ingress Controller and API gateway.
-
- [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller.

- [metallb](docs/metallb.md): the MetalLB bare-metal service LoadBalancer provider.
4 changes: Vagrantfile (vendored)

@@ -26,8 +26,8 @@ SUPPORTED_OS = {
  "centos-bento"        => {box: "bento/centos-7.6", user: "vagrant"},
  "centos8"             => {box: "centos/8", user: "vagrant"},
  "centos8-bento"       => {box: "bento/centos-8", user: "vagrant"},
-  "fedora33"            => {box: "fedora/33-cloud-base", user: "vagrant"},
  "fedora34"            => {box: "fedora/34-cloud-base", user: "vagrant"},
+  "fedora35"            => {box: "fedora/35-cloud-base", user: "vagrant"},
  "opensuse"            => {box: "bento/opensuse-leap-15.2", user: "vagrant"},
  "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
  "oraclelinux"         => {box: "generic/oracle7", user: "vagrant"},
@@ -55,7 +55,7 @@ $network_plugin ||= "flannel"
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
$multi_networking ||= false
$download_run_once ||= "True"
-$download_force_cache ||= "True"
+$download_force_cache ||= "False"
# The first three nodes are etcd servers
$etcd_instances ||= $num_instances
# The first two nodes are kube masters
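With the download cache no longer forced on by default, users who want the old behaviour can set it back in their local override instead of patching the Vagrantfile. A sketch, assuming `vagrant/config.rb` is the override file your Vagrantfile checkout loads (verify the path in your copy):

```ShellSession
# Sketch: re-enable the forced download cache through the local override file.
mkdir -p vagrant
cat > vagrant/config.rb <<'EOF'
$download_force_cache = "True"
EOF
vagrant up
```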
@@ -3,7 +3,6 @@ pipelining=True
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
[defaults]
-strategy_plugins = plugins/mitogen/ansible_mitogen/plugins/strategy
# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .)
force_valid_group_names = ignore
@@ -5,7 +5,7 @@
  vars:
    minimal_ansible_version: 2.9.0
    minimal_ansible_version_2_10: 2.10.11
-    maximal_ansible_version: 2.11.0
+    maximal_ansible_version: 2.12.0
  ansible_connection: local
  tags: always
  tasks:
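Raising the exclusive ceiling to 2.12.0 lets Ansible 2.11 releases pass the check. To see the version the playbook will compare against (a sketch; where the version string lives differs slightly between `ansible` and `ansible-core` packagings):

```ShellSession
# Sketch: print the Ansible version that the bounds check will evaluate.
ansible --version | head -n1
python3 -c 'import ansible; print(ansible.__version__)'
```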
@@ -32,7 +32,7 @@
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, tags: preinstall }
-    - { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) }
+    - { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
    - { role: download, tags: download, when: "not skip_downloads" }

- hosts: etcd
@@ -2,6 +2,13 @@ all:
  vars:
    heketi_admin_key: "11elfeinhundertundelf"
    heketi_user_key: "!!einseinseins"
+    glusterfs_daemonset:
+      readiness_probe:
+        timeout_seconds: 3
+        initial_delay_seconds: 3
+      liveness_probe:
+        timeout_seconds: 3
+        initial_delay_seconds: 10
  children:
    k8s_cluster:
      vars:
@@ -5,7 +5,7 @@
  changed_when: false

- name: "Kubernetes Apps | Deploy cluster role binding."
-  when: "clusterrolebinding_state.stdout | length > 0"
+  when: "clusterrolebinding_state.stdout | length == 0"
  command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"

- name: Get clusterrolebindings again
@@ -31,7 +31,7 @@
    mode: 0644

- name: "Deploy Heketi config secret"
-  when: "secret_state.stdout | length > 0"
+  when: "secret_state.stdout | length == 0"
  command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"

- name: Get the heketi-config-secret secret again
@@ -41,5 +41,5 @@

- name: Make sure the heketi-config-secret secret exists now
  assert:
-    that: "secret_state.stdout != \"\""
+    that: "secret_state.stdout | length > 0"
    msg: "Heketi config secret is not present."
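The inverted conditions fix a logic bug: the `kubectl create` tasks previously ran only when the object already existed and were skipped when it was missing. The intended pattern is plain create-if-absent; a shell sketch of the same idea, using the object names from the tasks above:

```ShellSession
# Sketch: create-if-absent, the behaviour the corrected conditions implement.
if ! kubectl get clusterrolebinding heketi-gluster-admin >/dev/null 2>&1; then
  kubectl create clusterrolebinding heketi-gluster-admin \
    --clusterrole=edit --serviceaccount=default:heketi-service-account
fi
```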
@@ -73,8 +73,8 @@
    "privileged": true
  },
  "readinessProbe": {
-    "timeoutSeconds": 3,
-    "initialDelaySeconds": 3,
+    "timeoutSeconds": {{ glusterfs_daemonset.readiness_probe.timeout_seconds }},
+    "initialDelaySeconds": {{ glusterfs_daemonset.readiness_probe.initial_delay_seconds }},
    "exec": {
      "command": [
        "/bin/bash",
@@ -84,8 +84,8 @@
    }
  },
  "livenessProbe": {
-    "timeoutSeconds": 3,
-    "initialDelaySeconds": 10,
+    "timeoutSeconds": {{ glusterfs_daemonset.liveness_probe.timeout_seconds }},
+    "initialDelaySeconds": {{ glusterfs_daemonset.liveness_probe.initial_delay_seconds }},
    "exec": {
      "command": [
        "/bin/bash",
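Because the probe timings are now templated from the `glusterfs_daemonset` variable shown in the sample inventory above, they can be overridden per run without editing the template, for instance via extra vars (a sketch; the playbook path is the heketi contrib playbook, and the values are illustrative):

```ShellSession
# Sketch: override the templated probe timings at run time.
ansible-playbook -i inventory.yml contrib/network-storage/heketi/heketi.yml \
  -e '{"glusterfs_daemonset": {"readiness_probe": {"timeout_seconds": 5, "initial_delay_seconds": 5}, "liveness_probe": {"timeout_seconds": 5, "initial_delay_seconds": 15}}}'
```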
@@ -20,4 +20,4 @@
    "'ufw.service' in services"

  when:
-    - disable_service_firewall
+    - disable_service_firewall is defined and disable_service_firewall
107 additions: contrib/terraform/hetzner/README.md (new file)

@@ -0,0 +1,107 @@
# Kubernetes on Hetzner with Terraform

Provision a Kubernetes cluster on [Hetzner](https://www.hetzner.com/cloud) using Terraform and Kubespray

## Overview

The setup looks like the following:

```text
        Kubernetes cluster
 +--------------------------+
 |      +--------------+    |
 |      | +--------------+  |
 |  --> | |              |  |
 |      | | Master/etcd  |  |
 |      | | node(s)      |  |
 |      +-+              |  |
 |        +--------------+  |
 |              ^           |
 |              |           |
 |              v           |
 |      +--------------+    |
 |      | +--------------+  |
 |  --> | |              |  |
 |      | | Worker       |  |
 |      | | node(s)      |  |
 |      +-+              |  |
 |        +--------------+  |
 +--------------------------+
```

The nodes use a private network for node-to-node communication and a public interface for all external communication.

## Requirements

* Terraform 0.14.0 or newer

## Quickstart

NOTE: Assumes you are at the root of the kubespray repo.

For authentication against the Hetzner Cloud API, export your API token as an environment variable.

```bash
export HCLOUD_TOKEN=api-token
```

Copy the cluster configuration file.

```bash
CLUSTER=my-hetzner-cluster
cp -r inventory/sample inventory/$CLUSTER
cp contrib/terraform/hetzner/default.tfvars inventory/$CLUSTER/
cd inventory/$CLUSTER
```

Edit `default.tfvars` to match your requirements.

Run Terraform to create the infrastructure.

```bash
terraform init ../../contrib/terraform/hetzner
terraform apply --var-file default.tfvars ../../contrib/terraform/hetzner/
```

You should now have an inventory file named `inventory.ini` that you can use with kubespray to set up a cluster.

It is a good idea to check that you have basic SSH connectivity to the nodes. You can do that by:

```bash
ansible -i inventory.ini -m ping all
```

You can set up Kubernetes with kubespray using the generated inventory:

```bash
ansible-playbook -i inventory.ini ../../cluster.yml -b -v
```

## Cloud controller

For better integration with the cloud you can install the [hcloud cloud controller](https://github.com/hetznercloud/hcloud-cloud-controller-manager) and [CSI driver](https://github.com/hetznercloud/csi-driver).

Please read the instructions in both repos on how to install them.

## Teardown

You can tear down your infrastructure using the following Terraform command:

```bash
terraform destroy --var-file default.tfvars ../../contrib/terraform/hetzner
```

## Variables

* `prefix`: Prefix to add to all resources; if set to "" no prefix is added
* `ssh_public_keys`: List of public SSH keys to install on all machines
* `zone`: The zone where to run the cluster
* `machines`: Machines to provision. Key of this object will be used as the name of the machine
  * `node_type`: The role of this node *(master|worker)*
  * `size`: Size of the VM
  * `image`: The image to use for the VM
* `ssh_whitelist`: List of IP ranges (CIDR) that will be allowed to ssh to the nodes
* `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
* `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on ports 30000-32767 (kubernetes nodeports)
* `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to kubernetes workers on ports 80 and 443
44 additions: contrib/terraform/hetzner/default.tfvars (new file)

@@ -0,0 +1,44 @@
prefix = "default"
|
||||
zone = "hel1"
|
||||
|
||||
inventory_file = "inventory.ini"
|
||||
|
||||
ssh_public_keys = [
|
||||
# Put your public SSH key here
|
||||
"ssh-rsa I-did-not-read-the-docs",
|
||||
"ssh-rsa I-did-not-read-the-docs 2",
|
||||
]
|
||||
|
||||
machines = {
|
||||
"master-0" : {
|
||||
"node_type" : "master",
|
||||
"size" : "cx21",
|
||||
"image" : "ubuntu-20.04",
|
||||
},
|
||||
"worker-0" : {
|
||||
"node_type" : "worker",
|
||||
"size" : "cx21",
|
||||
"image" : "ubuntu-20.04",
|
||||
},
|
||||
"worker-1" : {
|
||||
"node_type" : "worker",
|
||||
"size" : "cx21",
|
||||
"image" : "ubuntu-20.04",
|
||||
}
|
||||
}
|
||||
|
||||
nodeport_whitelist = [
|
||||
"0.0.0.0/0"
|
||||
]
|
||||
|
||||
ingress_whitelist = [
|
||||
"0.0.0.0/0"
|
||||
]
|
||||
|
||||
ssh_whitelist = [
|
||||
"0.0.0.0/0"
|
||||
]
|
||||
|
||||
api_server_whitelist = [
|
||||
"0.0.0.0/0"
|
||||
]
|
51 additions: contrib/terraform/hetzner/main.tf (new file)

@@ -0,0 +1,51 @@
provider "hcloud" {}
|
||||
|
||||
module "kubernetes" {
|
||||
source = "./modules/kubernetes-cluster"
|
||||
|
||||
prefix = var.prefix
|
||||
|
||||
zone = var.zone
|
||||
|
||||
machines = var.machines
|
||||
|
||||
ssh_public_keys = var.ssh_public_keys
|
||||
|
||||
ssh_whitelist = var.ssh_whitelist
|
||||
api_server_whitelist = var.api_server_whitelist
|
||||
nodeport_whitelist = var.nodeport_whitelist
|
||||
ingress_whitelist = var.ingress_whitelist
|
||||
}
|
||||
|
||||
#
|
||||
# Generate ansible inventory
|
||||
#
|
||||
|
||||
data "template_file" "inventory" {
|
||||
template = file("${path.module}/templates/inventory.tpl")
|
||||
|
||||
vars = {
|
||||
connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d",
|
||||
keys(module.kubernetes.master_ip_addresses),
|
||||
values(module.kubernetes.master_ip_addresses).*.public_ip,
|
||||
values(module.kubernetes.master_ip_addresses).*.private_ip,
|
||||
range(1, length(module.kubernetes.master_ip_addresses) + 1)))
|
||||
connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s",
|
||||
keys(module.kubernetes.worker_ip_addresses),
|
||||
values(module.kubernetes.worker_ip_addresses).*.public_ip,
|
||||
values(module.kubernetes.worker_ip_addresses).*.private_ip))
|
||||
|
||||
list_master = join("\n", keys(module.kubernetes.master_ip_addresses))
|
||||
list_worker = join("\n", keys(module.kubernetes.worker_ip_addresses))
|
||||
}
|
||||
}
|
||||
|
||||
resource "null_resource" "inventories" {
|
||||
provisioner "local-exec" {
|
||||
command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
|
||||
}
|
||||
|
||||
triggers = {
|
||||
template = data.template_file.inventory.rendered
|
||||
}
|
||||
}
|
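The `local-exec` provisioner rewrites `inventory_file` whenever the rendered template changes (that is what the `triggers` map keys on). The same data is also exposed as outputs, so the addresses can be inspected without touching the file; a usage sketch:

```ShellSession
# Sketch: inspect the addresses behind the generated inventory.
terraform output master_ips
terraform output -json worker_ips
```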
122 additions: contrib/terraform/hetzner/modules/kubernetes-cluster/main.tf (new file)

@@ -0,0 +1,122 @@
resource "hcloud_network" "kubernetes" {
|
||||
name = "${var.prefix}-network"
|
||||
ip_range = var.private_network_cidr
|
||||
}
|
||||
|
||||
resource "hcloud_network_subnet" "kubernetes" {
|
||||
type = "cloud"
|
||||
network_id = hcloud_network.kubernetes.id
|
||||
network_zone = "eu-central"
|
||||
ip_range = var.private_subnet_cidr
|
||||
}
|
||||
|
||||
resource "hcloud_server" "master" {
|
||||
for_each = {
|
||||
for name, machine in var.machines :
|
||||
name => machine
|
||||
if machine.node_type == "master"
|
||||
}
|
||||
|
||||
name = "${var.prefix}-${each.key}"
|
||||
image = each.value.image
|
||||
server_type = each.value.size
|
||||
location = var.zone
|
||||
|
||||
user_data = templatefile(
|
||||
"${path.module}/templates/cloud-init.tmpl",
|
||||
{
|
||||
ssh_public_keys = var.ssh_public_keys
|
||||
}
|
||||
)
|
||||
|
||||
firewall_ids = [hcloud_firewall.master.id]
|
||||
}
|
||||
|
||||
resource "hcloud_server_network" "master" {
|
||||
for_each = hcloud_server.master
|
||||
|
||||
server_id = each.value.id
|
||||
|
||||
subnet_id = hcloud_network_subnet.kubernetes.id
|
||||
}
|
||||
|
||||
resource "hcloud_server" "worker" {
|
||||
for_each = {
|
||||
for name, machine in var.machines :
|
||||
name => machine
|
||||
if machine.node_type == "worker"
|
||||
}
|
||||
|
||||
name = "${var.prefix}-${each.key}"
|
||||
image = each.value.image
|
||||
server_type = each.value.size
|
||||
location = var.zone
|
||||
|
||||
user_data = templatefile(
|
||||
"${path.module}/templates/cloud-init.tmpl",
|
||||
{
|
||||
ssh_public_keys = var.ssh_public_keys
|
||||
}
|
||||
)
|
||||
|
||||
firewall_ids = [hcloud_firewall.worker.id]
|
||||
|
||||
}
|
||||
|
||||
resource "hcloud_server_network" "worker" {
|
||||
for_each = hcloud_server.worker
|
||||
|
||||
server_id = each.value.id
|
||||
|
||||
subnet_id = hcloud_network_subnet.kubernetes.id
|
||||
}
|
||||
|
||||
resource "hcloud_firewall" "master" {
|
||||
name = "${var.prefix}-master-firewall"
|
||||
|
||||
rule {
|
||||
direction = "in"
|
||||
protocol = "tcp"
|
||||
port = "22"
|
||||
source_ips = var.ssh_whitelist
|
||||
}
|
||||
|
||||
rule {
|
||||
direction = "in"
|
||||
protocol = "tcp"
|
||||
port = "6443"
|
||||
source_ips = var.api_server_whitelist
|
||||
}
|
||||
}
|
||||
|
||||
resource "hcloud_firewall" "worker" {
|
||||
name = "${var.prefix}-worker-firewall"
|
||||
|
||||
rule {
|
||||
direction = "in"
|
||||
protocol = "tcp"
|
||||
port = "22"
|
||||
source_ips = var.ssh_whitelist
|
||||
}
|
||||
|
||||
rule {
|
||||
direction = "in"
|
||||
protocol = "tcp"
|
||||
port = "80"
|
||||
source_ips = var.ingress_whitelist
|
||||
}
|
||||
|
||||
rule {
|
||||
direction = "in"
|
||||
protocol = "tcp"
|
||||
port = "443"
|
||||
source_ips = var.ingress_whitelist
|
||||
}
|
||||
|
||||
rule {
|
||||
direction = "in"
|
||||
protocol = "tcp"
|
||||
port = "30000-32767"
|
||||
source_ips = var.nodeport_whitelist
|
||||
}
|
||||
}
|
@@ -0,0 +1,23 @@
output "master_ip_addresses" {
  value = {
    for key, instance in hcloud_server.master :
    instance.name => {
      "private_ip" = hcloud_server_network.master[key].ip
      "public_ip"  = hcloud_server.master[key].ipv4_address
    }
  }
}

output "worker_ip_addresses" {
  value = {
    for key, instance in hcloud_server.worker :
    instance.name => {
      "private_ip" = hcloud_server_network.worker[key].ip
      "public_ip"  = hcloud_server.worker[key].ipv4_address
    }
  }
}

output "cluster_private_network_cidr" {
  value = var.private_subnet_cidr
}
@@ -0,0 +1,17 @@
#cloud-config

users:
  - default
  - name: ubuntu
    shell: /bin/bash
    sudo: "ALL=(ALL) NOPASSWD:ALL"
    ssh_authorized_keys:
%{ for ssh_public_key in ssh_public_keys ~}
      - ${ssh_public_key}
%{ endfor ~}

ssh_authorized_keys:
%{ for ssh_public_key in ssh_public_keys ~}
  - ${ssh_public_key}
%{ endfor ~}
@@ -0,0 +1,41 @@
variable "zone" {
  type = string
}

variable "prefix" {}

variable "machines" {
  type = map(object({
    node_type = string
    size      = string
    image     = string
  }))
}

variable "ssh_public_keys" {
  type = list(string)
}

variable "ssh_whitelist" {
  type = list(string)
}

variable "api_server_whitelist" {
  type = list(string)
}

variable "nodeport_whitelist" {
  type = list(string)
}

variable "ingress_whitelist" {
  type = list(string)
}

variable "private_network_cidr" {
  default = "10.0.0.0/16"
}

variable "private_subnet_cidr" {
  default = "10.0.10.0/24"
}
@@ -0,0 +1,9 @@
terraform {
  required_providers {
    hcloud = {
      source  = "hetznercloud/hcloud"
      version = "1.31.1"
    }
  }
  required_version = ">= 0.14"
}
7 additions: contrib/terraform/hetzner/output.tf (new file)

@@ -0,0 +1,7 @@
output "master_ips" {
|
||||
value = module.kubernetes.master_ip_addresses
|
||||
}
|
||||
|
||||
output "worker_ips" {
|
||||
value = module.kubernetes.worker_ip_addresses
|
||||
}
|
16 additions: contrib/terraform/hetzner/templates/inventory.tpl (new file)

@@ -0,0 +1,16 @@
[all]
${connection_strings_master}
${connection_strings_worker}

[kube-master]
${list_master}

[etcd]
${list_master}

[kube-node]
${list_worker}

[k8s-cluster:children]
kube-master
kube-node
46 additions: contrib/terraform/hetzner/variables.tf (new file)

@@ -0,0 +1,46 @@
variable "zone" {
|
||||
description = "The zone where to run the cluster"
|
||||
}
|
||||
|
||||
variable "prefix" {
|
||||
description = "Prefix for resource names"
|
||||
default = "default"
|
||||
}
|
||||
|
||||
variable "machines" {
|
||||
description = "Cluster machines"
|
||||
type = map(object({
|
||||
node_type = string
|
||||
size = string
|
||||
image = string
|
||||
}))
|
||||
}
|
||||
|
||||
variable "ssh_public_keys" {
|
||||
description = "Public SSH key which are injected into the VMs."
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "ssh_whitelist" {
|
||||
description = "List of IP ranges (CIDR) to whitelist for ssh"
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "api_server_whitelist" {
|
||||
description = "List of IP ranges (CIDR) to whitelist for kubernetes api server"
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "nodeport_whitelist" {
|
||||
description = "List of IP ranges (CIDR) to whitelist for kubernetes nodeports"
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "ingress_whitelist" {
|
||||
description = "List of IP ranges (CIDR) to whitelist for HTTP"
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "inventory_file" {
|
||||
description = "Where to store the generated inventory file"
|
||||
}
|
15 additions: contrib/terraform/hetzner/versions.tf (new file)

@@ -0,0 +1,15 @@
terraform {
  required_providers {
    hcloud = {
      source  = "hetznercloud/hcloud"
      version = "1.31.1"
    }
    null = {
      source = "hashicorp/null"
    }
    template = {
      source = "hashicorp/template"
    }
  }
  required_version = ">= 0.14"
}
@@ -251,6 +251,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|`dns_nameservers`| An array of DNS name server names to be used by hosts in the internal subnet. |
|`floatingip_pool` | Name of the pool from which floating IPs will be allocated |
|`k8s_master_fips` | A list of floating IPs that you have already pre-allocated; they will be attached to master nodes instead of creating new random floating IPs. |
+|`bastion_fips` | A list of floating IPs that you have already pre-allocated; they will be attached to the bastion node instead of creating new random floating IPs. |
|`external_net` | UUID of the external network that will be routed to |
|`flavor_k8s_master`,`flavor_k8s_node`,`flavor_etcd`, `flavor_bastion`,`flavor_gfs_node` | Flavor depends on your openstack installation, you can get available flavor IDs through `openstack flavor list` |
|`image`,`image_gfs` | Name of the image to use in provisioning the compute resources. Should already be loaded into glance. |
@@ -274,10 +275,13 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|`node_root_volume_size_in_gb` | Size of the root volume for nodes, 0 to use ephemeral storage |
|`master_root_volume_size_in_gb` | Size of the root volume for masters, 0 to use ephemeral storage |
|`master_volume_type` | Volume type of the root volume for control_plane, 'Default' by default |
+|`node_volume_type` | Volume type of the root volume for nodes, 'Default' by default |
|`gfs_root_volume_size_in_gb` | Size of the root volume for gluster, 0 to use ephemeral storage |
|`etcd_root_volume_size_in_gb` | Size of the root volume for etcd nodes, 0 to use ephemeral storage |
|`bastion_root_volume_size_in_gb` | Size of the root volume for bastions, 0 to use ephemeral storage |
-|`use_server_group` | Create and use openstack nova servergroups, default: false |
+|`master_server_group_policy` | Enable and use openstack nova servergroups for masters with set policy, default: "" (disabled) |
+|`node_server_group_policy` | Enable and use openstack nova servergroups for nodes with set policy, default: "" (disabled) |
+|`etcd_server_group_policy` | Enable and use openstack nova servergroups for etcd with set policy, default: "" (disabled) |
|`use_access_ip` | If 1, nodes with floating IPs will transmit internal cluster traffic via floating IPs; if 0 private IPs will be used instead. Default value is 1. |
|`k8s_nodes` | Map containing worker node definition, see explanation below |
@ -24,6 +24,7 @@ module "ips" {
|
||||
router_id = module.network.router_id
|
||||
k8s_nodes = var.k8s_nodes
|
||||
k8s_master_fips = var.k8s_master_fips
|
||||
bastion_fips = var.bastion_fips
|
||||
router_internal_port_id = module.network.router_internal_port_id
|
||||
}
|
||||
|
||||
@ -50,6 +51,7 @@ module "compute" {
|
||||
gfs_root_volume_size_in_gb = var.gfs_root_volume_size_in_gb
|
||||
gfs_volume_size_in_gb = var.gfs_volume_size_in_gb
|
||||
master_volume_type = var.master_volume_type
|
||||
node_volume_type = var.node_volume_type
|
||||
public_key_path = var.public_key_path
|
||||
image = var.image
|
||||
image_uuid = var.image_uuid
|
||||
@ -80,7 +82,9 @@ module "compute" {
|
||||
worker_allowed_ports = var.worker_allowed_ports
|
||||
wait_for_floatingip = var.wait_for_floatingip
|
||||
use_access_ip = var.use_access_ip
|
||||
use_server_groups = var.use_server_groups
|
||||
master_server_group_policy = var.master_server_group_policy
|
||||
node_server_group_policy = var.node_server_group_policy
|
||||
etcd_server_group_policy = var.etcd_server_group_policy
|
||||
extra_sec_groups = var.extra_sec_groups
|
||||
extra_sec_groups_name = var.extra_sec_groups_name
|
||||
group_vars_path = var.group_vars_path
|
||||
|
@@ -130,21 +130,21 @@ resource "openstack_networking_secgroup_rule_v2" "worker" {
}

resource "openstack_compute_servergroup_v2" "k8s_master" {
-  count    = "%{if var.use_server_groups}1%{else}0%{endif}"
+  count    = var.master_server_group_policy != "" ? 1 : 0
  name     = "k8s-master-srvgrp"
-  policies = ["anti-affinity"]
+  policies = [var.master_server_group_policy]
}

resource "openstack_compute_servergroup_v2" "k8s_node" {
-  count    = "%{if var.use_server_groups}1%{else}0%{endif}"
+  count    = var.node_server_group_policy != "" ? 1 : 0
  name     = "k8s-node-srvgrp"
-  policies = ["anti-affinity"]
+  policies = [var.node_server_group_policy]
}

resource "openstack_compute_servergroup_v2" "k8s_etcd" {
-  count    = "%{if var.use_server_groups}1%{else}0%{endif}"
+  count    = var.etcd_server_group_policy != "" ? 1 : 0
  name     = "k8s-etcd-srvgrp"
-  policies = ["anti-affinity"]
+  policies = [var.etcd_server_group_policy]
}

locals {
@@ -237,7 +237,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
  security_groups = local.master_sec_groups

  dynamic "scheduler_hints" {
-    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
    content {
      group = openstack_compute_servergroup_v2.k8s_master[0].id
    }
@@ -284,7 +284,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
  security_groups = local.master_sec_groups

  dynamic "scheduler_hints" {
-    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
    content {
      group = openstack_compute_servergroup_v2.k8s_master[0].id
    }
@@ -329,7 +329,7 @@ resource "openstack_compute_instance_v2" "etcd" {
  security_groups = [openstack_networking_secgroup_v2.k8s.name]

  dynamic "scheduler_hints" {
-    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
+    for_each = var.etcd_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
    content {
      group = openstack_compute_servergroup_v2.k8s_etcd[0].id
    }
@@ -371,7 +371,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
  security_groups = local.master_sec_groups

  dynamic "scheduler_hints" {
-    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
    content {
      group = openstack_compute_servergroup_v2.k8s_master[0].id
    }
@@ -413,7 +413,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
  security_groups = local.master_sec_groups

  dynamic "scheduler_hints" {
-    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
    content {
      group = openstack_compute_servergroup_v2.k8s_master[0].id
    }
@@ -441,6 +441,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
    uuid                  = local.image_to_use_node
    source_type           = "image"
    volume_size           = var.node_root_volume_size_in_gb
+    volume_type           = var.node_volume_type
    boot_index            = 0
    destination_type      = "volume"
    delete_on_termination = true
@@ -454,7 +455,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
  security_groups = local.worker_sec_groups

  dynamic "scheduler_hints" {
-    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
+    for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
    content {
      group = openstack_compute_servergroup_v2.k8s_node[0].id
    }
@@ -486,6 +487,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
    uuid                  = local.image_to_use_node
    source_type           = "image"
    volume_size           = var.node_root_volume_size_in_gb
+    volume_type           = var.node_volume_type
    boot_index            = 0
    destination_type      = "volume"
    delete_on_termination = true
@@ -499,7 +501,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
  security_groups = local.worker_sec_groups

  dynamic "scheduler_hints" {
-    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
+    for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
    content {
      group = openstack_compute_servergroup_v2.k8s_node[0].id
    }
@@ -527,6 +529,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
    uuid                  = local.image_to_use_node
    source_type           = "image"
    volume_size           = var.node_root_volume_size_in_gb
+    volume_type           = var.node_volume_type
    boot_index            = 0
    destination_type      = "volume"
    delete_on_termination = true
@@ -540,7 +543,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
  security_groups = local.worker_sec_groups

  dynamic "scheduler_hints" {
-    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
+    for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
    content {
      group = openstack_compute_servergroup_v2.k8s_node[0].id
    }
@@ -585,7 +588,7 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
  security_groups = [openstack_networking_secgroup_v2.k8s.name]

  dynamic "scheduler_hints" {
-    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
+    for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
    content {
      group = openstack_compute_servergroup_v2.k8s_node[0].id
    }
@ -40,6 +40,8 @@ variable "gfs_volume_size_in_gb" {}
|
||||
|
||||
variable "master_volume_type" {}
|
||||
|
||||
variable "node_volume_type" {}
|
||||
|
||||
variable "public_key_path" {}
|
||||
|
||||
variable "image" {}
|
||||
@ -124,8 +126,16 @@ variable "worker_allowed_ports" {
|
||||
|
||||
variable "use_access_ip" {}
|
||||
|
||||
variable "use_server_groups" {
|
||||
type = bool
|
||||
variable "master_server_group_policy" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "node_server_group_policy" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "etcd_server_group_policy" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "extra_sec_groups" {
|
||||
|
@@ -28,7 +28,7 @@ resource "openstack_networking_floatingip_v2" "k8s_node" {
}

resource "openstack_networking_floatingip_v2" "bastion" {
  count = var.number_of_bastions
  count = length(var.bastion_fips) > 0 ? 0 : var.number_of_bastions
  pool = var.floatingip_pool
  depends_on = [null_resource.dummy_dependency]
}

@@ -17,5 +17,5 @@ output "k8s_nodes_fips" {
}

output "bastion_fips" {
  value = openstack_networking_floatingip_v2.bastion[*].address
  value = length(var.bastion_fips) > 0 ? var.bastion_fips : openstack_networking_floatingip_v2.bastion[*].address
}

@@ -20,4 +20,6 @@ variable "k8s_nodes" {}

variable "k8s_master_fips" {}

variable "bastion_fips" {}

variable "router_internal_port_id" {}
@@ -78,6 +78,10 @@ variable "master_volume_type" {
  default = "Default"
}

variable "node_volume_type" {
  default = "Default"
}

variable "public_key_path" {
  description = "The path of the ssh pub key"
  default = "~/.ssh/id_rsa.pub"
@@ -162,6 +166,12 @@ variable "k8s_master_fips" {
  default = []
}

variable "bastion_fips" {
  description = "specific pre-existing floating IPs to use for bastion node"
  type = list(string)
  default = []
}

variable "floatingip_pool" {
  description = "name of the floating ip pool to use"
  default = "external"
@@ -233,8 +243,19 @@ variable "use_access_ip" {
  default = 1
}

variable "use_server_groups" {
  default = false
variable "master_server_group_policy" {
  description = "desired server group policy, e.g. anti-affinity"
  default = ""
}

variable "node_server_group_policy" {
  description = "desired server group policy, e.g. anti-affinity"
  default = ""
}

variable "etcd_server_group_policy" {
  description = "desired server group policy, e.g. anti-affinity"
  default = ""
}

variable "router_id" {
@@ -1,6 +1,6 @@
# Kubernetes on Exoscale with Terraform
# Kubernetes on vSphere with Terraform

Provision a Kubernetes cluster on [vSphere](https://www.vmware.com/se/products/vsphere.html) using Terraform and Kubespray.
Provision a Kubernetes cluster on [vSphere](https://www.vmware.com/products/vsphere.html) using Terraform and Kubespray.

## Overview

@@ -98,20 +98,32 @@ ansible-playbook -i inventory.ini ../../cluster.yml -b -v

* `machines`: Machines to provision. Key of this object will be used as the name of the machine
  * `node_type`: The role of this node *(master|worker)*
  * `ip`: The IP address with the netmask (CIDR notation)
  * `ip`: The IP address of the machine
  * `netmask`: The netmask to use (to be used on the right-hand side in CIDR notation, e.g., `24`)
* `network`: The name of the network to attach the machines to
* `gateway`: The IP address of the network gateway
* `ssh_public_keys`: List of public SSH keys to install on all machines
* `vsphere_datacenter`: The identifier of vSphere data center
* `vsphere_compute_cluster`: The identifier of vSphere compute cluster
* `vsphere_datastore`: The identifier of vSphere data store
* `vsphere_server`: The address of vSphere server
* `vsphere_hostname`: The IP address of vSphere hostname
* `template_name`: The name of a base image (the image has to be uploaded to vSphere beforehand)
* `ssh_public_keys`: List of public SSH keys to install on all machines
* `template_name`: The name of a base image (the OVF template must be defined in vSphere beforehand)

### Optional

* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project *(Defaults to `default`)*
* `dns_primary`: The IP address of primary DNS server *(Defaults to `8.8.4.4`)*
* `dns_secondary`: The IP address of secondary DNS server *(Defaults to `8.8.8.8`)*
* `folder`: Name of the folder to put all machines in (default: `""`)
* `prefix`: Prefix to use for all resources, required to be unique for all clusters in the same project (default: `"k8s"`)
* `inventory_file`: Name of the generated inventory file for Kubespray to use in the Ansible step (default: `inventory.ini`)
* `dns_primary`: The IP address of primary DNS server (default: `8.8.4.4`)
* `dns_secondary`: The IP address of secondary DNS server (default: `8.8.8.8`)
* `firmware`: Firmware to use (default: `bios`)
* `hardware_version`: The version of the hardware (default: `15`)
* `master_cores`: The number of CPU cores for the master nodes (default: 4)
* `master_memory`: The amount of RAM for the master nodes in MB (default: 4096)
* `master_disk_size`: The amount of disk space for the master nodes in GB (default: 20)
* `worker_cores`: The number of CPU cores for the worker nodes (default: 16)
* `worker_memory`: The amount of RAM for the worker nodes in MB (default: 8192)
* `worker_disk_size`: The amount of disk space for the worker nodes in GB (default: 100)

An example variables file can be found in `default.tfvars`.
@@ -1,23 +1,28 @@
prefix = "default"
prefix = "k8s"

inventory_file = "inventory.ini"

network = "VM Network"

machines = {
  "master-0" : {
    "node_type" : "master",
    "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24
    "ip" : "i-did-not-read-the-docs", # e.g. 192.168.0.10
    "netmask" : "24"
  },
  "worker-0" : {
    "node_type" : "worker",
    "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24
    "ip" : "i-did-not-read-the-docs", # e.g. 192.168.0.20
    "netmask" : "24"
  },
  "worker-1" : {
    "node_type" : "worker",
    "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24
    "ip" : "i-did-not-read-the-docs", # e.g. 192.168.0.21
    "netmask" : "24"
  }
}

gateway = "i-did-not-read-the-docs" # e.g. 192.168.0.2
gateway = "i-did-not-read-the-docs" # e.g. 192.168.0.1

ssh_public_keys = [
  # Put your public SSH key here
@@ -19,7 +19,7 @@ data "vsphere_datastore" "datastore" {
}

data "vsphere_network" "network" {
  name = "VM Network"
  name = var.network
  datacenter_id = data.vsphere_datacenter.dc.id
}

@@ -69,7 +69,7 @@ module "kubernetes" {
  pool_id = vsphere_resource_pool.pool.id
  datastore_id = data.vsphere_datastore.datastore.id

  folder = ""
  folder = var.folder
  guest_id = data.vsphere_virtual_machine.template.guest_id
  scsi_type = data.vsphere_virtual_machine.template.scsi_type
  network_id = data.vsphere_network.network.id

@@ -5,7 +5,8 @@ resource "vsphere_virtual_machine" "worker" {
    if machine.node_type == "worker"
  }

  name = each.key
  name = "${var.prefix}-${each.key}"

  resource_pool_id = var.pool_id
  datastore_id = var.datastore_id

@@ -13,13 +14,14 @@ resource "vsphere_virtual_machine" "worker" {
  memory = var.worker_memory
  memory_reservation = var.worker_memory
  guest_id = var.guest_id
  enable_disk_uuid = "true"
  enable_disk_uuid = "true" # needed for CSI provider
  scsi_type = var.scsi_type
  folder = var.folder
  firmware = var.firmware
  hardware_version = var.hardware_version

  wait_for_guest_net_routable = false
  wait_for_guest_net_timeout = 0

  network_interface {
    network_id = var.network_id
@@ -47,6 +49,7 @@ resource "vsphere_virtual_machine" "worker" {
  vapp {
    properties = {
      "user-data" = base64encode(templatefile("${path.module}/templates/cloud-init.tmpl", { ip = each.value.ip,
        netmask = each.value.netmask,
        gw = var.gateway,
        dns = var.dns_primary,
        ssh_public_keys = var.ssh_public_keys}))
@@ -61,7 +64,8 @@ resource "vsphere_virtual_machine" "master" {
    if machine.node_type == "master"
  }

  name = each.key
  name = "${var.prefix}-${each.key}"

  resource_pool_id = var.pool_id
  datastore_id = var.datastore_id

@@ -69,12 +73,15 @@ resource "vsphere_virtual_machine" "master" {
  memory = var.master_memory
  memory_reservation = var.master_memory
  guest_id = var.guest_id
  enable_disk_uuid = "true"
  enable_disk_uuid = "true" # needed for CSI provider
  scsi_type = var.scsi_type
  folder = var.folder
  firmware = var.firmware
  hardware_version = var.hardware_version

  wait_for_guest_net_routable = false
  wait_for_guest_net_timeout = 0

  network_interface {
    network_id = var.network_id
    adapter_type = var.adapter_type
@@ -101,6 +108,7 @@ resource "vsphere_virtual_machine" "master" {
  vapp {
    properties = {
      "user-data" = base64encode(templatefile("${path.module}/templates/cloud-init.tmpl", { ip = each.value.ip,
        netmask = each.value.netmask,
        gw = var.gateway,
        dns = var.dns_primary,
        ssh_public_keys = var.ssh_public_keys}))
@@ -1,13 +1,16 @@
output "master_ip" {
  value = {
    for instance in vsphere_virtual_machine.master :
    instance.name => instance.default_ip_address
    for name, machine in var.machines :
    name => machine.ip
    if machine.node_type == "master"
  }
}

output "worker_ip" {
  value = {
    for instance in vsphere_virtual_machine.worker :
    instance.name => instance.default_ip_address
    for name, machine in var.machines :
    name => machine.ip
    if machine.node_type == "worker"
  }
}

@@ -25,7 +25,7 @@ write_files:
      ens192:
        dhcp4: false #true to use dhcp
        addresses:
        - ${ip}
        - ${ip}/${netmask}
        gateway4: ${gw} # Set gw here
        nameservers:
          addresses:
@@ -5,7 +5,8 @@ variable "machines" {
  description = "Cluster machines"
  type = map(object({
    node_type = string
    ip = string
    ip      = string
    netmask = string
  }))
}

@@ -23,7 +23,7 @@ output "vsphere_network" {
}

output "vsphere_folder" {
  value = terraform.workspace
  value = var.folder
}

output "vsphere_pool" {
@@ -1,35 +1,20 @@
## Global ##

variable "prefix" {
  default = ""
}
# Required variables

variable "machines" {
  description = "Cluster machines"
  type = map(object({
    node_type = string
    ip = string
    netmask = string
  }))
}

variable "inventory_file" {
  default = "inventory.ini"
}

variable "network" {
  default = "VM Network"
}
variable "network" {}

variable "gateway" {}

variable "dns_primary" {
  default = "8.8.4.4"
}

variable "dns_secondary" {
  default = "8.8.8.8"
}

variable "vsphere_datacenter" {}

variable "vsphere_compute_cluster" {}
@@ -44,6 +29,35 @@ variable "vsphere_server" {}

variable "vsphere_hostname" {}

variable "ssh_public_keys" {
  description = "List of public SSH keys which are injected into the VMs."
  type = list(string)
}

variable "template_name" {}

# Optional variables (ones where reasonable defaults exist)

variable "folder" {
  default = ""
}

variable "prefix" {
  default = "k8s"
}

variable "inventory_file" {
  default = "inventory.ini"
}

variable "dns_primary" {
  default = "8.8.4.4"
}

variable "dns_secondary" {
  default = "8.8.8.8"
}

variable "firmware" {
  default = "bios"
}
@@ -52,15 +66,6 @@ variable "hardware_version" {
  default = "15"
}

variable "template_name" {
  default = "ubuntu-focal-20.04-cloudimg"
}

variable "ssh_public_keys" {
  description = "List of public SSH keys which are injected into the VMs."
  type = list(string)
}

## Master ##

variable "master_cores" {
@@ -17,10 +17,8 @@
  * [Kube OVN](docs/kube-ovn.md)
  * [Weave](docs/weave.md)
  * [Multus](docs/multus.md)
  * [OVN4NFV](docs/ovn4nfv.md)
* Ingress
  * [ALB Ingress](docs/ingress_controller/alb_ingress_controller.md)
  * [Ambassador](docs/ingress_controller/ambassador.md)
  * [MetalLB](docs/metallb.md)
  * [Nginx Ingress](docs/ingress_controller/ingress_nginx.md)
* [Cloud providers](docs/cloud.md)
@@ -35,7 +33,7 @@
  * [Fedora CoreOS](docs/fcos.md)
  * [OpenSUSE](docs/opensuse.md)
  * [RedHat Enterprise Linux](docs/rhel.md)
  * [CentOS/OracleLinux/AlmaLinux](docs/centos8.md)
  * [CentOS/OracleLinux/AlmaLinux/Rocky Linux](docs/centos8.md)
  * [Amazon Linux 2](docs/amazonlinux.md)
* CRI
  * [Containerd](docs/containerd.md)
|
||||
|
||||
There are also two special groups:
|
||||
|
||||
* **calico_rr** : explained for [advanced Calico networking cases](calico.md)
|
||||
* **calico_rr** : explained for [advanced Calico networking cases](/docs/calico.md)
|
||||
* **bastion** : configure a bastion host if your nodes are not directly reachable
|
||||
|
||||
Below is a complete inventory example:
|
||||
@ -100,7 +100,6 @@ The following tags are defined in playbooks:
|
||||
|
||||
| Tag name | Used for
|
||||
|--------------------------------|---------
|
||||
| ambassador | Ambassador Ingress Controller
|
||||
| annotate | Create kube-router annotation
|
||||
| apps | K8s apps definitions
|
||||
| asserts | Check tasks for download role
|
||||
@ -180,7 +179,6 @@ The following tags are defined in playbooks:
|
||||
| node-webhook | Tasks linked to webhook (grating access to resources)
|
||||
| nvidia_gpu | Enable nvidia accelerator for runtimes
|
||||
| oci | Cloud provider: oci
|
||||
| ovn4nfv | Network plugin ovn4nfv
|
||||
| persistent_volumes | Configure csi volumes
|
||||
| persistent_volumes_aws_ebs_csi | Configuring csi driver: aws-ebs
|
||||
| persistent_volumes_cinder_csi | Configuring csi driver: cinder
|
||||
@ -252,7 +250,7 @@ For more information about Ansible and bastion hosts, read
|
||||
|
||||
## Mitogen
|
||||
|
||||
You can use [mitogen](mitogen.md) to speed up kubespray.
|
||||
Mitogen support is deprecated, please see [mitogen related docs](/docs/mitogen.md) for useage and reasons for deprecation.
|
||||
|
||||
## Beyond ansible 2.9
|
||||
|
||||
|
@ -35,7 +35,7 @@ Variables are listed with their default values, if applicable.
|
||||
* `coreos_locksmithd_disable: false`
|
||||
Whether `locksmithd` (responsible for rolling restarts) should be disabled or be left alone.
|
||||
|
||||
#### CentOS/RHEL/AlmaLinux
|
||||
#### CentOS/RHEL/AlmaLinux/Rocky Linux
|
||||
|
||||
* `centos_fastestmirror_enabled: false`
|
||||
Whether the [fastestmirror](https://wiki.centos.org/PackageManagement/Yum/FastestMirror) yum plugin should be enabled.
|
||||
|
@@ -189,7 +189,7 @@ To re-define default action please set the following variable in your inventory:
calico_endpoint_to_host_action: "ACCEPT"
```

## Optional : Define address on which Felix will respond to health requests
### Optional : Define address on which Felix will respond to health requests

Since Calico 3.2.0, HealthCheck default behavior changed from listening on all interfaces to just listening on localhost.

@@ -199,6 +199,15 @@ To re-define health host please set the following variable in your inventory:
calico_healthhost: "0.0.0.0"
```

### Optional : Configure Calico Node probe timeouts

Under certain conditions a deployer may need to tune the Calico liveness and readiness probe timeout settings. These can be configured like this:

```yml
calico_node_livenessprobe_timeout: 10
calico_node_readinessprobe_timeout: 10
```

## Config encapsulation for cross server traffic

Calico supports two types of encapsulation: [VXLAN and IP in IP](https://docs.projectcalico.org/v3.11/networking/vxlan-ipip). VXLAN is supported in some environments where IP in IP is not (for example, Azure).
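For reference, a minimal sketch of switching the encapsulation in the inventory, assuming the Kubespray variables `calico_ipip_mode` and `calico_vxlan_mode` are available in your version:

```yaml
# Illustrative only: prefer VXLAN over IP in IP for cross-server traffic
# (useful on clouds such as Azure where IP in IP is not supported).
calico_ipip_mode: "Never"
calico_vxlan_mode: "Always"
```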
@@ -286,9 +295,10 @@ To enable the eBPF dataplane support ensure you add the following to your inventory:

```yaml
calico_bpf_enabled: true
kube_proxy_remove: true
```

**NOTE:** there is a known incompatibility in using the `kernel-kvm` kernel package on Ubuntu OSes because it is missing support for `CONFIG_NET_SCHED`, which is a requirement for Calico eBPF support. When using Calico eBPF with Ubuntu ensure you run the `-generic` kernel.

### Cleaning up after kube-proxy

Calico node cannot clean up after kube-proxy has run in ipvs mode. If you are converting an existing cluster to eBPF you will need to ensure the `kube-proxy` DaemonSet is deleted and that ipvs rules are cleaned.

@@ -309,7 +319,7 @@ To clean up any ipvs leftovers:

Calico node, typha and kube-controllers need to be able to talk to the kubernetes API. Please reference the [Enabling eBPF Calico Docs](https://docs.projectcalico.org/maintenance/ebpf/enabling-bpf) for guidelines on how to do this.

Kubespray sets up the `kubernetes-services-endpoint` configmap based on the contents of the `loadbalancer_apiserver` inventory variable documented in [HA Mode](./ha-mode.md).
Kubespray sets up the `kubernetes-services-endpoint` configmap based on the contents of the `loadbalancer_apiserver` inventory variable documented in [HA Mode](/docs/ha-mode.md).

If no external loadbalancer is used, Calico eBPF can also use the localhost loadbalancer option. In this case Calico Automatic Host Endpoints need to be enabled to allow services like `coredns` and `metrics-server` to communicate with the kubernetes host endpoint. See [this blog post](https://www.projectcalico.org/securing-kubernetes-nodes-with-calico-automatic-host-endpoints/) on enabling automatic host endpoints.

@@ -358,6 +368,7 @@ The following OSes will require enabling the EPEL repo in order to bring in wireguard:

* CentOS 7 & 8
* AlmaLinux 8
* Rocky Linux 8
* Amazon Linux 2

```yaml
@@ -2,8 +2,10 @@
# peers:
#   - router_id: "10.99.0.34"
#     as: "65xxx"
#     sourceaddress: "None"
#   - router_id: "10.99.0.35"
#     as: "65xxx"
#     sourceaddress: "None"

# loadbalancer_apiserver:
#   address: "10.99.0.44"

@@ -2,8 +2,10 @@
# peers:
#   - router_id: "10.99.0.2"
#     as: "65xxx"
#     sourceaddress: "None"
#   - router_id: "10.99.0.3"
#     as: "65xxx"
#     sourceaddress: "None"

# loadbalancer_apiserver:
#   address: "10.99.0.21"

@@ -1,6 +1,6 @@
# CentOS 8 and derivatives

CentOS 8 / Oracle Linux 8 / AlmaLinux 8 ship only with iptables-nft (i.e. without iptables-legacy, similar to RHEL8)
CentOS 8 / Oracle Linux 8 / AlmaLinux 8 / Rocky Linux 8 ship only with iptables-nft (i.e. without iptables-legacy, similar to RHEL8)
The only tested configuration for now is using Calico CNI.
You need to add `calico_iptables_backend: "NFT"` or `calico_iptables_backend: "Auto"` to your configuration.
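For example, setting it in your group vars might look like this (the variable name and value are taken directly from the sentence above):

```yaml
calico_iptables_backend: "NFT"
```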
@@ -11,29 +11,13 @@

Cert-Manager is a native Kubernetes certificate management controller. It can help with issuing certificates from a variety of sources, such as Let's Encrypt, HashiCorp Vault, Venafi, a simple signing key pair, or self-signed. It will ensure certificates are valid and up to date, and attempt to renew certificates at a configured time before expiry.

The Kubespray out-of-the-box cert-manager deployment uses a TLS Root CA certificate and key stored as the Kubernetes `ca-key-pair` secret consisting of `tls.crt` and `tls.key`, which are the base64 encoded values of the TLS Root CA certificate and key respectively.

Integration with other PKI/Certificate management solutions, such as HashiCorp Vault, will require some further development changes to the current cert-manager deployment and may be introduced in the future.

## Kubernetes TLS Root CA Certificate/Key Secret

If you're planning to secure your ingress resources using TLS client certificates, you'll need to create and deploy the Kubernetes `ca-key-pair` secret consisting of the Root CA certificate and key to your K8s cluster.

If these are already available, simply update `templates\secret-cert-manager.yml.j2` with the base64 encoded values of your TLS Root CA certificate and key prior to enabling and deploying cert-manager.

e.g.

```shell
$ cat ca.pem | base64 -w 0
LS0tLS1CRUdJTiBDRVJU...

$ cat ca-key.pem | base64 -w 0
LS0tLS1CRUdJTiBSU0Eg...
```

For further information, read the official [Cert-Manager CA Configuration](https://cert-manager.io/docs/configuration/ca/) doc.

Once the base64 encoded values have been added to `templates\secret-cert-manager.yml.j2`, cert-manager can now be enabled by editing your K8s cluster addons inventory e.g. `inventory\sample\group_vars\k8s_cluster\addons.yml` and setting `cert_manager_enabled` to true.
`cert-manager` can now be enabled by editing your K8s cluster addons inventory e.g. `inventory\sample\group_vars\k8s_cluster\addons.yml` and setting `cert_manager_enabled` to true.

```ini
# Cert manager deployment
@@ -86,7 +70,37 @@ spec:

Once deployed to your K8s cluster, every 3 months cert-manager will automatically rotate the Prometheus `prometheus.example.com` TLS client certificate and key, and store these as the Kubernetes `prometheus-dashboard-certs` secret.

For further information, read the official [Cert-Manager Ingress](https://cert-manager.io/docs/usage/ingress/) doc.
Please consult the official upstream documentation:

- [cert-manager Ingress Usage](https://cert-manager.io/v1.5-docs/usage/ingress/)
- [cert-manager Ingress Tutorial](https://cert-manager.io/v1.5-docs/tutorials/acme/ingress/#step-3-assign-a-dns-name)

### ACME

The ACME Issuer type represents a single account registered with the Automated Certificate Management Environment (ACME) Certificate Authority server. When you create a new ACME Issuer, cert-manager will generate a private key which is used to identify you with the ACME server.

Certificates issued by public ACME servers are typically trusted by clients' computers by default. This means that, for example, visiting a website that is backed by an ACME certificate issued for that URL will be trusted by default by most clients' web browsers. ACME certificates are typically free.

- [ACME Configuration](https://cert-manager.io/v1.5-docs/configuration/acme/)
- [ACME HTTP Validation](https://cert-manager.io/v1.5-docs/tutorials/acme/http-validation/)
- [HTTP01 Challenges](https://cert-manager.io/v1.5-docs/configuration/acme/http01/)
- [ACME DNS Validation](https://cert-manager.io/v1.5-docs/tutorials/acme/dns-validation/)
- [DNS01 Challenges](https://cert-manager.io/v1.5-docs/configuration/acme/dns01/)
- [ACME FAQ](https://cert-manager.io/v1.5-docs/faq/acme/)

#### ACME With An Internal Certificate Authority

The ACME Issuer with an internal certificate authority requires cert-manager to trust the certificate authority. This trust must be done at the cert-manager deployment level.
To add a trusted certificate authority to cert-manager, add its certificate to `group_vars/k8s-cluster/addons.yml`:

```yaml
cert_manager_trusted_internal_ca: |
  -----BEGIN CERTIFICATE-----
  [REPLACE with your CA certificate]
  -----END CERTIFICATE-----
```

Once the CA is trusted, you can define your issuer normally.
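As a hedged illustration (not Kubespray-specific), a minimal `ClusterIssuer` for an internal ACME CA might look like the sketch below; the issuer name, server URL, e-mail address, and secret name are all hypothetical placeholders:

```yaml
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: internal-acme                                      # hypothetical issuer name
spec:
  acme:
    server: https://acme.internal.example.com/directory    # hypothetical internal ACME endpoint
    email: ops@example.com                                 # hypothetical contact address
    privateKeySecretRef:
      name: internal-acme-account-key                      # secret holding the generated account key
    solvers:
      - http01:
          ingress:
            class: nginx
```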
### Create New TLS Root CA Certificate and Key
@@ -18,3 +18,10 @@ Kubespray has 3 types of GitLab runners:

## Vagrant

Vagrant jobs are using the [quay.io/kubespray/vagrant](/test-infra/vagrant-docker/Dockerfile) docker image with `/var/run/libvirt/libvirt-sock` exposed from the host, allowing the container to boot VMs on the host.

## CI Variables

In CI we have a set of overrides we use to ensure greater success of our CI jobs and avoid throttling by various APIs we depend on. See:

- [Docker mirrors](/tests/common/_docker_hub_registry_mirror.yml)
- [Test settings](/tests/common/_kubespray_test_settings.yml)
docs/ci.md
@@ -2,56 +2,56 @@

To generate this Matrix run `./tests/scripts/md-table/main.py`

## docker
## containerd

| OS / CNI | calico | canal | cilium | flannel | kube-ovn | kube-router | macvlan | ovn4nfv | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- |
amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :white_check_mark: |
centos8 | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
debian10 | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
fedora33 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora34 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :white_check_mark: |
ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :white_check_mark: |
ubuntu20 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
| OS / CNI | calico | canal | cilium | flannel | kube-ovn | kube-router | macvlan | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- |
amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: |
centos8 | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
debian10 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
fedora34 | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: |
ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: |
ubuntu20 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |

## crio

| OS / CNI | calico | canal | cilium | flannel | kube-ovn | kube-router | macvlan | ovn4nfv | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian11 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora33 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora34 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
| OS / CNI | calico | canal | cilium | flannel | kube-ovn | kube-router | macvlan | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian11 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora34 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |

## containerd
## docker

| OS / CNI | calico | canal | cilium | flannel | kube-ovn | kube-router | macvlan | ovn4nfv | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian11 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora33 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora34 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu18 | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
| OS / CNI | calico | canal | cilium | flannel | kube-ovn | kube-router | macvlan | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
centos8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora34 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
opensuse | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
@@ -60,3 +60,24 @@ crio_pids_limit: 4096

[CRI-O]: https://cri-o.io/
[cri-o#1921]: https://github.com/cri-o/cri-o/issues/1921

## Note about user namespaces

CRI-O has support for user namespaces. This feature is optional and can be enabled by setting the following two variables.

```yaml
crio_runtimes:
  - name: runc
    path: /usr/bin/runc
    type: oci
    root: /run/runc
    allowed_annotations:
      - "io.kubernetes.cri-o.userns-mode"

crio_remap_enable: true
```

The `allowed_annotations` setting configures `crio.conf` accordingly.

The `crio_remap_enable` setting configures the `/etc/subuid` and `/etc/subgid` files to add an entry for the **containers** user.
By default, 16M uids and gids are reserved for user namespaces (256 pods * 65536 uids/gids) at the end of the uid/gid space.

@@ -212,6 +212,22 @@ nodelocaldns_external_zones:

See [dns_etchosts](#dns_etchosts-coredns) above.

### Nodelocal DNS HA

Under some circumstances the single-pod nodelocaldns implementation may not be replaced soon enough during a cluster upgrade or a nodelocaldns upgrade, and DNS requests can time out for short intervals. If for any reason your applications cannot tolerate this behavior you can enable a redundant nodelocal DNS pod on each node:

```yaml
enable_nodelocaldns_secondary: true
```

**Note:** when the nodelocaldns secondary is enabled, the primary is instructed to no longer tear down the iptables rules it sets up to direct traffic to itself. In case both daemonsets have failing pods on the same node, this can cause a DNS blackout with traffic no longer being forwarded to the coredns central service as a fallback. Please ensure you account for this also if you decide to disable the nodelocaldns cache.

There is a time delta (in seconds) allowed for the secondary nodelocaldns to survive in case both primary and secondary daemonsets are updated at the same time. It is advised to tune this variable after you have performed some tests in your own environment.

```yaml
nodelocaldns_secondary_skew_seconds: 5
```

## Limitations

* Kubespray has yet ways to configure Kubedns addon to forward requests SkyDns can
docs/etcd.md (new file)
@@ -0,0 +1,30 @@
# etcd

## Metrics

To expose metrics on a separate HTTP port, define it in the inventory with:

```yaml
etcd_metrics_port: 2381
```

To create a service `etcd-metrics` and associated endpoints in the `kube-system` namespace,
define its labels in the inventory with:

```yaml
etcd_metrics_service_labels:
  k8s-app: etcd
  app.kubernetes.io/managed-by: Kubespray
  app: kube-prometheus-stack-kube-etcd
  release: prometheus-stack
```

The last two labels in the above example allow scraping the metrics from the
[kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack)
chart with the following Helm `values.yaml`:

```yaml
kubeEtcd:
  service:
    enabled: false
```
docs/gcp-lb.md (new file)
@@ -0,0 +1,16 @@
# GCP Load Balancers for type=LoadBalancer of Kubernetes Services

Google Cloud Platform can be used for the creation of Kubernetes Service Load Balancers.

This feature is delivered by adding parameters to kube-controller-manager and kubelet. You need to specify:

    --cloud-provider=gce
    --cloud-config=/etc/kubernetes/cloud-config

To get it working in Kubespray, you need to add a tag to the GCE instances, specify it in the Kubespray group vars, and also set `cloud_provider` to `gce`. So, for example, in the file `group_vars/all/gcp.yml`:

    cloud_provider: gce
    gce_node_tags: k8s-lb

Once set up, when you create a Service in Kubernetes with `type=LoadBalancer`, the cloud provider will create a public IP and set up the firewall.
Note: the cloud provider runs under the VM service account, so this account needs the correct permissions to be able to create all GCP resources.
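As a sketch of the consumer side, a plain `Service` of `type=LoadBalancer` is all that is needed to trigger the cloud provider; every name and port below is a placeholder:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: web            # placeholder service name
spec:
  type: LoadBalancer   # the GCE cloud provider allocates a public IP and firewall rule
  selector:
    app: web           # placeholder pod selector
  ports:
    - port: 80
      targetPort: 8080
```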
@@ -86,7 +86,7 @@ More details on this process are in the [HA guide](/docs/ha-mode.md).
Kubespray permits connecting to the cluster remotely on any IP of any
kube_control_plane host on port 6443 by default. However, this requires
authentication. One can get a kubeconfig from kube_control_plane hosts
(see [below](#accessing-kubernetes-api)) or connect with a [username and password](/docs/vars.md#user-accounts).
(see [below](#accessing-kubernetes-api)).

For more information on kubeconfig and accessing a Kubernetes cluster, refer to
the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/).

@@ -102,12 +102,13 @@ exclusive to `loadbalancer_apiserver_localhost`.

Access API endpoints are evaluated automatically, as the following:

| Endpoint type                | kube_control_plane | non-master              | external              |
|------------------------------|--------------------|-------------------------|-----------------------|
| Local LB (default)           | `https://bip:sp`   | `https://lc:nsp`        | `https://m[0].aip:sp` |
| Local LB + Unmanaged here LB | `https://bip:sp`   | `https://lc:nsp`        | `https://ext`         |
| External LB, no internal     | `https://bip:sp`   | `<https://lb:lp>`       | `https://lb:lp`       |
| No ext/int LB                | `https://bip:sp`   | `<https://m[0].aip:sp>` | `https://m[0].aip:sp` |
| Endpoint type                | kube_control_plane                     | non-master              | external              |
|------------------------------|----------------------------------------|-------------------------|-----------------------|
| Local LB (default)           | `https://dbip:sp`                      | `https://lc:nsp`        | `https://m[0].aip:sp` |
| Local LB (default) + cbip    | `https://cbip:sp` and `https://lc:nsp` | `https://lc:nsp`        | `https://m[0].aip:sp` |
| Local LB + Unmanaged here LB | `https://dbip:sp`                      | `https://lc:nsp`        | `https://ext`         |
| External LB, no internal     | `https://dbip:sp`                      | `<https://lb:lp>`       | `https://lb:lp`       |
| No ext/int LB                | `https://dbip:sp`                      | `<https://m[0].aip:sp>` | `https://m[0].aip:sp` |

Where:

@@ -115,7 +116,8 @@ Where:
* `lb` - LB FQDN, `apiserver_loadbalancer_domain_name`;
* `ext` - Externally load balanced VIP:port and FQDN, not managed by Kubespray;
* `lc` - localhost;
* `bip` - a custom bind IP or localhost for the default bind IP '0.0.0.0';
* `cbip` - a custom bind IP, `kube_apiserver_bind_address`;
* `dbip` - localhost for the default bind IP '0.0.0.0';
* `nsp` - nginx secure port, `loadbalancer_apiserver_port`, defers to `sp`;
* `sp` - secure port, `kube_apiserver_port`;
* `lp` - LB port, `loadbalancer_apiserver.port`, defers to the secure port;
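For orientation, a minimal sketch of the external load balancer case in the inventory, built from the variables defined above (the FQDN, VIP, and port values are placeholders):

```yaml
apiserver_loadbalancer_domain_name: "lb.example.com"  # placeholder FQDN (lb)
loadbalancer_apiserver:
  address: 10.0.0.1    # placeholder VIP
  port: 8383           # placeholder LB port (lp); defers to the secure port if unset
```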
@@ -1,97 +0,0 @@

# Ambassador

The [Ambassador API Gateway](https://github.com/datawire/ambassador) provides all the functionality of a traditional ingress controller
(e.g., path-based routing) while exposing many additional capabilities such as authentication,
URL rewriting, CORS, rate limiting, and automatic metrics collection.

## Installation

### Configuration

* `ingress_ambassador_namespace` (default `ambassador`): namespace for installing Ambassador.
* `ingress_ambassador_update_window` (default `0 0 * * SUN`): _crontab_-like expression
  for specifying when the Operator should try to update the Ambassador API Gateway.
* `ingress_ambassador_version` (default: `*`): SemVer rule for versions allowed for
  installation/updates.
* `ingress_ambassador_secure_port` (default: 443): HTTPS port to listen at.
* `ingress_ambassador_insecure_port` (default: 80): HTTP port to listen at.
* `ingress_ambassador_multi_namespaces` (default `false`): By default, Ambassador will only
  watch the `ingress_ambassador_namespace` namespace for `AmbassadorInstallation` CRD resources.
  When set to `true`, this value will tell the Ambassador Operator to watch **all** namespaces
  for CRDs. If you want to run multiple Ambassador ingress instances, set this to `true`.

### Ingress annotations

The Ambassador API Gateway will automatically load balance `Ingress` resources
that include the annotation `kubernetes.io/ingress.class=ambassador`. All the other
resources will be just ignored.

### Ambassador Operator

This Ambassador addon deploys the Ambassador Operator, which in turn will install
the [Ambassador API Gateway](https://github.com/datawire/ambassador) in
a Kubernetes cluster.

The Ambassador Operator is a Kubernetes Operator that controls Ambassador's complete lifecycle
in your cluster, automating many of the repeatable tasks you would otherwise have to perform
yourself. Once installed, the Operator will complete installations and seamlessly upgrade to new
versions of Ambassador as they become available.

## Usage

The following example creates simple http-echo services and an `Ingress` object
to route to these services.

Note well that the [Ambassador API Gateway](https://github.com/datawire/ambassador) will automatically load balance `Ingress` resources
that include the annotation `kubernetes.io/ingress.class=ambassador`. All the other
resources will be just ignored.

```yaml
kind: Pod
apiVersion: v1
metadata:
  name: foo-app
  labels:
    app: foo
spec:
  containers:
    - name: foo-app
      image: hashicorp/http-echo
      args:
        - "-text=foo"
---
kind: Service
apiVersion: v1
metadata:
  name: foo-service
spec:
  selector:
    app: foo
  ports:
    # Default port used by the image
    - port: 5678
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: example-ingress
  annotations:
    kubernetes.io/ingress.class: ambassador
spec:
  rules:
    - http:
        paths:
          - path: /foo
            backend:
              serviceName: foo-service
              servicePort: 5678
```

Now you can test that the ingress is working with curl:

```console
$ export AMB_IP=$(kubectl get service ambassador -n ambassador -o 'go-template={{range .status.loadBalancer.ingress}}{{print .ip "\n"}}{{end}}')
$ curl $AMB_IP/foo
foo
```
@@ -5,7 +5,7 @@
* All forked public repos at github will be also public, so **never commit sensitive data to your public forks**.
* List of all forked repos could be retrieved from github page of original project.

2. Add **forked repo** as submodule to desired folder in your existent ansible repo(for example 3d/kubespray):
2. Add **forked repo** as submodule to desired folder in your existent ansible repo (for example 3d/kubespray):
   ```git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray```
   Git will create `.gitmodules` file in your existent ansible repo:

@@ -19,7 +19,7 @@
   ```git config --global status.submoduleSummary true```

4. Add *original* kubespray repo as upstream:
   ```git remote add upstream https://github.com/kubernetes-sigs/kubespray.git```
   ```cd kubespray && git remote add upstream https://github.com/kubernetes-sigs/kubespray.git```

5. Sync your master branch with upstream:

@@ -34,45 +34,46 @@
   ```git checkout -b work```
   ***Never*** use master branch of your repository for your commits.

7. Modify path to library and roles in your ansible.cfg file (role naming should be uniq, you may have to rename your existent roles if they have same names as kubespray project):
7. Modify path to library and roles in your ansible.cfg file (role naming should be unique; you may have to rename your existing roles if they have the same names as the kubespray project);
   if you had roles in your existing ansible project before, you can add the path to those separated with `:`:

   ```ini
8. ```ini
   ...
   library = 3d/kubespray/library/
   roles_path = 3d/kubespray/roles/
   library = ./library/:3d/kubespray/library/
   roles_path = ./roles/:3d/kubespray/roles/
   ...
   ```

8. Copy and modify configs from kubespray `group_vars` folder to corresponding `group_vars` folder in your existent project.
9. Copy and modify configs from kubespray `group_vars` folder to the corresponding `group_vars` folder in your existing project.
   You could rename *all.yml* config to something else, i.e. *kubespray.yml*, and create a corresponding group in your inventory file, which will include all hosts groups related to the kubernetes setup.

9. Modify your ansible inventory file by adding mapping of your existent groups (if any) to kubespray naming.
   For example:
10. Modify your ansible inventory file by adding a mapping of your existing groups (if any) to kubespray naming.
    For example:

    ```ini
    ...
    #Kargo groups:
    [kube_node:children]
    kubenode

    [k8s_cluster:children]
    kubernetes

    [etcd:children]
    kubemaster
    kubemaster-ha

    [kube_control_plane:children]
    kubemaster
    kubemaster-ha

    [kubespray:children]
    kubernetes
    ```

    * Last entry here is needed to apply the kubespray.yml config file, renamed from all.yml of the kubespray project.

10. Now you can include kubespray tasks in you existent playbooks by including cluster.yml file:
11. Now you can include kubespray tasks in your existing playbooks by including the cluster.yml file:

    ```yml
    - name: Include kubespray tasks
@@ -81,7 +82,7 @@

    Or you could copy separate tasks from cluster.yml into your ansible repository.

11. Commit changes to your ansible repo. Keep in mind, that submodule folder is just a link to the git commit hash of your forked repo.
12. Commit changes to your ansible repo. Keep in mind that the submodule folder is just a link to the git commit hash of your forked repo.
    When you update your "work" branch you need to commit changes to the ansible repo as well.
    Other members of your team should use ```git submodule sync```, ```git submodule update --init``` to get the actual code from the submodule.
@@ -37,10 +37,18 @@ Pod Overhead is mandatory if you run Pods with Kata Containers that use [resourc

To enable the Pod Overhead feature you have to configure Kubelet with the appropriate cgroup driver, using the following configuration:

`cgroupfs` works best:

```yaml
kubelet_cgroup_driver: cgroupfs
```

... but when using `cgroups v2` (see <https://www.redhat.com/en/blog/world-domination-cgroups-rhel-8-welcome-cgroups-v2>) you can use systemd as well:

```yaml
kubelet_cgroup_driver: systemd
```

**Qemu hypervisor configuration**:

The configuration for the Qemu hypervisor uses the following values:
@@ -56,7 +64,7 @@ kata_containers_qemu_overhead_fixed_memory: 290Mi

Optionally you can select the Kata Containers release version to be installed. The available releases are published in [GitHub](https://github.com/kata-containers/runtime/releases).

```yaml
kata_containers_version: 1.11.1
kata_containers_version: 2.2.2
```

### Optional : Debug
|
||||
```
|
||||
<!-- END MUNGE: EXAMPLE registry-rc.yaml -->
|
||||
|
||||
*Note:* that if you have set multiple replicas, make sure your CSI driver has support for the `ReadWriteMany` accessMode.
|
||||
|
||||
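For illustration, a claim requesting `ReadWriteMany` might look like this sketch (the claim name and size are placeholders):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: registry-storage   # placeholder claim name
spec:
  accessModes:
    - ReadWriteMany        # required when the registry runs with multiple replicas
  resources:
    requests:
      storage: 10Gi        # placeholder size
```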
## Expose the registry in the cluster

Now that we have a registry `Pod` running, we can expose it as a Service:
|
||||
``kube_controller_node_monitor_period``,
|
||||
``kube_apiserver_pod_eviction_not_ready_timeout_seconds`` &
|
||||
``kube_apiserver_pod_eviction_unreachable_timeout_seconds`` for better Kubernetes reliability.
|
||||
Check out [Kubernetes Reliability](kubernetes-reliability.md)
|
||||
Check out [Kubernetes Reliability](/docs/kubernetes-reliability.md)
|
||||
|
||||
* Tune network prefix sizes. Those are ``kube_network_node_prefix``,
|
||||
``kube_service_addresses`` and ``kube_pods_subnet``.
|
||||
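Pulling the variables above together, a hedged example of group_vars overrides (all values are illustrative, not recommendations):

```yaml
kube_controller_node_monitor_period: 5s                       # illustrative
kube_apiserver_pod_eviction_not_ready_timeout_seconds: 300    # illustrative
kube_apiserver_pod_eviction_unreachable_timeout_seconds: 300  # illustrative
kube_network_node_prefix: 24                                  # illustrative
kube_service_addresses: 10.233.0.0/18                         # illustrative
kube_pods_subnet: 10.233.64.0/18                              # illustrative
```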
@@ -43,7 +43,7 @@ For a large scale deployment, consider the following configuration changes:
  etcd role is okay).

* Check out the
  [Inventory](getting-started.md#building-your-own-inventory)
  [Inventory](/docs/getting-started.md#building-your-own-inventory)
  section of the Getting started guide for tips on creating a large scale
  Ansible inventory.
@@ -1,5 +1,7 @@
# Mitogen

*Warning:* Mitogen support is now deprecated in kubespray due to upstream not releasing an updated version to support ansible 4.x (ansible-base 2.11.x) and above. The CI support has been stripped for mitogen and we are no longer validating any support or regressions for it. The supporting mitogen install playbook and integration documentation will be removed in a later version.

[Mitogen for Ansible](https://mitogen.networkgenomics.com/ansible_detailed.html) allows a 1.25x - 7x speedup and a CPU usage reduction of at least 2x, depending on network conditions, modules executed, and time already spent by targets on useful work. Mitogen cannot improve a module once it is executing; it can only ensure the module executes as quickly as possible.

## Install

@@ -8,6 +10,21 @@
ansible-playbook mitogen.yml
```

Enable mitogen use via environment variables:

```ShellSession
export ANSIBLE_STRATEGY=mitogen_linear
export ANSIBLE_STRATEGY_PLUGINS=plugins/mitogen/ansible_mitogen/plugins/strategy
```

... or via an `ansible.cfg` setup:

```ini
[defaults]
strategy_plugins = plugins/mitogen/ansible_mitogen/plugins/strategy
strategy=mitogen_linear
```

## Limitation

If you are experiencing problems, please see the [documentation](https://mitogen.networkgenomics.com/ansible_detailed.html#noteworthy-differences).

@@ -1,7 +1,7 @@
# Network Checker Application

With the ``deploy_netchecker`` var enabled (defaults to false), Kubespray deploys a
Network Checker Application from the third-party `l23network/k8s-netchecker` docker
Network Checker Application from the third-party `mirantis/k8s-netchecker` docker
images. It consists of a server and agents that try to reach the server using the
network connectivity means usual for Kubernetes applications. Therefore, this
automatically verifies pod-to-pod connectivity via the cluster IP and checks

@@ -31,7 +31,7 @@ calicoctl_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_ctl_versio
# If using Calico with kdd
calico_crds_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_version }}.tar.gz"

# CentOS/Redhat/AlmaLinux
# CentOS/Redhat/AlmaLinux/Rocky Linux
## Docker / Containerd
docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch"
docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
@@ -1,49 +0,0 @@
# OVN4NFV-k8S-Plugin

Intro to [ovn4nfv-k8s-plugin](https://github.com/opnfv/ovn4nfv-k8s-plugin)

## How to use it

* Enable ovn4nfv in `group_vars/k8s_cluster/k8s_cluster.yml`

```yml
...
kube_network_plugin: ovn4nfv
...
```

## Verifying ovn4nfv kube network plugin

* ovn4nfv install ovn control plan in the master and ovn daemonset in all nodes
* Network function Networking(nfn) operator is install in the master and nfn agent is installed in all the node
* ovn4nfv install `ovn4nfvk8s-cni` cni shim binary in `/opt/cni/bin/` and nfn agent act as the cni server
* All ovn4nfv pods are installed in the kube-system

```ShellSession
# From K8s client
# kubectl get pods -n kube-system -l app=ovn-control-plane -o wide
NAME                                 READY   STATUS    RESTARTS   AGE     IP               NODE      NOMINATED NODE   READINESS GATES
ovn-control-plane-5f8b7bcc65-w759g   1/1     Running   0          3d18h   192.168.121.25   master    <none>           <none>

# kubectl get pods -n kube-system -l app=ovn-controller -o wide
NAME                   READY   STATUS    RESTARTS   AGE     IP               NODE       NOMINATED NODE   READINESS GATES
ovn-controller-54zzj   1/1     Running   0          3d18h   192.168.121.24   minion01   <none>           <none>
ovn-controller-7cljt   1/1     Running   0          3d18h   192.168.121.25   master     <none>           <none>
ovn-controller-cx46g   1/1     Running   0          3d18h   192.168.121.15   minion02   <none>           <none>

# kubectl get pods -n kube-system -l name=nfn-operator -o wide
NAME                            READY   STATUS    RESTARTS   AGE     IP               NODE     NOMINATED NODE   READINESS GATES
nfn-operator-6dc44dbf48-xk9zl   1/1     Running   0          3d18h   192.168.121.25   master   <none>           <none>

# kubectl get pods -n kube-system -l app=nfn-agent -o wide
NAME              READY   STATUS    RESTARTS   AGE     IP               NODE       NOMINATED NODE   READINESS GATES
nfn-agent-dzlpp   1/1     Running   0          3d18h   192.168.121.15   minion02   <none>           <none>
nfn-agent-jcdbn   1/1     Running   0          3d18h   192.168.121.25   master     <none>           <none>
nfn-agent-lrkzk   1/1     Running   0          3d18h   192.168.121.24   minion01   <none>           <none>

# kubectl get pods -n kube-system -l app=ovn4nfv -o wide
NAME                READY   STATUS    RESTARTS   AGE     IP               NODE       NOMINATED NODE   READINESS GATES
ovn4nfv-cni-5zdz2   1/1     Running   0          3d18h   192.168.121.24   minion01   <none>           <none>
ovn4nfv-cni-k5wjp   1/1     Running   0          3d18h   192.168.121.25   master     <none>           <none>
ovn4nfv-cni-t6z5b   1/1     Running   0          3d18h   192.168.121.15   minion02   <none>           <none>
```
@@ -1,62 +1,3 @@
# Kubespray's roadmap

## Self deployment (pull-mode) [#320](https://github.com/kubespray/kubespray/issues/320)

- the playbook would install and configure docker and the etcd cluster
- the following data would be inserted into etcd: certs, tokens, users, inventory, group_vars.
- a "kubespray" container would be deployed (kubespray-cli, ansible-playbook)
- to be discussed, a way to provide the inventory
- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kubespray/issues/321)

## Provisioning and cloud providers

- [ ] Terraform to provision instances on:
  - [ ] GCE
  - [x] AWS (contrib/terraform/aws)
  - [x] OpenStack (contrib/terraform/openstack)
  - [x] Equinix Metal
  - [ ] Digital Ocean
  - [ ] Azure
- [ ] On AWS autoscaling, multi AZ
- [ ] On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kubespray/issues/297)
- [ ] On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kubespray/issues/280)
- [x] **TLS bootstrap** support for kubelet (covered by kubeadm, but not in standard deployment) [#234](https://github.com/kubespray/kubespray/issues/234)
  (related issues: <https://github.com/kubernetes/kubernetes/pull/20439> <https://github.com/kubernetes/kubernetes/issues/18112>)

## Tests

- [x] Run kubernetes e2e tests
- [ ] Test idempotency on single OS but for all network plugins/container engines
- [ ] single test on AWS per day
- [ ] test scale up cluster: +1 etcd, +1 control plane, +1 node
- [x] Reorganize CI test vars into group var files

## Lifecycle

- [ ] Upgrade granularity: select components to upgrade and skip others

## Networking

- [ ] Opencontrail
- [ ] Consolidate roles/network_plugin and roles/kubernetes-apps/network_plugin

## Kubespray API

- Perform all actions through an **API**
- Store inventories / configurations of multiple clusters
- Make sure that the state of the cluster is completely saved in no more than one config file beyond the hosts inventory

## Addons (helm or native ansible)

- [x] Helm
- [x] Ingress-nginx
- [x] kubernetes-dashboard

## Others

- Organize and update documentation (split in categories)
- Refactor downloads so it all runs at the beginning of deployment
- Make bootstrapping OS more consistent
- **consul** -> if officially supported by k8s
- Flex volumes options (e.g. **torrus** support) [#312](https://github.com/kubespray/kubespray/issues/312)
- Clusters federation option (aka **ubernetes**) [#329](https://github.com/kubespray/kubespray/issues/329)

We are tracking the evolution towards Kubespray 3.0 in [#6400](https://github.com/kubernetes-sigs/kubespray/issues/6400) as well as in other open issues in our [github issues](https://github.com/kubernetes-sigs/kubespray/issues/) section.
@@ -14,7 +14,7 @@ hands-on guide to get started with Kubespray.

## Cluster Details

-* [kubespray](https://github.com/kubernetes-sigs/kubespray) v2.13.x
+* [kubespray](https://github.com/kubernetes-sigs/kubespray) v2.17.x
* [kubernetes](https://github.com/kubernetes/kubernetes) v1.17.9

## Prerequisites

@@ -48,7 +48,7 @@ gcloud compute networks create kubernetes-the-kubespray-way --subnet-mode custom

A [subnet](https://cloud.google.com/compute/docs/vpc/#vpc_networks_and_subnets) must be provisioned with an IP address range large enough to assign a private IP address to each node in the Kubernetes cluster.

-Create the `kubernetes` subnet in the `kubernetes-the-hard-way` VPC network:
+Create the `kubernetes` subnet in the `kubernetes-the-kubespray-way` VPC network:

```ShellSession
gcloud compute networks subnets create kubernetes \
```
@@ -196,7 +196,7 @@ Next, we will git clone the Kubespray code into our working directory:

```ShellSession
git clone https://github.com/kubernetes-sigs/kubespray.git
cd kubespray
-git checkout release-2.13
+git checkout release-2.17
```

Now we need to install the dependencies for Ansible to run the Kubespray
@@ -7,7 +7,8 @@ You can also individually control versions of components by explicitly defining
versions. Here are all version vars for each component (a pinning sketch follows the list):

-* docker_version
-* containerd_version
+* docker_containerd_version (relevant when `container_manager` == `docker`)
+* containerd_version (relevant when `container_manager` == `containerd`)
* kube_version
* etcd_version
* calico_version
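A minimal pinning sketch in inventory group vars, using the variable names from the list above (the file path and the exact version values are illustrative, not recommendations):

```yml
# inventory/mycluster/group_vars/k8s_cluster/k8s_cluster.yml (illustrative path)
kube_version: v1.22.5        # the default shown elsewhere in this diff
etcd_version: v3.5.0         # illustrative value
containerd_version: "1.4.9"  # illustrative; used when container_manager == containerd
```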
docs/vars.md (13 changes)
@@ -3,7 +3,7 @@
## Generic Ansible variables

You can view facts gathered by Ansible automatically
-[here](https://docs.ansible.com/ansible/latest/playbooks_variables.html#information-discovered-from-systems-facts).
+[here](https://docs.ansible.com/ansible/latest/user_guide/playbooks_vars_facts.html#ansible-facts).

Some variables of note include:
@@ -18,7 +18,8 @@ Some variables of note include:
* *docker_version* - Specify version of Docker to use (should be a quoted
  string). Must match one of the keys defined for *docker_versioned_pkg*
  in `roles/container-engine/docker/vars/*.yml`.
-* *containerd_version* - Specify version of Containerd to use
+* *containerd_version* - Specify version of containerd to use when setting `container_manager` to `containerd`
+* *docker_containerd_version* - Specify which version of containerd to use when setting `container_manager` to `docker`
* *etcd_version* - Specify version of ETCD to use
* *ipip* - Enables Calico ipip encapsulation by default
* *kube_network_plugin* - Sets k8s network plugin (default Calico)
@@ -119,14 +120,14 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
  ``--insecure-registry=myregistry.mydomain:5000``
* *docker_plugins* - This list can be used to define [Docker plugins](https://docs.docker.com/engine/extend/) to install.
-* *containerd_default_runtime* - Sets the default Containerd runtime used by the Kubernetes CRI plugin.
-* *containerd_runtimes* - Sets the Containerd runtime attributes used by the Kubernetes CRI plugin.
+* *containerd_additional_runtimes* - Sets the additional Containerd runtimes used by the Kubernetes CRI plugin.
+  [Default config](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/container-engine/containerd/defaults/main.yml) can be overridden in inventory vars.
* *http_proxy/https_proxy/no_proxy/no_proxy_exclude_workers/additional_no_proxy* - Proxy variables for deploying behind a
  proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames
  that correspond to each node.
-* *kubelet_cgroup_driver* - Allows manual override of the
-  cgroup-driver option for Kubelet. By default autodetection is used
-  to match Docker configuration.
+* *kubelet_cgroup_driver* - Allows manual override of the cgroup-driver option for Kubelet.
+  By default autodetection is used to match container manager configuration.
+  `systemd` is the preferred driver for `containerd`, though it can have issues with `cgroups v1` and `kata-containers`, in which case you may want to change to `cgroupfs` (a sketch follows this hunk).
* *kubelet_rotate_certificates* - Auto rotate the kubelet client certificates by requesting new certificates
  from the kube-apiserver when the certificate expiration approaches.
* *kubelet_rotate_server_certificates* - Auto rotate the kubelet server certificates by requesting new certificates
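A minimal sketch of that override (the file path is illustrative; only set this when autodetection picks the wrong driver, e.g. with kata-containers on cgroups v1):

```yml
# inventory/mycluster/group_vars/k8s_cluster/k8s_cluster.yml (illustrative path)
# Force kubelet's cgroup driver instead of autodetecting it from the
# container manager configuration.
kubelet_cgroup_driver: cgroupfs
```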
@@ -54,7 +54,7 @@ external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id"
vsphere_csi_enabled: true
```

-For a more fine-grained CSI setup, refer to the [vsphere-csi](vsphere-csi.md) documentation.
+For a more fine-grained CSI setup, refer to the [vsphere-csi](/docs/vsphere-csi.md) documentation.

### Deployment

@@ -8,11 +8,13 @@
# containerd_default_runtime: "runc"
# containerd_snapshotter: "native"

-# containerd_runtimes:
-#   - name: runc
-#     type: "io.containerd.runc.v2"
-#     engine: ""
-#     root: ""
+# containerd_runc_runtime:
+#   name: runc
+#   type: "io.containerd.runc.v2"
+#   engine: ""
+#   root: ""

# containerd_additional_runtimes:
# Example for Kata Containers as additional runtime:
#   - name: kata
#     type: "io.containerd.kata.v2"
@@ -28,6 +30,14 @@

# containerd_metrics_grpc_histogram: false

## An obvious use case is allowing insecure-registry access to self-hosted registries.
## Can be an IP address or a domain name.
## Example: mirror.registry.io or 172.19.16.11:5000
## The port number is also needed if the default HTTPS port is not used.
# containerd_insecure_registries:
#   - mirror.registry.io
#   - 172.19.16.11:5000

# containerd_registries:
#   "docker.io": "https://registry-1.docker.io"
@@ -49,6 +49,10 @@
# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable"
# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/"

# [Optional] runc,containerd: only if you set container_runtime: containerd
# runc_download_url: "{{ files_repo }}/{{ runc_version }}/runc.{{ image_arch }}"
# containerd_download_url: "{{ files_repo }}/containerd/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz"

## CentOS/Redhat/AlmaLinux
### For EL7, base and extras repo must be available, for EL8, baseos and appstream
### By default we enable those repos automatically
@@ -14,18 +14,18 @@
## gcr.io/cloud-provider-vsphere/cpi/release/manager
# external_vsphere_cloud_controller_image_tag: "latest"
## gcr.io/cloud-provider-vsphere/csi/release/syncer
-# vsphere_syncer_image_tag: "v2.2.1"
-## quay.io/k8scsi/csi-attacher
-# vsphere_csi_attacher_image_tag: "v3.1.0"
+# vsphere_syncer_image_tag: "v2.4.0"
+## k8s.gcr.io/sig-storage/csi-attacher
+# vsphere_csi_attacher_image_tag: "v3.3.0"
## gcr.io/cloud-provider-vsphere/csi/release/driver
-# vsphere_csi_controller: "v2.2.1"
-## quay.io/k8scsi/livenessprobe
-# vsphere_csi_liveness_probe_image_tag: "v2.2.0"
-## quay.io/k8scsi/csi-provisioner
-# vsphere_csi_provisioner_image_tag: "v2.1.0"
-## quay.io/k8scsi/csi-resizer
+# vsphere_csi_controller: "v2.4.0"
+## k8s.gcr.io/sig-storage/livenessprobe
+# vsphere_csi_liveness_probe_image_tag: "v2.4.0"
+## k8s.gcr.io/sig-storage/csi-provisioner
+# vsphere_csi_provisioner_image_tag: "v3.0.0"
+## k8s.gcr.io/sig-storage/csi-resizer
## makes sense only for vSphere version >=7.0
-# vsphere_csi_resizer_tag: "v1.1.0"
+# vsphere_csi_resizer_tag: "v1.3.0"

## To use vSphere CSI plugin to provision volumes set this value to true
# vsphere_csi_enabled: true
@@ -19,4 +19,5 @@
# etcd_peer_client_auth: true

## Settings for etcd deployment type
-etcd_deployment_type: docker
+# Set this to docker if you are using container_manager: docker
+etcd_deployment_type: host
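A minimal sketch of keeping the two settings consistent, per the comment above (the file path is illustrative):

```yml
# inventory/mycluster/group_vars/all/etcd.yml (illustrative path)
etcd_deployment_type: host      # the new default: etcd runs directly on the hosts
# etcd_deployment_type: docker  # use only together with container_manager: docker
```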
@@ -14,6 +14,7 @@ registry_enabled: false

# Metrics Server deployment
metrics_server_enabled: false
# metrics_server_resizer: false
# metrics_server_kubelet_insecure_tls: true
# metrics_server_metric_resolution: 15s
# metrics_server_kubelet_preferred_address_types: "InternalIP"
@@ -115,14 +116,9 @@ ingress_publish_status_address: ""
# 53: "kube-system/coredns:53"
# ingress_nginx_extra_args:
# - --default-ssl-certificate=default/foo-tls
# ingress_nginx_termination_grace_period_seconds: 300
# ingress_nginx_class: nginx

# ambassador ingress controller deployment
ingress_ambassador_enabled: false
# ingress_ambassador_namespace: "ambassador"
# ingress_ambassador_version: "*"
# ingress_ambassador_multi_namespaces: false

# ALB ingress controller deployment
ingress_alb_enabled: false
# alb_ingress_aws_region: "us-east-1"
@@ -134,12 +130,18 @@ ingress_alb_enabled: false
# Cert manager deployment
cert_manager_enabled: false
# cert_manager_namespace: "cert-manager"
# cert_manager_trusted_internal_ca: |
#   -----BEGIN CERTIFICATE-----
#   [REPLACE with your CA certificate]
#   -----END CERTIFICATE-----

# MetalLB deployment
metallb_enabled: false
metallb_speaker_enabled: true
# metallb_ip_range:
#   - "10.5.0.50-10.5.0.99"
# metallb_pool_name: "loadbalanced"
# matallb_auto_assign: true
# metallb_speaker_nodeselector:
#   kubernetes.io/os: "linux"
# metallb_controller_nodeselector:
@@ -162,7 +164,7 @@ metallb_speaker_enabled: true
#     operator: "Equal"
#     value: ""
#     effect: "NoSchedule"
-# metallb_version: v0.10.2
+# metallb_version: v0.10.3
# metallb_protocol: "layer2"
# metallb_port: "7472"
# metallb_memberlist_port: "7946"
@@ -181,6 +183,19 @@ metallb_speaker_enabled: true
#     peer_asn: 64513
#     my_asn: 4200000000

argocd_enabled: false
# argocd_version: v2.1.6
# argocd_namespace: argocd
# Default password:
# - https://argoproj.github.io/argo-cd/getting_started/#4-login-using-the-cli
# ---
# The initial password is autogenerated to be the pod name of the Argo CD API server. This can be retrieved with the command:
# kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2
# ---
# Use the following var to set admin password
# argocd_admin_password: "password"

# The plugin manager for kubectl
krew_enabled: false
krew_root_dir: "/usr/local/krew"
@@ -17,7 +17,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
kube_api_anonymous_auth: true

## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.21.5
+kube_version: v1.22.5

# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
@@ -150,6 +150,8 @@ kube_proxy_nodeport_addresses: >-
kube_encrypt_secret_data: false

# Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/
# kubelet_shutdown_grace_period has to be greater than kubelet_shutdown_grace_period_critical_pods to allow
# non-critical pods to also terminate gracefully
# kubelet_shutdown_grace_period: 60s
# kubelet_shutdown_grace_period_critical_pods: 20s
@@ -164,9 +166,12 @@ dns_mode: coredns
# manual_dns_server: 10.x.x.x
# Enable nodelocal dns cache
enable_nodelocaldns: true
enable_nodelocaldns_secondary: false
nodelocaldns_ip: 169.254.25.10
nodelocaldns_health_port: 9254
nodelocaldns_second_health_port: 9256
nodelocaldns_bind_metrics_host_ip: false
nodelocaldns_secondary_skew_seconds: 5
# nodelocaldns_external_zones:
# - zones:
#   - example.com
@@ -187,7 +192,7 @@ coredns_k8s_external_zone: k8s_external.local
enable_coredns_k8s_endpoint_pod_names: false

# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
+resolvconf_mode: host_resolvconf
# Deploy netchecker app to verify DNS resolve as an HTTP service
deploy_netchecker: false
# IP address of the kubernetes skydns service
@@ -197,7 +202,8 @@ dns_domain: "{{ cluster_name }}"

## Container runtime
## docker for docker, crio for cri-o and containerd for containerd.
-container_manager: docker
+## Default: containerd
+container_manager: containerd

# Additional container runtimes
kata_containers_enabled: false
@@ -89,6 +89,9 @@
# calico_vxlan_vni: 4096
# calico_vxlan_port: 4789

# Enable eBPF mode
# calico_bpf_enabled: false

# If you want to use non default IP_AUTODETECTION_METHOD for calico node set this option to one of:
# * can-reach=DESTINATION
# * interface=INTERFACE-REGEX
@@ -103,3 +106,7 @@

# Enable calico traffic encryption with wireguard
# calico_wireguard_enabled: false

# Under certain situations liveness and readiness probes may need tuning
# calico_node_livenessprobe_timeout: 10
# calico_node_readinessprobe_timeout: 10
@@ -59,3 +59,6 @@
# Extra variables that are passed to launch.sh, useful for enabling seed mode, see
# https://www.weave.works/docs/net/latest/tasks/ipam/ipam/
# weave_extra_args: ~

# Extra variables for weave_npc that are passed to launch.sh, useful for changing the log level, e.g. --log-level=error
# weave_npc_extra_args: ~
@@ -28,6 +28,9 @@
    - { role: kubespray-defaults }
    - { role: remove-node/pre-remove, tags: pre-remove }

- name: Gather facts
  import_playbook: facts.yml

- hosts: "{{ node | default('kube_node') }}"
  gather_facts: no
  environment: "{{ proxy_disable_env }}"
requirements-2.10.txt (new file)
@@ -0,0 +1,10 @@
ansible==3.4.0
ansible-base==2.10.15
cryptography==2.8
jinja2==2.11.3
netaddr==0.7.19
pbr==5.4.4
jmespath==0.9.5
ruamel.yaml==0.16.10
ruamel.yaml.clib==0.2.4
MarkupSafe==1.1.1
requirements-2.11.txt (new file)
@@ -0,0 +1,10 @@
ansible==4.8.0
ansible-core==2.11.6
cryptography==2.8
jinja2==2.11.3
netaddr==0.7.19
pbr==5.4.4
jmespath==0.9.5
ruamel.yaml==0.16.10
ruamel.yaml.clib==0.2.4
MarkupSafe==1.1.1
requirements-2.9.txt (new file)
@@ -0,0 +1,9 @@
ansible==2.9.27
jinja2==2.11.3
netaddr==0.7.19
pbr==5.4.4
jmespath==0.9.5
ruamel.yaml==0.16.10
ruamel.yaml.clib==0.2.4 ; python_version >= '3.5'
ruamel.yaml.clib==0.2.2 ; python_version < '3.5'
MarkupSafe==1.1.1
requirements-2.9.yml (new file)
@@ -0,0 +1,4 @@
---
collections:
  - name: community.general
    version: '<3.0'
@@ -1,10 +0,0 @@
ansible==3.4.0
ansible-base==2.10.11
cryptography==2.8
jinja2==2.11.3
netaddr==0.7.19
pbr==5.4.4
jmespath==0.9.5
ruamel.yaml==0.16.10
ruamel.yaml.clib==0.2.2
MarkupSafe==1.1.1
requirements.txt (new symbolic link)
@@ -0,0 +1 @@
requirements-2.10.txt
roles/adduser/molecule/default/converge.yml (new file)
@@ -0,0 +1,10 @@
---
- name: Converge
  hosts: all
  become: true
  gather_facts: false
  roles:
    - role: adduser
      vars:
        user:
          name: foo
roles/adduser/molecule/default/molecule.yml (new file)
@@ -0,0 +1,27 @@
---
dependency:
  name: galaxy
lint: |
  set -e
  yamllint -c ../../.yamllint .
driver:
  name: vagrant
  provider:
    name: libvirt
platforms:
  - name: adduser-01
    box: generic/ubuntu2004
    cpus: 1
    memory: 512
provisioner:
  name: ansible
  config_options:
    defaults:
      callback_whitelist: profile_tasks
      timeout: 120
  lint:
    name: ansible-lint
verifier:
  name: testinfra
  lint:
    name: flake8
roles/adduser/molecule/default/tests/test_default.py (new file)
@@ -0,0 +1,37 @@
import os
import yaml
import glob
import testinfra.utils.ansible_runner
from ansible.playbook import Playbook
from ansible.cli.playbook import PlaybookCLI

testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')


def read_playbook(playbook):
    # Parse the converge playbook with Ansible's own CLI machinery and
    # yield the fully resolved variables of each play.
    cli_args = [os.path.realpath(playbook), testinfra_hosts]
    cli = PlaybookCLI(cli_args)
    cli.parse()
    loader, inventory, variable_manager = cli._play_prereqs()

    pb = Playbook.load(cli.args[0], variable_manager, loader)

    for play in pb.get_plays():
        yield variable_manager.get_vars(play)


def get_playbook():
    # Prefer a converge playbook declared in molecule.yml, otherwise fall
    # back to the converge.* file in the scenario directory.
    with open(os.path.realpath(' '.join(map(str, glob.glob('molecule.*')))), 'r') as yamlfile:
        data = yaml.load(yamlfile, Loader=yaml.FullLoader)
        if 'playbooks' in data['provisioner'] and 'converge' in data['provisioner']['playbooks']:
            return data['provisioner']['playbooks']['converge']
        return ' '.join(map(str, glob.glob('converge.*')))


def test_user(host):
    # The created user (and its group) must exist on the converged host.
    for play_vars in read_playbook(get_playbook()):
        assert host.user(play_vars['user']['name']).exists
        if 'group' in play_vars['user']:
            assert host.group(play_vars['user']['group']).exists
        else:
            assert host.group(play_vars['user']['name']).exists
roles/bastion-ssh-config/defaults/main.yml (new file)
@@ -0,0 +1,2 @@
---
ssh_bastion_confing__name: ssh-bastion.conf
roles/bastion-ssh-config/molecule/default/converge.yml (new file)
@@ -0,0 +1,15 @@
---
- name: Converge
  hosts: all
  become: true
  gather_facts: false
  roles:
    - role: bastion-ssh-config
  tasks:
    - name: Copy config to remote host
      copy:
        src: "{{ playbook_dir }}/{{ ssh_bastion_confing__name }}"
        dest: "{{ ssh_bastion_confing__name }}"
        owner: "{{ ansible_user }}"
        group: "{{ ansible_user }}"
        mode: 0644
roles/bastion-ssh-config/molecule/default/molecule.yml (new file)
@@ -0,0 +1,35 @@
---
dependency:
  name: galaxy
lint: |
  set -e
  yamllint -c ../../.yamllint .
driver:
  name: vagrant
  provider:
    name: libvirt
platforms:
  - name: bastion-01
    box: generic/ubuntu2004
    cpus: 1
    memory: 512
provisioner:
  name: ansible
  config_options:
    defaults:
      callback_whitelist: profile_tasks
      timeout: 120
  lint:
    name: ansible-lint
  inventory:
    hosts:
      all:
        hosts:
        children:
          bastion:
            hosts:
              bastion-01:
verifier:
  name: testinfra
  lint:
    name: flake8
@@ -0,0 +1,34 @@
import os
import yaml
import glob
import testinfra.utils.ansible_runner
from ansible.playbook import Playbook
from ansible.cli.playbook import PlaybookCLI

testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')


def read_playbook(playbook):
    # Parse the converge playbook with Ansible's own CLI machinery and
    # yield the fully resolved variables of each play.
    cli_args = [os.path.realpath(playbook), testinfra_hosts]
    cli = PlaybookCLI(cli_args)
    cli.parse()
    loader, inventory, variable_manager = cli._play_prereqs()

    pb = Playbook.load(cli.args[0], variable_manager, loader)

    for play in pb.get_plays():
        yield variable_manager.get_vars(play)


def get_playbook():
    # Prefer a converge playbook declared in molecule.yml, otherwise fall
    # back to the converge.* file in the scenario directory.
    with open(os.path.realpath(' '.join(map(str, glob.glob('molecule.*')))), 'r') as yamlfile:
        data = yaml.load(yamlfile, Loader=yaml.FullLoader)
        if 'playbooks' in data['provisioner'] and 'converge' in data['provisioner']['playbooks']:
            return data['provisioner']['playbooks']['converge']
        return ' '.join(map(str, glob.glob('converge.*')))


def test_ssh_config(host):
    # The bastion SSH config must have been copied to the converged host.
    for play_vars in read_playbook(get_playbook()):
        assert host.file(play_vars['ssh_bastion_confing__name']).exists
        assert host.file(play_vars['ssh_bastion_confing__name']).is_file
Some files were not shown because too many files have changed in this diff.