Compare commits

220 commits: release-2.… ... release-2.…

SHA1:

e86c0cf036, b50e4c9eb7, e4ac3ab799, 0af6dfca5d, d6f688f060, a97fbec320, c3e73aabcf, d498df20db, 08467ad6b3, 3f41d8b274,
0634be4c88, 919e666fb9, 813576efeb, c4346e590f, bd81c615c3, 3d9fd082ff, 826282fe89, 73774326b3, 374438a3d6, fd80ef1ff1,
235173bb5f, 1750dec254, 52f52db8f3, db94812163, 4a6eb7eaa2, 58fe1a0ed6, c80bb0007a, 8a03bb1bb4, d919c58e21, 19bc610f44,
85a5a79ef5, c7cffb14a7, 6f61f3d9cb, 6b4bb2a121, e288449c5d, ea35021c96, 754424eca7, 4ad56e2772, 6f1352eb53, bf8c64af08,
a98ab40434, 6549bb12fc, 1329d3f03b, 843e908fa4, 0ff883afeb, 0d5bcd3e20, a8cef962e2, b50890172b, ffad2152b3, 6674438849,
4bc5e8d912, 8ca0bfffe0, 48282a344f, 050fde6327, 4d3104b334, 85fa6af313, 1c4db6132d, 744c81d451, 61be93b173, 406fbdb4e7,
136f14dec4, ab80342750, 2c2e608eac, 8267922a16, 90719a9990, 93f71df628, 791064a3d9, e90f32bdee, 9fe89a0641, 14699f5e98,
2f81bfa25e, 438da0c8e6, 25f317233c, 5e4d68b848, 4728739597, fc0d58ff48, 491e260d20, a132733b2d, b377dbb96f, c4d753c931,
ee3b7c5da5, dcc267f6f4, ccf60fc9ca, a38a3e7ddf, beb4aa52ea, f7d0fb9ab2, ff331f4eba, 94eae6a8dc, f8d6b54dbb, 67c4f2d95e,
03fefa8933, c8ec77a734, 4f32f94a51, 3dc384a17a, f1d0d1a9fe, c036a7d871, 6e63f3d2b4, 09748e80e9, 44a4f356ba, a0f41bf82a,
5ae3e2818b, 1a0b81ac64, 20d99886ca, b9fe301036, b5844018f2, 30508502d3, bca601d377, 65191375b8, a534eb45ce, e796f08184,
ed38d8d3a1, 07ad5ecfce, 4db5e663c3, 529faeea9e, 47510899c7, 4cd949c7e1, 31d7e64073, 7c1ee142dd, 25e86c5ca9, c41dd92007,
a564d89d46, 6c6a6e85da, ed0acd8027, b9a690463d, cbf4586c4c, c3986957c4, 8795cf6494, 80af8a5e79, b60f65c1e8, 943107115a,
ddbe9956e4, fdbcce3a5e, f007c77641, 9439487219, df6da52195, 6ca89c80af, 7fe0b87d83, 8a654b6955, 5a8cf824f6, 5c25b57989,
5d1fe64bc8, a731e25778, 0d6dc08578, 40261fdf14, 590b4aa240, 2a696ddb34, d7f08d1b0c, 4aa1ef28ea, 58faef6ff6, 34a52a7028,
ce751cb89d, 5cf2883444, 6bff338bad, c78862052c, 1f54cef71c, d00508105b, c272421910, 78624c5bcb, c681435432, 4d3f637684,
5e14398af4, 990f87acc8, eeb376460d, ef707b3461, 2af918132e, b9b654714e, fe399e0e0c, b192053e28, a84271aa7e, 1901b512d2,
9fdda7eca8, a68ed897f0, 582ff96d19, 0374a55eb3, ccbe38f78c, 958840da89, 1530411218, e5ec0f18c0, 0f44e8c812, 1cc0f3c8c9,
d9c39c274e, c38fb866b7, 5ad1d9db5e, 32f3d92d6b, 72b45eec2e, 23716b0eff, 859df84b45, 131bd933a6, 52904ee6ad, e3339fe3d8,
547ef747da, 63b27ea067, bc5881b70a, f4b95d42a6, ef76a578a4, 3b99d24ceb, 4701abff4c, 717b8daafe, c346e46022, 24632ae81b,
befde271eb, d689f57c94, ad3f503c0c, ae6c780af6, 8b9cd3959a, dffeab320e, 999586a110, f8d5487f8e, 4189008245, 44115d7d7a,
841e2f44c0, a8e4984cf7, 49196c2ec4, 3646dc0bd2, 694de1d67b, 31caab5f92, 472996c8b3, d62c67a5f5, e486151aea, 9c407e667d
.gitlab-ci.yml

@@ -1,5 +1,6 @@
 ---
 stages:
+  - build
   - unit-tests
   - deploy-part1
   - moderator
@@ -8,12 +9,12 @@ stages:
   - deploy-special

 variables:
-  KUBESPRAY_VERSION: v2.19.1
+  KUBESPRAY_VERSION: v2.20.0
   FAILFASTCI_NAMESPACE: 'kargo-ci'
   GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
   ANSIBLE_FORCE_COLOR: "true"
   MAGIC: "ci check this"
-  TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
+  TEST_ID: "$CI_PIPELINE_ID-$CI_JOB_ID"
   CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
   CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
   CI_TEST_SETTING: "./tests/common/_kubespray_test_settings.yml"
@@ -35,6 +36,7 @@ variables:
   RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
   TERRAFORM_VERSION: 1.0.8
   ANSIBLE_MAJOR_VERSION: "2.11"
+  PIPELINE_IMAGE: "$CI_REGISTRY_IMAGE/pipeline:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"

 before_script:
   - ./tests/scripts/rebase.sh
@@ -46,7 +48,7 @@ before_script:
 .job: &job
   tags:
     - packet
-  image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION
+  image: $PIPELINE_IMAGE
   artifacts:
     when: always
     paths:
@@ -76,6 +78,7 @@ ci-authorized:
   only: []

 include:
+  - .gitlab-ci/build.yml
   - .gitlab-ci/lint.yml
   - .gitlab-ci/shellcheck.yml
   - .gitlab-ci/terraform.yml
.gitlab-ci/build.yml (new file, 16 lines)

@@ -0,0 +1,16 @@
+---
+pipeline image:
+  stage: build
+  image: docker:20.10.22-cli
+  variables:
+    DOCKER_TLS_CERTDIR: ""
+  services:
+    - name: docker:20.10.22-dind
+      # See https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27300 for why this is required
+      command: ["--tls=false"]
+  before_script:
+    - echo $CI_REGISTRY_PASSWORD | docker login -u $CI_REGISTRY_USER --password-stdin $CI_REGISTRY
+  script:
+    # DOCKER_HOST is overwritten if we set it as a GitLab variable
+    - DOCKER_HOST=tcp://docker:2375; docker build --network host --file pipeline.Dockerfile --tag $PIPELINE_IMAGE .
+    - docker push $PIPELINE_IMAGE
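The new `pipeline image` job builds the CI image from `pipeline.Dockerfile` through a TLS-disabled Docker-in-Docker service and pushes it to the GitLab registry. A rough local equivalent, assuming a registry you can push to (the registry host and tag below are illustrative, not the project's):

```ShellSession
# Sketch of what the CI job does, run by hand against an example registry.
docker login registry.example.com
docker build --network host --file pipeline.Dockerfile --tag registry.example.com/kubespray/pipeline:local .
docker push registry.example.com/kubespray/pipeline:local
```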
@@ -14,7 +14,7 @@ vagrant-validate:
   stage: unit-tests
   tags: [light]
   variables:
-    VAGRANT_VERSION: 2.2.19
+    VAGRANT_VERSION: 2.3.7
   script:
     - ./tests/scripts/vagrant-validate.sh
   except: ['triggers', 'master']
@@ -75,6 +75,13 @@ check-readme-versions:
   script:
     - tests/scripts/check_readme_versions.sh

+check-typo:
+  stage: unit-tests
+  tags: [light]
+  image: python:3
+  script:
+    - tests/scripts/check_typo.sh
+
 ci-matrix:
   stage: unit-tests
   tags: [light]
@@ -4,7 +4,7 @@
   tags: [c3.small.x86]
   only: [/^pr-.*$/]
   except: ['triggers']
-  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
+  image: $PIPELINE_IMAGE
   services: []
   stage: deploy-part1
   before_script:
@@ -51,6 +51,11 @@ packet_ubuntu20-aio-docker:
   extends: .packet_pr
   when: on_success

+packet_ubuntu20-calico-aio-hardening:
+  stage: deploy-part2
+  extends: .packet_pr
+  when: on_success
+
 packet_ubuntu18-calico-aio:
   stage: deploy-part2
   extends: .packet_pr
@@ -81,7 +86,7 @@ packet_ubuntu18-crio:
   stage: deploy-part2
   when: manual

-packet_fedora35-crio:
+packet_fedora37-crio:
   extends: .packet_pr
   stage: deploy-part2
   when: manual
@@ -156,15 +161,23 @@ packet_rockylinux9-calico:
   extends: .packet_pr
   when: on_success

+packet_rockylinux9-cilium:
+  stage: deploy-part2
+  extends: .packet_pr
+  when: on_success
+  variables:
+    RESET_CHECK: "true"
+
 packet_almalinux8-docker:
   stage: deploy-part2
   extends: .packet_pr
   when: on_success

-packet_fedora36-docker-weave:
+packet_fedora38-docker-weave:
   stage: deploy-part2
   extends: .packet_pr
   when: on_success
+  allow_failure: true

 packet_opensuse-canal:
   stage: deploy-part2
@@ -224,19 +237,19 @@ packet_centos7-canal-ha:
   extends: .packet_pr
   when: manual

-packet_fedora36-docker-calico:
+packet_fedora38-docker-calico:
   stage: deploy-part2
   extends: .packet_periodic
   when: on_success
   variables:
     RESET_CHECK: "true"

-packet_fedora35-calico-selinux:
+packet_fedora37-calico-selinux:
   stage: deploy-part2
   extends: .packet_periodic
   when: on_success

-packet_fedora35-calico-swap-selinux:
+packet_fedora37-calico-swap-selinux:
   stage: deploy-part2
   extends: .packet_pr
   when: manual
@@ -251,7 +264,7 @@ packet_almalinux8-calico-nodelocaldns-secondary:
   extends: .packet_pr
   when: manual

-packet_fedora36-kube-ovn:
+packet_fedora38-kube-ovn:
   stage: deploy-part2
   extends: .packet_periodic
   when: on_success
@@ -10,7 +10,7 @@
   tags: [c3.small.x86]
   only: [/^pr-.*$/]
   except: ['triggers']
-  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
+  image: $PIPELINE_IMAGE
   services: []
   before_script:
     - apt-get update && apt-get install -y python3-pip
@@ -43,6 +43,7 @@ vagrant_ubuntu20-flannel:
   stage: deploy-part2
   extends: .vagrant
   when: on_success
+  allow_failure: false

 vagrant_ubuntu16-kube-router-sep:
   stage: deploy-part2
@@ -55,7 +56,7 @@ vagrant_ubuntu16-kube-router-svc-proxy:
   extends: .vagrant
   when: manual

-vagrant_fedora35-kube-router:
+vagrant_fedora37-kube-router:
   stage: deploy-part2
   extends: .vagrant
   when: on_success
Dockerfile (31 changed lines)

@@ -7,15 +7,7 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

 RUN apt update -y \
     && apt install -y \
-    libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
-    ca-certificates curl gnupg2 software-properties-common python3-pip unzip rsync git \
-    && rm -rf /var/lib/apt/lists/*
-RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
-    && add-apt-repository \
-    "deb [arch=$ARCH] https://download.docker.com/linux/ubuntu \
-    $(lsb_release -cs) \
-    stable" \
-    && apt update -y && apt-get install --no-install-recommends -y docker-ce \
+    curl python3 python3-pip sshpass rsync \
     && rm -rf /var/lib/apt/lists/*

 # Some tools like yamllint need this
@@ -25,13 +17,22 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
 ENV LANG=C.UTF-8

 WORKDIR /kubespray
-COPY . .
-RUN /usr/bin/python3 -m pip install --no-cache-dir pip -U \
-    && /usr/bin/python3 -m pip install --no-cache-dir -r tests/requirements.txt \
-    && python3 -m pip install --no-cache-dir -r requirements.txt \
-    && update-alternatives --install /usr/bin/python python /usr/bin/python3 1
+COPY *yml /kubespray/
+COPY roles /kubespray/roles
+COPY inventory /kubespray/inventory
+COPY library /kubespray/library
+COPY extra_playbooks /kubespray/extra_playbooks

-RUN KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
+RUN python3 -m pip install --no-cache-dir \
+    ansible==5.7.1 \
+    ansible-core==2.12.5 \
+    cryptography==3.4.8 \
+    jinja2==2.11.3 \
+    netaddr==0.7.19 \
+    jmespath==1.0.1 \
+    MarkupSafe==1.1.1 \
+    ruamel.yaml==0.17.21 \
+    && KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
     && curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$ARCH/kubectl \
     && chmod a+x kubectl \
     && mv kubectl /usr/local/bin/kubectl
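With the Docker Engine install dropped, the image now only carries curl/python3/pip, the pinned pip packages, and a kubectl matching the default `kube_version`. A minimal smoke test of the resulting image, assuming it is built from the repository root (the `kubespray-test` tag is illustrative):

```ShellSession
# Hypothetical local check that the pinned toolchain is present in the image.
docker build -t kubespray-test .
docker run --rm kubespray-test ansible --version
docker run --rm kubespray-test kubectl version --client
```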
@@ -9,6 +9,7 @@ aliases:
     - oomichi
     - cristicalin
     - liupeng0518
+    - yankay
   kubespray-reviewers:
     - holmsten
     - bozzo
@@ -18,6 +19,8 @@ aliases:
     - cristicalin
     - liupeng0518
     - yankay
+    - cyclinder
+    - mzaian
   kubespray-emeritus_approvers:
     - riverzhang
     - atoms
README.md (80 changed lines)

@@ -13,7 +13,7 @@ You can get your invite [here](http://slack.k8s.io/)

 ## Quick Start

-To deploy the cluster you can use :
+Below are several ways to use Kubespray to deploy a Kubernetes cluster.

 ### Ansible

@@ -41,34 +41,46 @@ cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
 ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
 ```

-Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu).
-As a consequence, `ansible-playbook` command will fail with:
+Note: When Ansible is already installed via system packages on the control node,
+Python packages installed via `sudo pip install -r requirements.txt` will go to
+a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on
+Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on
+Ubuntu). As a consequence, the `ansible-playbook` command will fail with:

 ```raw
 ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
 ```

-probably pointing on a task depending on a module present in requirements.txt.
+This likely indicates that a task depends on a module present in ``requirements.txt``.

-One way of solving this would be to uninstall the Ansible package and then, to install it via pip but it is not always possible.
-A workaround consists of setting `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` environment variables respectively to the `ansible/modules` and `ansible/module_utils` subdirectories of pip packages installation location, which can be found in the Location field of the output of `pip show [package]` before executing `ansible-playbook`.
+One way of addressing this is to uninstall the system Ansible package then
+reinstall Ansible via ``pip``, but this is not always possible and one must
+take care regarding package versions.
+A workaround consists of setting the `ANSIBLE_LIBRARY`
+and `ANSIBLE_MODULE_UTILS` environment variables respectively to
+the `ansible/modules` and `ansible/module_utils` subdirectories of the ``pip``
+installation location, which is the ``Location`` shown by running
+`pip show [package]` before executing `ansible-playbook`.

-A simple way to ensure you get all the correct version of Ansible is to use the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
-You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:
+A simple way to ensure you get all the correct version of Ansible is to use
+the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
+You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/)
+to access the inventory and SSH key in the container, like this:

 ```ShellSession
-docker pull quay.io/kubespray/kubespray:v2.19.1
+git checkout v2.20.0
+docker pull quay.io/kubespray/kubespray:v2.20.0
 docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
   --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
-  quay.io/kubespray/kubespray:v2.19.1 bash
+  quay.io/kubespray/kubespray:v2.20.0 bash
 # Inside the container you may now run the kubespray playbooks:
 ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
 ```

 ### Vagrant

-For Vagrant we need to install python dependencies for provisioning tasks.
-Check if Python and pip are installed:
+For Vagrant we need to install Python dependencies for provisioning tasks.
+Check that ``Python`` and ``pip`` are installed:

 ```ShellSession
 python -V && pip -V
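The `ANSIBLE_LIBRARY` / `ANSIBLE_MODULE_UTILS` workaround described in the hunk above can be scripted; a minimal sketch, assuming the requirements were installed with `sudo pip install -r requirements.txt` and that `pip show ansible` reports the relevant `Location`:

```ShellSession
# Point Ansible at the pip-installed module tree before running the playbook.
PIP_LOCATION=$(pip show ansible | sed -n 's/^Location: //p')
export ANSIBLE_LIBRARY="$PIP_LOCATION/ansible/modules"
export ANSIBLE_MODULE_UTILS="$PIP_LOCATION/ansible/module_utils"
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```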
@@ -113,6 +125,7 @@ vagrant up
 - [Air-Gap installation](docs/offline-environment.md)
 - [NTP](docs/ntp.md)
+- [Hardening](docs/hardening.md)
 - [Mirror](docs/mirror.md)
 - [Roadmap](docs/roadmap.md)

 ## Supported Linux Distributions
@@ -121,7 +134,7 @@ vagrant up
 - **Debian** Bullseye, Buster, Jessie, Stretch
 - **Ubuntu** 16.04, 18.04, 20.04, 22.04
 - **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
-- **Fedora** 35, 36
+- **Fedora** 37, 38
 - **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
 - **openSUSE** Leap 15.x/Tumbleweed
 - **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8)
@@ -129,35 +142,37 @@ vagrant up
 - **Rocky Linux** [8, 9](docs/centos.md#centos-8)
 - **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md))
 - **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
+- **UOS Linux** (experimental: see [uos linux notes](docs/uoslinux.md))
+- **openEuler** (experimental: see [openEuler notes](docs/openeuler.md))

 Note: Upstart/SysV init based OS types are not supported.

 ## Supported Components

 - Core
-  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.24.6
-  - [etcd](https://github.com/etcd-io/etcd) v3.5.4
+  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.25.6
+  - [etcd](https://github.com/etcd-io/etcd) v3.5.6
   - [docker](https://www.docker.com/) v20.10 (see note)
-  - [containerd](https://containerd.io/) v1.6.8
+  - [containerd](https://containerd.io/) v1.6.15
   - [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
 - Network Plugin
-  - [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
-  - [calico](https://github.com/projectcalico/calico) v3.23.3
+  - [cni-plugins](https://github.com/containernetworking/plugins) v1.2.0
+  - [calico](https://github.com/projectcalico/calico) v3.24.5
   - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
   - [cilium](https://github.com/cilium/cilium) v1.12.1
-  - [flannel](https://github.com/flannel-io/flannel) v0.19.2
-  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.9.7
+  - [flannel](https://github.com/flannel-io/flannel) v0.20.2
+  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.10.7
   - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
   - [multus](https://github.com/intel/multus-cni) v3.8
   - [weave](https://github.com/weaveworks/weave) v2.8.1
-  - [kube-vip](https://github.com/kube-vip/kube-vip) v0.4.2
+  - [kube-vip](https://github.com/kube-vip/kube-vip) v0.5.5
 - Application
-  - [cert-manager](https://github.com/jetstack/cert-manager) v1.9.1
-  - [coredns](https://github.com/coredns/coredns) v1.8.6
-  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.3.1
+  - [cert-manager](https://github.com/jetstack/cert-manager) v1.11.0
+  - [coredns](https://github.com/coredns/coredns) v1.9.3
+  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.5.1
   - [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
-  - [argocd](https://argoproj.github.io/) v2.4.12
-  - [helm](https://helm.sh/) v3.9.4
+  - [argocd](https://argoproj.github.io/) v2.5.7
+  - [helm](https://helm.sh/) v3.10.3
   - [metallb](https://metallb.universe.tf/) v0.12.1
   - [registry](https://github.com/distribution/distribution) v2.8.1
 - Storage Plugin
@@ -168,16 +183,16 @@ Note: Upstart/SysV init based OS types are not supported.
   - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0
   - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0
   - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.22
-  - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.4.0
+  - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0

 ## Container Runtime Notes

-- The list of available docker version is 18.09, 19.03 and 20.10. The recommended docker version is 20.10. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
+- Supported Docker versions are 18.09, 19.03 and 20.10. The *recommended* Docker version is 20.10. `Kubelet` might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. the YUM ``versionlock`` plugin or ``apt pin``).
 - The cri-o version should be aligned with the respective kubernetes version (i.e. kube_version=1.20.x, crio_version=1.20)

 ## Requirements

-- **Minimum required version of Kubernetes is v1.22**
+- **Minimum required version of Kubernetes is v1.23**
 - **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
 - The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
 - The target servers are configured to allow **IPv4 forwarding**.
@@ -189,7 +204,7 @@ Note: Upstart/SysV init based OS types are not supported.
   or command parameters `--become or -b` should be specified.

 Hardware:
-These limits are safe guarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.
+These limits are safeguarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.

 - Master
   - Memory: 1500 MB
@@ -198,7 +213,7 @@ These limits are safeguarded by Kubespray. Actual requirements for your workloa

 ## Network Plugins

-You can choose between 10 network plugins. (default: `calico`, except Vagrant uses `flannel`)
+You can choose among ten network plugins. (default: `calico`, except Vagrant uses `flannel`)

 - [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.

@@ -225,7 +240,7 @@ You can choose among ten network plugins. (default: `calico`, except Vagrant us

 - [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.

-The choice is defined with the variable `kube_network_plugin`. There is also an
+The network plugin to use is defined by the variable `kube_network_plugin`. There is also an
 option to leverage built-in cloud provider networking instead.
 See also [Network checker](docs/netcheck.md).

@@ -246,6 +261,7 @@ See also [Network checker](docs/netcheck.md).

 - [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst)
 - [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
+- [Kubean](https://github.com/kubean-io/kubean)

 ## CI Tests
Vagrantfile (vendored, 16 changed lines)

@@ -29,8 +29,8 @@ SUPPORTED_OS = {
   "almalinux8" => {box: "almalinux/8", user: "vagrant"},
   "almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
   "rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
-  "fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
-  "fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"},
+  "fedora37" => {box: "fedora/37-cloud-base", user: "vagrant"},
+  "fedora38" => {box: "fedora/38-cloud-base", user: "vagrant"},
   "opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
   "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
   "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
@@ -201,7 +201,8 @@ Vagrant.configure("2") do |config|
     end

     ip = "#{$subnet}.#{i+100}"
-    node.vm.network :private_network, ip: ip,
+    node.vm.network :private_network,
+      :ip => ip,
       :libvirt__guest_ipv6 => 'yes',
       :libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
       :libvirt__ipv6_prefix => "64",
@@ -216,6 +217,14 @@ Vagrant.configure("2") do |config|
       node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
       node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
     end
+    # Hack for fedora37/38 to get the IP address of the second interface
+    if ["fedora37", "fedora38"].include? $os
+      config.vm.provision "shell", inline: <<-SHELL
+        nmcli conn modify 'Wired connection 2' ipv4.addresses $(cat /etc/sysconfig/network-scripts/ifcfg-eth1 | grep IPADDR | cut -d "=" -f2)
+        nmcli conn modify 'Wired connection 2' ipv4.method manual
+        service NetworkManager restart
+      SHELL
+    end

     # Disable firewalld on oraclelinux/redhat vms
     if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os
@@ -248,6 +257,7 @@ Vagrant.configure("2") do |config|
       if i == $num_instances
         node.vm.provision "ansible" do |ansible|
           ansible.playbook = $playbook
+          ansible.compatibility_mode = "2.0"
           ansible.verbose = $ansible_verbosity
           $ansible_inventory_path = File.join( $inventory, "hosts.ini")
           if File.exist?($ansible_inventory_path)
@@ -13,7 +13,7 @@
 # under the License.

 import inventory
-from test import support
+from io import StringIO
 import unittest
 from unittest import mock

@@ -41,7 +41,7 @@ class TestInventoryPrintHostnames(unittest.TestCase):
                        'access_ip': '10.90.0.3'}}}})
         with mock.patch('builtins.open', mock_io):
             with self.assertRaises(SystemExit) as cm:
-                with support.captured_stdout() as stdout:
+                with mock.patch('sys.stdout', new_callable=StringIO) as stdout:
                     inventory.KubesprayInventory(
                         changed_hosts=["print_hostnames"],
                         config_file="file")
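The replacement drops the CPython-internal `test.support.captured_stdout` helper in favor of plain standard-library `unittest.mock`; the capture pattern itself is ordinary Python. A self-contained sketch of the same pattern:

```python
from io import StringIO
from unittest import mock

# Capture anything written to sys.stdout while the patch is active.
with mock.patch('sys.stdout', new_callable=StringIO) as stdout:
    print("node1")
assert stdout.getvalue() == "node1\n"
```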
@@ -12,7 +12,7 @@ ssh_public_keys = [
 machines = {
   "master-0" : {
     "node_type" : "master",
-    "size" : "Medium",
+    "size" : "standard.medium",
     "boot_disk" : {
       "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
       "root_partition_size" : 50,
@@ -22,7 +22,7 @@ machines = {
   },
   "worker-0" : {
     "node_type" : "worker",
-    "size" : "Large",
+    "size" : "standard.large",
     "boot_disk" : {
       "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
       "root_partition_size" : 50,
@@ -32,7 +32,7 @@ machines = {
   },
   "worker-1" : {
     "node_type" : "worker",
-    "size" : "Large",
+    "size" : "standard.large",
     "boot_disk" : {
       "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
       "root_partition_size" : 50,
@@ -42,7 +42,7 @@ machines = {
   },
   "worker-2" : {
     "node_type" : "worker",
-    "size" : "Large",
+    "size" : "standard.large",
     "boot_disk" : {
       "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
       "root_partition_size" : 50,
@ -3,8 +3,8 @@ provider "exoscale" {}
|
||||
module "kubernetes" {
|
||||
source = "./modules/kubernetes-cluster"
|
||||
|
||||
prefix = var.prefix
|
||||
|
||||
prefix = var.prefix
|
||||
zone = var.zone
|
||||
machines = var.machines
|
||||
|
||||
ssh_public_keys = var.ssh_public_keys
|
||||
|
@@ -1,29 +1,25 @@
-data "exoscale_compute_template" "os_image" {
+data "exoscale_template" "os_image" {
   for_each = var.machines

   zone = var.zone
   name = each.value.boot_disk.image_name
 }

-data "exoscale_compute" "master_nodes" {
-  for_each = exoscale_compute.master
-
-  id = each.value.id
-
-  # Since private IP address is not assigned until the nics are created we need this
-  depends_on = [exoscale_nic.master_private_network_nic]
+data "exoscale_compute_instance" "master_nodes" {
+  for_each = exoscale_compute_instance.master
+
+  id   = each.value.id
+  zone = var.zone
 }

-data "exoscale_compute" "worker_nodes" {
-  for_each = exoscale_compute.worker
-
-  id = each.value.id
-
-  # Since private IP address is not assigned until the nics are created we need this
-  depends_on = [exoscale_nic.worker_private_network_nic]
+data "exoscale_compute_instance" "worker_nodes" {
+  for_each = exoscale_compute_instance.worker
+
+  id   = each.value.id
+  zone = var.zone
 }

-resource "exoscale_network" "private_network" {
+resource "exoscale_private_network" "private_network" {
   zone = var.zone
   name = "${var.prefix}-network"

@@ -34,25 +30,29 @@ resource "exoscale_network" "private_network" {
   netmask = cidrnetmask(var.private_network_cidr)
 }

-resource "exoscale_compute" "master" {
+resource "exoscale_compute_instance" "master" {
   for_each = {
     for name, machine in var.machines :
     name => machine
     if machine.node_type == "master"
   }

-  display_name    = "${var.prefix}-${each.key}"
-  template_id     = data.exoscale_compute_template.os_image[each.key].id
-  size            = each.value.size
-  disk_size       = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
-  state           = "Running"
-  zone            = var.zone
-  security_groups = [exoscale_security_group.master_sg.name]
+  name               = "${var.prefix}-${each.key}"
+  template_id        = data.exoscale_template.os_image[each.key].id
+  type               = each.value.size
+  disk_size          = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
+  state              = "Running"
+  zone               = var.zone
+  security_group_ids = [exoscale_security_group.master_sg.id]
+  network_interface {
+    network_id = exoscale_private_network.private_network.id
+  }
+  elastic_ip_ids = [exoscale_elastic_ip.control_plane_lb.id]

   user_data = templatefile(
     "${path.module}/templates/cloud-init.tmpl",
     {
-      eip_ip_address            = exoscale_ipaddress.ingress_controller_lb.ip_address
+      eip_ip_address            = exoscale_elastic_ip.ingress_controller_lb.ip_address
       node_local_partition_size = each.value.boot_disk.node_local_partition_size
       ceph_partition_size       = each.value.boot_disk.ceph_partition_size
       root_partition_size      = each.value.boot_disk.root_partition_size
@@ -62,25 +62,29 @@ resource "exoscale_compute" "master" {
   )
 }

-resource "exoscale_compute" "worker" {
+resource "exoscale_compute_instance" "worker" {
   for_each = {
     for name, machine in var.machines :
     name => machine
     if machine.node_type == "worker"
   }

-  display_name    = "${var.prefix}-${each.key}"
-  template_id     = data.exoscale_compute_template.os_image[each.key].id
-  size            = each.value.size
-  disk_size       = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
-  state           = "Running"
-  zone            = var.zone
-  security_groups = [exoscale_security_group.worker_sg.name]
+  name               = "${var.prefix}-${each.key}"
+  template_id        = data.exoscale_template.os_image[each.key].id
+  type               = each.value.size
+  disk_size          = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
+  state              = "Running"
+  zone               = var.zone
+  security_group_ids = [exoscale_security_group.worker_sg.id]
+  network_interface {
+    network_id = exoscale_private_network.private_network.id
+  }
+  elastic_ip_ids = [exoscale_elastic_ip.ingress_controller_lb.id]

   user_data = templatefile(
     "${path.module}/templates/cloud-init.tmpl",
     {
-      eip_ip_address            = exoscale_ipaddress.ingress_controller_lb.ip_address
+      eip_ip_address            = exoscale_elastic_ip.ingress_controller_lb.ip_address
       node_local_partition_size = each.value.boot_disk.node_local_partition_size
       ceph_partition_size       = each.value.boot_disk.ceph_partition_size
       root_partition_size      = each.value.boot_disk.root_partition_size
@@ -90,41 +94,33 @@ resource "exoscale_compute" "worker" {
   )
 }

-resource "exoscale_nic" "master_private_network_nic" {
-  for_each = exoscale_compute.master
-
-  compute_id = each.value.id
-  network_id = exoscale_network.private_network.id
-}
-
-resource "exoscale_nic" "worker_private_network_nic" {
-  for_each = exoscale_compute.worker
-
-  compute_id = each.value.id
-  network_id = exoscale_network.private_network.id
-}
-
 resource "exoscale_security_group" "master_sg" {
   name        = "${var.prefix}-master-sg"
   description = "Security group for Kubernetes masters"
 }

-resource "exoscale_security_group_rules" "master_sg_rules" {
+resource "exoscale_security_group_rule" "master_sg_rule_ssh" {
   security_group_id = exoscale_security_group.master_sg.id

-  # SSH
-  ingress {
-    protocol  = "TCP"
-    cidr_list = var.ssh_whitelist
-    ports     = ["22"]
-  }
+  for_each = toset(var.ssh_whitelist)
+  type       = "INGRESS"
+  start_port = 22
+  end_port   = 22
+  protocol   = "TCP"
+  cidr       = each.value
+}

-  # Kubernetes API
-  ingress {
-    protocol  = "TCP"
-    cidr_list = var.api_server_whitelist
-    ports     = ["6443"]
-  }
+resource "exoscale_security_group_rule" "master_sg_rule_k8s_api" {
+  security_group_id = exoscale_security_group.master_sg.id
+
+  for_each = toset(var.api_server_whitelist)
+  type       = "INGRESS"
+  start_port = 6443
+  end_port   = 6443
+  protocol   = "TCP"
+  cidr       = each.value
 }

 resource "exoscale_security_group" "worker_sg" {
@@ -132,62 +128,64 @@ resource "exoscale_security_group" "worker_sg" {
   description = "security group for kubernetes worker nodes"
 }

-resource "exoscale_security_group_rules" "worker_sg_rules" {
+resource "exoscale_security_group_rule" "worker_sg_rule_ssh" {
   security_group_id = exoscale_security_group.worker_sg.id

-  # SSH
-  ingress {
-    protocol  = "TCP"
-    cidr_list = var.ssh_whitelist
-    ports     = ["22"]
-  }
+  for_each = toset(var.ssh_whitelist)
+  type       = "INGRESS"
+  start_port = 22
+  end_port   = 22
+  protocol   = "TCP"
+  cidr       = each.value
+}

-  # HTTP(S)
-  ingress {
-    protocol  = "TCP"
-    cidr_list = ["0.0.0.0/0"]
-    ports     = ["80", "443"]
-  }
+resource "exoscale_security_group_rule" "worker_sg_rule_http" {
+  security_group_id = exoscale_security_group.worker_sg.id
+
+  for_each = toset(["80", "443"])
+  type       = "INGRESS"
+  start_port = each.value
+  end_port   = each.value
+  protocol   = "TCP"
+  cidr       = "0.0.0.0/0"
+}

-  # Kubernetes Nodeport
-  ingress {
-    protocol  = "TCP"
-    cidr_list = var.nodeport_whitelist
-    ports     = ["30000-32767"]
-  }
+resource "exoscale_security_group_rule" "worker_sg_rule_nodeport" {
+  security_group_id = exoscale_security_group.worker_sg.id
+
+  # HTTP(S)
+  for_each = toset(var.nodeport_whitelist)
+  type       = "INGRESS"
+  start_port = 30000
+  end_port   = 32767
+  protocol   = "TCP"
+  cidr       = each.value
 }

-resource "exoscale_ipaddress" "ingress_controller_lb" {
-  zone                     = var.zone
-  healthcheck_mode         = "http"
-  healthcheck_port         = 80
-  healthcheck_path         = "/healthz"
-  healthcheck_interval     = 10
-  healthcheck_timeout      = 2
-  healthcheck_strikes_ok   = 2
-  healthcheck_strikes_fail = 3
-}
-
-resource "exoscale_secondary_ipaddress" "ingress_controller_lb" {
-  for_each = exoscale_compute.worker
-
-  compute_id = each.value.id
-  ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
-}
+resource "exoscale_elastic_ip" "ingress_controller_lb" {
+  zone = var.zone
+  healthcheck {
+    mode         = "http"
+    port         = 80
+    uri          = "/healthz"
+    interval     = 10
+    timeout      = 2
+    strikes_ok   = 2
+    strikes_fail = 3
+  }
+}

-resource "exoscale_ipaddress" "control_plane_lb" {
-  zone                     = var.zone
-  healthcheck_mode         = "tcp"
-  healthcheck_port         = 6443
-  healthcheck_interval     = 10
-  healthcheck_timeout      = 2
-  healthcheck_strikes_ok   = 2
-  healthcheck_strikes_fail = 3
-}
-
-resource "exoscale_secondary_ipaddress" "control_plane_lb" {
-  for_each = exoscale_compute.master
-
-  compute_id = each.value.id
-  ip_address = exoscale_ipaddress.control_plane_lb.ip_address
+resource "exoscale_elastic_ip" "control_plane_lb" {
+  zone = var.zone
+  healthcheck {
+    mode         = "tcp"
+    port         = 6443
+    interval     = 10
+    timeout      = 2
+    strikes_ok   = 2
+    strikes_fail = 3
+  }
 }
@@ -1,19 +1,19 @@
 output "master_ip_addresses" {
   value = {
-    for key, instance in exoscale_compute.master :
+    for key, instance in exoscale_compute_instance.master :
     instance.name => {
-      "private_ip" = contains(keys(data.exoscale_compute.master_nodes), key) ? data.exoscale_compute.master_nodes[key].private_network_ip_addresses[0] : ""
-      "public_ip"  = exoscale_compute.master[key].ip_address
+      "private_ip" = contains(keys(data.exoscale_compute_instance.master_nodes), key) ? data.exoscale_compute_instance.master_nodes[key].private_network_ip_addresses[0] : ""
+      "public_ip"  = exoscale_compute_instance.master[key].ip_address
     }
   }
 }

 output "worker_ip_addresses" {
   value = {
-    for key, instance in exoscale_compute.worker :
+    for key, instance in exoscale_compute_instance.worker :
     instance.name => {
-      "private_ip" = contains(keys(data.exoscale_compute.worker_nodes), key) ? data.exoscale_compute.worker_nodes[key].private_network_ip_addresses[0] : ""
-      "public_ip"  = exoscale_compute.worker[key].ip_address
+      "private_ip" = contains(keys(data.exoscale_compute_instance.worker_nodes), key) ? data.exoscale_compute_instance.worker_nodes[key].private_network_ip_addresses[0] : ""
+      "public_ip"  = exoscale_compute_instance.worker[key].ip_address
     }
   }
 }
@@ -23,9 +23,9 @@ output "cluster_private_network_cidr" {
 }

 output "ingress_controller_lb_ip_address" {
-  value = exoscale_ipaddress.ingress_controller_lb.ip_address
+  value = exoscale_elastic_ip.ingress_controller_lb.ip_address
 }

 output "control_plane_lb_ip_address" {
-  value = exoscale_ipaddress.control_plane_lb.ip_address
+  value = exoscale_elastic_ip.control_plane_lb.ip_address
 }
@@ -1,7 +1,7 @@
 terraform {
   required_providers {
     exoscale = {
-      source = "exoscale/exoscale"
+      source  = "exoscale/exoscale"
       version = ">= 0.21"
     }
   }
@@ -75,6 +75,11 @@ ansible-playbook -i contrib/terraform/gcs/inventory.ini cluster.yml -b -v
 * `api_server_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the API server
 * `nodeport_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to the kubernetes nodes on port 30000-32767 (kubernetes nodeports)
 * `ingress_whitelist`: List of IP ranges (CIDR) that will be allowed to connect to ingress on ports 80 and 443
+* `extra_ingress_firewalls`: Additional ingress firewall rules. Key will be used as the name of the rule
+  * `source_ranges`: List of IP ranges (CIDR). Example: `["8.8.8.8"]`
+  * `protocol`: Protocol. Example `"tcp"`
+  * `ports`: List of ports, as string. Example `["53"]`
+  * `target_tags`: List of target tags (either the machine name or `control-plane` or `worker`). Example: `["control-plane", "worker-0"]`

 ### Optional
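The new variable plugs straight into the cluster's tfvars file; a hypothetical rule opening DNS to the workers, composed only from the field formats documented above (the `"dns"` key is an example rule name):

```
extra_ingress_firewalls = {
  "dns" = {
    source_ranges = ["8.8.8.8"]
    protocol      = "tcp"
    ports         = ["53"]
    target_tags   = ["worker"]
  }
}
```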
@ -34,4 +34,6 @@ module "kubernetes" {
|
||||
api_server_whitelist = var.api_server_whitelist
|
||||
nodeport_whitelist = var.nodeport_whitelist
|
||||
ingress_whitelist = var.ingress_whitelist
|
||||
|
||||
extra_ingress_firewalls = var.extra_ingress_firewalls
|
||||
}
|
||||
|
@ -219,7 +219,7 @@ resource "google_compute_instance" "master" {
|
||||
machine_type = each.value.size
|
||||
zone = each.value.zone
|
||||
|
||||
tags = ["master"]
|
||||
tags = ["control-plane", "master", each.key]
|
||||
|
||||
boot_disk {
|
||||
initialize_params {
|
||||
@ -325,7 +325,7 @@ resource "google_compute_instance" "worker" {
|
||||
machine_type = each.value.size
|
||||
zone = each.value.zone
|
||||
|
||||
tags = ["worker"]
|
||||
tags = ["worker", each.key]
|
||||
|
||||
boot_disk {
|
||||
initialize_params {
|
||||
@ -398,3 +398,24 @@ resource "google_compute_target_pool" "worker_lb" {
|
||||
name = "${var.prefix}-worker-lb-pool"
|
||||
instances = local.worker_target_list
|
||||
}
|
||||
|
||||
resource "google_compute_firewall" "extra_ingress_firewall" {
|
||||
for_each = {
|
||||
for name, firewall in var.extra_ingress_firewalls :
|
||||
name => firewall
|
||||
}
|
||||
|
||||
name = "${var.prefix}-${each.key}-ingress"
|
||||
network = google_compute_network.main.name
|
||||
|
||||
priority = 100
|
||||
|
||||
source_ranges = each.value.source_ranges
|
||||
|
||||
target_tags = each.value.target_tags
|
||||
|
||||
allow {
|
||||
protocol = each.value.protocol
|
||||
ports = each.value.ports
|
||||
}
|
||||
}
|
||||
|
@ -14,7 +14,7 @@ variable "machines" {
|
||||
}))
|
||||
boot_disk = object({
|
||||
image_name = string
|
||||
size = number
|
||||
size = number
|
||||
})
|
||||
}))
|
||||
}
|
||||
@ -73,3 +73,14 @@ variable "ingress_whitelist" {
|
||||
variable "private_network_cidr" {
|
||||
default = "10.0.10.0/24"
|
||||
}
|
||||
|
||||
variable "extra_ingress_firewalls" {
|
||||
type = map(object({
|
||||
source_ranges = set(string)
|
||||
protocol = string
|
||||
ports = list(string)
|
||||
target_tags = set(string)
|
||||
}))
|
||||
|
||||
default = {}
|
||||
}
|
||||
|
@ -95,3 +95,14 @@ variable "ingress_whitelist" {
|
||||
type = list(string)
|
||||
default = ["0.0.0.0/0"]
|
||||
}
|
||||
|
||||
variable "extra_ingress_firewalls" {
|
||||
type = map(object({
|
||||
source_ranges = set(string)
|
||||
protocol = string
|
||||
ports = list(string)
|
||||
target_tags = set(string)
|
||||
}))
|
||||
|
||||
default = {}
|
||||
}
|
||||
|
@@ -56,11 +56,24 @@ cd inventory/$CLUSTER

 Edit `default.tfvars` to match your requirement.

+Flatcar Container Linux instead of the basic Hetzner Images.
+
+```bash
+cd ../../contrib/terraform/hetzner
+```
+
+Edit `main.tf` and reactivate the module `source = "./modules/kubernetes-cluster-flatcar"` and
+comment out the `#source = "./modules/kubernetes-cluster"`.
+
+activate `ssh_private_key_path = var.ssh_private_key_path`. The VM boots into
+Rescue-Mode with the selected image of the `var.machines` but installs Flatcar instead.
+
 Run Terraform to create the infrastructure.

 ```bash
-terraform init ../../contrib/terraform/hetzner
-terraform apply --var-file default.tfvars ../../contrib/terraform/hetzner/
+cd ./kubespray
+terraform -chdir=./contrib/terraform/hetzner/ init
+terraform -chdir=./contrib/terraform/hetzner/ apply --var-file=../../../inventory/$CLUSTER/default.tfvars
 ```

 You should now have an inventory file named `inventory.ini` that you can use with kubespray.
@@ -9,6 +9,8 @@ ssh_public_keys = [
   "ssh-rsa I-did-not-read-the-docs 2",
 ]

+ssh_private_key_path = "~/.ssh/id_rsa"
+
 machines = {
   "master-0" : {
     "node_type" : "master",
@ -2,6 +2,7 @@ provider "hcloud" {}
|
||||
|
||||
module "kubernetes" {
|
||||
source = "./modules/kubernetes-cluster"
|
||||
#source = "./modules/kubernetes-cluster-flatcar"
|
||||
|
||||
prefix = var.prefix
|
||||
|
||||
@ -9,6 +10,9 @@ module "kubernetes" {
|
||||
|
||||
machines = var.machines
|
||||
|
||||
#only for flatcar
|
||||
#ssh_private_key_path = var.ssh_private_key_path
|
||||
|
||||
ssh_public_keys = var.ssh_public_keys
|
||||
network_zone = var.network_zone
|
||||
|
||||
@ -49,4 +53,4 @@ resource "null_resource" "inventories" {
|
||||
triggers = {
|
||||
template = data.template_file.inventory.rendered
|
||||
}
|
||||
}
|
||||
}
|
@@ -0,0 +1,202 @@
+resource "hcloud_network" "kubernetes" {
+  name     = "${var.prefix}-network"
+  ip_range = var.private_network_cidr
+}
+
+resource "hcloud_network_subnet" "kubernetes" {
+  type         = "cloud"
+  network_id   = hcloud_network.kubernetes.id
+  network_zone = var.network_zone
+  ip_range     = var.private_subnet_cidr
+}
+
+resource "hcloud_ssh_key" "first" {
+  name       = var.prefix
+  public_key = var.ssh_public_keys.0
+}
+
+resource "hcloud_server" "master" {
+  for_each = {
+    for name, machine in var.machines :
+    name => machine
+    if machine.node_type == "master"
+  }
+  name     = "${var.prefix}-${each.key}"
+  ssh_keys = [hcloud_ssh_key.first.id]
+  # boot into rescue OS
+  rescue = "linux64"
+  # dummy value for the OS because Flatcar is not available
+  image       = each.value.image
+  server_type = each.value.size
+  location    = var.zone
+  connection {
+    host        = self.ipv4_address
+    timeout     = "5m"
+    private_key = file(var.ssh_private_key_path)
+  }
+  firewall_ids = [hcloud_firewall.machine.id]
+  provisioner "file" {
+    content     = data.ct_config.machine-ignitions[each.key].rendered
+    destination = "/root/ignition.json"
+  }
+
+  provisioner "remote-exec" {
+    inline = [
+      "set -ex",
+      "apt update",
+      "apt install -y gawk",
+      "curl -fsSLO --retry-delay 1 --retry 60 --retry-connrefused --retry-max-time 60 --connect-timeout 20 https://raw.githubusercontent.com/kinvolk/init/flatcar-master/bin/flatcar-install",
+      "chmod +x flatcar-install",
+      "./flatcar-install -s -i /root/ignition.json",
+      "shutdown -r +1",
+    ]
+  }
+
+  # optional:
+  provisioner "remote-exec" {
+    connection {
+      host    = self.ipv4_address
+      timeout = "3m"
+      user    = var.user_flatcar
+    }
+
+    inline = [
+      "sudo hostnamectl set-hostname ${self.name}",
+    ]
+  }
+}
+
+resource "hcloud_server_network" "master" {
+  for_each  = hcloud_server.master
+  server_id = each.value.id
+  subnet_id = hcloud_network_subnet.kubernetes.id
+}
+
+resource "hcloud_server" "worker" {
+  for_each = {
+    for name, machine in var.machines :
+    name => machine
+    if machine.node_type == "worker"
+  }
+  name     = "${var.prefix}-${each.key}"
+  ssh_keys = [hcloud_ssh_key.first.id]
+  # boot into rescue OS
+  rescue = "linux64"
+  # dummy value for the OS because Flatcar is not available
+  image       = each.value.image
+  server_type = each.value.size
+  location    = var.zone
+  connection {
+    host        = self.ipv4_address
+    timeout     = "5m"
+    private_key = file(var.ssh_private_key_path)
+  }
+  firewall_ids = [hcloud_firewall.machine.id]
+  provisioner "file" {
+    content     = data.ct_config.machine-ignitions[each.key].rendered
+    destination = "/root/ignition.json"
+  }
+
+  provisioner "remote-exec" {
+    inline = [
+      "set -ex",
+      "apt update",
+      "apt install -y gawk",
+      "curl -fsSLO --retry-delay 1 --retry 60 --retry-connrefused --retry-max-time 60 --connect-timeout 20 https://raw.githubusercontent.com/kinvolk/init/flatcar-master/bin/flatcar-install",
+      "chmod +x flatcar-install",
+      "./flatcar-install -s -i /root/ignition.json",
+      "shutdown -r +1",
+    ]
+  }
+
+  # optional:
+  provisioner "remote-exec" {
+    connection {
+      host    = self.ipv4_address
+      timeout = "3m"
+      user    = var.user_flatcar
+    }
+
+    inline = [
+      "sudo hostnamectl set-hostname ${self.name}",
+    ]
+  }
+}
+
+resource "hcloud_server_network" "worker" {
+  for_each  = hcloud_server.worker
+  server_id = each.value.id
+  subnet_id = hcloud_network_subnet.kubernetes.id
+}
+
+data "ct_config" "machine-ignitions" {
+  for_each = {
+    for name, machine in var.machines :
+    name => machine
+  }
+  content = data.template_file.machine-configs[each.key].rendered
+}
+
+data "template_file" "machine-configs" {
+  for_each = {
+    for name, machine in var.machines :
+    name => machine
+  }
+  template = file("${path.module}/templates/machine.yaml.tmpl")
+
+  vars = {
+    ssh_keys     = jsonencode(var.ssh_public_keys)
+    user_flatcar = jsonencode(var.user_flatcar)
+    name         = each.key
+  }
+}
+
+resource "hcloud_firewall" "machine" {
+  name = "${var.prefix}-machine-firewall"
+
+  rule {
+    direction  = "in"
+    protocol   = "tcp"
+    port       = "22"
+    source_ips = var.ssh_whitelist
+  }
+
+  rule {
+    direction  = "in"
+    protocol   = "tcp"
+    port       = "6443"
+    source_ips = var.api_server_whitelist
+  }
+}
+
+resource "hcloud_firewall" "worker" {
+  name = "${var.prefix}-worker-firewall"
+
+  rule {
+    direction  = "in"
+    protocol   = "tcp"
+    port       = "22"
+    source_ips = var.ssh_whitelist
+  }
+
+  rule {
+    direction  = "in"
+    protocol   = "tcp"
+    port       = "80"
+    source_ips = var.ingress_whitelist
+  }
+
+  rule {
+    direction  = "in"
+    protocol   = "tcp"
+    port       = "443"
+    source_ips = var.ingress_whitelist
+  }
+
+  rule {
+    direction  = "in"
+    protocol   = "tcp"
+    port       = "30000-32767"
+    source_ips = var.nodeport_whitelist
+  }
+}
@@ -0,0 +1,27 @@
+output "master_ip_addresses" {
+  value = {
+    for key, instance in hcloud_server.master :
+    instance.name => {
+      "private_ip" = hcloud_server_network.master[key].ip
+      "public_ip"  = hcloud_server.master[key].ipv4_address
+    }
+  }
+}
+
+output "worker_ip_addresses" {
+  value = {
+    for key, instance in hcloud_server.worker :
+    instance.name => {
+      "private_ip" = hcloud_server_network.worker[key].ip
+      "public_ip"  = hcloud_server.worker[key].ipv4_address
+    }
+  }
+}
+
+output "cluster_private_network_cidr" {
+  value = var.private_subnet_cidr
+}
+
+output "network_id" {
+  value = hcloud_network.kubernetes.id
+}
@@ -0,0 +1,16 @@
+---
+passwd:
+  users:
+    - name: ${user_flatcar}
+      ssh_authorized_keys: ${ssh_keys}
+storage:
+  files:
+    - path: /home/core/works
+      filesystem: root
+      mode: 0755
+      contents:
+        inline: |
+          #!/bin/bash
+          set -euo pipefail
+          hostname="$(hostname)"
+          echo My name is ${name} and the hostname is $${hostname}
@@ -0,0 +1,60 @@
+
+variable "zone" {
+  type    = string
+  default = "fsn1"
+}
+
+variable "prefix" {
+  default = "k8s"
+}
+
+variable "user_flatcar" {
+  type    = string
+  default = "core"
+}
+
+variable "machines" {
+  type = map(object({
+    node_type = string
+    size      = string
+    image     = string
+  }))
+}
+
+
+variable "ssh_public_keys" {
+  type = list(string)
+}
+
+variable "ssh_private_key_path" {
+  type    = string
+  default = "~/.ssh/id_rsa"
+}
+
+variable "ssh_whitelist" {
+  type = list(string)
+}
+
+variable "api_server_whitelist" {
+  type = list(string)
+}
+
+variable "nodeport_whitelist" {
+  type = list(string)
+}
+
+variable "ingress_whitelist" {
+  type = list(string)
+}
+
+variable "private_network_cidr" {
+  default = "10.0.0.0/16"
+}
+
+variable "private_subnet_cidr" {
+  default = "10.0.10.0/24"
+}
+variable "network_zone" {
+  default = "eu-central"
+}
@@ -0,0 +1,13 @@
+terraform {
+  required_providers {
+    hcloud = {
+      source = "hetznercloud/hcloud"
+    }
+    ct = {
+      source = "poseidon/ct"
+    }
+    null = {
+      source = "hashicorp/null"
+    }
+  }
+}
@@ -2,18 +2,18 @@
 ${connection_strings_master}
 ${connection_strings_worker}

-[kube-master]
+[kube_control_plane]
 ${list_master}

 [etcd]
 ${list_master}

-[kube-node]
+[kube_node]
 ${list_worker}

-[k8s-cluster:children]
+[k8s_cluster:children]
 kube-master
 kube-node

-[k8s-cluster:vars]
+[k8s_cluster:vars]
 network_id=${network_id}
@ -25,6 +25,12 @@ variable "ssh_public_keys" {
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "ssh_private_key_path" {
|
||||
description = "Private SSH key which connect to the VMs."
|
||||
type = string
|
||||
default = "~/.ssh/id_rsa"
|
||||
}
|
||||
|
||||
variable "ssh_whitelist" {
|
||||
description = "List of IP ranges (CIDR) to whitelist for ssh"
|
||||
type = list(string)
|
||||
|
@ -88,7 +88,7 @@ binaries available on hyperkube v1.4.3_coreos.0 or higher.
|
||||
|
||||
## Requirements
|
||||
|
||||
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html) 0.12 or later
|
||||
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html) 0.14 or later
|
||||
- [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html)
|
||||
- you already have a suitable OS image in Glance
|
||||
- you already have a floating IP pool created
|
||||
@ -270,6 +270,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube_ingress` for running ingress controller pods, empty by default. |
|`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default |
|`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default |
|`bastion_allowed_ports` | List of ports to open on bastion node, `[]` by default |
|`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |
|`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
|`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |
@ -283,6 +284,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|`master_server_group_policy` | Enable and use openstack nova servergroups for masters with set policy, default: "" (disabled) |
|`node_server_group_policy` | Enable and use openstack nova servergroups for nodes with set policy, default: "" (disabled) |
|`etcd_server_group_policy` | Enable and use openstack nova servergroups for etcd with set policy, default: "" (disabled) |
|`additional_server_groups` | Extra server groups to create. Set "policy" to the policy for the group, expected format is `{"new-server-group" = {"policy" = "anti-affinity"}}`, default: {} (to not create any extra groups) |
|`use_access_ip` | If 1, nodes with floating IPs will transmit internal cluster traffic via floating IPs; if 0 private IPs will be used instead. Default value is 1. |
|`port_security_enabled` | Allow to disable port security by setting this to `false`. `true` by default |
|`force_null_port_security` | Set `null` instead of `true` or `false` for `port_security`. `false` by default |
@ -291,12 +293,33 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.

##### k8s_nodes

Allows a custom definition of worker nodes giving the operator full control over individual node flavor and
availability zone placement. To enable the use of this mode set the `number_of_k8s_nodes` and
`number_of_k8s_nodes_no_floating_ip` variables to 0. Then define your desired worker node configuration
using the `k8s_nodes` variable. The `az`, `flavor` and `floating_ip` parameters are mandatory.
Allows a custom definition of worker nodes giving the operator full control over individual node flavor and availability zone placement.
To enable the use of this mode set the `number_of_k8s_nodes` and `number_of_k8s_nodes_no_floating_ip` variables to 0.
Then define your desired worker node configuration using the `k8s_nodes` variable.
The `az`, `flavor` and `floating_ip` parameters are mandatory.
The optional parameter `extra_groups` (a comma-delimited string) can be used to define extra inventory group memberships for specific nodes.

```yaml
k8s_nodes:
  node-name:
    az: string                      # Name of the AZ
    flavor: string                  # Flavor ID to use
    floating_ip: bool               # If floating IPs should be created or not
    extra_groups: string            # (optional) Additional groups to add for kubespray, defaults to no groups
    image_id: string                # (optional) Image ID to use, defaults to var.image_id or var.image
    root_volume_size_in_gb: number  # (optional) Size of the block storage to use as root disk, defaults to var.node_root_volume_size_in_gb or to use volume from flavor otherwise
    volume_type: string             # (optional) Volume type to use, defaults to var.node_volume_type
    network_id: string              # (optional) Use this network_id for the node, defaults to either var.network_id or ID of var.network_name
    server_group: string            # (optional) Server group to add this node to. If set, this has to be one specified in additional_server_groups, defaults to use the server group specified in node_server_group_policy
    cloudinit:                      # (optional) Options for cloud-init
      extra_partitions:             # List of extra partitions (other than the root partition) to setup during creation
        volume_path: string         # Path to the volume to create partition for (e.g. /dev/vda )
        partition_path: string      # Path to the partition (e.g. /dev/vda2 )
        mount_path: string          # Path to where the partition should be mounted
        partition_start: string     # Where the partition should start (e.g. 10GB ). Note, if you set the partition_start to 0 there will be no space left for the root partition
        partition_end: string       # Where the partition should end (e.g. 10GB or -1 for end of volume)
```

For example:

```ini
@ -426,7 +449,7 @@ This should finish fairly quickly telling you Terraform has successfully initial

You can apply cloud-init based customization for the openstack instances before provisioning your cluster.
One common template is used for all instances. Adjust the file shown below:
`contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml`
`contrib/terraform/openstack/modules/compute/templates/cloudinit.yaml.tmpl`
For example, to enable openstack novnc access and ansible_user=root SSH access:

```ShellSession
@ -84,6 +84,7 @@ module "compute" {
  supplementary_node_groups  = var.supplementary_node_groups
  master_allowed_ports       = var.master_allowed_ports
  worker_allowed_ports       = var.worker_allowed_ports
  bastion_allowed_ports      = var.bastion_allowed_ports
  use_access_ip              = var.use_access_ip
  master_server_group_policy = var.master_server_group_policy
  node_server_group_policy   = var.node_server_group_policy

@ -96,6 +97,8 @@ module "compute" {
  network_router_id          = module.network.router_id
  network_id                 = module.network.network_id
  use_existing_network       = var.use_existing_network
  private_subnet_id          = module.network.subnet_id
  additional_server_groups   = var.additional_server_groups

  depends_on = [
    module.network.subnet_id
@ -15,8 +15,14 @@ data "openstack_images_image_v2" "image_master" {
  name = var.image_master == "" ? var.image : var.image_master
}

data "template_file" "cloudinit" {
  template = file("${path.module}/templates/cloudinit.yaml")
data "cloudinit_config" "cloudinit" {
  part {
    content_type = "text/cloud-config"
    content = templatefile("${path.module}/templates/cloudinit.yaml.tmpl", {
      # template_file doesn't support lists
      extra_partitions = ""
    })
  }
}

data "openstack_networking_network_v2" "k8s_network" {
@ -82,6 +88,17 @@ resource "openstack_networking_secgroup_rule_v2" "bastion" {
  security_group_id = openstack_networking_secgroup_v2.bastion[0].id
}

resource "openstack_networking_secgroup_rule_v2" "k8s_bastion_ports" {
  count             = length(var.bastion_allowed_ports)
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = lookup(var.bastion_allowed_ports[count.index], "protocol", "tcp")
  port_range_min    = lookup(var.bastion_allowed_ports[count.index], "port_range_min")
  port_range_max    = lookup(var.bastion_allowed_ports[count.index], "port_range_max")
  remote_ip_prefix  = lookup(var.bastion_allowed_ports[count.index], "remote_ip_prefix", "0.0.0.0/0")
  security_group_id = openstack_networking_secgroup_v2.bastion[0].id
}

resource "openstack_networking_secgroup_v2" "k8s" {
  name        = "${var.cluster_name}-k8s"
  description = "${var.cluster_name} - Kubernetes"

@ -156,6 +173,12 @@ resource "openstack_compute_servergroup_v2" "k8s_etcd" {
  policies = [var.etcd_server_group_policy]
}

resource "openstack_compute_servergroup_v2" "k8s_node_additional" {
  for_each = var.additional_server_groups
  name     = "k8s-${each.key}-srvgrp"
  policies = [each.value.policy]
}

locals {
  # master groups
  master_sec_groups = compact([

@ -185,6 +208,29 @@ locals {
  image_to_use_gfs = var.image_gfs_uuid != "" ? var.image_gfs_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.gfs_image[0].id
  # image_master uuidimage_gfs_uuid
  image_to_use_master = var.image_master_uuid != "" ? var.image_master_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.image_master[0].id

  k8s_nodes_settings = {
    for name, node in var.k8s_nodes :
      name => {
        "use_local_disk" = (node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.node_root_volume_size_in_gb) == 0,
        "image_id"       = node.image_id != null ? node.image_id : local.image_to_use_node,
        "volume_size"    = node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.node_root_volume_size_in_gb,
        "volume_type"    = node.volume_type != null ? node.volume_type : var.node_volume_type,
        "network_id"     = node.network_id != null ? node.network_id : (var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id)
        "server_group"   = node.server_group != null ? [openstack_compute_servergroup_v2.k8s_node_additional[node.server_group].id] : (var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0].id] : [])
      }
  }

  k8s_masters_settings = {
    for name, node in var.k8s_masters :
      name => {
        "use_local_disk" = (node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.master_root_volume_size_in_gb) == 0,
        "image_id"       = node.image_id != null ? node.image_id : local.image_to_use_master,
        "volume_size"    = node.root_volume_size_in_gb != null ? node.root_volume_size_in_gb : var.master_root_volume_size_in_gb,
        "volume_type"    = node.volume_type != null ? node.volume_type : var.master_volume_type,
        "network_id"     = node.network_id != null ? node.network_id : (var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id)
      }
  }
}

resource "openstack_networking_port_v2" "bastion_port" {
@ -195,6 +241,12 @@ resource "openstack_networking_port_v2" "bastion_port" {
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.bastion_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id

@ -207,7 +259,7 @@ resource "openstack_compute_instance_v2" "bastion" {
  image_id  = var.bastion_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
  flavor_id = var.flavor_bastion
  key_pair  = openstack_compute_keypair_v2.k8s.name
  user_data = data.template_file.cloudinit.rendered
  user_data = data.cloudinit_config.cloudinit.rendered

  dynamic "block_device" {
    for_each = var.bastion_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []

@ -245,6 +297,12 @@ resource "openstack_networking_port_v2" "k8s_master_port" {
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id

@ -258,7 +316,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
  image_id  = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
  flavor_id = var.flavor_k8s_master
  key_pair  = openstack_compute_keypair_v2.k8s.name
  user_data = data.template_file.cloudinit.rendered
  user_data = data.cloudinit_config.cloudinit.rendered


  dynamic "block_device" {

@ -300,11 +358,17 @@ resource "openstack_compute_instance_v2" "k8s_master" {
resource "openstack_networking_port_v2" "k8s_masters_port" {
  for_each              = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {}
  name                  = "${var.cluster_name}-k8s-${each.key}"
  network_id            = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
  network_id            = local.k8s_masters_settings[each.key].network_id
  admin_state_up        = "true"
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id

@ -315,17 +379,17 @@ resource "openstack_compute_instance_v2" "k8s_masters" {
  for_each          = var.number_of_k8s_masters == 0 && var.number_of_k8s_masters_no_etcd == 0 && var.number_of_k8s_masters_no_floating_ip == 0 && var.number_of_k8s_masters_no_floating_ip_no_etcd == 0 ? var.k8s_masters : {}
  name              = "${var.cluster_name}-k8s-${each.key}"
  availability_zone = each.value.az
  image_id          = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
  image_id          = local.k8s_masters_settings[each.key].use_local_disk ? local.k8s_masters_settings[each.key].image_id : null
  flavor_id         = each.value.flavor
  key_pair          = openstack_compute_keypair_v2.k8s.name

  dynamic "block_device" {
    for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
    for_each = !local.k8s_masters_settings[each.key].use_local_disk ? [local.k8s_masters_settings[each.key].image_id] : []
    content {
      uuid                  = local.image_to_use_master
      uuid                  = block_device.value
      source_type           = "image"
      volume_size           = var.master_root_volume_size_in_gb
      volume_type           = var.master_volume_type
      volume_size           = local.k8s_masters_settings[each.key].volume_size
      volume_type           = local.k8s_masters_settings[each.key].volume_type
      boot_index            = 0
      destination_type      = "volume"
      delete_on_termination = true

@ -351,7 +415,7 @@ resource "openstack_compute_instance_v2" "k8s_masters" {
  }

  provisioner "local-exec" {
    command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ${path.root}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_masters_fips : value.address]), 0)}/ > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}"
    command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ${path.module}/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_masters_fips : value.address]), 0)}/ > ${var.group_vars_path}/no_floating.yml%{else}true%{endif}"
  }
}
@ -363,6 +427,12 @@ resource "openstack_networking_port_v2" "k8s_master_no_etcd_port" {
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id

@ -376,7 +446,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
  image_id  = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
  flavor_id = var.flavor_k8s_master
  key_pair  = openstack_compute_keypair_v2.k8s.name
  user_data = data.template_file.cloudinit.rendered
  user_data = data.cloudinit_config.cloudinit.rendered


  dynamic "block_device" {

@ -423,6 +493,12 @@ resource "openstack_networking_port_v2" "etcd_port" {
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.etcd_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id

@ -436,7 +512,7 @@ resource "openstack_compute_instance_v2" "etcd" {
  image_id  = var.etcd_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
  flavor_id = var.flavor_etcd
  key_pair  = openstack_compute_keypair_v2.k8s.name
  user_data = data.template_file.cloudinit.rendered
  user_data = data.cloudinit_config.cloudinit.rendered

  dynamic "block_device" {
    for_each = var.etcd_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []

@ -477,6 +553,12 @@ resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_port" {
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id

@ -531,6 +613,12 @@ resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_no_etcd_port"
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id

@ -544,7 +632,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
  image_id  = var.master_root_volume_size_in_gb == 0 ? local.image_to_use_master : null
  flavor_id = var.flavor_k8s_master
  key_pair  = openstack_compute_keypair_v2.k8s.name
  user_data = data.template_file.cloudinit.rendered
  user_data = data.cloudinit_config.cloudinit.rendered

  dynamic "block_device" {
    for_each = var.master_root_volume_size_in_gb > 0 ? [local.image_to_use_master] : []
@ -586,6 +674,12 @@ resource "openstack_networking_port_v2" "k8s_node_port" {
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.worker_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id

@ -599,7 +693,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
  image_id  = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
  flavor_id = var.flavor_k8s_node
  key_pair  = openstack_compute_keypair_v2.k8s.name
  user_data = data.template_file.cloudinit.rendered
  user_data = data.cloudinit_config.cloudinit.rendered

  dynamic "block_device" {
    for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []

@ -646,6 +740,12 @@ resource "openstack_networking_port_v2" "k8s_node_no_floating_ip_port" {
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.worker_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id

@ -659,7 +759,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
  image_id  = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
  flavor_id = var.flavor_k8s_node
  key_pair  = openstack_compute_keypair_v2.k8s.name
  user_data = data.template_file.cloudinit.rendered
  user_data = data.cloudinit_config.cloudinit.rendered

  dynamic "block_device" {
    for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []

@ -679,9 +779,9 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
  }

  dynamic "scheduler_hints" {
    for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
    for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0].id] : []
    content {
      group = openstack_compute_servergroup_v2.k8s_node[0].id
      group = scheduler_hints.value
    }
  }

@ -696,11 +796,17 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
resource "openstack_networking_port_v2" "k8s_nodes_port" {
  for_each              = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
  name                  = "${var.cluster_name}-k8s-node-${each.key}"
  network_id            = var.use_existing_network ? data.openstack_networking_network_v2.k8s_network[0].id : var.network_id
  network_id            = local.k8s_nodes_settings[each.key].network_id
  admin_state_up        = "true"
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.worker_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id

@ -711,18 +817,20 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
  for_each          = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
  name              = "${var.cluster_name}-k8s-node-${each.key}"
  availability_zone = each.value.az
  image_id          = var.node_root_volume_size_in_gb == 0 ? local.image_to_use_node : null
  image_id          = local.k8s_nodes_settings[each.key].use_local_disk ? local.k8s_nodes_settings[each.key].image_id : null
  flavor_id         = each.value.flavor
  key_pair          = openstack_compute_keypair_v2.k8s.name
  user_data         = data.template_file.cloudinit.rendered
  user_data         = each.value.cloudinit != null ? templatefile("${path.module}/templates/cloudinit.yaml.tmpl", {
    extra_partitions = each.value.cloudinit.extra_partitions
  }) : data.cloudinit_config.cloudinit.rendered

  dynamic "block_device" {
    for_each = var.node_root_volume_size_in_gb > 0 ? [local.image_to_use_node] : []
    for_each = !local.k8s_nodes_settings[each.key].use_local_disk ? [local.k8s_nodes_settings[each.key].image_id] : []
    content {
      uuid                  = local.image_to_use_node
      uuid                  = block_device.value
      source_type           = "image"
      volume_size           = var.node_root_volume_size_in_gb
      volume_type           = var.node_volume_type
      volume_size           = local.k8s_nodes_settings[each.key].volume_size
      volume_type           = local.k8s_nodes_settings[each.key].volume_type
      boot_index            = 0
      destination_type      = "volume"
      delete_on_termination = true

@ -734,15 +842,15 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
  }

  dynamic "scheduler_hints" {
    for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
    for_each = local.k8s_nodes_settings[each.key].server_group
    content {
      group = openstack_compute_servergroup_v2.k8s_node[0].id
      group = scheduler_hints.value
    }
  }

  metadata = {
    ssh_user         = var.ssh_user
    kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups},${try(each.value.extra_groups, "")}"
    kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}${each.value.extra_groups != null ? ",${each.value.extra_groups}" : ""}"
    depends_on       = var.network_router_id
    use_access_ip    = var.use_access_ip
  }

@ -760,6 +868,12 @@ resource "openstack_networking_port_v2" "glusterfs_node_no_floating_ip_port" {
  port_security_enabled = var.force_null_port_security ? null : var.port_security_enabled
  security_group_ids    = var.port_security_enabled ? local.gfs_sec_groups : null
  no_security_groups    = var.port_security_enabled ? null : false
  dynamic "fixed_ip" {
    for_each = var.private_subnet_id == "" ? [] : [true]
    content {
      subnet_id = var.private_subnet_id
    }
  }

  depends_on = [
    var.network_router_id
@ -1,17 +0,0 @@
# yamllint disable rule:comments
#cloud-config
## in some cases novnc console access is required
## it requires ssh password to be set
#ssh_pwauth: yes
#chpasswd:
#  list: |
#    root:secret
#  expire: False

## in some cases direct root ssh access via ssh key is required
#disable_root: false

## in some cases additional CA certs are required
#ca-certs:
#  trusted: |
#    -----BEGIN CERTIFICATE-----

@ -0,0 +1,39 @@
%{~ if length(extra_partitions) > 0 }
#cloud-config
bootcmd:
%{~ for idx, partition in extra_partitions }
- [ cloud-init-per, once, move-second-header, sgdisk, --move-second-header, ${partition.volume_path} ]
- [ cloud-init-per, once, create-part-${idx}, parted, --script, ${partition.volume_path}, 'mkpart extended ext4 ${partition.partition_start} ${partition.partition_end}' ]
- [ cloud-init-per, once, create-fs-part-${idx}, mkfs.ext4, ${partition.partition_path} ]
%{~ endfor }

runcmd:
%{~ for idx, partition in extra_partitions }
- mkdir -p ${partition.mount_path}
- chown nobody:nogroup ${partition.mount_path}
- mount ${partition.partition_path} ${partition.mount_path}
%{~ endfor }

mounts:
%{~ for idx, partition in extra_partitions }
- [ ${partition.partition_path}, ${partition.mount_path} ]
%{~ endfor }
%{~ else ~}
# yamllint disable rule:comments
#cloud-config
## in some cases novnc console access is required
## it requires ssh password to be set
#ssh_pwauth: yes
#chpasswd:
#  list: |
#    root:secret
#  expire: False

## in some cases direct root ssh access via ssh key is required
#disable_root: false

## in some cases additional CA certs are required
#ca-certs:
#  trusted: |
#    -----BEGIN CERTIFICATE-----
%{~ endif }
@ -116,9 +116,48 @@ variable "k8s_allowed_egress_ips" {
  type = list
}

variable "k8s_masters" {}
variable "k8s_masters" {
  type = map(object({
    az                     = string
    flavor                 = string
    floating_ip            = bool
    etcd                   = bool
    image_id               = optional(string)
    root_volume_size_in_gb = optional(number)
    volume_type            = optional(string)
    network_id             = optional(string)
  }))
}

variable "k8s_nodes" {}
variable "k8s_nodes" {
  type = map(object({
    az                       = string
    flavor                   = string
    floating_ip              = bool
    extra_groups             = optional(string)
    image_id                 = optional(string)
    root_volume_size_in_gb   = optional(number)
    volume_type              = optional(string)
    network_id               = optional(string)
    additional_server_groups = optional(list(string))
    server_group             = optional(string)
    cloudinit = optional(object({
      extra_partitions = list(object({
        volume_path     = string
        partition_path  = string
        partition_start = string
        partition_end   = string
        mount_path      = string
      }))
    }))
  }))
}

variable "additional_server_groups" {
  type = map(object({
    policy = string
  }))
}

variable "supplementary_master_groups" {
  default = ""
@ -136,6 +175,10 @@ variable "worker_allowed_ports" {
  type = list
}

variable "bastion_allowed_ports" {
  type = list
}

variable "use_access_ip" {}

variable "master_server_group_policy" {

@ -185,3 +228,7 @@ variable "port_security_enabled" {
variable "force_null_port_security" {
  type = bool
}

variable "private_subnet_id" {
  type = string
}
@ -4,5 +4,6 @@ terraform {
      source = "terraform-provider-openstack/openstack"
    }
  }
  required_version = ">= 0.12.26"
  experiments      = [module_variable_optional_attrs]
  required_version = ">= 0.14.0"
}

@ -257,6 +257,12 @@ variable "worker_allowed_ports" {
  ]
}

variable "bastion_allowed_ports" {
  type = list(any)

  default = []
}

variable "use_access_ip" {
  default = 1
}

@ -294,6 +300,13 @@ variable "k8s_nodes" {
  default = {}
}

variable "additional_server_groups" {
  default = {}
  type = map(object({
    policy = string
  }))
}

variable "extra_sec_groups" {
  default = false
}

@ -5,5 +5,6 @@ terraform {
      version = "~> 1.17"
    }
  }
  required_version = ">= 0.12.26"
  experiments      = [module_variable_optional_attrs]
  required_version = ">= 0.14.0"
}
@ -251,8 +251,8 @@ resource "upcloud_firewall_rules" "master" {
    content {
      action                 = "accept"
      comment                = "UpCloud DNS"
      destination_port_end   = "53"
      destination_port_start = "53"
      source_port_end        = "53"
      source_port_start      = "53"
      direction              = "in"
      family                 = "IPv4"
      protocol               = firewall_rule.value

@ -267,8 +267,8 @@ resource "upcloud_firewall_rules" "master" {
    content {
      action                 = "accept"
      comment                = "UpCloud DNS"
      destination_port_end   = "53"
      destination_port_start = "53"
      source_port_end        = "53"
      source_port_start      = "53"
      direction              = "in"
      family                 = "IPv4"
      protocol               = firewall_rule.value

@ -283,8 +283,8 @@ resource "upcloud_firewall_rules" "master" {
    content {
      action                 = "accept"
      comment                = "UpCloud DNS"
      destination_port_end   = "53"
      destination_port_start = "53"
      source_port_end        = "53"
      source_port_start      = "53"
      direction              = "in"
      family                 = "IPv6"
      protocol               = firewall_rule.value

@ -299,8 +299,8 @@ resource "upcloud_firewall_rules" "master" {
    content {
      action                 = "accept"
      comment                = "UpCloud DNS"
      destination_port_end   = "53"
      destination_port_start = "53"
      source_port_end        = "53"
      source_port_start      = "53"
      direction              = "in"
      family                 = "IPv6"
      protocol               = firewall_rule.value

@ -315,8 +315,8 @@ resource "upcloud_firewall_rules" "master" {
    content {
      action                 = "accept"
      comment                = "NTP Port"
      destination_port_end   = "123"
      destination_port_start = "123"
      source_port_end        = "123"
      source_port_start      = "123"
      direction              = "in"
      family                 = "IPv4"
      protocol               = firewall_rule.value

@ -325,6 +325,20 @@ resource "upcloud_firewall_rules" "master" {
    }
  }

  dynamic firewall_rule {
    for_each = var.firewall_default_deny_in ? ["udp"] : []

    content {
      action            = "accept"
      comment           = "NTP Port"
      source_port_end   = "123"
      source_port_start = "123"
      direction         = "in"
      family            = "IPv6"
      protocol          = firewall_rule.value
    }
  }

  firewall_rule {
    action    = var.firewall_default_deny_in ? "drop" : "accept"
    direction = "in"
@ -394,8 +408,8 @@ resource "upcloud_firewall_rules" "k8s" {
    content {
      action                 = "accept"
      comment                = "UpCloud DNS"
      destination_port_end   = "53"
      destination_port_start = "53"
      source_port_end        = "53"
      source_port_start      = "53"
      direction              = "in"
      family                 = "IPv4"
      protocol               = firewall_rule.value

@ -410,8 +424,8 @@ resource "upcloud_firewall_rules" "k8s" {
    content {
      action                 = "accept"
      comment                = "UpCloud DNS"
      destination_port_end   = "53"
      destination_port_start = "53"
      source_port_end        = "53"
      source_port_start      = "53"
      direction              = "in"
      family                 = "IPv4"
      protocol               = firewall_rule.value

@ -426,8 +440,8 @@ resource "upcloud_firewall_rules" "k8s" {
    content {
      action                 = "accept"
      comment                = "UpCloud DNS"
      destination_port_end   = "53"
      destination_port_start = "53"
      source_port_end        = "53"
      source_port_start      = "53"
      direction              = "in"
      family                 = "IPv6"
      protocol               = firewall_rule.value

@ -442,8 +456,8 @@ resource "upcloud_firewall_rules" "k8s" {
    content {
      action                 = "accept"
      comment                = "UpCloud DNS"
      destination_port_end   = "53"
      destination_port_start = "53"
      source_port_end        = "53"
      source_port_start      = "53"
      direction              = "in"
      family                 = "IPv6"
      protocol               = firewall_rule.value

@ -458,8 +472,8 @@ resource "upcloud_firewall_rules" "k8s" {
    content {
      action                 = "accept"
      comment                = "NTP Port"
      destination_port_end   = "123"
      destination_port_start = "123"
      source_port_end        = "123"
      source_port_start      = "123"
      direction              = "in"
      family                 = "IPv4"
      protocol               = firewall_rule.value

@ -468,6 +482,20 @@ resource "upcloud_firewall_rules" "k8s" {
    }
  }

  dynamic firewall_rule {
    for_each = var.firewall_default_deny_in ? ["udp"] : []

    content {
      action            = "accept"
      comment           = "NTP Port"
      source_port_end   = "123"
      source_port_start = "123"
      direction         = "in"
      family            = "IPv6"
      protocol          = firewall_rule.value
    }
  }

  firewall_rule {
    action    = var.firewall_default_deny_in ? "drop" : "accept"
    direction = "in"
@ -23,7 +23,9 @@ variable "vsphere_datastore" {}

variable "vsphere_user" {}

variable "vsphere_password" {}
variable "vsphere_password" {
  sensitive = true
}

variable "vsphere_server" {}

@ -4,12 +4,6 @@ terraform {
      source  = "hashicorp/vsphere"
      version = ">= 1.24.3"
    }
    null = {
      source = "hashicorp/null"
    }
    template = {
      source = "hashicorp/template"
    }
  }
  required_version = ">= 0.13"
}
@ -37,6 +37,8 @@

* [CentOS/OracleLinux/AlmaLinux/Rocky Linux](docs/centos.md)
* [Kylin Linux Advanced Server V10](docs/kylinlinux.md)
* [Amazon Linux 2](docs/amazonlinux.md)
* [UOS Linux](docs/uoslinux.md)
* [openEuler notes](docs/openeuler.md)
* CRI
  * [Containerd](docs/containerd.md)
  * [Docker](docs/docker.md)
@ -5,7 +5,7 @@ Amazon Linux is supported with docker,containerd and cri-o runtimes.
**Note:** that Amazon Linux is not currently covered in kubespray CI and
support for it is currently considered experimental.

Amazon Linux 2, while derrived from the Redhat OS family, does not keep in
Amazon Linux 2, while derived from the Redhat OS family, does not keep in
sync with RHEL upstream like CentOS/AlmaLinux/Oracle Linux. In order to use
Amazon Linux as the ansible host for your kubespray deployments you need to
manually install `python3` and deploy ansible and kubespray dependencies in
@ -267,7 +267,7 @@ Note: use `--tags` and `--skip-tags` wise and only if you're 100% sure what you'

## Bastion host

If you prefer to not make your nodes publicly accessible (nodes with private IPs only),
you can use a so called *bastion* host to connect to your nodes. To specify and use a bastion,
you can use a so-called _bastion_ host to connect to your nodes. To specify and use a bastion,
simply add a line to your inventory, where you have to replace x.x.x.x with the public IP of the
bastion host.
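The inventory line itself falls outside this hunk; as a minimal sketch in Ansible's YAML inventory format (the `bastion` group and host alias are assumptions based on the surrounding text, and x.x.x.x is a placeholder):

```yaml
# Hypothetical YAML-inventory equivalent of the one-line entry described above.
bastion:
  hosts:
    bastion:
      ansible_host: x.x.x.x  # replace with the bastion's public IP
```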

@ -281,7 +281,7 @@ For more information about Ansible and bastion hosts, read

## Mitogen

Mitogen support is deprecated, please see [mitogen related docs](/docs/mitogen.md) for useage and reasons for deprecation.
Mitogen support is deprecated, please see [mitogen related docs](/docs/mitogen.md) for usage and reasons for deprecation.

## Beyond ansible 2.9

@ -290,7 +290,7 @@ two projects which are now joined under the Ansible umbrella.

Ansible-base (2.10.x branch) will contain just the ansible language implementation while
ansible modules that were previously bundled into a single repository will be part of the
ansible 3.x package. Pleasee see [this blog post](https://blog.while-true-do.io/ansible-release-3-0-0/)
ansible 3.x package. Please see [this blog post](https://blog.while-true-do.io/ansible-release-3-0-0/)
that explains in detail the need and the evolution plan.

**Note:** this change means that ansible virtual envs cannot be upgraded with `pip install -U`.
@ -205,6 +205,14 @@ To re-define health host please set the following variable in your inventory:
calico_healthhost: "0.0.0.0"
```

### Optional : Configure VXLAN hardware Offload

The VXLAN Offload is disabled by default. It can be configured like this to enable it:

```yml
calico_feature_detect_override: "ChecksumOffloadBroken=false" # The VXLAN offload will be enabled (it may cause problems on buggy NIC drivers)
```

### Optional : Configure Calico Node probe timeouts

Under certain conditions a deployer may need to tune the Calico liveness and readiness probes timeout settings. These can be configured like this:

@ -227,6 +235,8 @@ If you are running your cluster with the default calico settings and are upgradi
* perform a manual migration to vxlan before upgrading kubespray (see migrating from IP in IP to VXLAN below)
* pin the pre-2.19 settings in your ansible inventory (see IP in IP mode settings below)

**Note:** VXLAN in IPv6 is only supported when the kernel is >= 3.12. If your kernel version is < 3.12, please don't set `calico_vxlan_mode_ipv6: vxlanAlways`. For more details see [issue #6877](https://github.com/projectcalico/calico/issues/6877).

### IP in IP mode

To configure IP in IP mode you need to use the bird network backend.
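A minimal sketch of those IP in IP settings, which is also what "pin the pre-2.19 settings" above refers to (variable names come from kubespray's calico role; treat the exact values as an assumption):

```yaml
calico_network_backend: bird  # IP in IP requires the bird backend, per the text above
calico_ipip_mode: 'Always'    # assumed pre-2.19 default
calico_vxlan_mode: 'Never'
```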

@ -2,7 +2,7 @@

## CentOS 7

The maximum python version offically supported in CentOS is 3.6. Ansible as of version 5 (ansible core 2.12.x) increased their python requirement to python 3.8 and above.
The maximum python version officially supported in CentOS is 3.6. Ansible as of version 5 (ansible core 2.12.x) increased their python requirement to python 3.8 and above.
Kubespray supports multiple ansible versions but only the default (5.x) gets wide testing coverage. If your deployment host is CentOS 7 it is recommended to use one of the earlier versions still supported.

## CentOS 8

docs/cgroups.md (new file, 72 lines)
@ -0,0 +1,72 @@
# cgroups

To keep containers from competing with each other for resources, or from impacting the host, in Kubernetes the kubelet relies on cgroups to limit container resource usage.

## Enforcing Node Allocatable

You can use `kubelet_enforce_node_allocatable` to set node allocatable enforcement.

```yaml
# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
kubelet_enforce_node_allocatable: "pods"
# kubelet_enforce_node_allocatable: "pods,kube-reserved"
# kubelet_enforce_node_allocatable: "pods,kube-reserved,system-reserved"
```

Note that to enforce kube-reserved or system-reserved, `kube_reserved_cgroups` or `system_reserved_cgroups` needs to be specified respectively.

Here is an example:

```yaml
kubelet_enforce_node_allocatable: "pods,kube-reserved,system-reserved"

# Reserve this space for kube resources
# Set to true to reserve resources for kube daemons
kube_reserved: true
kube_reserved_cgroups_for_service_slice: kube.slice
kube_reserved_cgroups: "/{{ kube_reserved_cgroups_for_service_slice }}"
kube_memory_reserved: 256Mi
kube_cpu_reserved: 100m
# kube_ephemeral_storage_reserved: 2Gi
# kube_pid_reserved: "1000"
# Reservation for master hosts
kube_master_memory_reserved: 512Mi
kube_master_cpu_reserved: 200m
# kube_master_ephemeral_storage_reserved: 2Gi
# kube_master_pid_reserved: "1000"

# Set to true to reserve resources for system daemons
system_reserved: true
system_reserved_cgroups_for_service_slice: system.slice
system_reserved_cgroups: "/{{ system_reserved_cgroups_for_service_slice }}"
system_memory_reserved: 512Mi
system_cpu_reserved: 500m
# system_ephemeral_storage_reserved: 2Gi
# system_pid_reserved: "1000"
# Reservation for master hosts
system_master_memory_reserved: 256Mi
system_master_cpu_reserved: 250m
# system_master_ephemeral_storage_reserved: 2Gi
# system_master_pid_reserved: "1000"
```

After the setup, the cgroups hierarchy is as follows:

```bash
/ (Cgroups Root)
├── kubepods.slice
│   ├── ...
│   ├── kubepods-besteffort.slice
│   ├── kubepods-burstable.slice
│   └── ...
├── kube.slice
│   ├── ...
│   ├── {{container_manager}}.service
│   ├── kubelet.service
│   └── ...
├── system.slice
│   └── ...
└── ...
```

You can learn more in the [official kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/).

docs/ci.md
@ -12,11 +12,11 @@ centos7 | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: |
debian10 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
fedora36 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
fedora37 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
fedora38 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux9 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux9 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: |
ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: |
ubuntu20 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |

@ -32,8 +32,8 @@ centos7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian11 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora36 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora37 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora38 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |

@ -52,8 +52,8 @@ centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora35 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora36 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
fedora37 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora38 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
opensuse | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
@ -121,6 +121,23 @@ cilium_encryption_type: "wireguard"

Kubespray currently supports Linux distributions with Wireguard Kernel mode on Linux 5.6 and newer.

## Bandwidth Manager

Cilium’s bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation.

Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies.
In case they select the Pod at egress, then the bandwidth enforcement will be disabled for those Pods.

Bandwidth Manager requires a v5.1.x or more recent Linux kernel.

For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/v1.12/gettingstarted/bandwidth-manager/)

To use this function, set the following parameter:

```yml
cilium_enable_bandwidth_manager: true
```

## Install Cilium Hubble

k8s-net-cilium.yml:

@ -64,14 +64,17 @@ is a list of such dictionaries.

Default runtime can be changed by setting `containerd_default_runtime`.
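For instance, to pin it explicitly in the inventory (illustrative value; `runc` is the usual default):

```yaml
containerd_default_runtime: "runc"
```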

#### base_runtime_spec
#### Base runtime specs and limiting number of open files

`base_runtime_spec` key in a runtime dictionary can be used to explicitly
specify a runtime spec json file. We ship the default one which is generated
with `ctr oci spec > /etc/containerd/cri-base.json`. It will be used if you set
`base_runtime_spec: cri-base.json`. The main advantage of doing so is the presence of
`rlimits` section in this configuration, which will restrict the maximum number
of file descriptors(open files) per container to 1024.
`base_runtime_spec` key in a runtime dictionary is used to explicitly
specify a runtime spec json file. `runc` runtime has it set to `cri-base.json`,
which is generated with `ctr oci spec > /etc/containerd/cri-base.json` and
updated to include a custom setting for maximum number of file descriptors per
container.

You can change maximum number of file descriptors per container for the default
`runc` runtime by setting the `containerd_base_runtime_spec_rlimit_nofile`
variable.
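For example (the limit value here is illustrative, not a recommended default):

```yaml
containerd_base_runtime_spec_rlimit_nofile: 65535
```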

You can tune many more [settings][runtime-spec] by supplying your own file name and content with `containerd_base_runtime_specs`:

@ -97,6 +100,15 @@ containerd_runc_runtime:
  ...
```

Configure insecure-registry access to self-hosted registries:

```yaml
containerd_insecure_registries:
  "test.registry.io": "http://test.registry.io"
  "172.19.16.11:5000": "http://172.19.16.11:5000"
  "repo:5000": "http://repo:5000"
```

[containerd]: https://containerd.io/
[RuntimeClass]: https://kubernetes.io/docs/concepts/containers/runtime-class/
[runtime classes in containerd]: https://github.com/containerd/containerd/blob/main/docs/cri/config.md#runtime-classes
@ -50,6 +50,20 @@ is not set, a default resolver is chosen (depending on cloud provider or 8.8.8.8

DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode`` modes. These serve as backup
DNS servers in early cluster deployment when no cluster DNS is available yet.

### dns_upstream_forward_extra_opts

Whether upstream DNS servers come from the `upstream_dns_servers` variable or from /etc/resolv.conf, the related forward block in the coredns (and nodelocaldns) configuration can take options (see <https://coredns.io/plugins/forward/> for details).
These are configurable in the inventory as a dictionary in the `dns_upstream_forward_extra_opts` variable.
By default, no options other than the hardcoded ones are set (see `roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2` and `roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2`).
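As a sketch, passing one forward-plugin option through this variable (`max_concurrent` comes from the coredns forward plugin documentation linked above; the value is illustrative):

```yaml
dns_upstream_forward_extra_opts:
  max_concurrent: 1000
```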

### coredns_kubernetes_extra_opts

Custom options to be added to the kubernetes coredns plugin.

### coredns_kubernetes_extra_domains

Extra domains to be forwarded to the kubernetes coredns plugin.
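A hedged illustration of both variables (the option and domain shown are made up, and the exact expected types may differ; check the coredns template mentioned above):

```yaml
coredns_kubernetes_extra_opts:
  - "ttl 30"
coredns_kubernetes_extra_domains: "internal.example.com"
```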

### coredns_external_zones

Array of optional external zones to coredns forward queries to. It's injected into

@ -224,7 +238,7 @@ cluster service names.

Setting ``enable_nodelocaldns`` to ``true`` will make pods reach out to the dns (core-dns) caching agent running on the same node, thereby avoiding iptables DNAT rules and connection tracking. The local caching agent will query core-dns (depending on what main DNS plugin is configured in your cluster) for cache misses of cluster hostnames(cluster.local suffix by default).

More information on the rationale behind this implementation can be found [here](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/0030-nodelocal-dns-cache.md).
More information on the rationale behind this implementation can be found [here](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/1024-nodelocal-cache-dns/README.md).

**As per the 2.10 release, Nodelocal DNS cache is enabled by default.**
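A minimal sketch of the corresponding inventory settings (the listen address shown is the commonly used link-local default; adjust to your environment):

```yaml
enable_nodelocaldns: true
nodelocaldns_ip: 169.254.25.10
```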

@ -253,7 +267,7 @@ See [dns_etchosts](#dns_etchosts-coredns) above.

### Nodelocal DNS HA

Under some circumstances the single POD nodelocaldns implementation may not be able to be replaced soon enough and a cluster upgrade or a nodelocaldns upgrade can cause DNS requests to time out for short intervals. If for any reason your applications cannot tollerate this behavior you can enable a redundant nodelocal DNS pod on each node:
Under some circumstances the single POD nodelocaldns implementation may not be able to be replaced soon enough and a cluster upgrade or a nodelocaldns upgrade can cause DNS requests to time out for short intervals. If for any reason your applications cannot tolerate this behavior you can enable a redundant nodelocal DNS pod on each node:

```yaml
enable_nodelocaldns_secondary: true
@ -55,7 +55,7 @@ Docker log options:
docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"
```

Changre the docker `bin_dir`, this should not be changed unless you use a custom docker package:
Change the docker `bin_dir`, this should not be changed unless you use a custom docker package:

```yaml
docker_bin_dir: "/usr/bin"
@ -2,6 +2,8 @@

Flannel is a network fabric for containers, designed for Kubernetes

Supported [backends](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#wireguard): `vxlan`, `host-gw` and `wireguard`

**Warning:** You may encounter this [bug](https://github.com/coreos/flannel/pull/1282) with the `VXLAN` backend; while waiting on a newer Flannel version, the current workaround (`ethtool --offload flannel.1 rx off tx off`) is showcased in the kubespray [networking test](tests/testcases/040_check-network-adv.yml:31).

## Verifying flannel install
@ -17,9 +17,9 @@ The **kubernetes** version should be at least `v1.23.6` to have all the most rec
---

## kube-apiserver
authorization_modes: ['Node','RBAC']
authorization_modes: ['Node', 'RBAC']
# AppArmor-based OS
#kube_apiserver_feature_gates: ['AppArmor=true']
# kube_apiserver_feature_gates: ['AppArmor=true']
kube_apiserver_request_timeout: 120s
kube_apiserver_service_account_lookup: true

@ -41,7 +41,18 @@ kube_encrypt_secret_data: true
kube_encryption_resources: [secrets]
kube_encryption_algorithm: "secretbox"

kube_apiserver_enable_admission_plugins: ['EventRateLimit,AlwaysPullImages,ServiceAccount,NamespaceLifecycle,NodeRestriction,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,PodNodeSelector,PodSecurity']
kube_apiserver_enable_admission_plugins:
  - EventRateLimit
  - AlwaysPullImages
  - ServiceAccount
  - NamespaceLifecycle
  - NodeRestriction
  - LimitRanger
  - ResourceQuota
  - MutatingAdmissionWebhook
  - ValidatingAdmissionWebhook
  - PodNodeSelector
  - PodSecurity
kube_apiserver_admission_control_config_file: true
# EventRateLimit plugin configuration
kube_apiserver_admission_event_rate_limits:
@ -60,7 +71,7 @@ kube_profiling: false
kube_controller_manager_bind_address: 127.0.0.1
kube_controller_terminated_pod_gc_threshold: 50
# AppArmor-based OS
#kube_controller_feature_gates: ["RotateKubeletServerCertificate=true","AppArmor=true"]
# kube_controller_feature_gates: ["RotateKubeletServerCertificate=true", "AppArmor=true"]
kube_controller_feature_gates: ["RotateKubeletServerCertificate=true"]

## kube-scheduler

@ -68,12 +79,13 @@ kube_scheduler_bind_address: 127.0.0.1
kube_kubeadm_scheduler_extra_args:
  profiling: false
# AppArmor-based OS
#kube_scheduler_feature_gates: ["AppArmor=true"]
# kube_scheduler_feature_gates: ["AppArmor=true"]

## etcd
etcd_deployment_type: kubeadm

## kubelet
kubelet_authorization_mode_webhook: true
kubelet_authentication_token_webhook: true
kube_read_only_port: 0
kubelet_rotate_server_certificates: true

@ -82,7 +94,7 @@ kubelet_event_record_qps: 1
kubelet_rotate_certificates: true
kubelet_streaming_connection_idle_timeout: "5m"
kubelet_make_iptables_util_chains: true
kubelet_feature_gates: ["RotateKubeletServerCertificate=true","SeccompDefault=true"]
kubelet_feature_gates: ["RotateKubeletServerCertificate=true", "SeccompDefault=true"]
kubelet_seccomp_default: true
kubelet_systemd_hardening: true
# In case you have multiple interfaces in your
@ -124,7 +124,7 @@ By default NGINX `keepalive_timeout` is set to `75s`.
|
||||
The default ELB idle timeout will work for most scenarios, unless the NGINX [keepalive_timeout](http://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout) has been modified,
|
||||
in which case `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout` will need to be modified to ensure it is less than the `keepalive_timeout` the user has configured.
|
||||
|
||||
_Please Note: An idle timeout of `3600s` is recommended when using WebSockets._
|
||||
*Please Note: An idle timeout of `3600s` is recommended when using WebSockets.*
|
||||
|
||||
More information with regards to idle timeouts for your Load Balancer can be found in the [official AWS documentation](https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html).
|
||||
|
||||
|
@ -39,6 +39,12 @@ kube_vip_services_enabled: false
|
||||
[additional manual steps](https://kube-vip.io/docs/usage/cloud-provider/)
|
||||
are needed.
|
||||
|
||||
If using [local traffic policy](https://kube-vip.io/docs/usage/kubernetes-services/#external-traffic-policy-kube-vip-v050):
|
||||
|
||||
```yaml
|
||||
kube_vip_enableServicesElection: true
|
||||
```
|
||||
|
||||
If using [ARP mode](https://kube-vip.io/docs/installation/static/#arp) :
|
||||
|
||||
```yaml
|
||||
|
@ -1,10 +1,11 @@
|
||||
# Local Storage Provisioner
|
||||
# Local Static Storage Provisioner
|
||||
|
||||
The [local storage provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume)
|
||||
The [local static storage provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner)
|
||||
is NOT a dynamic storage provisioner as you would
|
||||
expect from a cloud provider. Instead, it simply creates PersistentVolumes for
|
||||
all mounts under the host_dir of the specified storage class.
|
||||
all mounts under the `host_dir` of the specified storage class.
|
||||
These storage classes are specified in the `local_volume_provisioner_storage_classes` nested dictionary.
|
||||
|
||||
Example:
|
||||
|
||||
```yaml
|
||||
@ -16,15 +17,18 @@ local_volume_provisioner_storage_classes:
|
||||
host_dir: /mnt/fast-disks
|
||||
mount_dir: /mnt/fast-disks
|
||||
block_cleaner_command:
|
||||
- "/scripts/shred.sh"
|
||||
- "2"
|
||||
- "/scripts/shred.sh"
|
||||
- "2"
|
||||
volume_mode: Filesystem
|
||||
fs_type: ext4
|
||||
```
|
||||
|
||||
For each key in `local_volume_provisioner_storage_classes` a storageClass with the
|
||||
same name is created. The subkeys of each storage class are converted to camelCase and added
|
||||
as attributes to the storageClass.
|
||||
For each key in `local_volume_provisioner_storage_classes` a "storage class" with
|
||||
the same name is created in the entry `storageClassMap` of the ConfigMap `local-volume-provisioner`.
|
||||
The subkeys of each storage class in `local_volume_provisioner_storage_classes`
|
||||
are converted to camelCase and added as attributes to the storage class in the
|
||||
ConfigMap.
|
||||
|
||||
The result of the above example is:
|
||||
|
||||
```yaml
|
||||
@ -43,80 +47,85 @@ data:
|
||||
fsType: ext4
|
||||
```
|
||||
|
||||
The default StorageClass is local-storage on /mnt/disks,
|
||||
the rest of this doc will use that path as an example.
|
||||
Additionally, a StorageClass object (`storageclasses.storage.k8s.io`) is also
|
||||
created for each storage class:
|
||||
|
||||
```bash
|
||||
$ kubectl get storageclasses.storage.k8s.io
|
||||
NAME PROVISIONER RECLAIMPOLICY
|
||||
fast-disks kubernetes.io/no-provisioner Delete
|
||||
local-storage kubernetes.io/no-provisioner Delete
|
||||
```
|
||||
|
||||
The default StorageClass is `local-storage` on `/mnt/disks`;
|
||||
the rest of this documentation will use that path as an example.
|
||||
|
||||
## Examples to create local storage volumes
|
||||
|
||||
1. tmpfs method:
|
||||
1. Using tmpfs
|
||||
|
||||
``` bash
|
||||
for vol in vol1 vol2 vol3; do
|
||||
mkdir /mnt/disks/$vol
|
||||
mount -t tmpfs -o size=5G $vol /mnt/disks/$vol
|
||||
done
|
||||
```
|
||||
```bash
|
||||
for vol in vol1 vol2 vol3; do
|
||||
mkdir /mnt/disks/$vol
|
||||
mount -t tmpfs -o size=5G $vol /mnt/disks/$vol
|
||||
done
|
||||
```
|
||||
|
||||
The tmpfs method is not recommended for production because the mount is not
|
||||
persistent and data will be deleted on reboot.
|
||||
The tmpfs method is not recommended for production because the mounts are not
|
||||
persistent and data will be deleted on reboot.
|
||||
|
||||
1. Mount physical disks
|
||||
|
||||
``` bash
|
||||
mkdir /mnt/disks/ssd1
|
||||
mount /dev/vdb1 /mnt/disks/ssd1
|
||||
```
|
||||
```bash
|
||||
mkdir /mnt/disks/ssd1
|
||||
mount /dev/vdb1 /mnt/disks/ssd1
|
||||
```
|
||||
|
||||
Physical disks are recommended for production environments because it offers
|
||||
complete isolation in terms of I/O and capacity.
|
||||
Physical disks are recommended for production environments because they offer
|
||||
complete isolation in terms of I/O and capacity.
|
||||
|
||||
1. Mount unpartitioned physical devices
|
||||
|
||||
``` bash
|
||||
for disk in /dev/sdc /dev/sdd /dev/sde; do
|
||||
ln -s $disk /mnt/disks
|
||||
done
|
||||
```
|
||||
```bash
|
||||
for disk in /dev/sdc /dev/sdd /dev/sde; do
|
||||
ln -s $disk /mnt/disks
|
||||
done
|
||||
```
|
||||
|
||||
This saves time of precreating filesystems. Note that your storageclass must have
|
||||
volume_mode set to "Filesystem" and fs_type defined. If either is not set, the
|
||||
disk will be added as a raw block device.
|
||||
This saves the time of precreating filesystems. Note that your storageclass must have
|
||||
`volume_mode` set to `"Filesystem"` and `fs_type` defined. If either is not set, the
|
||||
disk will be added as a raw block device.
|
||||
|
||||
1. PersistentVolumes with `volumeMode="Block"`
|
||||
|
||||
Just like above, you can create PersistentVolumes with volumeMode `Block`
|
||||
by creating a symbolic link under the discovery directory to the block device on
|
||||
the node, if you set `volume_mode` to `"Block"`. This will create a volume
|
||||
presented into a Pod as a block device, without any filesystem on it.
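
   A minimal sketch of the matching storage class entry, assuming the `fast-disks` class from the example above; the symbolic link itself is created just as in the unpartitioned-device example:

   ```yaml
   # Sketch: volume_mode Block exposes the device raw, with no filesystem created.
   local_volume_provisioner_storage_classes:
     fast-disks:
       host_dir: /mnt/fast-disks
       mount_dir: /mnt/fast-disks
       volume_mode: Block
   ```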
|
||||
|
||||
1. File-backed sparse file method
|
||||
|
||||
``` bash
|
||||
truncate /mnt/disks/disk5 --size 2G
|
||||
mkfs.ext4 /mnt/disks/disk5
|
||||
mkdir /mnt/disks/vol5
|
||||
mount /mnt/disks/disk5 /mnt/disks/vol5
|
||||
```
|
||||
```bash
|
||||
truncate /mnt/disks/disk5 --size 2G
|
||||
mkfs.ext4 /mnt/disks/disk5
|
||||
mkdir /mnt/disks/vol5
|
||||
mount /mnt/disks/disk5 /mnt/disks/vol5
|
||||
```
|
||||
|
||||
If you have a development environment and only one disk, this is the best way
|
||||
to limit the quota of persistent volumes.
|
||||
If you have a development environment and only one disk, this is the best way
|
||||
to limit the quota of persistent volumes.
|
||||
|
||||
1. Simple directories
|
||||
|
||||
In a development environment using `mount --bind` works also, but there is no capacity
|
||||
management.
|
||||
|
||||
1. Block volumeMode PVs
|
||||
|
||||
Create a symbolic link under discovery directory to the block device on the node. To use
|
||||
raw block devices in pods, volume_type should be set to "Block".
|
||||
In a development environment, using `mount --bind` works also, but there is no capacity
|
||||
management.
|
||||
|
||||
## Usage notes
|
||||
|
||||
The beta PV.NodeAffinity field is used by default. If running against an older K8s
|
||||
version, the useAlphaAPI flag must be set in the ConfigMap, as sketched below.
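
A sketch of that flag, assuming the upstream provisioner's key name, inside the `local-volume-provisioner` ConfigMap data:

```yaml
# Sketch: only needed on K8s versions that predate the beta PV.NodeAffinity field.
useAlphaAPI: "true"
```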
|
||||
|
||||
The volume provisioner cannot calculate volume sizes correctly, so you should
|
||||
delete the daemonset pod on the relevant host after creating volumes. The pod
|
||||
will be recreated and read the size correctly.
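
A hedged sketch of that cleanup, assuming the provisioner runs in `kube-system` with the upstream `app=local-volume-provisioner` label:

```bash
# Sketch: delete the provisioner pod on the node where volumes were just created;
# the DaemonSet recreates it and the new pod re-reads the volume sizes.
kubectl -n kube-system delete pod \
  -l app=local-volume-provisioner \
  --field-selector spec.nodeName=<node-name>
```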
|
||||
|
||||
Make sure to make any mounts persist via /etc/fstab or with systemd mounts (for
|
||||
Flatcar Container Linux). Pods with persistent volume claims will not be
|
||||
Make sure to make any mounts persist via `/etc/fstab` or with systemd mounts (for
|
||||
Flatcar Container Linux or Fedora CoreOS). Pods with persistent volume claims will not be
|
||||
able to start if the mounts become unavailable.
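
For example, a hypothetical `/etc/fstab` entry for the physical disk mounted earlier:

```bash
# /etc/fstab — device and mount point from the examples above (hypothetical)
/dev/vdb1  /mnt/disks/ssd1  ext4  defaults  0  2
```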
|
||||
|
||||
## Further reading
|
||||
|
||||
Refer to the upstream docs here: <https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume>
|
||||
Refer to the upstream docs here: <https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner>
|
||||
|
66
docs/mirror.md
Normal file
@ -0,0 +1,66 @@
|
||||
# Public Download Mirror
|
||||
|
||||
The public mirror is useful for speeding up downloads of public resources in some areas of the world (such as China).
|
||||
|
||||
## Configuring Kubespray to use a mirror site
|
||||
|
||||
You can follow the [offline](offline-environment.md) guide to point the image/file download configuration at the public mirror site. If you want to download quickly in China, the configuration can look like:
|
||||
|
||||
```yaml
|
||||
gcr_image_repo: "gcr.m.daocloud.io"
|
||||
kube_image_repo: "k8s.m.daocloud.io"
|
||||
docker_image_repo: "docker.m.daocloud.io"
|
||||
quay_image_repo: "quay.m.daocloud.io"
|
||||
github_image_repo: "ghcr.m.daocloud.io"
|
||||
|
||||
files_repo: "https://files.m.daocloud.io"
|
||||
```
|
||||
|
||||
Use mirror sites only if you trust the provider. The Kubespray team cannot verify their reliability or security.
|
||||
You can replace `m.daocloud.io` with any site you want; see the sketch below.
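
For instance, a hypothetical self-hosted mirror would be configured the same way:

```yaml
# All domains below are placeholders for your own mirror.
gcr_image_repo: "gcr.mirror.example.com"
kube_image_repo: "k8s.mirror.example.com"
docker_image_repo: "docker.mirror.example.com"
```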
|
||||
|
||||
## Example Usage Full Steps
|
||||
|
||||
You can follow the full steps to use kubespray with the mirror. For example:
|
||||
|
||||
Install Ansible according to the Ansible installation guide, then run the following steps:
|
||||
|
||||
```shell
|
||||
# Copy ``inventory/sample`` as ``inventory/mycluster``
|
||||
cp -rfp inventory/sample inventory/mycluster
|
||||
|
||||
# Update Ansible inventory file with inventory builder
|
||||
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
|
||||
CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
|
||||
|
||||
# Use the download mirror
|
||||
cp inventory/mycluster/group_vars/all/offline.yml inventory/mycluster/group_vars/all/mirror.yml
|
||||
sed -i -E '/# .*\{\{ files_repo/s/^# //g' inventory/mycluster/group_vars/all/mirror.yml
|
||||
tee -a inventory/mycluster/group_vars/all/mirror.yml <<EOF
|
||||
gcr_image_repo: "gcr.m.daocloud.io"
|
||||
kube_image_repo: "k8s.m.daocloud.io"
|
||||
docker_image_repo: "docker.m.daocloud.io"
|
||||
quay_image_repo: "quay.m.daocloud.io"
|
||||
github_image_repo: "ghcr.m.daocloud.io"
|
||||
files_repo: "https://files.m.daocloud.io"
|
||||
EOF
|
||||
|
||||
# Review and change parameters under ``inventory/mycluster/group_vars``
|
||||
cat inventory/mycluster/group_vars/all/all.yml
|
||||
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
|
||||
|
||||
# Deploy Kubespray with Ansible Playbook - run the playbook as root
|
||||
# The option `--become` is required, as for example writing SSL keys in /etc/,
|
||||
# installing packages and interacting with various systemd daemons.
|
||||
# Without --become the playbook will fail to run!
|
||||
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
|
||||
```
|
||||
|
||||
The steps above are the [README.md](../README.md) steps with the "Use the download mirror" step added.
|
||||
|
||||
## Community-run mirror sites
|
||||
|
||||
DaoCloud (China)
|
||||
|
||||
* [image-mirror](https://github.com/DaoCloud/public-image-mirror)
|
||||
* [files-mirror](https://github.com/DaoCloud/public-binary-files-mirror)
|
@ -1,16 +1,31 @@
|
||||
# Offline environment
|
||||
|
||||
In case your servers don't have access to internet (for example when deploying on premises with security constraints), you need to setup:
|
||||
In case your servers don't have access to the internet directly (for example
|
||||
when deploying on premises with security constraints), you need to get the
|
||||
following artifacts in advance from another environment that has access to the internet.
|
||||
|
||||
* Some static files (zips and binaries)
|
||||
* OS packages (rpm/deb files)
|
||||
* Container images used by Kubespray. Exhaustive list depends on your setup
|
||||
* [Optional] Python packages used by Kubespray (only required if your OS doesn't provide all python packages/versions
|
||||
listed in `requirements.txt`)
|
||||
* [Optional] Helm chart files (only required if `helm_enabled=true`)
|
||||
|
||||
Then you need to set up the following services on your offline environment:
|
||||
|
||||
* an HTTP reverse proxy/cache/mirror to serve some static files (zips and binaries)
|
||||
* an internal Yum/Deb repository for OS packages
|
||||
* an internal container image registry that need to be populated with all container images used by Kubespray. Exhaustive list depends on your setup
|
||||
* [Optional] an internal PyPi server for kubespray python packages (only required if your OS doesn't provide all python packages/versions listed in `requirements.txt`)
|
||||
* [Optional] an internal Helm registry (only required if `helm_enabled=true`)
|
||||
* an internal container image registry that needs to be populated with all container images used by Kubespray
|
||||
* [Optional] an internal PyPi server for python packages used by Kubespray
|
||||
* [Optional] an internal Helm registry for Helm chart files
|
||||
|
||||
You can get artifact lists with the [generate_list.sh](/contrib/offline/generate_list.sh) script.
|
||||
In addition, you can find some tools for offline deployment under [contrib/offline](/contrib/offline/README.md).
|
||||
|
||||
## Configure Inventory
|
||||
|
||||
Once all artifacts are accessible from your internal network, **adjust** the following variables in [your inventory](/inventory/sample/group_vars/all/offline.yml) to match your environment:
|
||||
Once all artifacts are accessible from your internal network, **adjust** the following variables
|
||||
in [your inventory](/inventory/sample/group_vars/all/offline.yml) to match your environment:
|
||||
|
||||
```yaml
|
||||
# Registry overrides
|
||||
@ -23,7 +38,7 @@ kubeadm_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubeadm"
|
||||
kubectl_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubectl"
|
||||
kubelet_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubelet"
|
||||
# etcd is optional if you **DON'T** use etcd_deployment=host
|
||||
etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
|
||||
etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"
|
||||
cni_download_url: "{{ files_repo }}/kubernetes/cni/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
|
||||
crictl_download_url: "{{ files_repo }}/kubernetes/cri-tools/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
|
||||
# If using Calico
|
||||
@ -36,7 +51,7 @@ runc_download_url: "{{ files_repo }}/runc.{{ image_arch }}"
|
||||
nerdctl_download_url: "{{ files_repo }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
|
||||
# Insecure registries for containerd
|
||||
containerd_insecure_registries:
|
||||
- "{{ registry_host }}"
|
||||
"{{ registry_addr }}":"{{ registry_host }}"
|
||||
|
||||
# CentOS/Redhat/AlmaLinux/Rocky Linux
|
||||
## Docker / Containerd
|
||||
@ -73,20 +88,31 @@ containerd_ubuntu_repo_repokey: 'YOURREPOKEY'
|
||||
For the OS specific settings, just define the one matching your OS.
|
||||
If you use settings like the ones above, you'll need to define the following variables in your inventory:
|
||||
|
||||
* `registry_host`: Container image registry. If you _don't_ use the same repository path for the container images that the ones defined in [Download's role defaults](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/download/defaults/main.yml), you need to override the `*_image_repo` for these container images. If you want to make your life easier, use the same repository path, you won't have to override anything else.
|
||||
* `files_repo`: HTTP webserver or reverse proxy that is able to serve the files listed above. Path is not important, you can store them anywhere as long as it's accessible by kubespray. It's recommended to use `*_version` in the path so that you don't need to modify this setting everytime kubespray upgrades one of these components.
|
||||
* `yum_repo`/`debian_repo`/`ubuntu_repo`: OS package repository depending of your OS, should point to your internal repository. Adjust the path accordingly.
|
||||
* `registry_host`: Container image registry. If you _don't_ use the same repository path for the container images as
|
||||
the ones defined
|
||||
in [Download's role defaults](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/download/defaults/main.yml),
|
||||
you need to override the `*_image_repo` for these container images. If you want to make your life easier, use the
|
||||
same repository path; you won't have to override anything else.
|
||||
* `registry_addr`: Container image registry address, containing only [domain or ip]:[port].
|
||||
* `files_repo`: HTTP webserver or reverse proxy that is able to serve the files listed above. Path is not important, you
|
||||
can store them anywhere as long as it's accessible by kubespray. It's recommended to use `*_version` in the path so
|
||||
that you don't need to modify this setting every time kubespray upgrades one of these components.
|
||||
* `yum_repo`/`debian_repo`/`ubuntu_repo`: OS package repository depending on your OS; it should point to your internal
|
||||
repository. Adjust the path accordingly, as in the sketch below.
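
Put together, a hedged sketch of these inventory variables (every hostname below is a placeholder):

```yaml
# All values are hypothetical; point them at your internal services.
registry_host: "registry.example.internal:5000"
registry_addr: "registry.example.internal:5000"
files_repo: "https://files.example.internal/kubespray"
yum_repo: "http://mirror.example.internal/centos"
```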
|
||||
|
||||
## Install Kubespray Python Packages
|
||||
|
||||
### Recommended way: Kubespray Container Image
|
||||
|
||||
The easiest way is to use [kubespray container image](https://quay.io/kubespray/kubespray) as all the required packages are baked in the image.
|
||||
The easiest way is to use [kubespray container image](https://quay.io/kubespray/kubespray) as all the required packages
|
||||
are baked into the image.
|
||||
Just copy the container image to your private container image registry and you are all set! One option is sketched below.
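
A sketch of that copy with skopeo and a hypothetical private registry (the tag reuses the one from the example further below):

```bash
# Sketch: mirror the kubespray image into a private registry with skopeo.
skopeo copy \
  docker://quay.io/kubespray/kubespray:v2.14.0 \
  docker://myprivateregistry.com/kubespray/kubespray:v2.14.0
```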
|
||||
|
||||
### Manual installation
|
||||
|
||||
Look at the `requirements.txt` file and check if your OS provides all packages out-of-the-box (Using the OS package manager). For those missing, you need to either use a proxy that has Internet access (typically from a DMZ) or setup a PyPi server in your network that will host these packages.
|
||||
Look at the `requirements.txt` file and check if your OS provides all packages out-of-the-box (Using the OS package
|
||||
manager). For those missing, you need to either use a proxy that has Internet access (typically from a DMZ) or setup a
|
||||
PyPi server in your network that will host these packages.
|
||||
|
||||
If you're using an HTTP(S) proxy to download your python packages:
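
A minimal sketch, assuming a hypothetical proxy URL (pip also honors the `https_proxy` environment variable):

```bash
# Sketch: install Kubespray's python requirements through an HTTP(S) proxy.
pip install --proxy http://proxy.example.internal:3128 -r requirements.txt
```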
|
||||
|
||||
@ -106,13 +132,15 @@ pip install -i https://pypiserver/pypi package_you_miss
|
||||
|
||||
## Run Kubespray as usual
|
||||
|
||||
Once all artifacts are in place and your inventory properly set up, you can run kubespray with the regular `cluster.yaml` command:
|
||||
Once all artifacts are in place and your inventory properly set up, you can run kubespray with the
|
||||
regular `cluster.yml` command:
|
||||
|
||||
```bash
|
||||
ansible-playbook -i inventory/my_airgap_cluster/hosts.yaml -b cluster.yml
|
||||
```
|
||||
|
||||
If you use [Kubespray Container Image](#recommended-way:-kubespray-container-image), you can mount your inventory inside the container:
|
||||
If you use [Kubespray Container Image](#recommended-way:-kubespray-container-image), you can mount your inventory inside
|
||||
the container:
|
||||
|
||||
```bash
|
||||
docker run --rm -it -v path_to_inventory/my_airgap_cluster:inventory/my_airgap_cluster myprivateregistry.com/kubespray/kubespray:v2.14.0 ansible-playbook -i inventory/my_airgap_cluster/hosts.yaml -b cluster.yml
|
||||
|
11
docs/openeuler.md
Normal file
@ -0,0 +1,11 @@
|
||||
# OpenEuler
|
||||
|
||||
[OpenEuler](https://www.openeuler.org/en/) Linux is supported with docker and containerd runtimes.
|
||||
|
||||
**Note:** OpenEuler Linux is not currently covered in kubespray CI, and
|
||||
support for it is currently considered experimental.
|
||||
|
||||
At present, only `openEuler 22.03 LTS` has been adapted; it supports deployment on the aarch64 and x86_64 platforms.
|
||||
|
||||
There are no special considerations for using OpenEuler Linux as the target OS
|
||||
for Kubespray deployments.
|
@ -14,8 +14,8 @@ hands-on guide to get started with Kubespray.
|
||||
|
||||
## Cluster Details
|
||||
|
||||
* [kubespray](https://github.com/kubernetes-sigs/kubespray) v2.17.x
|
||||
* [kubernetes](https://github.com/kubernetes/kubernetes) v1.17.9
|
||||
* [kubespray](https://github.com/kubernetes-sigs/kubespray)
|
||||
* [kubernetes](https://github.com/kubernetes/kubernetes)
|
||||
|
||||
## Prerequisites
|
||||
|
||||
@ -466,7 +466,7 @@ kubectl logs $POD_NAME
|
||||
|
||||
#### Exec
|
||||
|
||||
In this section you will verify the ability to [execute commands in a container](https://kubernetes.io/docs/tasks/debug-application-cluster/get-shell-running-container/#running-individual-commands-in-a-container).
|
||||
In this section you will verify the ability to [execute commands in a container](https://kubernetes.io/docs/tasks/debug/debug-application/get-shell-running-container/#running-individual-commands-in-a-container).
|
||||
|
||||
Print the nginx version by executing the `nginx -v` command in the `nginx` container:
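
A sketch of that check, reusing the `$POD_NAME` variable set earlier:

```bash
kubectl exec -ti $POD_NAME -- nginx -v
```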
|
||||
|
||||
|
9
docs/uoslinux.md
Normal file
@ -0,0 +1,9 @@
|
||||
# UOS Linux
|
||||
|
||||
UOS Linux (UnionTech OS Server 20) is supported with docker and containerd runtimes.
|
||||
|
||||
**Note:** UOS Linux is not currently covered in kubespray CI, and
|
||||
support for it is currently considered experimental.
|
||||
|
||||
There are no special considerations for using UOS Linux as the target OS
|
||||
for Kubespray deployments.
|
@ -169,6 +169,7 @@ variables to match your requirements.
|
||||
* *searchdomains* - Array of up to 4 search domains
|
||||
* *remove_default_searchdomains* - Boolean. If enabled, the `searchdomains` variable can hold 6 search domains.
|
||||
* *dns_etchosts* - Content of hosts file for coredns and nodelocaldns
|
||||
* *dns_upstream_forward_extra_opts* - Options to add to the forward section of coredns/nodelocaldns for upstream DNS servers (see the sketch below)
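
A hedged sketch of a few of these variables in group_vars (values are illustrative only):

```yaml
# Hypothetical values for illustration.
searchdomains:
  - example.internal
remove_default_searchdomains: true
dns_upstream_forward_extra_opts:
  policy: sequential
```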
|
||||
|
||||
For more information, see [DNS
|
||||
Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.md).
|
||||
@ -183,7 +184,7 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
|
||||
* *containerd_default_runtime* - If defined, changes the default Containerd runtime used by the Kubernetes CRI plugin.
|
||||
|
||||
* *containerd_additional_runtimes* - Sets the additional Containerd runtimes used by the Kubernetes CRI plugin.
|
||||
[Default config](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/container-engine/containerd/defaults/main.yml) can be overriden in inventory vars.
|
||||
[Default config](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/container-engine/containerd/defaults/main.yml) can be overridden in inventory vars.
|
||||
|
||||
* *http_proxy/https_proxy/no_proxy/no_proxy_exclude_workers/additional_no_proxy* - Proxy variables for deploying behind a
|
||||
proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames
|
||||
@ -217,7 +218,7 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
|
||||
|
||||
By default, `kubelet_secure_addresses` is set to `10.0.0.110`, the address the ansible control host uses via `eth0` to connect to the machine. In case you want to use `eth1` as the outgoing interface on which `kube-apiserver` connects to the `kubelet`s, you should override the variable like this: `kubelet_secure_addresses: "192.168.1.110"`.
|
||||
|
||||
* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
|
||||
* *node_labels* - Labels applied to nodes via `kubectl label node`.
|
||||
For example, labels can be set in the inventory as variables or more widely in group_vars.
|
||||
*node_labels* can only be defined as a dict:
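
A minimal sketch (the label keys and values are hypothetical):

```yaml
node_labels:
  disktype: ssd
  environment: staging
```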
|
||||
|
||||
|
@ -21,14 +21,14 @@ After this step you should have:
|
||||
|
||||
### Kubespray configuration
|
||||
|
||||
First in `inventory/sample/group_vars/all.yml` you must set the cloud provider to `external` and external_cloud_provider to `external_cloud_provider`.
|
||||
First, in `inventory/sample/group_vars/all/all.yml` you must set `cloud_provider` to `external` and `external_cloud_provider` to `vsphere`.
|
||||
|
||||
```yml
|
||||
cloud_provider: "external"
|
||||
external_cloud_provider: "vsphere"
|
||||
```
|
||||
|
||||
Then, `inventory/sample/group_vars/vsphere.yml`, you need to declare your vCenter credentials and enable the vSphere CSI following the description below.
|
||||
Then, in `inventory/sample/group_vars/all/vsphere.yml`, you need to declare your vCenter credentials and enable the vSphere CSI following the description below.
|
||||
|
||||
| Variable | Required | Type | Choices | Default | Comment |
|
||||
|----------------------------------------|----------|---------|----------------------------|---------------------------|---------------------------------------------------------------------------------------------------------------------|
|
||||
|
@ -35,6 +35,11 @@ loadbalancer_apiserver_healthcheck_port: 8081
|
||||
|
||||
### OTHER OPTIONAL VARIABLES
|
||||
|
||||
## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers to nameserverentries.
|
||||
## If true, Kubespray does not include host nameservers in nameserverentries in the dns_late stage. However, it uses the nameservers to make sure the cluster installs safely in the dns_early stage.
|
||||
## Use this option with caution; you may need to define your dns servers. Otherwise, outbound queries such as www.google.com may fail.
|
||||
# disable_host_nameservers: false
|
||||
|
||||
## Upstream dns servers
|
||||
# upstream_dns_servers:
|
||||
# - 8.8.8.8
|
||||
|
@ -37,6 +37,9 @@
|
||||
# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore
|
||||
# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz"
|
||||
|
||||
# [Optional] Cilium: If using Cilium network plugin
|
||||
# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz"
|
||||
|
||||
# [Optional] Flannel: If using the Flannel network plugin
|
||||
# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}"
|
||||
|
||||
@ -55,12 +58,18 @@
|
||||
# [Optional] cri-o: only if you set container_manager: crio
|
||||
# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable"
|
||||
# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/"
|
||||
# crio_download_url: "{{ files_repo }}/storage.googleapis.com/cri-o/artifacts/cri-o.{{ image_arch }}.{{ crio_version }}.tar.gz"
|
||||
# skopeo_download_url: "{{ files_repo }}/github.com/lework/skopeo-binary/releases/download/{{ skopeo_version }}/skopeo-linux-{{ image_arch }}"
|
||||
|
||||
# [Optional] runc,containerd: only if you set container_runtime: containerd
|
||||
# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}"
|
||||
# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz"
|
||||
# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
|
||||
|
||||
# [Optional] runsc,containerd-shim-runsc: only if you set gvisor_enabled: true
|
||||
# gvisor_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/runsc"
|
||||
# gvisor_containerd_shim_runsc_download_url: "{{ files_repo }}/storage.googleapis.com/gvisor/releases/release/{{ gvisor_version }}/{{ ansible_architecture }}/containerd-shim-runsc-v1"
|
||||
|
||||
## CentOS/Redhat/AlmaLinux
|
||||
### For EL7, base and extras repo must be available, for EL8, baseos and appstream
|
||||
### By default we enable those repos automatically
|
||||
|
@ -18,6 +18,8 @@ metrics_server_enabled: false
|
||||
# metrics_server_kubelet_insecure_tls: true
|
||||
# metrics_server_metric_resolution: 15s
|
||||
# metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname"
|
||||
# metrics_server_host_network: false
|
||||
# metrics_server_replicas: 1
|
||||
|
||||
# Rancher Local Path Provisioner
|
||||
local_path_provisioner_enabled: false
|
||||
@ -161,7 +163,7 @@ cert_manager_enabled: false
|
||||
|
||||
# MetalLB deployment
|
||||
metallb_enabled: false
|
||||
metallb_speaker_enabled: true
|
||||
metallb_speaker_enabled: "{{ metallb_enabled }}"
|
||||
# metallb_ip_range:
|
||||
# - "10.5.0.50-10.5.0.99"
|
||||
# metallb_pool_name: "loadbalanced"
|
||||
@ -210,7 +212,7 @@ metallb_speaker_enabled: true
|
||||
# my_asn: 4200000000
|
||||
|
||||
argocd_enabled: false
|
||||
# argocd_version: v2.4.12
|
||||
# argocd_version: v2.5.7
|
||||
# argocd_namespace: argocd
|
||||
# Default password:
|
||||
# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli
|
||||
|
@ -17,7 +17,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
|
||||
kube_api_anonymous_auth: true
|
||||
|
||||
## Change this to use another Kubernetes version, e.g. a current beta release
|
||||
kube_version: v1.24.6
|
||||
kube_version: v1.25.6
|
||||
|
||||
# Where the binaries will be downloaded.
|
||||
# Note: ensure that you've enough disk space (about 1G)
|
||||
@ -205,6 +205,14 @@ enable_coredns_k8s_external: false
|
||||
coredns_k8s_external_zone: k8s_external.local
|
||||
# Enable endpoint_pod_names option for kubernetes plugin
|
||||
enable_coredns_k8s_endpoint_pod_names: false
|
||||
# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config
|
||||
# dns_upstream_forward_extra_opts:
|
||||
# policy: sequential
|
||||
# Apply extra options to coredns kubernetes plugin
|
||||
# coredns_kubernetes_extra_opts:
|
||||
# - 'fallthrough example.local'
|
||||
# Forward extra domains to the coredns kubernetes plugin
|
||||
# coredns_kubernetes_extra_domains: ''
|
||||
|
||||
# Can be docker_dns, host_resolvconf or none
|
||||
resolvconf_mode: host_resolvconf
|
||||
@ -255,9 +263,36 @@ podsecuritypolicy_enabled: false
|
||||
# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
|
||||
# kubelet_enforce_node_allocatable: pods
|
||||
|
||||
## Set runtime and kubelet cgroups when using systemd as cgroup driver (default)
|
||||
# kubelet_runtime_cgroups: "{{ kube_reserved_cgroups }}/{{ container_manager }}.service"
|
||||
# kubelet_kubelet_cgroups: "{{ kube_reserved_cgroups }}/kubelet.service"
|
||||
|
||||
## Set runtime and kubelet cgroups when using cgroupfs as cgroup driver
|
||||
# kubelet_runtime_cgroups_cgroupfs: "/system.slice/{{ container_manager }}.service"
|
||||
# kubelet_kubelet_cgroups_cgroupfs: "/system.slice/kubelet.service"
|
||||
|
||||
# Optionally reserve this space for kube daemons.
|
||||
# kube_reserved: true
|
||||
## Uncomment to override default values
|
||||
## The following two items need to be set when kube_reserved is true
|
||||
# kube_reserved_cgroups_for_service_slice: kube.slice
|
||||
# kube_reserved_cgroups: "/{{ kube_reserved_cgroups_for_service_slice }}"
|
||||
# kube_memory_reserved: 256Mi
|
||||
# kube_cpu_reserved: 100m
|
||||
# kube_ephemeral_storage_reserved: 2Gi
|
||||
# kube_pid_reserved: "1000"
|
||||
# Reservation for master hosts
|
||||
# kube_master_memory_reserved: 512Mi
|
||||
# kube_master_cpu_reserved: 200m
|
||||
# kube_master_ephemeral_storage_reserved: 2Gi
|
||||
# kube_master_pid_reserved: "1000"
|
||||
|
||||
## Optionally reserve resources for OS system daemons.
|
||||
# system_reserved: true
|
||||
## Uncomment to override default values
|
||||
## The following two items need to be set when system_reserved is true
|
||||
# system_reserved_cgroups_for_service_slice: system.slice
|
||||
# system_reserved_cgroups: "/{{ system_reserved_cgroups_for_service_slice }}"
|
||||
# system_memory_reserved: 512Mi
|
||||
# system_cpu_reserved: 500m
|
||||
# system_ephemeral_storage_reserved: 2Gi
|
||||
@ -339,3 +374,9 @@ event_ttl_duration: "1h0m0s"
|
||||
auto_renew_certificates: false
|
||||
# First Monday of each month
|
||||
# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00"
|
||||
|
||||
# kubeadm patches path
|
||||
kubeadm_patches:
|
||||
enabled: false
|
||||
source_dir: "{{ inventory_dir }}/patches"
|
||||
dest_dir: "{{ kube_config_dir }}/patches"
|
||||
|
@ -109,6 +109,10 @@ calico_pool_blocksize: 26
|
||||
# calico_ip_auto_method: "interface=eth.*"
|
||||
# calico_ip6_auto_method: "interface=eth.*"
|
||||
|
||||
# Set FELIX_MTUIFACEPATTERN, the pattern used to discover the host's interface for MTU auto-detection.
|
||||
# see https://projectcalico.docs.tigera.io/reference/felix/configuration
|
||||
# calico_felix_mtu_iface_pattern: "^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)"
|
||||
|
||||
# Choose the iptables insert mode for Calico: "Insert" or "Append".
|
||||
# calico_felix_chaininsertmode: Insert
|
||||
|
||||
|
@ -10,9 +10,9 @@
|
||||
## single quote and escape backslashes
|
||||
# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
|
||||
|
||||
# You can choose what type of flannel backend to use: 'vxlan' or 'host-gw'
|
||||
# for experimental backend
|
||||
# You can choose what type of flannel backend to use: 'vxlan', 'host-gw' or 'wireguard'
|
||||
# please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md
|
||||
# flannel_backend_type: "vxlan"
|
||||
# flannel_vxlan_vni: 1
|
||||
# flannel_vxlan_port: 8472
|
||||
# flannel_vxlan_direct_routing: false
|
||||
|
@ -55,3 +55,9 @@ kube_ovn_enable_ssl: false
|
||||
|
||||
## dpdk
|
||||
kube_ovn_dpdk_enabled: false
|
||||
|
||||
## enable interconnection to an existing IC database server.
|
||||
kube_ovn_ic_enable: false
|
||||
kube_ovn_ic_autoroute: true
|
||||
kube_ovn_ic_dbhost: "127.0.0.1"
|
||||
kube_ovn_ic_zone: "kubernetes"
|
||||
|
@ -16,7 +16,7 @@
|
||||
# Add External IP of service to the RIB so that it gets advertised to the BGP peers.
|
||||
# kube_router_advertise_external_ip: false
|
||||
|
||||
# Add LoadbBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers.
|
||||
# Add LoadBalancer IP of service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers.
|
||||
# kube_router_advertise_loadbalancer_ip: false
|
||||
|
||||
# Adjust the kube-router daemonset manifest template with the changes needed for DSR
|
||||
|
@ -0,0 +1,8 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: kube-controller-manager
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
prometheus.io/port: '10257'
|
8
inventory/sample/patches/kube-scheduler+merge.yaml
Normal file
@ -0,0 +1,8 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: kube-scheduler
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
prometheus.io/port: '10259'
|
51
pipeline.Dockerfile
Normal file
@ -0,0 +1,51 @@
|
||||
# Use immutable image tags rather than mutable tags (like ubuntu:20.04)
|
||||
FROM ubuntu:focal-20220531
|
||||
|
||||
ARG ARCH=amd64
|
||||
ARG TZ=Etc/UTC
|
||||
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
||||
|
||||
ENV VAGRANT_VERSION=2.3.7
|
||||
ENV VAGRANT_DEFAULT_PROVIDER=libvirt
|
||||
ENV VAGRANT_ANSIBLE_TAGS=facts
|
||||
|
||||
RUN apt update -y \
|
||||
&& apt install -y \
|
||||
libssl-dev python3-dev sshpass apt-transport-https jq moreutils wget libvirt-dev openssh-client rsync git \
|
||||
ca-certificates curl gnupg2 software-properties-common python3-pip unzip \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
|
||||
&& add-apt-repository \
|
||||
"deb [arch=$ARCH] https://download.docker.com/linux/ubuntu \
|
||||
$(lsb_release -cs) \
|
||||
stable" \
|
||||
&& apt update -y && apt-get install --no-install-recommends -y docker-ce \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Some tools like yamllint need this
|
||||
# Pip needs this as well at the moment to install ansible
|
||||
# (and potentially other packages)
|
||||
# See: https://github.com/pypa/pip/issues/10219
|
||||
ENV LANG=C.UTF-8
|
||||
|
||||
WORKDIR /kubespray
|
||||
COPY . .
|
||||
RUN /usr/bin/python3 -m pip install --no-cache-dir pip -U \
|
||||
&& /usr/bin/python3 -m pip install --no-cache-dir -r tests/requirements.txt \
|
||||
&& python3 -m pip install --no-cache-dir -r requirements.txt \
|
||||
&& update-alternatives --install /usr/bin/python python /usr/bin/python3 1
|
||||
|
||||
RUN KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
|
||||
&& curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$ARCH/kubectl \
|
||||
&& chmod a+x kubectl \
|
||||
&& mv kubectl /usr/local/bin/kubectl
|
||||
|
||||
# Install Vagrant
|
||||
RUN wget https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}-1_amd64.deb && \
|
||||
dpkg -i vagrant_${VAGRANT_VERSION}-1_amd64.deb && \
|
||||
rm vagrant_${VAGRANT_VERSION}-1_amd64.deb && \
|
||||
vagrant plugin install vagrant-libvirt
|
||||
|
||||
# Install Kubernetes collections
|
||||
RUN pip3 install kubernetes \
|
||||
&& ansible-galaxy collection install kubernetes.core
|
@ -24,7 +24,7 @@
|
||||
- { role: kubespray-defaults}
|
||||
- { role: recover_control_plane/control-plane }
|
||||
|
||||
- include: cluster.yml
|
||||
- import_playbook: cluster.yml
|
||||
|
||||
- hosts: kube_control_plane
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
|
@ -6,5 +6,5 @@ netaddr==0.7.19
|
||||
pbr==5.4.4
|
||||
jmespath==0.9.5
|
||||
ruamel.yaml==0.16.10
|
||||
ruamel.yaml.clib==0.2.6
|
||||
ruamel.yaml.clib==0.2.7
|
||||
MarkupSafe==1.1.1
|
||||
|
@ -6,5 +6,5 @@ netaddr==0.7.19
|
||||
pbr==5.4.4
|
||||
jmespath==0.9.5
|
||||
ruamel.yaml==0.16.10
|
||||
ruamel.yaml.clib==0.2.6
|
||||
ruamel.yaml.clib==0.2.7
|
||||
MarkupSafe==1.1.1
|
||||
|
@ -29,6 +29,9 @@
|
||||
msg: "Reset confirmation failed"
|
||||
when: reset_confirmation != "yes"
|
||||
|
||||
- name: Gather information about installed services
|
||||
service_facts:
|
||||
|
||||
environment: "{{ proxy_disable_env }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
|
@ -84,6 +84,7 @@
|
||||
- use_oracle_public_repo|default(true)
|
||||
- '''ID="ol"'' in os_release.stdout_lines'
|
||||
- (ansible_distribution_version | float) >= 7.6
|
||||
- (ansible_distribution_version | float) < 9
|
||||
|
||||
# CentOS ships with python installed
|
||||
|
||||
|
@ -7,7 +7,7 @@
|
||||
check_mode: false
|
||||
|
||||
- include_tasks: bootstrap-centos.yml
|
||||
when: '''ID="centos"'' in os_release.stdout_lines or ''ID="ol"'' in os_release.stdout_lines or ''ID="almalinux"'' in os_release.stdout_lines or ''ID="rocky"'' in os_release.stdout_lines or ''ID="kylin"'' in os_release.stdout_lines'
|
||||
when: '''ID="centos"'' in os_release.stdout_lines or ''ID="ol"'' in os_release.stdout_lines or ''ID="almalinux"'' in os_release.stdout_lines or ''ID="rocky"'' in os_release.stdout_lines or ''ID="kylin"'' in os_release.stdout_lines or ''ID="uos"'' in os_release.stdout_lines or ''ID="openEuler"'' in os_release.stdout_lines'
|
||||
|
||||
- include_tasks: bootstrap-amazon.yml
|
||||
when: '''ID="amzn"'' in os_release.stdout_lines'
|
||||
|
@ -4,7 +4,7 @@
|
||||
containerd_package: 'containerd.io'
|
||||
yum_repo_dir: /etc/yum.repos.d
|
||||
|
||||
# Keep minimal repo information arround for cleanup
|
||||
# Keep minimal repo information around for cleanup
|
||||
containerd_repo_info:
|
||||
repos:
|
||||
|
||||
|
2
roles/container-engine/containerd-common/meta/main.yml
Normal file
@ -0,0 +1,2 @@
|
||||
---
|
||||
allow_duplicates: true
|
@ -2,6 +2,9 @@
|
||||
containerd_storage_dir: "/var/lib/containerd"
|
||||
containerd_state_dir: "/run/containerd"
|
||||
containerd_systemd_dir: "/etc/systemd/system/containerd.service.d"
|
||||
# The default value is not -999 here because containerd's oom_score_adj has been
|
||||
# set to -999 even if containerd_oom_score is 0.
|
||||
# Ref: https://github.com/kubernetes-sigs/kubespray/pull/9275#issuecomment-1246499242
|
||||
containerd_oom_score: 0
|
||||
|
||||
# containerd_default_runtime: "runc"
|
||||
@ -12,7 +15,7 @@ containerd_runc_runtime:
|
||||
type: "io.containerd.runc.v2"
|
||||
engine: ""
|
||||
root: ""
|
||||
# base_runtime_spec: cri-base.json # use this to limit number of file descriptors per container
|
||||
base_runtime_spec: cri-base.json
|
||||
options:
|
||||
systemdCgroup: "{{ containerd_use_systemd_cgroup | ternary('true', 'false') }}"
|
||||
|
||||
@ -23,8 +26,17 @@ containerd_additional_runtimes: []
|
||||
# engine: ""
|
||||
# root: ""
|
||||
|
||||
containerd_base_runtime_spec_rlimit_nofile: 65535
|
||||
|
||||
containerd_default_base_runtime_spec_patch:
|
||||
process:
|
||||
rlimits:
|
||||
- type: RLIMIT_NOFILE
|
||||
hard: "{{ containerd_base_runtime_spec_rlimit_nofile }}"
|
||||
soft: "{{ containerd_base_runtime_spec_rlimit_nofile }}"
|
||||
|
||||
containerd_base_runtime_specs:
|
||||
cri-base.json: "{{ lookup('file', 'cri-base.json') }}"
|
||||
cri-base.json: "{{ containerd_default_base_runtime_spec | combine(containerd_default_base_runtime_spec_patch,recursive=1) }}"
|
||||
|
||||
containerd_grpc_max_recv_message_size: 16777216
|
||||
containerd_grpc_max_send_message_size: 16777216
|
||||
@ -40,6 +52,11 @@ containerd_registries:
|
||||
|
||||
containerd_max_container_log_line_size: -1
|
||||
|
||||
# If enabled, it will allow non-root users to use port numbers <1024
|
||||
containerd_enable_unprivileged_ports: false
|
||||
# If enabled, it will allow non-root users to use icmp sockets
|
||||
containerd_enable_unprivileged_icmp: false
|
||||
|
||||
containerd_cfg_dir: /etc/containerd
|
||||
|
||||
# Extra config to be put in {{ containerd_cfg_dir }}/config.toml literally
|
||||
@ -56,3 +73,6 @@ containerd_limit_proc_num: "infinity"
|
||||
containerd_limit_core: "infinity"
|
||||
containerd_limit_open_file_num: "infinity"
|
||||
containerd_limit_mem_lock: "infinity"
|
||||
|
||||
# If enabled, containerd will use config_path and the mirrors config will be disabled
|
||||
containerd_use_config_path: false
|
||||
|
@ -1,214 +0,0 @@
|
||||
{
|
||||
"ociVersion": "1.0.2-dev",
|
||||
"process": {
|
||||
"user": {
|
||||
"uid": 0,
|
||||
"gid": 0
|
||||
},
|
||||
"cwd": "/",
|
||||
"capabilities": {
|
||||
"bounding": [
|
||||
"CAP_CHOWN",
|
||||
"CAP_DAC_OVERRIDE",
|
||||
"CAP_FSETID",
|
||||
"CAP_FOWNER",
|
||||
"CAP_MKNOD",
|
||||
"CAP_NET_RAW",
|
||||
"CAP_SETGID",
|
||||
"CAP_SETUID",
|
||||
"CAP_SETFCAP",
|
||||
"CAP_SETPCAP",
|
||||
"CAP_NET_BIND_SERVICE",
|
||||
"CAP_SYS_CHROOT",
|
||||
"CAP_KILL",
|
||||
"CAP_AUDIT_WRITE"
|
||||
],
|
||||
"effective": [
|
||||
"CAP_CHOWN",
|
||||
"CAP_DAC_OVERRIDE",
|
||||
"CAP_FSETID",
|
||||
"CAP_FOWNER",
|
||||
"CAP_MKNOD",
|
||||
"CAP_NET_RAW",
|
||||
"CAP_SETGID",
|
||||
"CAP_SETUID",
|
||||
"CAP_SETFCAP",
|
||||
"CAP_SETPCAP",
|
||||
"CAP_NET_BIND_SERVICE",
|
||||
"CAP_SYS_CHROOT",
|
||||
"CAP_KILL",
|
||||
"CAP_AUDIT_WRITE"
|
||||
],
|
||||
"inheritable": [
|
||||
"CAP_CHOWN",
|
||||
"CAP_DAC_OVERRIDE",
|
||||
"CAP_FSETID",
|
||||
"CAP_FOWNER",
|
||||
"CAP_MKNOD",
|
||||
"CAP_NET_RAW",
|
||||
"CAP_SETGID",
|
||||
"CAP_SETUID",
|
||||
"CAP_SETFCAP",
|
||||
"CAP_SETPCAP",
|
||||
"CAP_NET_BIND_SERVICE",
|
||||
"CAP_SYS_CHROOT",
|
||||
"CAP_KILL",
|
||||
"CAP_AUDIT_WRITE"
|
||||
],
|
||||
"permitted": [
|
||||
"CAP_CHOWN",
|
||||
"CAP_DAC_OVERRIDE",
|
||||
"CAP_FSETID",
|
||||
"CAP_FOWNER",
|
||||
"CAP_MKNOD",
|
||||
"CAP_NET_RAW",
|
||||
"CAP_SETGID",
|
||||
"CAP_SETUID",
|
||||
"CAP_SETFCAP",
|
||||
"CAP_SETPCAP",
|
||||
"CAP_NET_BIND_SERVICE",
|
||||
"CAP_SYS_CHROOT",
|
||||
"CAP_KILL",
|
||||
"CAP_AUDIT_WRITE"
|
||||
]
|
||||
},
|
||||
"rlimits": [
|
||||
{
|
||||
"type": "RLIMIT_NOFILE",
|
||||
"hard": 1024,
|
||||
"soft": 1024
|
||||
}
|
||||
],
|
||||
"noNewPrivileges": true
|
||||
},
|
||||
"root": {
|
||||
"path": "rootfs"
|
||||
},
|
||||
"mounts": [
|
||||
{
|
||||
"destination": "/proc",
|
||||
"type": "proc",
|
||||
"source": "proc",
|
||||
"options": [
|
||||
"nosuid",
|
||||
"noexec",
|
||||
"nodev"
|
||||
]
|
||||
},
|
||||
{
|
||||
"destination": "/dev",
|
||||
"type": "tmpfs",
|
||||
"source": "tmpfs",
|
||||
"options": [
|
||||
"nosuid",
|
||||
"strictatime",
|
||||
"mode=755",
|
||||
"size=65536k"
|
||||
]
|
||||
},
|
||||
{
|
||||
"destination": "/dev/pts",
|
||||
"type": "devpts",
|
||||
"source": "devpts",
|
||||
"options": [
|
||||
"nosuid",
|
||||
"noexec",
|
||||
"newinstance",
|
||||
"ptmxmode=0666",
|
||||
"mode=0620",
|
||||
"gid=5"
|
||||
]
|
||||
},
|
||||
{
|
||||
"destination": "/dev/shm",
|
||||
"type": "tmpfs",
|
||||
"source": "shm",
|
||||
"options": [
|
||||
"nosuid",
|
||||
"noexec",
|
||||
"nodev",
|
||||
"mode=1777",
|
||||
"size=65536k"
|
||||
]
|
||||
},
|
||||
{
|
||||
"destination": "/dev/mqueue",
|
||||
"type": "mqueue",
|
||||
"source": "mqueue",
|
||||
"options": [
|
||||
"nosuid",
|
||||
"noexec",
|
||||
"nodev"
|
||||
]
|
||||
},
|
||||
{
|
||||
"destination": "/sys",
|
||||
"type": "sysfs",
|
||||
"source": "sysfs",
|
||||
"options": [
|
||||
"nosuid",
|
||||
"noexec",
|
||||
"nodev",
|
||||
"ro"
|
||||
]
|
||||
},
|
||||
{
|
||||
"destination": "/run",
|
||||
"type": "tmpfs",
|
||||
"source": "tmpfs",
|
||||
"options": [
|
||||
"nosuid",
|
||||
"strictatime",
|
||||
"mode=755",
|
||||
"size=65536k"
|
||||
]
|
||||
}
|
||||
],
|
||||
"linux": {
|
||||
"resources": {
|
||||
"devices": [
|
||||
{
|
||||
"allow": false,
|
||||
"access": "rwm"
|
||||
}
|
||||
]
|
||||
},
|
||||
"cgroupsPath": "/default",
|
||||
"namespaces": [
|
||||
{
|
||||
"type": "pid"
|
||||
},
|
||||
{
|
||||
"type": "ipc"
|
||||
},
|
||||
{
|
||||
"type": "uts"
|
||||
},
|
||||
{
|
||||
"type": "mount"
|
||||
},
|
||||
{
|
||||
"type": "network"
|
||||
}
|
||||
],
|
||||
"maskedPaths": [
|
||||
"/proc/acpi",
|
||||
"/proc/asound",
|
||||
"/proc/kcore",
|
||||
"/proc/keys",
|
||||
"/proc/latency_stats",
|
||||
"/proc/timer_list",
|
||||
"/proc/timer_stats",
|
||||
"/proc/sched_debug",
|
||||
"/sys/firmware",
|
||||
"/proc/scsi"
|
||||
],
|
||||
"readonlyPaths": [
|
||||
"/proc/bus",
|
||||
"/proc/fs",
|
||||
"/proc/irq",
|
||||
"/proc/sys",
|
||||
"/proc/sysrq-trigger"
|
||||
]
|
||||
}
|
||||
}
|
@ -3,7 +3,7 @@
|
||||
fail:
|
||||
msg: "{{ ansible_distribution }} is not supported by containerd."
|
||||
when:
|
||||
- ansible_distribution not in ["CentOS", "OracleLinux", "RedHat", "Ubuntu", "Debian", "Fedora", "AlmaLinux", "Rocky", "Amazon", "Flatcar", "Flatcar Container Linux by Kinvolk", "Suse", "openSUSE Leap", "openSUSE Tumbleweed", "Kylin Linux Advanced Server"]
|
||||
- ansible_distribution not in ["CentOS", "OracleLinux", "RedHat", "Ubuntu", "Debian", "Fedora", "AlmaLinux", "Rocky", "Amazon", "Flatcar", "Flatcar Container Linux by Kinvolk", "Suse", "openSUSE Leap", "openSUSE Tumbleweed", "Kylin Linux Advanced Server", "UnionTech", "openEuler"]
|
||||
|
||||
- name: containerd | Remove any package manager controlled containerd package
|
||||
package:
|
||||
@ -84,6 +84,16 @@
|
||||
notify: restart containerd
|
||||
when: http_proxy is defined or https_proxy is defined
|
||||
|
||||
- name: containerd | Generate default base_runtime_spec
|
||||
register: ctr_oci_spec
|
||||
command: "{{ containerd_bin_dir }}/ctr oci spec"
|
||||
check_mode: false
|
||||
changed_when: false
|
||||
|
||||
- name: containerd | Store generated default base_runtime_spec
|
||||
set_fact:
|
||||
containerd_default_base_runtime_spec: "{{ ctr_oci_spec.stdout | from_json }}"
|
||||
|
||||
- name: containerd | Write base_runtime_specs
|
||||
copy:
|
||||
content: "{{ item.value }}"
|
||||
@ -101,6 +111,27 @@
|
||||
mode: 0640
|
||||
notify: restart containerd
|
||||
|
||||
- block:
|
||||
- name: containerd | Create registry directories
|
||||
file:
|
||||
path: "{{ containerd_cfg_dir }}/certs.d/{{ item.key }}"
|
||||
state: directory
|
||||
mode: 0755
|
||||
recurse: true
|
||||
with_dict: "{{ containerd_insecure_registries }}"
|
||||
- name: containerd | Write hosts.toml file
|
||||
blockinfile:
|
||||
path: "{{ containerd_cfg_dir }}/certs.d/{{ item.key }}/hosts.toml"
|
||||
mode: 0640
|
||||
create: true
|
||||
block: |
|
||||
server = "{{ item.value }}"
|
||||
[host."{{ item.value }}"]
|
||||
capabilities = ["pull", "resolve", "push"]
|
||||
skip_verify = true
|
||||
with_dict: "{{ containerd_insecure_registries }}"
|
||||
when: containerd_use_config_path is defined and containerd_use_config_path|bool and containerd_insecure_registries is defined
|
||||
|
||||
# you can sometimes end up in a state where everything is installed
|
||||
# but containerd was not started / enabled
|
||||
- name: containerd | Flush handlers
|
||||
|
@ -18,6 +18,8 @@ oom_score = {{ containerd_oom_score }}
|
||||
[plugins."io.containerd.grpc.v1.cri"]
|
||||
sandbox_image = "{{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
|
||||
max_container_log_line_size = {{ containerd_max_container_log_line_size }}
|
||||
enable_unprivileged_ports = {{ containerd_enable_unprivileged_ports | default(false) | lower }}
|
||||
enable_unprivileged_icmp = {{ containerd_enable_unprivileged_icmp | default(false) | lower }}
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd]
|
||||
default_runtime_name = "{{ containerd_default_runtime | default('runc') }}"
|
||||
snapshotter = "{{ containerd_snapshotter | default('overlayfs') }}"
|
||||
@ -45,6 +47,9 @@ oom_score = {{ containerd_oom_score }}
|
||||
runtime_type = "io.containerd.runsc.v1"
|
||||
{% endif %}
|
||||
[plugins."io.containerd.grpc.v1.cri".registry]
|
||||
{% if containerd_use_config_path is defined and containerd_use_config_path|bool %}
|
||||
config_path = "{{ containerd_cfg_dir }}/certs.d"
|
||||
{% else %}
|
||||
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
|
||||
{% for registry, addr in containerd_registries.items() %}
|
||||
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ registry }}"]
|
||||
@ -55,8 +60,9 @@ oom_score = {{ containerd_oom_score }}
|
||||
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ registry }}"]
|
||||
endpoint = ["{{ ([ addr ] | flatten ) | join('","') }}"]
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% for addr in containerd_insecure_registries.values() | flatten | unique %}
|
||||
[plugins."io.containerd.grpc.v1.cri".registry.configs."{{ addr }}".tls]
|
||||
[plugins."io.containerd.grpc.v1.cri".registry.configs."{{ addr | urlsplit('netloc') }}".tls]
|
||||
insecure_skip_verify = true
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
|
@ -36,6 +36,10 @@ LimitMEMLOCK={{ containerd_limit_mem_lock }}
|
||||
# Only systemd 226 and above support this version.
|
||||
TasksMax=infinity
|
||||
OOMScoreAdjust=-999
|
||||
# Set the cgroup slice of the service so that kube reserved takes effect
|
||||
{% if kube_reserved is defined and kube_reserved|bool %}
|
||||
Slice={{ kube_reserved_cgroups_for_service_slice }}
|
||||
{% endif %}
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
@ -35,6 +35,10 @@ LimitCORE=infinity
|
||||
TasksMax=infinity
|
||||
Delegate=yes
|
||||
KillMode=process
|
||||
# Set the cgroup slice of the service so that kube reserved takes effect
|
||||
{% if kube_reserved is defined and kube_reserved|bool %}
|
||||
Slice={{ kube_reserved_cgroups_for_service_slice }}
|
||||
{% endif %}
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
@ -1,7 +1,7 @@
|
||||
---
|
||||
|
||||
crio_cgroup_manager: "{{ kubelet_cgroup_driver | default('systemd') }}"
|
||||
crio_conmon: "/usr/bin/conmon"
|
||||
crio_conmon: "{{ bin_dir }}/conmon"
|
||||
crio_enable_metrics: false
|
||||
crio_log_level: "info"
|
||||
crio_metrics_port: "9090"
|
||||
@ -37,17 +37,10 @@ crio_stream_port: "10010"
|
||||
|
||||
crio_required_version: "{{ kube_version | regex_replace('^v(?P<major>\\d+).(?P<minor>\\d+).(?P<patch>\\d+)$', '\\g<major>.\\g<minor>') }}"
|
||||
|
||||
crio_kubernetes_version_matrix:
|
||||
"1.24": "1.24"
|
||||
"1.23": "1.23"
|
||||
"1.22": "1.22"
|
||||
|
||||
crio_version: "{{ crio_kubernetes_version_matrix[crio_required_version] | default('1.24') }}"
|
||||
|
||||
# The crio_runtimes variable defines a list of OCI compatible runtimes.
|
||||
crio_runtimes:
|
||||
- name: runc
|
||||
path: /usr/bin/runc
|
||||
path: "{{ bin_dir }}/runc"
|
||||
type: oci
|
||||
root: /run/runc
|
||||
|
||||
@ -65,7 +58,7 @@ kata_runtimes:
|
||||
# crun is a fast and low-memory footprint OCI Container Runtime fully written in C.
|
||||
crun_runtime:
|
||||
name: crun
|
||||
path: /usr/bin/crun
|
||||
path: "{{ bin_dir }}/crun"
|
||||
type: oci
|
||||
root: /run/crun
|
||||
|
||||
@ -76,20 +69,10 @@ youki_runtime:
|
||||
type: oci
|
||||
root: /run/youki
|
||||
|
||||
# When this is true, CRI-O package repositories are added. Set this to false when using an
|
||||
# environment with preconfigured CRI-O package repositories.
|
||||
crio_add_repos: true
|
||||
|
||||
# Allow crio offline installation
|
||||
# TODO(cristicalin): remove this after 2.21
|
||||
crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable"
|
||||
|
||||
# Allow crio offline installation
|
||||
crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/"
|
||||
|
||||
# skopeo is needed to save/load images when download_run_once=true
|
||||
skopeo_packages:
|
||||
- "skopeo"
|
||||
|
||||
# Configure the cri-o pids limit, increase this for heavily multi-threaded workloads
|
||||
# see https://github.com/cri-o/cri-o/issues/1921
|
||||
crio_pids_limit: 1024
|
||||
@ -102,3 +85,19 @@ crio_subuid_start: 2130706432
|
||||
crio_subuid_length: 16777216
|
||||
crio_subgid_start: 2130706432
|
||||
crio_subgid_length: 16777216
|
||||
|
||||
# cri-o binary files
|
||||
crio_bin_files:
|
||||
- conmon
|
||||
- crio
|
||||
- crio-status
|
||||
- pinns
|
||||
|
||||
# cri-o manual files
|
||||
crio_man_files:
|
||||
5:
|
||||
- crio.conf
|
||||
- crio.conf.d
|
||||
8:
|
||||
- crio
|
||||
- crio-status
|
||||
|
@ -1,3 +1,5 @@
|
||||
---
|
||||
dependencies:
|
||||
- role: container-engine/crictl
|
||||
- role: container-engine/runc
|
||||
- role: container-engine/skopeo
|
||||
|
@ -0,0 +1,17 @@
|
||||
{
|
||||
"cniVersion": "0.2.0",
|
||||
"name": "mynet",
|
||||
"type": "bridge",
|
||||
"bridge": "cni0",
|
||||
"isGateway": true,
|
||||
"ipMasq": true,
|
||||
"ipam": {
|
||||
"type": "host-local",
|
||||
"subnet": "172.19.0.0/24",
|
||||
"routes": [
|
||||
{
|
||||
"dst": "0.0.0.0/0"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
@ -0,0 +1,10 @@
|
||||
{
|
||||
"metadata": {
|
||||
"name": "runc1"
|
||||
},
|
||||
"image": {
|
||||
"image": "quay.io/kubespray/hello-world:latest"
|
||||
},
|
||||
"log_path": "runc1.0.log",
|
||||
"linux": {}
|
||||
}
|
@ -0,0 +1,10 @@
{
  "metadata": {
    "name": "runc1",
    "namespace": "default",
    "attempt": 1,
    "uid": "hdishd83djaidwnduwk28bcsb"
  },
  "linux": {},
  "log_directory": "/tmp"
}
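These two fixtures are consumed together by crictl: the container spec runs inside the sandbox. The invocation below mirrors the command used in the molecule test further down; the task wrapper itself is illustrative, and the /tmp paths match where the prepare playbook copies the files:

- name: Run the test pod from the fixtures  # illustrative wrapper around the tested command
  command: /usr/local/bin/crictl run --with-pull --runtime runc /tmp/container.json /tmp/sandbox.json
  become: true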
@ -7,24 +7,38 @@ lint: |
  set -e
  yamllint -c ../../../.yamllint .
platforms:
  - name: ubuntu2004
  - name: ubuntu20
    box: generic/ubuntu2004
    cpus: 2
    memory: 1024
    groups:
      - kube_control_plane
      - kube_node
      - k8s_cluster
  - name: almalinux8
    box: almalinux/8
    cpus: 2
    memory: 1024
    groups:
      - kube_control_plane
      - kube_node
      - k8s_cluster
  - name: fedora
    box: fedora/35-cloud-base
    box: fedora/38-cloud-base
    cpus: 2
    memory: 2048
    groups:
      - kube_control_plane
      - kube_node
      - k8s_cluster
  - name: debian10
    box: generic/debian10
    cpus: 2
    memory: 1024
    groups:
      - kube_control_plane
      - kube_node
      - k8s_cluster
provisioner:
  name: ansible
  env:
@ -2,5 +2,51 @@
- name: Prepare
  hosts: all
  gather_facts: False
  become: true
  vars:
    ignore_assert_errors: true
  roles:
    - role: kubespray-defaults
    - role: bootstrap-os
    - role: kubernetes/preinstall
    - role: adduser
      user: "{{ addusers.kube }}"
  tasks:
    - include_tasks: "../../../../download/tasks/download_file.yml"
      vars:
        download: "{{ download_defaults | combine(downloads.cni) }}"

- name: Prepare CNI
  hosts: all
  gather_facts: False
  become: true
  vars:
    ignore_assert_errors: true
    kube_network_plugin: cni
  roles:
    - role: kubespray-defaults
    - role: network_plugin/cni
  tasks:
    - name: Copy test container files
      copy:
        src: "{{ item }}"
        dest: "/tmp/{{ item }}"
        owner: root
        mode: 0644
      with_items:
        - container.json
        - sandbox.json
    - name: Create /etc/cni/net.d directory
      file:
        path: /etc/cni/net.d
        state: directory
        owner: "{{ kube_owner }}"
        mode: 0755
    - name: Setup CNI
      copy:
        src: "{{ item }}"
        dest: "/etc/cni/net.d/{{ item }}"
        owner: root
        mode: 0644
      with_items:
        - 10-mynet.conf
@ -19,3 +19,17 @@ def test_run(host):
    cmd = host.command(crictl + " --runtime-endpoint " + path + " version")
    assert cmd.rc == 0
    assert "RuntimeName: cri-o" in cmd.stdout

def test_run_pod(host):
    runtime = "runc"

    run_command = "/usr/local/bin/crictl run --with-pull --runtime {} /tmp/container.json /tmp/sandbox.json".format(runtime)
    with host.sudo():
        cmd = host.command(run_command)
    assert cmd.rc == 0

    with host.sudo():
        log_f = host.file("/tmp/runc1.0.log")

        assert log_f.exists
        assert b"Hello from Docker" in log_f.content
119 roles/container-engine/cri-o/tasks/cleanup.yaml Normal file
@ -0,0 +1,119 @@
---
# TODO(cristicalin): drop this file after 2.21
- name: CRI-O kubic repo name for debian os family
  set_fact:
    crio_kubic_debian_repo_name: "{{ ((ansible_distribution == 'Ubuntu') | ternary('x','')) ~ ansible_distribution ~ '_' ~ ansible_distribution_version }}"
  when: ansible_os_family == "Debian"

- name: Remove legacy CRI-O kubic apt repo key
  apt_key:
    url: "https://{{ crio_download_base }}/{{ crio_kubic_debian_repo_name }}/Release.key"
    state: absent
  when: crio_kubic_debian_repo_name is defined

- name: Remove legacy CRI-O kubic apt repo
  apt_repository:
    repo: "deb http://{{ crio_download_base }}/{{ crio_kubic_debian_repo_name }}/ /"
    state: absent
    filename: devel-kubic-libcontainers-stable
  when: crio_kubic_debian_repo_name is defined

- name: Remove legacy CRI-O kubic cri-o apt repo
  apt_repository:
    repo: "deb {{ crio_download_crio }}{{ crio_version }}/{{ crio_kubic_debian_repo_name }}/ /"
    state: absent
    filename: devel-kubic-libcontainers-stable-cri-o
  when: crio_kubic_debian_repo_name is defined

- name: Remove legacy CRI-O kubic yum repo
  yum_repository:
    name: devel_kubic_libcontainers_stable
    description: Stable Releases of Upstream github.com/containers packages (CentOS_$releasever)
    baseurl: http://{{ crio_download_base }}/CentOS_{{ ansible_distribution_major_version }}/
    state: absent
  when:
    - ansible_os_family == "RedHat"
    - ansible_distribution not in ["Amazon", "Fedora"]

- name: Remove legacy CRI-O kubic yum repo
  yum_repository:
    name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}"
    description: "CRI-O {{ crio_version }} (CentOS_$releasever)"
    baseurl: "{{ crio_download_crio }}{{ crio_version }}/CentOS_{{ ansible_distribution_major_version }}/"
    state: absent
  when:
    - ansible_os_family == "RedHat"
    - ansible_distribution not in ["Amazon", "Fedora"]

- name: Remove legacy CRI-O kubic yum repo
  yum_repository:
    name: devel_kubic_libcontainers_stable
    description: Stable Releases of Upstream github.com/containers packages
    baseurl: http://{{ crio_download_base }}/Fedora_{{ ansible_distribution_major_version }}/
    state: absent
  when:
    - ansible_distribution in ["Fedora"]
    - not is_ostree

- name: Remove legacy CRI-O kubic yum repo
  yum_repository:
    name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}"
    description: "CRI-O {{ crio_version }}"
    baseurl: "{{ crio_download_crio }}{{ crio_version }}/Fedora_{{ ansible_distribution_major_version }}/"
    state: absent
  when:
    - ansible_distribution in ["Fedora"]
    - not is_ostree

- name: Remove legacy CRI-O kubic yum repo
  yum_repository:
    name: devel_kubic_libcontainers_stable
    description: Stable Releases of Upstream github.com/containers packages
    baseurl: http://{{ crio_download_base }}/CentOS_7/
    state: absent
  when: ansible_distribution in ["Amazon"]

- name: Remove legacy CRI-O kubic yum repo
  yum_repository:
    name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}"
    description: "CRI-O {{ crio_version }}"
    baseurl: "{{ crio_download_crio }}{{ crio_version }}/CentOS_7/"
    state: absent
  when: ansible_distribution in ["Amazon"]

- name: Disable modular repos for CRI-O
  ini_file:
    path: "/etc/yum.repos.d/{{ item.repo }}.repo"
    section: "{{ item.section }}"
    option: enabled
    value: 0
    mode: 0644
  become: true
  when: is_ostree
  loop:
    - repo: "fedora-updates-modular"
      section: "updates-modular"
    - repo: "fedora-modular"
      section: "fedora-modular"

# Disable any older module version if we enabled them before
- name: Disable CRI-O ex module
  command: "rpm-ostree ex module disable cri-o:{{ item }}"
  become: true
  when:
    - is_ostree
    - ostree_version is defined and ostree_version.stdout is version('2021.9', '>=')
  with_items:
    - 1.22
    - 1.23
    - 1.24

- name: cri-o | remove installed packages
  package:
    name: "{{ item }}"
    state: absent
  when: not is_ostree
  with_items:
    - cri-o
    - cri-o-runc
    - oci-systemd-hook
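How the role invokes this cleanup file is not shown in the truncated diff; a plausible wiring, assuming an import from the role's own tasks/main.yaml, would look like this:

- name: Clean up legacy kubic repositories  # hypothetical include; the actual call site is not shown above
  import_tasks: cleanup.yaml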
Some files were not shown because too many files have changed in this diff.