Compare commits
156 Commits
v2.19.0
...
pre-commit
Author | SHA1 | Date | |
---|---|---|---|
2187882ee0 | |||
4a994c82d1 | |||
b074b91ee9 | |||
b3f7be7135 | |||
d4082da97f | |||
faecc7420d | |||
7e862939db | |||
0d3bd69a17 | |||
2b97b661d8 | |||
24f12b024d | |||
f7d363dc96 | |||
47050003a0 | |||
4df6e35270 | |||
307f598bc8 | |||
eb10249a75 | |||
b4318e9967 | |||
c53561c9a0 | |||
f2f9f1d377 | |||
4487a374b1 | |||
06f8368ce6 | |||
5b976a8d80 | |||
e73803c72c | |||
b3876142d2 | |||
9f11946f8a | |||
9c28f61dbd | |||
09291bbdd2 | |||
7fa6314791 | |||
65d95d767a | |||
8306adb102 | |||
4b3db07cdb | |||
c24a3a3b15 | |||
aca6be3adf | |||
9617532561 | |||
ff5e487e32 | |||
9c51ac5157 | |||
07eab539a6 | |||
a608a048ad | |||
0cfa03fa8a | |||
6525461d97 | |||
f592fa1235 | |||
2e1863af78 | |||
2a282711df | |||
91073d7379 | |||
3ce5458f32 | |||
98c194735c | |||
626ea64f66 | |||
0d32c0d92b | |||
ce04fdde72 | |||
4ed3c85a88 | |||
14063b023c | |||
3d32f0e953 | |||
d821bed2ea | |||
058e05df41 | |||
a7ba7cdcd5 | |||
c01656b1e3 | |||
5071529a74 | |||
6d543b830a | |||
e6154998fd | |||
01c6239043 | |||
4607ac2e93 | |||
9ca5632582 | |||
51195212b4 | |||
7414409aa0 | |||
adfd77f11d | |||
f3ea8cf45e | |||
3bb9542606 | |||
1d0b3829ed | |||
a5d7178bf8 | |||
cbef8ea407 | |||
2ff4ae1f08 | |||
edf7f53f76 | |||
f58816c33c | |||
1562a9c2ec | |||
6cd243f14e | |||
4b03f6c20f | |||
d0a2ba37e8 | |||
e8ccbebd6f | |||
d4de9d096f | |||
e1f06dd406 | |||
6f82cf12f5 | |||
ca8080a695 | |||
55d14090d0 | |||
da8498bb6f | |||
b33896844e | |||
ca212c08de | |||
784439dccf | |||
d818c1c6d9 | |||
b9384ad913 | |||
76b0cbcb4e | |||
6bf3306401 | |||
bf477c24d3 | |||
79f6cd774a | |||
c3c9a42502 | |||
4a92b7221a | |||
9d5d945bdb | |||
475ce05979 | |||
57d7029317 | |||
e4fe679916 | |||
123632f5ed | |||
56d83c931b | |||
a22ae6143a | |||
a1ec0571b2 | |||
2db39d4856 | |||
e7729daefc | |||
97b4d79ed5 | |||
890fad389d | |||
0c203ece2d | |||
9e7f89d2a2 | |||
24c8ba832a | |||
c2700266b0 | |||
2cd8c51a07 | |||
589823bdc1 | |||
5dc8be9aa2 | |||
fad296616c | |||
ec01b40e85 | |||
2de5c4821c | |||
9efe145688 | |||
51bc64fb35 | |||
6380483e8b | |||
ae1dcb031f | |||
9535a41187 | |||
47495c336b | |||
d69d4a8303 | |||
ab4d590547 | |||
85271fc2e5 | |||
f6159c5677 | |||
668b9b026c | |||
77de7cb785 | |||
e5d6c042a9 | |||
3ae397019c | |||
7d3e59cf2e | |||
4eb83bb7f6 | |||
1429ba9a07 | |||
889454f2bc | |||
2fba94c5e5 | |||
4726a110fc | |||
6b43d6aff2 | |||
024a3ee551 | |||
cd7381d8de | |||
f53764f949 | |||
57c3aa4560 | |||
bb530da5c2 | |||
cc6cbfbe71 | |||
6f556f5451 | |||
9074bd297b | |||
8030e6f76c | |||
27bd7fd737 | |||
77f436fa39 | |||
814760ba25 | |||
14c0f368b6 | |||
0761659a43 | |||
a4f752fb02 | |||
b2346cdaec | |||
01ca7293f5 | |||
4dfce51ded | |||
f82ed24c03 |
5
.gitignore
vendored
5
.gitignore
vendored
@ -3,7 +3,10 @@
|
||||
**/vagrant_ansible_inventory
|
||||
*.iml
|
||||
temp
|
||||
contrib/offline/offline-files
|
||||
contrib/offline/offline-files.tar.gz
|
||||
.idea
|
||||
.vscode
|
||||
.tox
|
||||
.cache
|
||||
*.bak
|
||||
@ -11,6 +14,7 @@ temp
|
||||
*.tfstate.backup
|
||||
.terraform/
|
||||
contrib/terraform/aws/credentials.tfvars
|
||||
.terraform.lock.hcl
|
||||
/ssh-bastion.conf
|
||||
**/*.sw[pon]
|
||||
*~
|
||||
@ -108,3 +112,4 @@ roles/**/molecule/**/__pycache__/
|
||||
|
||||
# Temp location used by our scripts
|
||||
scripts/tmp/
|
||||
tmp.md
|
||||
|
@ -8,7 +8,7 @@ stages:
|
||||
- deploy-special
|
||||
|
||||
variables:
|
||||
KUBESPRAY_VERSION: v2.18.1
|
||||
KUBESPRAY_VERSION: v2.19.0
|
||||
FAILFASTCI_NAMESPACE: 'kargo-ci'
|
||||
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
|
||||
ANSIBLE_FORCE_COLOR: "true"
|
||||
@ -34,7 +34,7 @@ variables:
|
||||
RECOVER_CONTROL_PLANE_TEST: "false"
|
||||
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
|
||||
TERRAFORM_VERSION: 1.0.8
|
||||
ANSIBLE_MAJOR_VERSION: "2.10"
|
||||
ANSIBLE_MAJOR_VERSION: "2.11"
|
||||
|
||||
before_script:
|
||||
- ./tests/scripts/rebase.sh
|
||||
|
@ -68,6 +68,13 @@ markdownlint:
|
||||
script:
|
||||
- markdownlint $(find . -name '*.md' | grep -vF './.git') --ignore docs/_sidebar.md --ignore contrib/dind/README.md
|
||||
|
||||
check-readme-versions:
|
||||
stage: unit-tests
|
||||
tags: [light]
|
||||
image: python:3
|
||||
script:
|
||||
- tests/scripts/check_readme_versions.sh
|
||||
|
||||
ci-matrix:
|
||||
stage: unit-tests
|
||||
tags: [light]
|
||||
|
@ -44,7 +44,7 @@ molecule_no_container_engines:
|
||||
molecule_docker:
|
||||
extends: .molecule
|
||||
script:
|
||||
- ./tests/scripts/molecule_run.sh -i container-engine/docker
|
||||
- ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
|
||||
when: on_success
|
||||
|
||||
molecule_containerd:
|
||||
@ -60,13 +60,6 @@ molecule_cri-o:
|
||||
- ./tests/scripts/molecule_run.sh -i container-engine/cri-o
|
||||
when: on_success
|
||||
|
||||
molecule_cri-dockerd:
|
||||
extends: .molecule
|
||||
stage: deploy-part2
|
||||
script:
|
||||
- ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
|
||||
when: on_success
|
||||
|
||||
# Stage 3 container engines don't get as much attention so allow them to fail
|
||||
molecule_kata:
|
||||
extends: .molecule
|
||||
|
@ -31,23 +31,6 @@ packet_ubuntu20-calico-aio:
|
||||
variables:
|
||||
RESET_CHECK: "true"
|
||||
|
||||
# Exericse ansible variants during the nightly jobs
|
||||
packet_ubuntu20-calico-aio-ansible-2_9:
|
||||
stage: deploy-part1
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
variables:
|
||||
ANSIBLE_MAJOR_VERSION: "2.9"
|
||||
RESET_CHECK: "true"
|
||||
|
||||
packet_ubuntu20-calico-aio-ansible-2_10:
|
||||
stage: deploy-part1
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
variables:
|
||||
ANSIBLE_MAJOR_VERSION: "2.10"
|
||||
RESET_CHECK: "true"
|
||||
|
||||
packet_ubuntu20-calico-aio-ansible-2_11:
|
||||
stage: deploy-part1
|
||||
extends: .packet_periodic
|
||||
@ -73,6 +56,16 @@ packet_ubuntu18-calico-aio:
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_ubuntu22-aio-docker:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_ubuntu22-calico-aio:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_centos7-flannel-addons-ha:
|
||||
extends: .packet_pr
|
||||
stage: deploy-part2
|
||||
@ -153,12 +146,17 @@ packet_almalinux8-calico:
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_rockylinux8-calico:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_almalinux8-docker:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_fedora34-docker-weave:
|
||||
packet_fedora36-docker-weave:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
@ -216,19 +214,19 @@ packet_centos7-multus-calico:
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_oracle7-canal-ha:
|
||||
packet_centos7-canal-ha:
|
||||
stage: deploy-part2
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_fedora35-docker-calico:
|
||||
packet_fedora36-docker-calico:
|
||||
stage: deploy-part2
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
variables:
|
||||
RESET_CHECK: "true"
|
||||
|
||||
packet_fedora34-calico-selinux:
|
||||
packet_fedora35-calico-selinux:
|
||||
stage: deploy-part2
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
@ -248,7 +246,7 @@ packet_almalinux8-calico-nodelocaldns-secondary:
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_fedora34-kube-ovn:
|
||||
packet_fedora36-kube-ovn:
|
||||
stage: deploy-part2
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
@ -256,7 +254,7 @@ packet_fedora34-kube-ovn:
|
||||
# ### PR JOBS PART3
|
||||
# Long jobs (45min+)
|
||||
|
||||
packet_centos7-docker-weave-upgrade-ha:
|
||||
packet_centos7-weave-upgrade-ha:
|
||||
stage: deploy-part3
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
@ -276,7 +274,7 @@ packet_ubuntu20-calico-ha-wireguard:
|
||||
extends: .packet_pr
|
||||
when: manual
|
||||
|
||||
packet_debian10-calico-upgrade:
|
||||
packet_debian11-calico-upgrade:
|
||||
stage: deploy-part3
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
@ -291,7 +289,12 @@ packet_almalinux8-calico-remove-node:
|
||||
REMOVE_NODE_CHECK: "true"
|
||||
REMOVE_NODE_NAME: "instance-3"
|
||||
|
||||
packet_debian10-calico-upgrade-once:
|
||||
packet_ubuntu20-calico-etcd-kubeadm:
|
||||
stage: deploy-part3
|
||||
extends: .packet_pr
|
||||
when: on_success
|
||||
|
||||
packet_debian11-calico-upgrade-once:
|
||||
stage: deploy-part3
|
||||
extends: .packet_periodic
|
||||
when: on_success
|
||||
|
@ -11,6 +11,6 @@ shellcheck:
|
||||
- cp shellcheck-"${SHELLCHECK_VERSION}"/shellcheck /usr/bin/
|
||||
- shellcheck --version
|
||||
script:
|
||||
# Run shellcheck for all *.sh except contrib/
|
||||
- find . -name '*.sh' -not -path './contrib/*' -not -path './.git/*' | xargs shellcheck --severity error
|
||||
# Run shellcheck for all *.sh
|
||||
- find . -name '*.sh' -not -path './.git/*' | xargs shellcheck --severity error
|
||||
except: ['triggers', 'master']
|
||||
|
@ -1,2 +1,3 @@
|
||||
---
|
||||
MD013: false
|
||||
MD029: false
|
||||
|
48
.pre-commit-config.yaml
Normal file
48
.pre-commit-config.yaml
Normal file
@ -0,0 +1,48 @@
|
||||
---
|
||||
repos:
|
||||
- repo: https://github.com/adrienverge/yamllint.git
|
||||
rev: v1.27.1
|
||||
hooks:
|
||||
- id: yamllint
|
||||
args: [--strict]
|
||||
|
||||
- repo: https://github.com/markdownlint/markdownlint
|
||||
rev: v0.11.0
|
||||
hooks:
|
||||
- id: markdownlint
|
||||
args: [ -r, "~MD013,~MD029" ]
|
||||
exclude: "^.git"
|
||||
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: ansible-lint
|
||||
name: ansible-lint
|
||||
entry: ansible-lint -v
|
||||
language: python
|
||||
pass_filenames: false
|
||||
additional_dependencies:
|
||||
- .[community]
|
||||
|
||||
- id: ansible-syntax-check
|
||||
name: ansible-syntax-check
|
||||
entry: env ANSIBLE_INVENTORY=inventory/local-tests.cfg ANSIBLE_REMOTE_USER=root ANSIBLE_BECOME="true" ANSIBLE_BECOME_USER=root ANSIBLE_VERBOSITY="3" ansible-playbook --syntax-check
|
||||
language: python
|
||||
files: "^cluster.yml|^upgrade-cluster.yml|^reset.yml|^extra_playbooks/upgrade-only-k8s.yml"
|
||||
|
||||
- id: tox-inventory-builder
|
||||
name: tox-inventory-builder
|
||||
entry: bash -c "cd contrib/inventory_builder && tox"
|
||||
language: python
|
||||
pass_filenames: false
|
||||
|
||||
- id: check-readme-versions
|
||||
name: check-readme-versions
|
||||
entry: tests/scripts/check_readme_versions.sh
|
||||
language: script
|
||||
pass_filenames: false
|
||||
|
||||
- id: ci-matrix
|
||||
name: ci-matrix
|
||||
entry: tests/scripts/md-table/test.sh
|
||||
language: script
|
||||
pass_filenames: false
|
@ -16,7 +16,12 @@ pip install -r tests/requirements.txt
|
||||
|
||||
#### Linting
|
||||
|
||||
Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `ansible-lint`. It is a good idea to add call these tools as part of your pre-commit hook and avoid a lot of back end forth on fixing linting issues (<https://support.gitkraken.com/working-with-repositories/githooksexample/>).
|
||||
Kubespray uses [pre-commit](https://pre-commit.com) hook configuration to run several linters, please install this tool and use it to run validation tests before submitting a PR.
|
||||
|
||||
```ShellSession
|
||||
pre-commit install
|
||||
pre-commit run -a # To run pre-commit hook on all files in the repository, even if they were not modified
|
||||
```
|
||||
|
||||
#### Molecule
|
||||
|
||||
@ -33,7 +38,9 @@ Vagrant with VirtualBox or libvirt driver helps you to quickly spin test cluster
|
||||
1. Submit an issue describing your proposed change to the repo in question.
|
||||
2. The [repo owners](OWNERS) will respond to your issue promptly.
|
||||
3. Fork the desired repo, develop and test your code changes.
|
||||
4. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
|
||||
5. Submit a pull request.
|
||||
6. Work with the reviewers on their suggestions.
|
||||
7. Ensure to rebase to the HEAD of your target branch and squash un-necessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before final merger of your contribution.
|
||||
4. Install [pre-commit](https://pre-commit.com) and install it in your development repo).
|
||||
5. Addess any pre-commit validation failures.
|
||||
6. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
|
||||
7. Submit a pull request.
|
||||
8. Work with the reviewers on their suggestions.
|
||||
9. Ensure to rebase to the HEAD of your target branch and squash un-necessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before final merger of your contribution.
|
||||
|
@ -1,5 +1,5 @@
|
||||
# Use imutable image tags rather than mutable tags (like ubuntu:20.04)
|
||||
FROM ubuntu:focal-20220316
|
||||
FROM ubuntu:focal-20220531
|
||||
|
||||
ARG ARCH=amd64
|
||||
ARG TZ=Etc/UTC
|
||||
|
59
README.md
59
README.md
@ -57,10 +57,10 @@ A simple way to ensure you get all the correct version of Ansible is to use the
|
||||
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/) to get the inventory and ssh key into the container, like this:
|
||||
|
||||
```ShellSession
|
||||
docker pull quay.io/kubespray/kubespray:v2.18.1
|
||||
docker pull quay.io/kubespray/kubespray:v2.19.0
|
||||
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
|
||||
--mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
|
||||
quay.io/kubespray/kubespray:v2.18.1 bash
|
||||
quay.io/kubespray/kubespray:v2.19.0 bash
|
||||
# Inside the container you may now run the kubespray playbooks:
|
||||
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
|
||||
```
|
||||
@ -111,6 +111,7 @@ vagrant up
|
||||
- [Adding/replacing a node](docs/nodes.md)
|
||||
- [Upgrades basics](docs/upgrades.md)
|
||||
- [Air-Gap installation](docs/offline-environment.md)
|
||||
- [NTP](docs/ntp.md)
|
||||
- [Hardening](docs/hardening.md)
|
||||
- [Roadmap](docs/roadmap.md)
|
||||
|
||||
@ -118,14 +119,15 @@ vagrant up
|
||||
|
||||
- **Flatcar Container Linux by Kinvolk**
|
||||
- **Debian** Bullseye, Buster, Jessie, Stretch
|
||||
- **Ubuntu** 16.04, 18.04, 20.04
|
||||
- **CentOS/RHEL** 7, [8](docs/centos8.md)
|
||||
- **Fedora** 34, 35
|
||||
- **Ubuntu** 16.04, 18.04, 20.04, 22.04
|
||||
- **CentOS/RHEL** 7, [8](docs/centos.md#centos-8)
|
||||
- **Fedora** 35, 36
|
||||
- **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
|
||||
- **openSUSE** Leap 15.x/Tumbleweed
|
||||
- **Oracle Linux** 7, [8](docs/centos8.md)
|
||||
- **Alma Linux** [8](docs/centos8.md)
|
||||
- **Rocky Linux** [8](docs/centos8.md)
|
||||
- **Oracle Linux** 7, [8](docs/centos.md#centos-8)
|
||||
- **Alma Linux** [8](docs/centos.md#centos-8)
|
||||
- **Rocky Linux** [8](docs/centos.md#centos-8)
|
||||
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md))
|
||||
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md))
|
||||
|
||||
Note: Upstart/SysV init based OS types are not supported.
|
||||
@ -133,27 +135,40 @@ Note: Upstart/SysV init based OS types are not supported.
|
||||
## Supported Components
|
||||
|
||||
- Core
|
||||
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.23.7
|
||||
- [etcd](https://github.com/etcd-io/etcd) v3.5.3
|
||||
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.24.3
|
||||
- [etcd](https://github.com/etcd-io/etcd) v3.5.4
|
||||
- [docker](https://www.docker.com/) v20.10 (see note)
|
||||
- [containerd](https://containerd.io/) v1.6.4
|
||||
- [cri-o](http://cri-o.io/) v1.22 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
|
||||
- [containerd](https://containerd.io/) v1.6.6
|
||||
- [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
|
||||
- Network Plugin
|
||||
- [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1
|
||||
- [calico](https://github.com/projectcalico/calico) v3.22.3
|
||||
- [calico](https://github.com/projectcalico/calico) v3.23.3
|
||||
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
|
||||
- [cilium](https://github.com/cilium/cilium) v1.11.3
|
||||
- [flanneld](https://github.com/flannel-io/flannel) v0.17.0
|
||||
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.9.2
|
||||
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.4.0
|
||||
- [cilium](https://github.com/cilium/cilium) v1.11.7
|
||||
- [flannel](https://github.com/flannel-io/flannel) v0.18.1
|
||||
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.9.7
|
||||
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1
|
||||
- [multus](https://github.com/intel/multus-cni) v3.8
|
||||
- [weave](https://github.com/weaveworks/weave) v2.8.1
|
||||
- [kube-vip](https://github.com/kube-vip/kube-vip) v0.4.2
|
||||
- Application
|
||||
- [cert-manager](https://github.com/jetstack/cert-manager) v1.9.0
|
||||
- [coredns](https://github.com/coredns/coredns) v1.8.6
|
||||
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.3.0
|
||||
- [krew](https://github.com/kubernetes-sigs/krew) v0.4.3
|
||||
- [argocd](https://argoproj.github.io/) v2.4.7
|
||||
- [helm](https://helm.sh/) v3.9.2
|
||||
- [metallb](https://metallb.universe.tf/) v0.12.1
|
||||
- [registry](https://github.com/distribution/distribution) v2.8.1
|
||||
- Storage Plugin
|
||||
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
|
||||
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
|
||||
- [cert-manager](https://github.com/jetstack/cert-manager) v1.8.0
|
||||
- [coredns](https://github.com/coredns/coredns) v1.8.6
|
||||
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.2.1
|
||||
- [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
|
||||
- [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
|
||||
- [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0
|
||||
- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0
|
||||
- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.22
|
||||
- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.4.0
|
||||
|
||||
## Container Runtime Notes
|
||||
|
||||
@ -162,8 +177,8 @@ Note: Upstart/SysV init based OS types are not supported.
|
||||
|
||||
## Requirements
|
||||
|
||||
- **Minimum required version of Kubernetes is v1.21**
|
||||
- **Ansible v2.9.x, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
|
||||
- **Minimum required version of Kubernetes is v1.22**
|
||||
- **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
|
||||
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
|
||||
- The target servers are configured to allow **IPv4 forwarding**.
|
||||
- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
|
||||
|
24
RELEASE.md
24
RELEASE.md
@ -9,10 +9,10 @@ The Kubespray Project is released on an as-needed basis. The process is as follo
|
||||
5. Create the release note with [Kubernetes Release Notes Generator](https://github.com/kubernetes/release/blob/master/cmd/release-notes/README.md). See the following `Release note creation` section for the details.
|
||||
6. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
|
||||
7. An approver creates a release branch in the form `release-X.Y`
|
||||
8. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) docker images are built and tagged
|
||||
8. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) container images are built and tagged. See the following `Container image creation` section for the details.
|
||||
9. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
|
||||
10. The release issue is closed
|
||||
11. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
|
||||
11. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
|
||||
12. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
|
||||
|
||||
## Major/minor releases and milestones
|
||||
@ -61,3 +61,23 @@ release-notes --start-sha <The start commit-id> --end-sha <The end commit-id> --
|
||||
|
||||
If the release note file(/tmp/kubespray-release-note) contains "### Uncategorized" pull requests, those pull requests don't have a valid kind label(`kind/feature`, etc.).
|
||||
It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note)
|
||||
|
||||
## Container image creation
|
||||
|
||||
The container image `quay.io/kubespray/kubespray:vX.Y.Z` can be created from Dockerfile of the kubespray root directory:
|
||||
|
||||
```shell
|
||||
cd kubespray/
|
||||
nerdctl build -t quay.io/kubespray/kubespray:vX.Y.Z .
|
||||
nerdctl push quay.io/kubespray/kubespray:vX.Y.Z
|
||||
```
|
||||
|
||||
The container image `quay.io/kubespray/vagrant:vX.Y.Z` can be created from build.sh of test-infra/vagrant-docker/:
|
||||
|
||||
```shell
|
||||
cd kubespray/test-infra/vagrant-docker/
|
||||
./build vX.Y.Z
|
||||
```
|
||||
|
||||
Please note that the above operation requires the permission to push container images into quay.io/kubespray/.
|
||||
If you don't have the permission, please ask it on the #kubespray-dev channel.
|
||||
|
3
Vagrantfile
vendored
3
Vagrantfile
vendored
@ -28,8 +28,9 @@ SUPPORTED_OS = {
|
||||
"centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
|
||||
"almalinux8" => {box: "almalinux/8", user: "vagrant"},
|
||||
"almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
|
||||
"fedora34" => {box: "fedora/34-cloud-base", user: "vagrant"},
|
||||
"rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
|
||||
"fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
|
||||
"fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"},
|
||||
"opensuse" => {box: "opensuse/Leap-15.3.x86_64", user: "vagrant"},
|
||||
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
|
||||
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
|
||||
|
@ -1,6 +1,6 @@
|
||||
[ssh_connection]
|
||||
pipelining=True
|
||||
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
|
||||
ansible_ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
|
||||
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
|
||||
[defaults]
|
||||
# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .)
|
||||
@ -10,11 +10,11 @@ host_key_checking=False
|
||||
gathering = smart
|
||||
fact_caching = jsonfile
|
||||
fact_caching_connection = /tmp
|
||||
fact_caching_timeout = 7200
|
||||
fact_caching_timeout = 86400
|
||||
stdout_callback = default
|
||||
display_skipped_hosts = no
|
||||
library = ./library
|
||||
callback_whitelist = profile_tasks,ara_default
|
||||
callbacks_enabled = profile_tasks,ara_default
|
||||
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
|
||||
deprecation_warnings=False
|
||||
inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds, .gpg
|
||||
|
@ -3,32 +3,20 @@
|
||||
gather_facts: false
|
||||
become: no
|
||||
vars:
|
||||
minimal_ansible_version: 2.9.0
|
||||
minimal_ansible_version_2_10: 2.10.11
|
||||
minimal_ansible_version: 2.11.0
|
||||
maximal_ansible_version: 2.13.0
|
||||
ansible_connection: local
|
||||
tags: always
|
||||
tasks:
|
||||
- name: "Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}"
|
||||
assert:
|
||||
msg: "Ansible must be between {{ minimal_ansible_version }} and {{ maximal_ansible_version }}"
|
||||
msg: "Ansible must be between {{ minimal_ansible_version }} and {{ maximal_ansible_version }} exclusive"
|
||||
that:
|
||||
- ansible_version.string is version(minimal_ansible_version, ">=")
|
||||
- ansible_version.string is version(maximal_ansible_version, "<")
|
||||
tags:
|
||||
- check
|
||||
|
||||
- name: "Check Ansible version > {{ minimal_ansible_version_2_10 }} when using ansible 2.10"
|
||||
assert:
|
||||
msg: "When using Ansible 2.10, the minimum supported version is {{ minimal_ansible_version_2_10 }}"
|
||||
that:
|
||||
- ansible_version.string is version(minimal_ansible_version_2_10, ">=")
|
||||
- ansible_version.string is version(maximal_ansible_version, "<")
|
||||
when:
|
||||
- ansible_version.string is version('2.10.0', ">=")
|
||||
tags:
|
||||
- check
|
||||
|
||||
- name: "Check that python netaddr is installed"
|
||||
assert:
|
||||
msg: "Python netaddr is not present"
|
||||
|
@ -17,7 +17,7 @@ pass_or_fail() {
|
||||
test_distro() {
|
||||
local distro=${1:?};shift
|
||||
local extra="${*:-}"
|
||||
local prefix="$distro[${extra}]}"
|
||||
local prefix="${distro[${extra}]}"
|
||||
ansible-playbook -i hosts dind-cluster.yaml -e node_distro=$distro
|
||||
pass_or_fail "$prefix: dind-nodes" || return 1
|
||||
(cd ../..
|
||||
@ -71,15 +71,15 @@ for spec in ${SPECS}; do
|
||||
echo "Loading file=${spec} ..."
|
||||
. ${spec} || continue
|
||||
: ${DISTROS:?} || continue
|
||||
echo "DISTROS=${DISTROS[@]}"
|
||||
echo "DISTROS:" "${DISTROS[@]}"
|
||||
echo "EXTRAS->"
|
||||
printf " %s\n" "${EXTRAS[@]}"
|
||||
let n=1
|
||||
for distro in ${DISTROS[@]}; do
|
||||
for distro in "${DISTROS[@]}"; do
|
||||
for extra in "${EXTRAS[@]:-NULL}"; do
|
||||
# Magic value to let this for run once:
|
||||
[[ ${extra} == NULL ]] && unset extra
|
||||
docker rm -f ${NODES[@]}
|
||||
docker rm -f "${NODES[@]}"
|
||||
printf -v file_out "%s/%s-%02d.out" ${OUTPUT_DIR} ${spec} $((n++))
|
||||
{
|
||||
info "${distro}[${extra}] START: file_out=${file_out}"
|
||||
|
@ -14,12 +14,16 @@ This role performs basic installation and setup of Gluster, but it does not conf
|
||||
|
||||
Available variables are listed below, along with default values (see `defaults/main.yml`):
|
||||
|
||||
glusterfs_default_release: ""
|
||||
```yaml
|
||||
glusterfs_default_release: ""
|
||||
```
|
||||
|
||||
You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).
|
||||
|
||||
glusterfs_ppa_use: yes
|
||||
glusterfs_ppa_version: "3.5"
|
||||
```yaml
|
||||
glusterfs_ppa_use: yes
|
||||
glusterfs_ppa_version: "3.5"
|
||||
```
|
||||
|
||||
For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info.
|
||||
|
||||
@ -29,9 +33,11 @@ None.
|
||||
|
||||
## Example Playbook
|
||||
|
||||
```yaml
|
||||
- hosts: server
|
||||
roles:
|
||||
- geerlingguy.glusterfs
|
||||
```
|
||||
|
||||
For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/).
|
||||
|
||||
|
@ -45,3 +45,21 @@ temp
|
||||
|
||||
In some cases you may want to update some component version, you can declare version variables in ansible inventory file or group_vars,
|
||||
then run `./generate_list.sh -i [inventory_file]` to update file.list and images.list.
|
||||
|
||||
## manage-offline-files.sh
|
||||
|
||||
This script will download all files according to `temp/files.list` and run nginx container to provide offline file download.
|
||||
|
||||
Step(1) generate `files.list`
|
||||
|
||||
```shell
|
||||
./generate_list.sh
|
||||
```
|
||||
|
||||
Step(2) download files and run nginx container
|
||||
|
||||
```shell
|
||||
./manage-offline-files.sh
|
||||
```
|
||||
|
||||
when nginx container is running, it can be accessed through <http://127.0.0.1:8080/>.
|
||||
|
@ -15,7 +15,7 @@ function create_container_image_tar() {
|
||||
IMAGES=$(kubectl describe pods --all-namespaces | grep " Image:" | awk '{print $2}' | sort | uniq)
|
||||
# NOTE: etcd and pause cannot be seen as pods.
|
||||
# The pause image is used for --pod-infra-container-image option of kubelet.
|
||||
EXT_IMAGES=$(kubectl cluster-info dump | egrep "quay.io/coreos/etcd:|k8s.gcr.io/pause:" | sed s@\"@@g)
|
||||
EXT_IMAGES=$(kubectl cluster-info dump | egrep "quay.io/coreos/etcd:|registry.k8s.io/pause:" | sed s@\"@@g)
|
||||
IMAGES="${IMAGES} ${EXT_IMAGES}"
|
||||
|
||||
rm -f ${IMAGE_TAR_FILE}
|
||||
@ -46,12 +46,12 @@ function create_container_image_tar() {
|
||||
|
||||
# NOTE: Here removes the following repo parts from each image
|
||||
# so that these parts will be replaced with Kubespray.
|
||||
# - kube_image_repo: "k8s.gcr.io"
|
||||
# - kube_image_repo: "registry.k8s.io"
|
||||
# - gcr_image_repo: "gcr.io"
|
||||
# - docker_image_repo: "docker.io"
|
||||
# - quay_image_repo: "quay.io"
|
||||
FIRST_PART=$(echo ${image} | awk -F"/" '{print $1}')
|
||||
if [ "${FIRST_PART}" = "k8s.gcr.io" ] ||
|
||||
if [ "${FIRST_PART}" = "registry.k8s.io" ] ||
|
||||
[ "${FIRST_PART}" = "gcr.io" ] ||
|
||||
[ "${FIRST_PART}" = "docker.io" ] ||
|
||||
[ "${FIRST_PART}" = "quay.io" ] ||
|
||||
|
44
contrib/offline/manage-offline-files.sh
Executable file
44
contrib/offline/manage-offline-files.sh
Executable file
@ -0,0 +1,44 @@
|
||||
#!/bin/bash
|
||||
|
||||
CURRENT_DIR=$( dirname "$(readlink -f "$0")" )
|
||||
OFFLINE_FILES_DIR_NAME="offline-files"
|
||||
OFFLINE_FILES_DIR="${CURRENT_DIR}/${OFFLINE_FILES_DIR_NAME}"
|
||||
OFFLINE_FILES_ARCHIVE="${CURRENT_DIR}/offline-files.tar.gz"
|
||||
FILES_LIST=${FILES_LIST:-"${CURRENT_DIR}/temp/files.list"}
|
||||
NGINX_PORT=8080
|
||||
|
||||
# download files
|
||||
if [ ! -f "${FILES_LIST}" ]; then
|
||||
echo "${FILES_LIST} should exist, run ./generate_list.sh first."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm -rf "${OFFLINE_FILES_DIR}"
|
||||
rm "${OFFLINE_FILES_ARCHIVE}"
|
||||
mkdir "${OFFLINE_FILES_DIR}"
|
||||
|
||||
wget -x -P "${OFFLINE_FILES_DIR}" -i "${FILES_LIST}"
|
||||
tar -czvf "${OFFLINE_FILES_ARCHIVE}" "${OFFLINE_FILES_DIR_NAME}"
|
||||
|
||||
[ -n "$NO_HTTP_SERVER" ] && echo "skip to run nginx" && exit 0
|
||||
|
||||
# run nginx container server
|
||||
if command -v nerdctl 1>/dev/null 2>&1; then
|
||||
runtime="nerdctl"
|
||||
elif command -v podman 1>/dev/null 2>&1; then
|
||||
runtime="podman"
|
||||
elif command -v docker 1>/dev/null 2>&1; then
|
||||
runtime="docker"
|
||||
else
|
||||
echo "No supported container runtime found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
sudo "${runtime}" container inspect nginx >/dev/null 2>&1
|
||||
if [ $? -ne 0 ]; then
|
||||
sudo "${runtime}" run \
|
||||
--restart=always -d -p ${NGINX_PORT}:80 \
|
||||
--volume "${OFFLINE_FILES_DIR}:/usr/share/nginx/html/download" \
|
||||
--volume "$(pwd)"/nginx.conf:/etc/nginx/nginx.conf \
|
||||
--name nginx nginx:alpine
|
||||
fi
|
39
contrib/offline/nginx.conf
Normal file
39
contrib/offline/nginx.conf
Normal file
@ -0,0 +1,39 @@
|
||||
user nginx;
|
||||
worker_processes auto;
|
||||
error_log /var/log/nginx/error.log;
|
||||
pid /run/nginx.pid;
|
||||
include /usr/share/nginx/modules/*.conf;
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
http {
|
||||
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
||||
'$status $body_bytes_sent "$http_referer" '
|
||||
'"$http_user_agent" "$http_x_forwarded_for"';
|
||||
access_log /var/log/nginx/access.log main;
|
||||
sendfile on;
|
||||
tcp_nopush on;
|
||||
tcp_nodelay on;
|
||||
keepalive_timeout 65;
|
||||
types_hash_max_size 2048;
|
||||
default_type application/octet-stream;
|
||||
include /etc/nginx/conf.d/*.conf;
|
||||
server {
|
||||
listen 80 default_server;
|
||||
listen [::]:80 default_server;
|
||||
server_name _;
|
||||
include /etc/nginx/default.d/*.conf;
|
||||
location / {
|
||||
root /usr/share/nginx/html/download;
|
||||
autoindex on;
|
||||
autoindex_exact_size off;
|
||||
autoindex_localtime on;
|
||||
}
|
||||
error_page 404 /404.html;
|
||||
location = /40x.html {
|
||||
}
|
||||
error_page 500 502 503 504 /50x.html;
|
||||
location = /50x.html {
|
||||
}
|
||||
}
|
||||
}
|
@ -36,8 +36,7 @@ terraform apply -var-file=credentials.tfvars
|
||||
```
|
||||
|
||||
- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
|
||||
- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using bastion host use generated ssh-bastion.conf.
|
||||
Ansible automatically detects bastion and changes ssh_args
|
||||
- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using bastion host use generated `ssh-bastion.conf`. Ansible automatically detects bastion and changes `ssh_args`
|
||||
|
||||
```commandline
|
||||
ssh -F ./ssh-bastion.conf user@$ip
|
||||
|
@ -31,9 +31,7 @@ The setup looks like following
|
||||
|
||||
## Requirements
|
||||
|
||||
* Terraform 0.13.0 or newer
|
||||
|
||||
*0.12 also works if you modify the provider block to include version and remove all `versions.tf` files*
|
||||
* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include version and remove all `versions.tf` files)
|
||||
|
||||
## Quickstart
|
||||
|
||||
|
@ -212,7 +212,7 @@ def metal_device(resource, tfvars=None):
|
||||
'project_id': raw_attrs['project_id'],
|
||||
'state': raw_attrs['state'],
|
||||
# ansible
|
||||
'ansible_ssh_host': raw_attrs['network.0.address'],
|
||||
'ansible_host': raw_attrs['network.0.address'],
|
||||
'ansible_ssh_user': 'root', # Use root by default in metal
|
||||
# generic
|
||||
'ipv4_address': raw_attrs['network.0.address'],
|
||||
@ -292,16 +292,16 @@ def openstack_host(resource, module_name):
|
||||
try:
|
||||
if 'metadata.prefer_ipv6' in raw_attrs and raw_attrs['metadata.prefer_ipv6'] == "1":
|
||||
attrs.update({
|
||||
'ansible_ssh_host': re.sub("[\[\]]", "", raw_attrs['access_ip_v6']),
|
||||
'ansible_host': re.sub("[\[\]]", "", raw_attrs['access_ip_v6']),
|
||||
'publicly_routable': True,
|
||||
})
|
||||
else:
|
||||
attrs.update({
|
||||
'ansible_ssh_host': raw_attrs['access_ip_v4'],
|
||||
'ansible_host': raw_attrs['access_ip_v4'],
|
||||
'publicly_routable': True,
|
||||
})
|
||||
except (KeyError, ValueError):
|
||||
attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
|
||||
attrs.update({'ansible_host': '', 'publicly_routable': False})
|
||||
|
||||
# Handling of floating IPs has changed: https://github.com/terraform-providers/terraform-provider-openstack/blob/master/CHANGELOG.md#010-june-21-2017
|
||||
|
||||
@ -349,7 +349,7 @@ def iter_host_ips(hosts, ips):
|
||||
'access_ip_v4': ip,
|
||||
'access_ip': ip,
|
||||
'public_ipv4': ip,
|
||||
'ansible_ssh_host': ip,
|
||||
'ansible_host': ip,
|
||||
})
|
||||
|
||||
if 'use_access_ip' in host[1]['metadata'] and host[1]['metadata']['use_access_ip'] == "0":
|
||||
@ -389,7 +389,7 @@ def query_list(hosts):
|
||||
def query_hostfile(hosts):
|
||||
out = ['## begin hosts generated by terraform.py ##']
|
||||
out.extend(
|
||||
'{}\t{}'.format(attrs['ansible_ssh_host'].ljust(16), name)
|
||||
'{}\t{}'.format(attrs['ansible_host'].ljust(16), name)
|
||||
for name, attrs, _ in hosts
|
||||
)
|
||||
|
||||
|
@ -112,12 +112,26 @@ terraform destroy --var-file cluster-settings.tfvars \
|
||||
* `size`: The size of the additional disk in GB
|
||||
* `tier`: The tier of disk to use (`maxiops` is the only one you can choose atm)
|
||||
* `firewall_enabled`: Enable firewall rules
|
||||
* `firewall_default_deny_in`: Set the firewall to deny inbound traffic by default. Automatically adds UpCloud DNS server and NTP port allowlisting.
|
||||
* `firewall_default_deny_out`: Set the firewall to deny outbound traffic by default.
|
||||
* `master_allowed_remote_ips`: List of IP ranges that should be allowed to access API of masters
|
||||
* `start_address`: Start of address range to allow
|
||||
* `end_address`: End of address range to allow
|
||||
* `k8s_allowed_remote_ips`: List of IP ranges that should be allowed SSH access to all nodes
|
||||
* `start_address`: Start of address range to allow
|
||||
* `end_address`: End of address range to allow
|
||||
* `master_allowed_ports`: List of port ranges that should be allowed to access the masters
|
||||
* `protocol`: Protocol *(tcp|udp|icmp)*
|
||||
* `port_range_min`: Start of port range to allow
|
||||
* `port_range_max`: End of port range to allow
|
||||
* `start_address`: Start of address range to allow
|
||||
* `end_address`: End of address range to allow
|
||||
* `worker_allowed_ports`: List of port ranges that should be allowed to access the workers
|
||||
* `protocol`: Protocol *(tcp|udp|icmp)*
|
||||
* `port_range_min`: Start of port range to allow
|
||||
* `port_range_max`: End of port range to allow
|
||||
* `start_address`: Start of address range to allow
|
||||
* `end_address`: End of address range to allow
|
||||
* `loadbalancer_enabled`: Enable managed load balancer
|
||||
* `loadbalancer_plan`: Plan to use for load balancer *(development|production-small)*
|
||||
* `loadbalancers`: Ports to load balance and which machines to forward to. Key of this object will be used as the name of the load balancer frontends/backends
|
||||
|
@ -96,6 +96,8 @@ machines = {
|
||||
}
|
||||
|
||||
firewall_enabled = false
|
||||
firewall_default_deny_in = false
|
||||
firewall_default_deny_out = false
|
||||
|
||||
master_allowed_remote_ips = [
|
||||
{
|
||||
@ -111,6 +113,9 @@ k8s_allowed_remote_ips = [
|
||||
}
|
||||
]
|
||||
|
||||
master_allowed_ports = []
|
||||
worker_allowed_ports = []
|
||||
|
||||
loadbalancer_enabled = false
|
||||
loadbalancer_plan = "development"
|
||||
loadbalancers = {
|
||||
|
@ -24,8 +24,12 @@ module "kubernetes" {
|
||||
ssh_public_keys = var.ssh_public_keys
|
||||
|
||||
firewall_enabled = var.firewall_enabled
|
||||
firewall_default_deny_in = var.firewall_default_deny_in
|
||||
firewall_default_deny_out = var.firewall_default_deny_out
|
||||
master_allowed_remote_ips = var.master_allowed_remote_ips
|
||||
k8s_allowed_remote_ips = var.k8s_allowed_remote_ips
|
||||
master_allowed_ports = var.master_allowed_ports
|
||||
worker_allowed_ports = var.worker_allowed_ports
|
||||
|
||||
loadbalancer_enabled = var.loadbalancer_enabled
|
||||
loadbalancer_plan = var.loadbalancer_plan
|
||||
|
@ -228,6 +228,112 @@ resource "upcloud_firewall_rules" "master" {
|
||||
source_address_start = "0.0.0.0"
|
||||
}
|
||||
}
|
||||
|
||||
dynamic firewall_rule {
|
||||
for_each = var.master_allowed_ports
|
||||
|
||||
content {
|
||||
action = "accept"
|
||||
comment = "Allow access on this port"
|
||||
destination_port_end = firewall_rule.value.port_range_max
|
||||
destination_port_start = firewall_rule.value.port_range_min
|
||||
direction = "in"
|
||||
family = "IPv4"
|
||||
protocol = firewall_rule.value.protocol
|
||||
source_address_end = firewall_rule.value.end_address
|
||||
source_address_start = firewall_rule.value.start_address
|
||||
}
|
||||
}
|
||||
|
||||
dynamic firewall_rule {
|
||||
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
|
||||
|
||||
content {
|
||||
action = "accept"
|
||||
comment = "UpCloud DNS"
|
||||
destination_port_end = "53"
|
||||
destination_port_start = "53"
|
||||
direction = "in"
|
||||
family = "IPv4"
|
||||
protocol = firewall_rule.value
|
||||
source_address_end = "94.237.40.9"
|
||||
source_address_start = "94.237.40.9"
|
||||
}
|
||||
}
|
||||
|
||||
dynamic firewall_rule {
|
||||
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
|
||||
|
||||
content {
|
||||
action = "accept"
|
||||
comment = "UpCloud DNS"
|
||||
destination_port_end = "53"
|
||||
destination_port_start = "53"
|
||||
direction = "in"
|
||||
family = "IPv4"
|
||||
protocol = firewall_rule.value
|
||||
source_address_end = "94.237.127.9"
|
||||
source_address_start = "94.237.127.9"
|
||||
}
|
||||
}
|
||||
|
||||
dynamic firewall_rule {
|
||||
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
|
||||
|
||||
content {
|
||||
action = "accept"
|
||||
comment = "UpCloud DNS"
|
||||
destination_port_end = "53"
|
||||
destination_port_start = "53"
|
||||
direction = "in"
|
||||
family = "IPv6"
|
||||
protocol = firewall_rule.value
|
||||
source_address_end = "2a04:3540:53::1"
|
||||
source_address_start = "2a04:3540:53::1"
|
||||
}
|
||||
}
|
||||
|
||||
dynamic firewall_rule {
|
||||
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
|
||||
|
||||
content {
|
||||
action = "accept"
|
||||
comment = "UpCloud DNS"
|
||||
destination_port_end = "53"
|
||||
destination_port_start = "53"
|
||||
direction = "in"
|
||||
family = "IPv6"
|
||||
protocol = firewall_rule.value
|
||||
source_address_end = "2a04:3544:53::1"
|
||||
source_address_start = "2a04:3544:53::1"
|
||||
}
|
||||
}
|
||||
|
||||
dynamic firewall_rule {
|
||||
for_each = var.firewall_default_deny_in ? ["udp"] : []
|
||||
|
||||
content {
|
||||
action = "accept"
|
||||
comment = "NTP Port"
|
||||
destination_port_end = "123"
|
||||
destination_port_start = "123"
|
||||
direction = "in"
|
||||
family = "IPv4"
|
||||
protocol = firewall_rule.value
|
||||
source_address_end = "255.255.255.255"
|
||||
source_address_start = "0.0.0.0"
|
||||
}
|
||||
}
|
||||
|
||||
firewall_rule {
|
||||
action = var.firewall_default_deny_in ? "drop" : "accept"
|
||||
direction = "in"
|
||||
}
|
||||
|
||||
firewall_rule {
|
||||
action = var.firewall_default_deny_out ? "drop" : "accept"
|
||||
direction = "out"
|
||||
}
|
||||
}
|
||||
|
||||
resource "upcloud_firewall_rules" "k8s" {
|
||||
@ -265,6 +371,112 @@ resource "upcloud_firewall_rules" "k8s" {
|
||||
source_address_start = "0.0.0.0"
|
||||
}
|
||||
}
|
||||
|
||||
dynamic firewall_rule {
|
||||
for_each = var.worker_allowed_ports
|
||||
|
||||
content {
|
||||
action = "accept"
|
||||
comment = "Allow access on this port"
|
||||
destination_port_end = firewall_rule.value.port_range_max
|
||||
destination_port_start = firewall_rule.value.port_range_min
|
||||
direction = "in"
|
||||
family = "IPv4"
|
||||
protocol = firewall_rule.value.protocol
|
||||
source_address_end = firewall_rule.value.end_address
|
||||
source_address_start = firewall_rule.value.start_address
|
||||
}
|
||||
}
|
||||
|
||||
dynamic firewall_rule {
|
||||
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
|
||||
|
||||
content {
|
||||
action = "accept"
|
||||
comment = "UpCloud DNS"
|
||||
destination_port_end = "53"
|
||||
destination_port_start = "53"
|
||||
direction = "in"
|
||||
family = "IPv4"
|
||||
protocol = firewall_rule.value
|
||||
source_address_end = "94.237.40.9"
|
||||
source_address_start = "94.237.40.9"
|
||||
}
|
||||
}
|
||||
|
||||
dynamic firewall_rule {
|
||||
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
|
||||
|
||||
content {
|
||||
action = "accept"
|
||||
comment = "UpCloud DNS"
|
||||
destination_port_end = "53"
|
||||
destination_port_start = "53"
|
||||
direction = "in"
|
||||
family = "IPv4"
|
||||
protocol = firewall_rule.value
|
||||
source_address_end = "94.237.127.9"
|
||||
source_address_start = "94.237.127.9"
|
||||
}
|
||||
}
|
||||
|
||||
dynamic firewall_rule {
|
||||
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
|
||||
|
||||
content {
|
||||
action = "accept"
|
||||
comment = "UpCloud DNS"
|
||||
destination_port_end = "53"
|
||||
destination_port_start = "53"
|
||||
direction = "in"
|
||||
family = "IPv6"
|
||||
protocol = firewall_rule.value
|
||||
source_address_end = "2a04:3540:53::1"
|
||||
source_address_start = "2a04:3540:53::1"
|
||||
}
|
||||
}
|
||||
|
||||
dynamic firewall_rule {
|
||||
for_each = var.firewall_default_deny_in ? ["tcp", "udp"] : []
|
||||
|
||||
content {
|
||||
action = "accept"
|
||||
comment = "UpCloud DNS"
|
||||
destination_port_end = "53"
|
||||
destination_port_start = "53"
|
||||
direction = "in"
|
||||
family = "IPv6"
|
||||
protocol = firewall_rule.value
|
||||
source_address_end = "2a04:3544:53::1"
|
||||
source_address_start = "2a04:3544:53::1"
|
||||
}
|
||||
}
|
||||
|
||||
dynamic firewall_rule {
|
||||
for_each = var.firewall_default_deny_in ? ["udp"] : []
|
||||
|
||||
content {
|
||||
action = "accept"
|
||||
comment = "NTP Port"
|
||||
destination_port_end = "123"
|
||||
destination_port_start = "123"
|
||||
direction = "in"
|
||||
family = "IPv4"
|
||||
protocol = firewall_rule.value
|
||||
source_address_end = "255.255.255.255"
|
||||
source_address_start = "0.0.0.0"
|
||||
}
|
||||
}
|
||||
|
||||
firewall_rule {
|
||||
action = var.firewall_default_deny_in ? "drop" : "accept"
|
||||
direction = "in"
|
||||
}
|
||||
|
||||
firewall_rule {
|
||||
action = var.firewall_default_deny_out ? "drop" : "accept"
|
||||
direction = "out"
|
||||
}
|
||||
}
|
||||
|
||||
resource "upcloud_loadbalancer" "lb" {
|
||||
|
@ -49,6 +49,34 @@ variable "k8s_allowed_remote_ips" {
|
||||
}))
|
||||
}
|
||||
|
||||
variable "master_allowed_ports" {
|
||||
type = list(object({
|
||||
protocol = string
|
||||
port_range_min = number
|
||||
port_range_max = number
|
||||
start_address = string
|
||||
end_address = string
|
||||
}))
|
||||
}
|
||||
|
||||
variable "worker_allowed_ports" {
|
||||
type = list(object({
|
||||
protocol = string
|
||||
port_range_min = number
|
||||
port_range_max = number
|
||||
start_address = string
|
||||
end_address = string
|
||||
}))
|
||||
}
|
||||
|
||||
variable "firewall_default_deny_in" {
|
||||
type = bool
|
||||
}
|
||||
|
||||
variable "firewall_default_deny_out" {
|
||||
type = bool
|
||||
}
|
||||
|
||||
variable "loadbalancer_enabled" {
|
||||
type = bool
|
||||
}
|
||||
|
@ -3,7 +3,7 @@ terraform {
|
||||
required_providers {
|
||||
upcloud = {
|
||||
source = "UpCloudLtd/upcloud"
|
||||
version = "~>2.4.0"
|
||||
version = "~>2.5.0"
|
||||
}
|
||||
}
|
||||
required_version = ">= 0.13"
|
||||
|
@ -96,6 +96,9 @@ machines = {
|
||||
}
|
||||
|
||||
firewall_enabled = false
|
||||
firewall_default_deny_in = false
|
||||
firewall_default_deny_out = false
|
||||
|
||||
|
||||
master_allowed_remote_ips = [
|
||||
{
|
||||
@ -111,6 +114,9 @@ k8s_allowed_remote_ips = [
|
||||
}
|
||||
]
|
||||
|
||||
master_allowed_ports = []
|
||||
worker_allowed_ports = []
|
||||
|
||||
loadbalancer_enabled = false
|
||||
loadbalancer_plan = "development"
|
||||
loadbalancers = {
|
||||
|
@ -79,6 +79,38 @@ variable "k8s_allowed_remote_ips" {
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "master_allowed_ports" {
|
||||
description = "List of ports to allow on masters"
|
||||
type = list(object({
|
||||
protocol = string
|
||||
port_range_min = number
|
||||
port_range_max = number
|
||||
start_address = string
|
||||
end_address = string
|
||||
}))
|
||||
}
|
||||
|
||||
variable "worker_allowed_ports" {
|
||||
description = "List of ports to allow on workers"
|
||||
type = list(object({
|
||||
protocol = string
|
||||
port_range_min = number
|
||||
port_range_max = number
|
||||
start_address = string
|
||||
end_address = string
|
||||
}))
|
||||
}
|
||||
|
||||
variable "firewall_default_deny_in" {
|
||||
description = "Add firewall policies that deny all inbound traffic by default"
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "firewall_default_deny_out" {
|
||||
description = "Add firewall policies that deny all outbound traffic by default"
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "loadbalancer_enabled" {
|
||||
description = "Enable load balancer"
|
||||
default = false
|
||||
|
@ -3,7 +3,7 @@ terraform {
|
||||
required_providers {
|
||||
upcloud = {
|
||||
source = "UpCloudLtd/upcloud"
|
||||
version = "~>2.4.0"
|
||||
version = "~>2.5.0"
|
||||
}
|
||||
}
|
||||
required_version = ">= 0.13"
|
||||
|
@ -35,9 +35,7 @@ This setup assumes that the DHCP is disabled in the vSphere cluster and IP addre
|
||||
|
||||
## Requirements
|
||||
|
||||
* Terraform 0.13.0 or newer
|
||||
|
||||
*0.12 also works if you modify the provider block to include version and remove all `versions.tf` files*
|
||||
* Terraform 0.13.0 or newer (0.12 also works if you modify the provider block to include version and remove all `versions.tf` files)
|
||||
|
||||
## Quickstart
|
||||
|
||||
|
@ -35,6 +35,7 @@
|
||||
* [OpenSUSE](docs/opensuse.md)
|
||||
* [RedHat Enterprise Linux](docs/rhel.md)
|
||||
* [CentOS/OracleLinux/AlmaLinux/Rocky Linux](docs/centos.md)
|
||||
* [Kylin Linux Advanced Server V10](docs/kylinlinux.md)
|
||||
* [Amazon Linux 2](docs/amazonlinux.md)
|
||||
* CRI
|
||||
* [Containerd](docs/containerd.md)
|
||||
@ -50,6 +51,7 @@
|
||||
* [DNS Stack](docs/dns-stack.md)
|
||||
* [Kubernetes reliability](docs/kubernetes-reliability.md)
|
||||
* [Local Registry](docs/kubernetes-apps/registry.md)
|
||||
* [NTP](docs/ntp.md)
|
||||
* External Storage Provisioners
|
||||
* [RBD Provisioner](docs/kubernetes-apps/rbd_provisioner.md)
|
||||
* [CEPHFS Provisioner](docs/kubernetes-apps/cephfs_provisioner.md)
|
||||
|
@ -13,7 +13,7 @@ KUBESPRAYDIR=kubespray
|
||||
ANSIBLE_VERSION=2.12
|
||||
virtualenv --python=$(which python3) $VENVDIR
|
||||
source $VENVDIR/bin/activate
|
||||
cd $KUESPRAYDIR
|
||||
cd $KUBESPRAYDIR
|
||||
pip install -U -r requirements-$ANSIBLE_VERSION.txt
|
||||
test -f requirements-$ANSIBLE_VERSION.yml && \
|
||||
ansible-galaxy role install -r requirements-$ANSIBLE_VERSION.yml && \
|
||||
@ -26,8 +26,6 @@ Based on the table below and the available python version for your ansible host
|
||||
|
||||
| Ansible Version | Python Version |
|
||||
| --------------- | -------------- |
|
||||
| 2.9 | 2.7,3.5-3.8 |
|
||||
| 2.10 | 2.7,3.5-3.8 |
|
||||
| 2.11 | 2.7,3.5-3.9 |
|
||||
| 2.12 | 3.8-3.10 |
|
||||
|
||||
|
@ -57,19 +57,28 @@ The name of the network security group your instances are in, can be retrieved v
|
||||
These will have to be generated first:
|
||||
|
||||
- Create an Azure AD Application with:
|
||||
`az ad app create --display-name kubespray --identifier-uris http://kubespray --homepage http://kubespray.com --password CLIENT_SECRET`
|
||||
|
||||
```ShellSession
|
||||
az ad app create --display-name kubespray --identifier-uris http://kubespray --homepage http://kubespray.com --password CLIENT_SECRET
|
||||
```
|
||||
|
||||
Display name, identifier-uri, homepage and the password can be chosen
|
||||
|
||||
Note the AppId in the output.
|
||||
|
||||
- Create Service principal for the application with:
|
||||
`az ad sp create --id AppId`
|
||||
|
||||
```ShellSession
|
||||
az ad sp create --id AppId
|
||||
```
|
||||
|
||||
This is the AppId from the last command
|
||||
|
||||
- Create the role assignment with:
|
||||
`az role assignment create --role "Owner" --assignee http://kubespray --subscription SUBSCRIPTION_ID`
|
||||
|
||||
```ShellSession
|
||||
az role assignment create --role "Owner" --assignee http://kubespray --subscription SUBSCRIPTION_ID
|
||||
```
|
||||
|
||||
azure\_csi\_aad\_client\_id must be set to the AppId, azure\_csi\_aad\_client\_secret is your chosen secret.
|
||||
|
||||
|
@ -71,14 +71,27 @@ The name of the resource group that contains the route table. Defaults to `azur
|
||||
These will have to be generated first:
|
||||
|
||||
- Create an Azure AD Application with:
|
||||
`az ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET`
|
||||
|
||||
```ShellSession
|
||||
az ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET
|
||||
```
|
||||
|
||||
display name, identifier-uri, homepage and the password can be chosen
|
||||
Note the AppId in the output.
|
||||
|
||||
- Create Service principal for the application with:
|
||||
`az ad sp create --id AppId`
|
||||
|
||||
```ShellSession
|
||||
az ad sp create --id AppId
|
||||
```
|
||||
|
||||
This is the AppId from the last command
|
||||
|
||||
- Create the role assignment with:
|
||||
`az role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID`
|
||||
|
||||
```ShellSession
|
||||
az role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID
|
||||
```
|
||||
|
||||
azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.
|
||||
|
||||
|
@ -48,11 +48,13 @@ The `kubespray-defaults` role is expected to be run before this role.
|
||||
|
||||
Remember to disable fact gathering since Python might not be present on hosts.
|
||||
|
||||
- hosts: all
|
||||
```yaml
|
||||
- hosts: all
|
||||
gather_facts: false # not all hosts might be able to run modules yet
|
||||
roles:
|
||||
- kubespray-defaults
|
||||
- bootstrap-os
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
|
@ -124,8 +124,7 @@ You need to edit your inventory and add:
|
||||
* `calico_rr` group with nodes in it. `calico_rr` can be combined with
|
||||
`kube_node` and/or `kube_control_plane`. `calico_rr` group also must be a child
|
||||
group of `k8s_cluster` group.
|
||||
* `cluster_id` by route reflector node/group (see details
|
||||
[here](https://hub.docker.com/r/calico/routereflector/))
|
||||
* `cluster_id` by route reflector node/group (see details [here](https://hub.docker.com/r/calico/routereflector/))
|
||||
|
||||
Here's an example of Kubespray inventory with standalone route reflectors:
|
||||
|
||||
@ -315,6 +314,13 @@ calico_ipam_host_local: true
|
||||
|
||||
Refer to Project Calico section [Using host-local IPAM](https://docs.projectcalico.org/reference/cni-plugin/configuration#using-host-local-ipam) for further information.
|
||||
|
||||
### Optional : Disable CNI logging to disk
|
||||
|
||||
Calico CNI plugin logs to /var/log/calico/cni/cni.log and to stderr.
|
||||
stderr of CNI plugins can be found in the logs of container runtime.
|
||||
|
||||
You can disable Calico CNI logging to disk by setting `calico_cni_log_file_path: false`.
|
||||
|
||||
## eBPF Support
|
||||
|
||||
Calico supports eBPF for its data plane see [an introduction to the Calico eBPF Dataplane](https://www.projectcalico.org/introducing-the-calico-ebpf-dataplane/) for further information.
|
||||
|
@ -9,7 +9,7 @@ Kubespray supports multiple ansible versions but only the default (5.x) gets wid
|
||||
|
||||
CentOS 8 / Oracle Linux 8 / AlmaLinux 8 / Rocky Linux 8 ship only with iptables-nft (ie without iptables-legacy similar to RHEL8)
|
||||
The only tested configuration for now is using Calico CNI
|
||||
You need to add `calico_iptables_backend: "NFT"` or `calico_iptables_backend: "Auto"` to your configuration.
|
||||
You need to add `calico_iptables_backend: "NFT"` to your configuration.
|
||||
|
||||
If you have containers that are using iptables in the host network namespace (`hostNetwork=true`),
|
||||
you need to ensure they are using iptables-nft.
|
||||
|
21 docs/ci.md

@ -8,17 +8,18 @@ To generate this Matrix run `./tests/scripts/md-table/main.py`

|---| --- | --- | --- | --- | --- | --- | --- | --- |
almalinux8 | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: |
centos7 | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: |
debian10 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
fedora34 | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
fedora36 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: |
ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: |
ubuntu20 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |

## crio
@ -30,13 +31,14 @@ centos7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian11 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora34 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora36 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu22 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |

## docker
@ -44,14 +46,15 @@ ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |

|---| --- | --- | --- | --- | --- | --- | --- | --- |
almalinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora34 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora35 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora36 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
opensuse | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu22 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
@ -4,9 +4,11 @@ Debian Jessie installation Notes:

- Add

```GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"```

```ini
GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
```

to /etc/default/grub. Then update with
to `/etc/default/grub`. Then update with

```ShellSession
sudo update-grub

@ -16,7 +18,9 @@ Debian Jessie installation Notes:

- Add the [backports](https://backports.debian.org/Instructions/) which contain Systemd 2.30 and update Systemd.

```apt-get -t jessie-backports install systemd```

```ShellSession
apt-get -t jessie-backports install systemd
```

(Necessary because the default Systemd version (2.15) does not support the "Delegate" directive in service files)

@ -26,11 +30,12 @@ Debian Jessie installation Notes:

sudo add-apt-repository ppa:ansible/ansible
sudo apt-get update
sudo apt-get install ansible
```

- Install Jinja2 and Python-Netaddr

```sudo apt-get install python-jinja2=2.8-1~bpo8+1 python-netaddr```

```ShellSession
sudo apt-get install python-jinja2=2.8-1~bpo8+1 python-netaddr
```

Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment)
@ -8,13 +8,7 @@ Using the docker container manager:

container_manager: docker
```

Using `cri-dockerd` instead of `dockershim`:

```yaml
cri_dockerd_enabled: false
```

*Note:* The `cri_dockerd_enabled: true` setting will become the default in a future kubespray release once kubespray 1.24+ is supported and `dockershim` is removed. At that point, changing this option will be deprecated and silently ignored.
*Note:* `cri-dockerd` has replaced `dockershim` across supported kubernetes versions in kubespray 2.20.

Enabling the `overlay2` graph driver:
@ -54,7 +54,7 @@ Prepare ignition and serve via http (a.e. python -m http.server )

### create guest

```shell script
```ShellSession
machine_name=myfcos1
ignition_url=http://mywebserver/fcos.ign
@ -2,15 +2,19 @@

Google Cloud Platform can be used for creation of a Kubernetes Service Load Balancer.

This feature is delivered by adding parameters to kube-controller-manager and kubelet. You need to specify:
This feature is delivered by adding parameters to `kube-controller-manager` and `kubelet`. You need to specify:

```
--cloud-provider=gce
--cloud-config=/etc/kubernetes/cloud-config
```

To get it working in kubespray, you need to add a tag to the GCE instances, specify it in the kubespray group vars, and also set cloud_provider to gce. For example, in the file group_vars/all/gcp.yml:
To get it working in kubespray, you need to add a tag to the GCE instances, specify it in the kubespray group vars, and also set `cloud_provider` to `gce`. For example, in the file `group_vars/all/gcp.yml`:

```yaml
cloud_provider: gce
gce_node_tags: k8s-lb
```

When you set this up and create a SVC in Kubernetes with type=LoadBalancer, the cloud provider will create a public IP and set the firewall.
When you set this up and create a SVC in Kubernetes with `type=LoadBalancer`, the cloud provider will create a public IP and set the firewall.
Note: The cloud provider runs under the VM service account, so this account needs to have correct permissions to be able to create all GCP resources.
@ -36,12 +36,6 @@ The following diagram shows how traffic to the apiserver is directed.

![Image](figures/loadbalancer_localhost.png?raw=true)

Note: Kubernetes master nodes still use insecure localhost access because
there are bugs in Kubernetes <1.5.0 in using TLS auth on master role
services. This makes backends receiving unencrypted traffic and may be a
security issue when interconnecting different nodes, or maybe not, if those
belong to the isolated management network without external access.

A user may opt to use an external loadbalancer (LB) instead. An external LB
provides access for external clients, while the internal LB accepts client
connections only to the localhost.

@ -129,11 +123,6 @@ Kubespray has nothing to do with it, this is informational only.

As you can see, the masters' internal API endpoints are always
contacted via the local bind IP, which is `https://bip:sp`.

**Note** that for some cases, like healthchecks of applications deployed by
Kubespray, the masters' APIs are accessed via the insecure endpoint, which
consists of the local `kube_apiserver_insecure_bind_address` and
`kube_apiserver_insecure_port`.

## Optional configurations

### ETCD with a LB
@ -83,7 +83,12 @@ kubelet_event_record_qps: 1

kubelet_rotate_certificates: true
kubelet_streaming_connection_idle_timeout: "5m"
kubelet_make_iptables_util_chains: true
kubelet_feature_gates: ["RotateKubeletServerCertificate=true"]
kubelet_feature_gates: ["RotateKubeletServerCertificate=true","SeccompDefault=true"]
kubelet_seccomp_default: true

# additional configurations
kube_owner: root
kube_cert_group: root
```

Let's take a deep look at the resultant **kubernetes** configuration:
@ -6,7 +6,11 @@

* List of all forked repos could be retrieved from the github page of the original project.

2. Add **forked repo** as a submodule to the desired folder in your existing ansible repo (for example 3d/kubespray):

```git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray```

```ShellSession
git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray
```

Git will create a `.gitmodules` file in your existing ansible repo:

```ini

@ -16,10 +20,16 @@

```

3. Configure git to show submodule status:

```git config --global status.submoduleSummary true```

```ShellSession
git config --global status.submoduleSummary true
```

4. Add the *original* kubespray repo as upstream:

```cd kubespray && git remote add upstream https://github.com/kubernetes-sigs/kubespray.git```

```ShellSession
cd kubespray && git remote add upstream https://github.com/kubernetes-sigs/kubespray.git
```

5. Sync your master branch with upstream:

@ -31,28 +41,33 @@

```

6. Create a new branch which you will use in your working environment:

```git checkout -b work```

```ShellSession
git checkout -b work
```

***Never*** use the master branch of your repository for your commits.
7. Modify the path to library and roles in your ansible.cfg file (role naming should be unique, you may have to rename your existing roles if they have the same names as in the kubespray project),
if you had roles in your existing ansible project before, you can add the path to those separated with `:`:

8. ```ini
```ini
...
library = ./library/:3d/kubespray/library/
roles_path = ./roles/:3d/kubespray/roles/
...
```

9. Copy and modify configs from the kubespray `group_vars` folder to the corresponding `group_vars` folder in your existing project.
You could rename the *all.yml* config to something else, i.e. *kubespray.yml*, and create a corresponding group in your inventory file, which will include all hosts groups related to the kubernetes setup.
8. Copy and modify configs from the kubespray `group_vars` folder to the corresponding `group_vars` folder in your existing project.

10. Modify your ansible inventory file by adding a mapping of your existing groups (if any) to kubespray naming.
You could rename the *all.yml* config to something else, i.e. *kubespray.yml*, and create a corresponding group in your inventory file, which will include all hosts groups related to the kubernetes setup.

9. Modify your ansible inventory file by adding a mapping of your existing groups (if any) to kubespray naming.
For example:

```ini
...
#Kargo groups:
#Kubespray groups:
[kube_node:children]
kubenode

@ -71,19 +86,20 @@ You could rename *all.yml* config to something else, i.e. *kubespray.yml* and cr

kubernetes
```

* The last entry here is needed to apply the kubespray.yml config file, renamed from all.yml of the kubespray project.
* The last entry here is needed to apply the kubespray.yml config file, renamed from all.yml of the kubespray project.

11. Now you can include kubespray tasks in your existing playbooks by including the cluster.yml file:
10. Now you can include kubespray tasks in your existing playbooks by including the cluster.yml file:

```yml
- name: Include kubespray tasks
  include: 3d/kubespray/cluster.yml
- name: Import kubespray playbook
  ansible.builtin.import_playbook: 3d/kubespray/cluster.yml
```

Or you could copy separate tasks from cluster.yml into your ansible repository.

12. Commit changes to your ansible repo. Keep in mind that the submodule folder is just a link to the git commit hash of your forked repo.
When you update your "work" branch you need to commit changes to the ansible repo as well.
11. Commit changes to your ansible repo. Keep in mind that the submodule folder is just a link to the git commit hash of your forked repo.

When you update your "work" branch you need to commit changes to the ansible repo as well.
Other members of your team should use ```git submodule sync```, ```git submodule update --init``` to get the actual code from the submodule.

## Contributing
@ -95,8 +111,12 @@ If you made useful changes or fixed a bug in existent kubespray repo, use this f

2. Change working directory to the git submodule directory (3d/kubespray).

3. Setup the desired user.name and user.email for the submodule.
If kubespray is the only submodule in your repo you could use something like:
```git submodule foreach --recursive 'git config user.name "First Last" && git config user.email "your-email-address@used.for.cncf"'```

If kubespray is the only submodule in your repo you could use something like:

```ShellSession
git submodule foreach --recursive 'git config user.name "First Last" && git config user.email "your-email-address@used.for.cncf"'
```

4. Sync with upstream master:

@ -107,8 +127,12 @@ If kubespray is only one submodule in your repo you could use something like:

```

5. Create a new branch for the specific fixes that you want to contribute:
```git checkout -b fixes-name-date-index```
The branch name should be self-explanatory to you; adding a date and/or index will help you to track/delete your old PRs.

```ShellSession
git checkout -b fixes-name-date-index
```

The branch name should be self-explanatory to you; adding a date and/or index will help you to track/delete your old PRs.

6. Find the git hash of your commit in the "work" repo and apply it to the newly created "fix" repo:

@ -116,16 +140,49 @@ Branch name should be self explaining to you, adding date and/or index will help

git cherry-pick <COMMIT_HASH>
```

7. If you have several temporary-stage commits - squash them using [```git rebase -i```](https://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit)
Also you could use interactive rebase (```git rebase -i HEAD~10```) to delete commits which you don't want to contribute to the original repo.
7. If you have several temporary-stage commits - squash them using [git rebase -i](https://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit)

Also you could use interactive rebase

```ShellSession
git rebase -i HEAD~10
```

to delete commits which you don't want to contribute to the original repo.

8. When your changes are in place, you need to check the upstream repo one more time because it could have changed during your work.
Check that you're on the correct branch:
```git status```
And pull changes from upstream (if any):
```git pull --rebase upstream master```

9. Now push your changes to your **fork** repo with ```git push```. If your branch doesn't exist on github, git will propose you to use something like ```git push --set-upstream origin fixes-name-date-index```.
Check that you're on the correct branch:

10. Open your forked repo in a browser; on the main page you will see a proposition to create a pull request for your newly created branch. Check the proposed diff of your PR. If something is wrong you could safely delete the "fix" branch on github using ```git push origin --delete fixes-name-date-index```, ```git branch -D fixes-name-date-index``` and start the whole process from the beginning.
If everything is fine - add a description about your changes (what they do and why they're needed) and confirm pull request creation.

```ShellSession
git status
```

And pull changes from upstream (if any):

```ShellSession
git pull --rebase upstream master
```

9. Now push your changes to your **fork** repo with

```ShellSession
git push
```

If your branch doesn't exist on github, git will propose you to use something like

```ShellSession
git push --set-upstream origin fixes-name-date-index
```

10. Open your forked repo in a browser; on the main page you will see a proposition to create a pull request for your newly created branch. Check the proposed diff of your PR. If something is wrong you could safely delete the "fix" branch on github using

```ShellSession
git push origin --delete fixes-name-date-index
git branch -D fixes-name-date-index
```

and start the whole process from the beginning.

If everything is fine - add a description about your changes (what they do and why they're needed) and confirm pull request creation.
@ -29,8 +29,7 @@ use Kubernetes's `PersistentVolume` abstraction. The following template is
expanded by `salt` in the GCE cluster turnup, but can easily be adapted to
other situations:

<!-- BEGIN MUNGE: EXAMPLE registry-pv.yaml.in -->
``` yaml
```yaml
kind: PersistentVolume
apiVersion: v1
metadata:

@ -46,7 +45,6 @@ spec:
    fsType: "ext4"
{% endif %}
```
<!-- END MUNGE: EXAMPLE registry-pv.yaml.in -->

If, for example, you wanted to use NFS you would just need to change the
`gcePersistentDisk` block to `nfs`. See

@ -68,8 +66,7 @@ Now that the Kubernetes cluster knows that some storage exists, you can put a
claim on that storage. As with the `PersistentVolume` above, you can start
with the `salt` template:

<!-- BEGIN MUNGE: EXAMPLE registry-pvc.yaml.in -->
``` yaml
```yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:

@ -82,7 +79,6 @@ spec:
    requests:
      storage: {{ pillar['cluster_registry_disk_size'] }}
```
<!-- END MUNGE: EXAMPLE registry-pvc.yaml.in -->

This tells Kubernetes that you want to use storage, and the `PersistentVolume`
you created before will be bound to this claim (unless you have other

@ -93,8 +89,7 @@ gives you the right to use this storage until you release the claim.

Now we can run a Docker registry:

<!-- BEGIN MUNGE: EXAMPLE registry-rc.yaml -->
``` yaml
```yaml
apiVersion: v1
kind: ReplicationController
metadata:

@ -138,7 +133,6 @@ spec:
        persistentVolumeClaim:
          claimName: kube-registry-pvc
```
<!-- END MUNGE: EXAMPLE registry-rc.yaml -->

*Note:* if you have set multiple replicas, make sure your CSI driver has support for the `ReadWriteMany` accessMode.

@ -146,8 +140,7 @@ spec:

Now that we have a registry `Pod` running, we can expose it as a Service:

<!-- BEGIN MUNGE: EXAMPLE registry-svc.yaml -->
``` yaml
```yaml
apiVersion: v1
kind: Service
metadata:

@ -164,7 +157,6 @@ spec:
    port: 5000
    protocol: TCP
```
<!-- END MUNGE: EXAMPLE registry-svc.yaml -->

## Expose the registry on each node

@ -172,8 +164,7 @@ Now that we have a running `Service`, we need to expose it onto each Kubernetes
`Node` so that Docker will see it as `localhost`. We can load a `Pod` on every
node by creating the following daemonset.

<!-- BEGIN MUNGE: EXAMPLE ../../saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml -->
``` yaml
```yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:

@ -207,7 +198,6 @@ spec:
        containerPort: 80
        hostPort: 5000
```
<!-- END MUNGE: EXAMPLE ../../saltbase/salt/kube-registry-proxy/kube-registry-proxy.yaml -->

When modifying replication-controller, service and daemon-set definitions, take
care to ensure *unique* identifiers for the rc-svc couple and the daemon-set.

@ -219,7 +209,7 @@ This ensures that port 5000 on each node is directed to the registry `Service`.
You should be able to verify that it is running by hitting port 5000 with a web
browser and getting a 404 error:

``` console
```ShellSession
$ curl localhost:5000
404 page not found
```

@ -229,7 +219,7 @@ $ curl localhost:5000
To use an image hosted by this registry, simply say this in your `Pod`'s
`spec.containers[].image` field:

``` yaml
```yaml
image: localhost:5000/user/container
```

@ -241,7 +231,7 @@ building locally and want to push to your cluster.
You can use `kubectl` to set up a port-forward from your local node to a
running Pod:

``` console
```ShellSession
$ POD=$(kubectl get pods --namespace kube-system -l k8s-app=registry \
        -o template --template '{{range .items}}{{.metadata.name}} {{.status.phase}}{{"\n"}}{{end}}' \
        | grep Running | head -1 | cut -f1 -d' ')
11 docs/kylinlinux.md (new file)

@ -0,0 +1,11 @@

# Kylin Linux

Kylin Linux is supported with the docker and containerd runtimes.

**Note:** Kylin Linux is not currently covered in kubespray CI and
support for it is currently considered experimental.

At present, only `Kylin Linux Advanced Server V10 (Sword)` has been adapted, which can support deployment on the aarch64 and x86_64 platforms.

There are no special considerations for using Kylin Linux as the target OS
for Kubespray deployments.
41 docs/ntp.md (new file)

@ -0,0 +1,41 @@

# NTP synchronization

The Network Time Protocol (NTP) is a networking protocol for clock synchronization between computer systems. Time synchronization is important to Kubernetes and Etcd.

## Enable NTP

To start the ntpd (or chrony) service and enable it at system boot, there is a dedicated variable:

```yaml
ntp_enabled: true
```

The NTP service will be enabled and will sync time automatically.

## Customize the NTP configuration file

In an air-gapped environment, the nodes cannot reach an NTP server on the internet. In that case the nodes can use a customized NTP server by configuring the ntp file:

```yaml
ntp_enabled: true
ntp_manage_config: true
ntp_servers:
  - "0.your-ntp-server.org iburst"
  - "1.your-ntp-server.org iburst"
  - "2.your-ntp-server.org iburst"
  - "3.your-ntp-server.org iburst"
```

## Advanced configuration

Enabling `tinker panic` is useful when running NTP in a VM environment, to avoid clock drift on VMs. It only takes effect when `ntp_manage_config` is true.

```yaml
ntp_tinker_panic: true
```

Force time sync by NTP immediately after installation, which is useful on a newly installed system:

```yaml
ntp_force_sync_immediately: true
```
@ -252,11 +252,7 @@ Ansible will now execute the playbook, this can take up to 20 minutes.

We will leverage a kubeconfig file from one of the controller nodes to access
the cluster as administrator from our local workstation.

> In this simplified set-up, we did not include a load balancer that usually
sits on top of the
three controller nodes for a high available API server endpoint. In this
simplified tutorial we connect directly to one of the three
controllers.
> In this simplified set-up, we did not include a load balancer that usually sits on top of the three controller nodes for a high available API server endpoint. In this simplified tutorial we connect directly to one of the three controllers.

First, we need to edit the permission of the kubeconfig file on one of the
controller nodes:
@ -58,7 +58,7 @@ see [download documentation](/docs/downloads.md).

The following is an example of setting up and running kubespray using `vagrant`.
For repeated runs, you could save the script to a file in the root of the
kubespray and run it by executing 'source <name_of_the_file>.
kubespray and run it by executing `source <name_of_the_file>`.

```ShellSession
# use virtualenv to install all python requirements
@ -176,7 +176,7 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m

* *docker_options* - Commonly used to set
  ``--insecure-registry=myregistry.mydomain:5000``
* *docker_plugins* - This list can be used to define [Docker plugins](https://docs.docker.com/engine/extend/) to install.
* *containerd_default_runtime* - Sets the default Containerd runtime used by the Kubernetes CRI plugin.
* *containerd_default_runtime* - If defined, changes the default Containerd runtime used by the Kubernetes CRI plugin.
* *containerd_additional_runtimes* - Sets the additional Containerd runtimes used by the Kubernetes CRI plugin.
  [Default config](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/container-engine/containerd/defaults/main.yml) can be overridden in inventory vars.
* *http_proxy/https_proxy/no_proxy/no_proxy_exclude_workers/additional_no_proxy* - Proxy variables for deploying behind a
@ -120,3 +120,13 @@ kube_webhook_token_auth_url_skip_tls_verify: false
# kube_webhook_token_auth_url: https://...
## base64-encoded string of the webhook's CA certificate
# kube_webhook_token_auth_ca_data: "LS0t..."

## NTP Settings
# Start the ntpd or chrony service and enable it at system boot.
ntp_enabled: false
ntp_manage_config: false
ntp_servers:
  - "0.pool.ntp.org iburst"
  - "1.pool.ntp.org iburst"
  - "2.pool.ntp.org iburst"
  - "3.pool.ntp.org iburst"
@ -57,6 +57,3 @@ docker_rpm_keepcache: 1
## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
# docker_options: ""

## Use CRI-DockerD instead of dockershim
# cri_dockerd_enabled: false
@ -18,44 +18,48 @@
# quay_image_repo: "{{ registry_host }}"

## Kubernetes components
# kubeadm_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubeadm"
# kubectl_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubectl"
# kubelet_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubelet"
# kubeadm_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm"
# kubectl_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubectl"
# kubelet_download_url: "{{ files_repo }}/storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/kubelet"

## CNI Plugins
# cni_download_url: "{{ files_repo }}/kubernetes/cni/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
# cni_download_url: "{{ files_repo }}/github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"

## cri-tools
# crictl_download_url: "{{ files_repo }}/kubernetes/cri-tools/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
# crictl_download_url: "{{ files_repo }}/github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"

## [Optional] etcd: only if you **DON'T** use etcd_deployment=host
# etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
# etcd_download_url: "{{ files_repo }}/github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"

# [Optional] Calico: If using Calico network plugin
# calicoctl_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
# calicoctl_download_url: "{{ files_repo }}/github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
# calicoctl_alternate_download_url: "{{ files_repo }}/github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
# [Optional] Calico with kdd: If using Calico network plugin with kdd datastore
# calico_crds_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_version }}.tar.gz"
# calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz"

# [Optional] Flannel: If using Flannel network plugin
# flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}"

# [Optional] helm: only if you set helm_enabled: true
# helm_download_url: "{{ files_repo }}/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz"
# helm_download_url: "{{ files_repo }}/get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz"

# [Optional] crun: only if you set crun_enabled: true
# crun_download_url: "{{ files_repo }}/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}"
# crun_download_url: "{{ files_repo }}/github.com/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}"

# [Optional] kata: only if you set kata_containers_enabled: true
# kata_containers_download_url: "{{ files_repo }}/kata-containers/runtime/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz"
# kata_containers_download_url: "{{ files_repo }}/github.com/kata-containers/kata-containers/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz"

# [Optional] cri-dockerd: only if you set container_manager: docker
# cri_dockerd_download_url: "{{ files_repo }}/github.com/Mirantis/cri-dockerd/releases/download/v{{ cri_dockerd_version }}/cri-dockerd-{{ cri_dockerd_version }}.{{ image_arch }}.tgz"

# [Optional] cri-o: only if you set container_manager: crio
# crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable"
# crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/"

# [Optional] runc,containerd: only if you set container_runtime: containerd
# runc_download_url: "{{ files_repo }}/{{ runc_version }}/runc.{{ image_arch }}"
# containerd_download_url: "{{ files_repo }}/containerd/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz"
# nerdctl_download_url: "{{ files_repo }}/nerdctl/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
# runc_download_url: "{{ files_repo }}/github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.{{ image_arch }}"
# containerd_download_url: "{{ files_repo }}/github.com/containerd/containerd/releases/download/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz"
# nerdctl_download_url: "{{ files_repo }}/github.com/containerd/nerdctl/releases/download/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"

## CentOS/Redhat/AlmaLinux
### For EL7, base and extras repo must be available, for EL8, baseos and appstream
@ -14,16 +14,16 @@
## gcr.io/cloud-provider-vsphere/cpi/release/manager
# external_vsphere_cloud_controller_image_tag: "latest"
## gcr.io/cloud-provider-vsphere/csi/release/syncer
# vsphere_syncer_image_tag: "v2.4.0"
## k8s.gcr.io/sig-storage/csi-attacher
# vsphere_csi_attacher_image_tag: "v3.3.0"
# vsphere_syncer_image_tag: "v2.5.1"
## registry.k8s.io/sig-storage/csi-attacher
# vsphere_csi_attacher_image_tag: "v3.4.0"
## gcr.io/cloud-provider-vsphere/csi/release/driver
# vsphere_csi_controller: "v2.4.0"
## k8s.gcr.io/sig-storage/livenessprobe
# vsphere_csi_liveness_probe_image_tag: "v2.4.0"
## k8s.gcr.io/sig-storage/csi-provisioner
# vsphere_csi_provisioner_image_tag: "v3.0.0"
## k8s.gcr.io/sig-storage/csi-resizer
# vsphere_csi_controller: "v2.5.1"
## registry.k8s.io/sig-storage/livenessprobe
# vsphere_csi_liveness_probe_image_tag: "v2.6.0"
## registry.k8s.io/sig-storage/csi-provisioner
# vsphere_csi_provisioner_image_tag: "v3.1.0"
## registry.k8s.io/sig-storage/csi-resizer
## makes sense only for vSphere version >=7.0
# vsphere_csi_resizer_tag: "v1.3.0"
@ -27,7 +27,7 @@ local_path_provisioner_enabled: false
# local_path_provisioner_claim_root: /opt/local-path-provisioner/
# local_path_provisioner_debug: false
# local_path_provisioner_image_repo: "rancher/local-path-provisioner"
# local_path_provisioner_image_tag: "v0.0.21"
# local_path_provisioner_image_tag: "v0.0.22"
# local_path_provisioner_helper_image_repo: "busybox"
# local_path_provisioner_helper_image_tag: "latest"
@ -165,7 +165,7 @@ metallb_speaker_enabled: true
# metallb_ip_range:
#   - "10.5.0.50-10.5.0.99"
# metallb_pool_name: "loadbalanced"
# matallb_auto_assign: true
# metallb_auto_assign: true
# metallb_speaker_nodeselector:
#   kubernetes.io/os: "linux"
# metallb_controller_nodeselector:
@ -209,10 +209,10 @@ metallb_speaker_enabled: true

argocd_enabled: false
# argocd_version: v2.1.6
# argocd_version: v2.4.7
# argocd_namespace: argocd
# Default password:
#   - https://argoproj.github.io/argo-cd/getting_started/#4-login-using-the-cli
#   - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli
# ---
# The initial password is autogenerated to be the pod name of the Argo CD API server. This can be retrieved with the command:
# kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2
@ -17,7 +17,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"

kube_api_anonymous_auth: true

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.23.7
kube_version: v1.24.3

# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)

@ -25,6 +25,9 @@ local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5

# This is the user that owns the cluster installation.
kube_owner: kube

# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert

@ -116,9 +119,6 @@ kube_network_node_prefix_ipv6: 120
# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 6443  # (https)
# kube_apiserver_insecure_port: 8080  # (http)
# Set to 0 to disable insecure port - Requires RBAC in authorization_modes and kube_api_anonymous_auth: true
kube_apiserver_insecure_port: 0  # (disabled)

# Kube-proxy proxyMode configuration.
# Can be ipvs, iptables
@ -216,16 +216,9 @@ k8s_image_pull_policy: IfNotPresent
# audit log for kubernetes
kubernetes_audit: false

# dynamic kubelet configuration
# Note: Feature DynamicKubeletConfig is deprecated in 1.22 and will not move to GA.
# It is planned to be removed from Kubernetes in the version 1.23.
# Please use alternative ways to update kubelet configuration.
dynamic_kubelet_configuration: false

# define kubelet config dir for dynamic kubelet
# kubelet_config_dir:
default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
dynamic_kubelet_configuration_dir: "{{ kubelet_config_dir | default(default_kubelet_config_dir) }}"

# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
podsecuritypolicy_enabled: false
@ -252,9 +245,11 @@ podsecuritypolicy_enabled: false
## Uncomment to override default values
# system_memory_reserved: 512Mi
# system_cpu_reserved: 500m
# system_ephemeral_storage_reserved: 2Gi
## Reservation for master hosts
# system_master_memory_reserved: 256Mi
# system_master_cpu_reserved: 250m
# system_master_ephemeral_storage_reserved: 2Gi

## Eviction Thresholds to avoid system OOMs
# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds

@ -292,7 +287,7 @@ persistent_volumes_enabled: false
# nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2
# nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63
## NVIDIA GPU device plugin image.
# nvidia_gpu_device_plugin_container: "k8s.gcr.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"
# nvidia_gpu_device_plugin_container: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"

## Support tls min version, Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13.
# tls_min_version: ""
@ -19,7 +19,7 @@ calico_cni_name: k8s-pod-network
# calico_pool_name: "default-pool"

# add default ippool blockSize (defaults kube_network_node_prefix)
# calico_pool_blocksize: 24
calico_pool_blocksize: 26

# add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise)
# calico_pool_cidr: 1.2.3.4/5
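As a quick sanity check on the new default (my arithmetic, not stated in the diff): the blockSize is a CIDR prefix length for each node's per-node IPAM block, so a /26 block gives 2^(32-26) = 64 pod addresses per block, versus 256 for the old /24 value.

```yaml
calico_pool_blocksize: 26  # per-node IPAM block: /26 -> 2^(32-26) = 64 addresses
```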
@ -1,19 +1,86 @@
# see roles/network_plugin/cilium/defaults/main.yml
---
# cilium_version: "v1.11.7"

# cilium_version: "v1.11.3"
# cilium_identity_allocation_mode: kvstore # kvstore or crd
# Log-level
# cilium_debug: false

# For adding and mounting extra volumes to the cilium operator
# cilium_operator_extra_volumes: []
# cilium_operator_extra_volume_mounts: []
# cilium_mtu: ""
# cilium_enable_ipv4: true
# cilium_enable_ipv6: false

# Name of the cluster. Only relevant when building a mesh of clusters.
# cilium_cluster_name: default
# Cilium agent health port
# cilium_agent_health_port: "9879"

# Identity allocation mode selects how identities are shared between cilium
# nodes by setting how they are stored. The options are "crd" or "kvstore".
# - "crd" stores identities in kubernetes as CRDs (custom resource definition).
#   These can be queried with:
#     `kubectl get ciliumid`
# - "kvstore" stores identities in an etcd kvstore.
# - In order to support External Workloads, "crd" is required
#   - Ref: https://docs.cilium.io/en/stable/gettingstarted/external-workloads/#setting-up-support-for-external-workloads-beta
# - KVStore operations are only required when cilium-operator is running with any of the below options:
#   - --synchronize-k8s-services
#   - --synchronize-k8s-nodes
#   - --identity-allocation-mode=kvstore
#   - Ref: https://docs.cilium.io/en/stable/internals/cilium_operator/#kvstore-operations
# cilium_identity_allocation_mode: kvstore

# Etcd SSL dirs
# cilium_cert_dir: /etc/cilium/certs
# kube_etcd_cacert_file: ca.pem
# kube_etcd_cert_file: cert.pem
# kube_etcd_key_file: cert-key.pem

# Limits for apps
# cilium_memory_limit: 500M
# cilium_cpu_limit: 500m
# cilium_memory_requests: 64M
# cilium_cpu_requests: 100m

# Overlay Network Mode
# cilium_tunnel_mode: vxlan
# Optional features
# cilium_enable_prometheus: false
# Enable if you want to make use of hostPort mappings
# cilium_enable_portmap: false
# Monitor aggregation level (none/low/medium/maximum)
# cilium_monitor_aggregation: medium
# The monitor aggregation flags determine which TCP flags, upon the
# first observation, cause monitor notifications to be generated.
#
# Only effective when monitor aggregation is set to "medium" or higher.
# cilium_monitor_aggregation_flags: "all"
# Kube Proxy Replacement mode (strict/probe/partial)
# cilium_kube_proxy_replacement: probe

# If upgrading from Cilium < 1.5, you may want to override some of these options
# to prevent service disruptions. See also:
# http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
# cilium_preallocate_bpf_maps: false

# `cilium_tofqdns_enable_poller` is deprecated in 1.8, removed in 1.9
# cilium_tofqdns_enable_poller: false

# `cilium_enable_legacy_services` is deprecated in 1.6, removed in 1.9
# cilium_enable_legacy_services: false

# Unique ID of the cluster. Must be unique across all connected clusters and
# in the range of 1 and 255. Only relevant when building a mesh of clusters.
# This value is not defined by default
# cluster-id:
# cilium_cluster_id:

# Deploy cilium even if kube_network_plugin is not cilium.
# This enables deploying cilium alongside another CNI to replace kube-proxy.
# cilium_deploy_additionally: false

# Auto direct node routes can be used to advertise pod routes in your cluster
# without any tunneling (with `cilium_tunnel_mode` set to `disabled`).
# This works only if you have L2 connectivity between all your nodes.
# You will also have to specify the variable `cilium_native_routing_cidr` to
# make this work. Please refer to the cilium documentation for more
# information about this kind of setup.
# cilium_auto_direct_node_routes: false

# Allows to explicitly specify the IPv4 CIDR for native routing.
# When specified, Cilium assumes networking for this CIDR is preconfigured and
@ -30,7 +97,6 @@
# Allows to explicitly specify the IPv6 CIDR for native routing.
# cilium_native_routing_cidr_ipv6: ""

# Encryption
# Enable transparent network encryption.
# cilium_encryption_enabled: false

@ -40,8 +106,139 @@

# Enable encryption for pure node to node traffic.
# This option is only effective when `cilium_encryption_type` is set to `ipsec`.
# cilium_ipsec_node_encryption: "false"
# cilium_ipsec_node_encryption: false

# Enables the fallback to the user-space implementation.
# If your kernel or distribution does not support WireGuard, the Cilium agent can be configured to fall back on the user-space implementation.
# When this flag is enabled and Cilium detects that the kernel has no native support for WireGuard,
# it will fall back on the wireguard-go user-space implementation of WireGuard.
# This option is only effective when `cilium_encryption_type` is set to `wireguard`.
# cilium_wireguard_userspace_fallback: "false"
# cilium_wireguard_userspace_fallback: false

# IP Masquerade Agent
# https://docs.cilium.io/en/stable/concepts/networking/masquerading/
# By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded
# cilium_ip_masq_agent_enable: false
### A packet sent from a pod to a destination which belongs to any CIDR from the nonMasqueradeCIDRs is not going to be masqueraded
# cilium_non_masquerade_cidrs:
#   - 10.0.0.0/8
#   - 172.16.0.0/12
#   - 192.168.0.0/16
#   - 100.64.0.0/10
#   - 192.0.0.0/24
#   - 192.0.2.0/24
#   - 192.88.99.0/24
#   - 198.18.0.0/15
#   - 198.51.100.0/24
#   - 203.0.113.0/24
#   - 240.0.0.0/4
### Indicates whether to masquerade traffic to the link local prefix.
### If masqLinkLocal is not set or set to false, then 169.254.0.0/16 is appended to the non-masquerade CIDRs list.
# cilium_masq_link_local: false
### A time interval at which the agent attempts to reload config from disk
# cilium_ip_masq_resync_interval: 60s

# Hubble
### Enable Hubble without install
# cilium_enable_hubble: false
### Enable Hubble Metrics
# cilium_enable_hubble_metrics: false
### if cilium_enable_hubble_metrics: true
# cilium_hubble_metrics: {}
# - dns
# - drop
# - tcp
# - flow
# - icmp
# - http
### Enable Hubble install
# cilium_hubble_install: false
### Enable auto generate certs if cilium_hubble_install: true
# cilium_hubble_tls_generate: false

# IP address management mode for v1.9+.
# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/
# cilium_ipam_mode: kubernetes

# Extra arguments for the Cilium agent
# cilium_agent_custom_args: []

# For adding and mounting extra volumes to the cilium agent
# cilium_agent_extra_volumes: []
# cilium_agent_extra_volume_mounts: []

# cilium_agent_extra_env_vars: []

# cilium_operator_replicas: 2

# The address at which the cilium operator binds the health check api
# cilium_operator_api_serve_addr: "127.0.0.1:9234"

## A dictionary of extra config variables to add to cilium-config, formatted like:
##  cilium_config_extra_vars:
##    var1: "value1"
##    var2: "value2"
# cilium_config_extra_vars: {}

# For adding and mounting extra volumes to the cilium operator
# cilium_operator_extra_volumes: []
# cilium_operator_extra_volume_mounts: []

# Extra arguments for the Cilium Operator
# cilium_operator_custom_args: []

# Name of the cluster. Only relevant when building a mesh of clusters.
# cilium_cluster_name: default

# Make Cilium take ownership over the `/etc/cni/net.d` directory on the node, renaming all non-Cilium CNI configurations to `*.cilium_bak`.
# This ensures no Pods can be scheduled using other CNI plugins during Cilium agent downtime.
# Available for Cilium v1.10 and up.
# cilium_cni_exclusive: true

# Configure the log file for CNI logging with retention policy of 7 days.
# Disable CNI file logging by setting this field to empty explicitly.
# Available for Cilium v1.12 and up.
# cilium_cni_log_file: "/var/run/cilium/cilium-cni.log"

# -- Configure cgroup related configuration
# -- Enable auto mount of cgroup2 filesystem.
# When `cilium_cgroup_auto_mount` is enabled, cgroup2 filesystem is mounted at
# `cilium_cgroup_host_root` path on the underlying host and inside the cilium agent pod.
# If users disable `cilium_cgroup_auto_mount`, it's expected that users have mounted
# cgroup2 filesystem at the specified `cilium_cgroup_auto_mount` volume, and then the
# volume will be mounted inside the cilium agent pod at the same path.
# Available for Cilium v1.11 and up
# cilium_cgroup_auto_mount: true
# -- Configure cgroup root where cgroup2 filesystem is mounted on the host
# cilium_cgroup_host_root: "/run/cilium/cgroupv2"

# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
# cilium_bpf_map_dynamic_size_ratio: "0.0"

# -- Enables masquerading of IPv4 traffic leaving the node from endpoints.
# Available for Cilium v1.10 and up
# cilium_enable_ipv4_masquerade: true
# -- Enables masquerading of IPv6 traffic leaving the node from endpoints.
# Available for Cilium v1.10 and up
# cilium_enable_ipv6_masquerade: true

# -- Enable native IP masquerade support in eBPF
# cilium_enable_bpf_masquerade: false

# -- Configure whether direct routing mode should route traffic via
# host stack (true) or directly and more efficiently out of BPF (false) if
# the kernel supports it. The latter has the implication that it will also
# bypass netfilter in the host namespace.
# cilium_enable_host_legacy_routing: true

# -- Enable use of the remote node identity.
# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity
# cilium_enable_remote_node_identity: true

# -- Enable the use of well-known identities.
# cilium_enable_well_known_identities: false

# cilium_enable_bpf_clock_probe: true

# -- Whether to enable CNP status updates.
# cilium_disable_cnp_status_updates: true
@ -314,7 +314,7 @@ def main():
            wait=dict(default=False, type='bool'),
            all=dict(default=False, type='bool'),
            log_level=dict(default=0, type='int'),
            state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']),
            state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped', 'exists']),
            recursive=dict(default=False, type='bool'),
        ),
        mutually_exclusive=[['filename', 'list']]

@ -339,6 +339,11 @@ def main():
    elif state == 'latest':
        result = manager.replace()

    elif state == 'exists':
        result = manager.exists()
        module.exit_json(changed=changed,
                        msg='%s' % result)

    else:
        module.fail_json(msg='Unrecognized state %s.' % state)
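The new `exists` state checks for a resource without mutating it. A hypothetical task using it (only the `kube` module name and its `state`/`filename` options appear in this diff; the manifest path is a placeholder):

```yaml
- name: Check that the addon's resources exist, without applying any changes
  kube:
    filename: "{{ kube_config_dir }}/addons/my-addon.yml"  # placeholder path
    state: exists
```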
@ -1,10 +0,0 @@
|
||||
ansible==3.4.0
|
||||
ansible-base==2.10.15
|
||||
cryptography==2.8
|
||||
jinja2==2.11.3
|
||||
netaddr==0.7.19
|
||||
pbr==5.4.4
|
||||
jmespath==0.9.5
|
||||
ruamel.yaml==0.16.10
|
||||
ruamel.yaml.clib==0.2.6
|
||||
MarkupSafe==1.1.1
|
@ -1,10 +0,0 @@
|
||||
ansible==2.9.27
|
||||
cryptography==2.8
|
||||
jinja2==2.11.3
|
||||
netaddr==0.7.19
|
||||
pbr==5.4.4
|
||||
jmespath==0.9.5
|
||||
ruamel.yaml==0.16.10
|
||||
ruamel.yaml.clib==0.2.6 ; python_version >= '3.5'
|
||||
ruamel.yaml.clib==0.2.2 ; python_version < '3.5'
|
||||
MarkupSafe==1.1.1
|
@ -1,4 +0,0 @@
|
||||
---
|
||||
collections:
|
||||
- name: community.general
|
||||
version: '<3.0'
|
@ -1,4 +1,5 @@
---
kube_owner: kube
kube_cert_group: kube-cert
etcd_data_dir: "/var/lib/etcd"
@ -17,7 +17,7 @@ provisioner:
  name: ansible
  config_options:
    defaults:
      callback_whitelist: profile_tasks
      callbacks_enabled: profile_tasks
      timeout: 120
  lint:
    name: ansible-lint

@ -13,3 +13,4 @@
    shell: "{{ user.shell|default(omit) }}"
    name: "{{ user.name }}"
    system: "{{ user.system|default(omit) }}"
  when: kube_owner != "root"

@ -17,7 +17,7 @@ provisioner:
  name: ansible
  config_options:
    defaults:
      callback_whitelist: profile_tasks
      callbacks_enabled: profile_tasks
      timeout: 120
  lint:
    name: ansible-lint
@ -2,23 +2,39 @@
set -e

BINDIR="/opt/bin"

PYPY_VERSION=7.3.2
PYPI_URL="https://downloads.python.org/pypy/pypy3.6-v${PYPY_VERSION}-linux64.tar.bz2"
PYPI_HASH=d7a91f179076aaa28115ffc0a81e46c6a787785b2bc995c926fe3b02f0e9ad83
if [[ -e $BINDIR/.bootstrapped ]]; then
  exit 0
fi

ARCH=$(uname -m)
case $ARCH in
  "x86_64")
    PYPY_ARCH=linux64
    PYPI_HASH=46818cb3d74b96b34787548343d266e2562b531ddbaf330383ba930ff1930ed5
    ;;
  "aarch64")
    PYPY_ARCH=aarch64
    PYPI_HASH=2e1ae193d98bc51439642a7618d521ea019f45b8fb226940f7e334c548d2b4b9
    ;;
  *)
    echo "Unsupported Architecture: ${ARCH}"
    exit 1
esac

PYTHON_VERSION=3.9
PYPY_VERSION=7.3.9
PYPY_FILENAME="pypy${PYTHON_VERSION}-v${PYPY_VERSION}-${PYPY_ARCH}"
PYPI_URL="https://downloads.python.org/pypy/${PYPY_FILENAME}.tar.bz2"

mkdir -p $BINDIR

cd $BINDIR

if [[ -e $BINDIR/.bootstrapped ]]; then
  exit 0
fi

TAR_FILE=pyp.tar.bz2
wget -O "${TAR_FILE}" "${PYPI_URL}"
echo "${PYPI_HASH} ${TAR_FILE}" | sha256sum -c -
tar -xjf "${TAR_FILE}" && rm "${TAR_FILE}"
mv -n "pypy3.6-v${PYPY_VERSION}-linux64" pypy3
mv -n "${PYPY_FILENAME}" pypy3

ln -s ./pypy3/bin/pypy3 python
$BINDIR/python --version
@ -41,7 +41,7 @@ provisioner:
  name: ansible
  config_options:
    defaults:
      callback_whitelist: profile_tasks
      callbacks_enabled: profile_tasks
      timeout: 120
  lint:
    name: ansible-lint
@ -17,7 +17,7 @@
  when: not skip_http_proxy_on_os_packages

- name: Add proxy to RHEL subscription-manager if http_proxy is defined
  command: /sbin/subscription-manager config --server.proxy_hostname={{ http_proxy | regex_replace(':\\d+$') }} --server.proxy_port={{ http_proxy | regex_replace('^.*:') }}
  command: /sbin/subscription-manager config --server.proxy_hostname={{ http_proxy | regex_replace(':\d+$') }} --server.proxy_port={{ http_proxy | regex_replace('^.*:') }}
  become: true
  when:
    - not skip_http_proxy_on_os_packages
@ -7,7 +7,7 @@
|
||||
check_mode: false
|
||||
|
||||
- include_tasks: bootstrap-centos.yml
|
||||
when: '''ID="centos"'' in os_release.stdout_lines or ''ID="ol"'' in os_release.stdout_lines or ''ID="almalinux"'' in os_release.stdout_lines or ''ID="rocky"'' in os_release.stdout_lines'
|
||||
when: '''ID="centos"'' in os_release.stdout_lines or ''ID="ol"'' in os_release.stdout_lines or ''ID="almalinux"'' in os_release.stdout_lines or ''ID="rocky"'' in os_release.stdout_lines or ''ID="kylin"'' in os_release.stdout_lines'
|
||||
|
||||
- include_tasks: bootstrap-amazon.yml
|
||||
when: '''ID="amzn"'' in os_release.stdout_lines'
|
||||
@ -84,7 +84,7 @@
|
||||
or is_fedora_coreos
|
||||
or ansible_distribution == "Fedora")
|
||||
|
||||
- name: "Install ceph-commmon package"
|
||||
- name: Install ceph-commmon package
|
||||
package:
|
||||
name:
|
||||
- ceph-common
|
||||
|
@ -4,7 +4,7 @@ containerd_state_dir: "/run/containerd"
|
||||
containerd_systemd_dir: "/etc/systemd/system/containerd.service.d"
|
||||
containerd_oom_score: 0
|
||||
|
||||
containerd_default_runtime: "runc"
|
||||
# containerd_default_runtime: "runc"
|
||||
# containerd_snapshotter: "native"
|
||||
|
||||
containerd_runc_runtime:
|
||||
|
@ -45,7 +45,7 @@ provisioner:
|
||||
ANSIBLE_ROLES_PATH: ../../../../
|
||||
config_options:
|
||||
defaults:
|
||||
callback_whitelist: profile_tasks
|
||||
callbacks_enabled: profile_tasks
|
||||
timeout: 120
|
||||
lint:
|
||||
name: ansible-lint
|
||||
|
@ -3,7 +3,7 @@
|
||||
fail:
|
||||
msg: "{{ ansible_distribution }} is not supported by containerd."
|
||||
when:
|
||||
- ansible_distribution not in ["CentOS", "OracleLinux", "RedHat", "Ubuntu", "Debian", "Fedora", "AlmaLinux", "Rocky", "Amazon", "Flatcar", "Flatcar Container Linux by Kinvolk", "Suse", "openSUSE Leap", "openSUSE Tumbleweed"]
|
||||
- ansible_distribution not in ["CentOS", "OracleLinux", "RedHat", "Ubuntu", "Debian", "Fedora", "AlmaLinux", "Rocky", "Amazon", "Flatcar", "Flatcar Container Linux by Kinvolk", "Suse", "openSUSE Leap", "openSUSE Tumbleweed", "Kylin Linux Advanced Server"]
|
||||
|
||||
- name: containerd | Remove any package manager controlled containerd package
|
||||
package:
|
||||
|
@ -4,7 +4,6 @@
|
||||
become: true
|
||||
vars:
|
||||
container_manager: docker
|
||||
cri_dockerd_enabled: true
|
||||
roles:
|
||||
- role: kubespray-defaults
|
||||
- role: container-engine/cri-dockerd
|
||||
|
@ -29,7 +29,7 @@ provisioner:
|
||||
ANSIBLE_ROLES_PATH: ../../../../
|
||||
config_options:
|
||||
defaults:
|
||||
callback_whitelist: profile_tasks
|
||||
callbacks_enabled: profile_tasks
|
||||
timeout: 120
|
||||
lint:
|
||||
name: ansible-lint
|
||||
|
@ -35,7 +35,7 @@
|
||||
file:
|
||||
path: /etc/cni/net.d
|
||||
state: directory
|
||||
owner: kube
|
||||
owner: "{{ kube_owner }}"
|
||||
mode: 0755
|
||||
- name: Setup CNI
|
||||
copy:
|
||||
|
@ -7,7 +7,8 @@ Requires=cri-dockerd.socket
|
||||
|
||||
[Service]
|
||||
Type=notify
|
||||
ExecStart={{ bin_dir }}/cri-dockerd --container-runtime-endpoint fd:// --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin --network-plugin=cni --pod-cidr={{ kube_pods_subnet }}
|
||||
ExecStart={{ bin_dir }}/cri-dockerd --container-runtime-endpoint {{ cri_socket }} --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin --network-plugin=cni --pod-cidr={{ kube_pods_subnet }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_version }} {% if enable_dual_stack_networks %}--ipv6-dual-stack=True{% endif %}
|
||||
|
||||
ExecReload=/bin/kill -s HUP $MAINPID
|
||||
TimeoutSec=0
|
||||
RestartSec=2
|
||||
|
@ -38,11 +38,11 @@ crio_stream_port: "10010"
|
||||
crio_required_version: "{{ kube_version | regex_replace('^v(?P<major>\\d+).(?P<minor>\\d+).(?P<patch>\\d+)$', '\\g<major>.\\g<minor>') }}"
|
||||
|
||||
crio_kubernetes_version_matrix:
|
||||
"1.24": "1.24"
|
||||
"1.23": "1.23"
|
||||
"1.22": "1.22"
|
||||
"1.21": "1.21"
|
||||
|
||||
crio_version: "{{ crio_kubernetes_version_matrix[crio_required_version] | default('1.23') }}"
|
||||
crio_version: "{{ crio_kubernetes_version_matrix[crio_required_version] | default('1.24') }}"
|
||||
|
||||
# The crio_runtimes variable defines a list of OCI compatible runtimes.
|
||||
crio_runtimes:
|
||||
|
3
roles/container-engine/cri-o/meta/main.yml
Normal file
3
roles/container-engine/cri-o/meta/main.yml
Normal file
@ -0,0 +1,3 @@
|
||||
---
|
||||
dependencies:
|
||||
- role: container-engine/crictl
|
@ -2,6 +2,8 @@
|
||||
- name: Converge
|
||||
hosts: all
|
||||
become: true
|
||||
vars:
|
||||
container_manager: crio
|
||||
roles:
|
||||
- role: kubespray-defaults
|
||||
- role: container-engine/cri-o
|
||||
|
@ -31,7 +31,7 @@ provisioner:
|
||||
ANSIBLE_ROLES_PATH: ../../../../
|
||||
config_options:
|
||||
defaults:
|
||||
callback_whitelist: profile_tasks
|
||||
callbacks_enabled: profile_tasks
|
||||
timeout: 120
|
||||
lint:
|
||||
name: ansible-lint
|
||||
|
@ -114,6 +114,29 @@
|
||||
- ansible_os_family == "RedHat"
|
||||
- ansible_distribution not in ["Amazon", "Fedora"]
|
||||
|
||||
- name: Add CRI-O kubic yum repo
|
||||
yum_repository:
|
||||
name: devel_kubic_libcontainers_stable
|
||||
description: Stable Releases of Upstream github.com/containers packages
|
||||
baseurl: http://{{ crio_download_base }}/Fedora_{{ ansible_distribution_major_version }}/
|
||||
gpgcheck: yes
|
||||
gpgkey: http://{{ crio_download_base }}/Fedora_{{ ansible_distribution_major_version }}/repodata/repomd.xml.key
|
||||
keepcache: "0"
|
||||
when:
|
||||
- ansible_distribution in ["Fedora"]
|
||||
- not is_ostree
|
||||
|
||||
- name: Add CRI-O kubic yum repo
|
||||
yum_repository:
|
||||
name: "devel_kubic_libcontainers_stable_cri-o_{{ crio_version }}"
|
||||
description: "CRI-O {{ crio_version }}"
|
||||
baseurl: "{{ crio_download_crio }}{{ crio_version }}/Fedora_{{ ansible_distribution_major_version }}/"
|
||||
gpgcheck: yes
|
||||
gpgkey: "{{ crio_download_crio }}{{ crio_version }}/Fedora_{{ ansible_distribution_major_version }}/repodata/repomd.xml.key"
|
||||
when:
|
||||
- ansible_distribution in ["Fedora"]
|
||||
- not is_ostree
|
||||
|
||||
- name: Add CRI-O kubic yum repo
|
||||
yum_repository:
|
||||
name: devel_kubic_libcontainers_stable
|
||||
@ -154,14 +177,3 @@
|
||||
when:
|
||||
- is_ostree
|
||||
- ostree_version is defined and ostree_version.stdout is version('2021.9', '>=')
|
||||
|
||||
- name: Enable CRI-O module
|
||||
command: "dnf -y module enable cri-o:{{ crio_version }}"
|
||||
args:
|
||||
warn: False
|
||||
register: crio_dnf_result
|
||||
changed_when: "'Enabling' in crio_dnf_result.stdout"
|
||||
become: true
|
||||
when:
|
||||
- ansible_distribution in ["Fedora"]
|
||||
- not is_ostree
|
||||
|
@ -39,9 +39,6 @@
|
||||
import_tasks: "crio_repo.yml"
|
||||
when: crio_add_repos
|
||||
|
||||
- include_role: # noqa unnamed-task
|
||||
name: container-engine/crictl
|
||||
|
||||
- name: Build a list of crio runtimes with Katacontainers runtimes
|
||||
set_fact:
|
||||
crio_runtimes: "{{ crio_runtimes + kata_runtimes }}"
|
||||
@ -98,6 +95,16 @@
|
||||
- ansible_distribution == "Debian"
|
||||
- ansible_distribution_version == "10"
|
||||
|
||||
- name: Remove dpkg hold
|
||||
dpkg_selections:
|
||||
name: "{{ item | split ('=') | first }}"
|
||||
selection: install
|
||||
when:
|
||||
- ansible_pkg_mgr == 'apt'
|
||||
changed_when: false
|
||||
with_items:
|
||||
- "{{ crio_packages }}"
|
||||
|
||||
- name: Install cri-o packages
|
||||
package:
|
||||
name: "{{ item }}"
|
||||
@ -109,6 +116,17 @@
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | d(3) }}"
|
||||
|
||||
# This is required to ensure any apt upgrade will not break kubernetes
|
||||
- name: Tell Debian hosts not to change the cri-o version with apt upgrade
|
||||
dpkg_selections:
|
||||
name: "{{ item | split ('=') | first }}"
|
||||
selection: hold
|
||||
when:
|
||||
- ansible_pkg_mgr == 'apt'
|
||||
changed_when: false
|
||||
with_items:
|
||||
- "{{ crio_packages }}"
|
||||
|
||||
- name: Check if already installed
|
||||
stat:
|
||||
path: "/bin/crio"
|
||||
|
@ -81,6 +81,16 @@
|
||||
tags:
|
||||
- reset_crio
|
||||
|
||||
- name: CRI-O | Remove dpkg hold
|
||||
dpkg_selections:
|
||||
name: "{{ item }}"
|
||||
selection: install
|
||||
when: ansible_pkg_mgr == 'apt'
|
||||
changed_when: false
|
||||
with_items: "{{ crio_packages }}"
|
||||
tags:
|
||||
- reset_crio
|
||||
|
||||
- name: CRI-O | Uninstall CRI-O package
|
||||
package:
|
||||
name: "{{ item }}"
|
||||
|
@ -1,4 +0,0 @@
|
||||
runtime-endpoint: unix://{{ cri_socket }}
|
||||
image-endpoint: unix://{{ cri_socket }}
|
||||
timeout: 30
|
||||
debug: false
|
@ -17,10 +17,10 @@
|
||||
|
||||
# Path to the "root directory". CRI-O stores all of its data, including
|
||||
# containers images, in this directory.
|
||||
#root = "/var/lib/containers/storage"
|
||||
root = "/var/lib/containers/storage"
|
||||
|
||||
# Path to the "run directory". CRI-O stores all of its state in this directory.
|
||||
#runroot = "/var/run/containers/storage"
|
||||
runroot = "/var/run/containers/storage"
|
||||
|
||||
# Storage driver used to manage the storage of images and containers. Please
|
||||
# refer to containers-storage.conf(5) to see all available storage drivers.
|
||||
|
@ -3,12 +3,12 @@
|
||||
crio_storage_driver: "overlay"
|
||||
|
||||
crio_versioned_pkg:
|
||||
"1.24":
|
||||
- "cri-o-1.24.*"
|
||||
"1.23":
|
||||
- "cri-o-1.23.*"
|
||||
"1.22":
|
||||
- "cri-o-1.22.*"
|
||||
"1.21":
|
||||
- "cri-o-1.21.*"
|
||||
|
||||
default_crio_packages: "{{ crio_versioned_pkg[crio_version] }}"
|
||||
|
||||
|
@ -1,11 +1,11 @@
|
||||
---
|
||||
crio_versioned_pkg:
|
||||
"1.24":
|
||||
- "cri-o-1.24.*"
|
||||
"1.23":
|
||||
- "cri-o-1.23.*"
|
||||
"1.22":
|
||||
- "cri-o-1.22.*"
|
||||
"1.21":
|
||||
- "cri-o-1.21.*"
|
||||
|
||||
default_crio_packages: "{{ crio_versioned_pkg[crio_version] }}"
|
||||
|
||||
|
@ -1,11 +1,11 @@
|
||||
---
|
||||
crio_versioned_pkg:
|
||||
"1.24":
|
||||
- "cri-o-1.24.*"
|
||||
"1.23":
|
||||
- "cri-o-1.23.*"
|
||||
"1.22":
|
||||
- "cri-o-1.22.*"
|
||||
"1.21":
|
||||
- "cri-o-1.21.*"
|
||||
|
||||
default_crio_packages: "{{ crio_versioned_pkg[crio_version] }}"
|
||||
|
||||
|
@ -1,14 +1,14 @@
|
||||
---
|
||||
crio_versioned_pkg:
|
||||
"1.24":
|
||||
- "cri-o=1.24*"
|
||||
- cri-o-runc
|
||||
"1.23":
|
||||
- "cri-o=1.23*"
|
||||
- cri-o-runc
|
||||
"1.22":
|
||||
- "cri-o=1.22*"
|
||||
- cri-o-runc
|
||||
"1.21":
|
||||
- "cri-o=1.21*"
|
||||
- cri-o-runc
|
||||
|
||||
crio_debian_buster_backports_packages:
|
||||
- "libseccomp2"
|
||||
|
5
roles/container-engine/cri-o/vars/fedora-36.yml
Normal file
5
roles/container-engine/cri-o/vars/fedora-36.yml
Normal file
@ -0,0 +1,5 @@
|
||||
---
|
||||
crio_packages:
|
||||
- cri-o
|
||||
|
||||
crio_version: 1.24
|
@ -1,10 +1,9 @@
|
||||
---
|
||||
crio_packages:
|
||||
- cri-o
|
||||
- cri-tools
|
||||
|
||||
crio_kubernetes_version_matrix:
|
||||
"1.23": "1.22"
|
||||
"1.24": "1.23"
|
||||
"1.23": "1.23"
|
||||
"1.22": "1.22"
|
||||
"1.21": "1.21"
|
||||
crio_version: "{{ crio_kubernetes_version_matrix[crio_required_version] | default('1.22') }}"
|
||||
crio_version: "{{ crio_kubernetes_version_matrix[crio_required_version] | default('1.23') }}"
|
||||
|
@ -1,14 +1,14 @@
|
||||
---
|
||||
crio_versioned_pkg:
|
||||
"1.24":
|
||||
- "cri-o=1.24*"
|
||||
- cri-o-runc
|
||||
"1.23":
|
||||
- "cri-o=1.23*"
|
||||
- cri-o-runc
|
||||
"1.22":
|
||||
- "cri-o=1.22*"
|
||||
- cri-o-runc
|
||||
"1.21":
|
||||
- "cri-o=1.21*"
|
||||
- cri-o-runc
|
||||
|
||||
default_crio_packages: "{{ crio_versioned_pkg[crio_version] }}"
|
||||
|
||||
|
@ -20,7 +20,3 @@
|
||||
notify:
|
||||
- Get crictl completion
|
||||
- Install crictl completion
|
||||
|
||||
- name: Set fact crictl_installed
|
||||
set_fact:
|
||||
crictl_installed: true
|
||||
|
@ -1,4 +1,3 @@
|
||||
---
|
||||
- name: install crictĺ
|
||||
include_tasks: crictl.yml
|
||||
when: not crictl_installed | default(false)
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user