Compare commits

..

1 Commits

Author SHA1 Message Date
61064c372a Update CONTRIBUTING.md 2024-05-27 12:23:41 +02:00
324 changed files with 1179 additions and 1828 deletions

View File

@ -4,6 +4,4 @@ updates:
directory: "/" directory: "/"
schedule: schedule:
interval: "weekly" interval: "weekly"
labels: labels: [ "dependencies" ]
- dependencies
- release-note-none

View File

@ -1,9 +1,12 @@
--- ---
stages: stages:
- build - build
- test - unit-tests
- deploy-part1 - deploy-part1
- deploy-extended - moderator
- deploy-part2
- deploy-part3
- deploy-special
variables: variables:
KUBESPRAY_VERSION: v2.25.0 KUBESPRAY_VERSION: v2.25.0
@ -40,26 +43,15 @@ before_script:
.job: &job .job: &job
tags: tags:
- ffci - packet
image: $PIPELINE_IMAGE image: $PIPELINE_IMAGE
artifacts: artifacts:
when: always when: always
paths: paths:
- cluster-dump/ - cluster-dump/
needs:
- pipeline-image
.job-moderated:
extends: .job
needs:
- pipeline-image
- ci-not-authorized
- check-galaxy-version # lint
- pre-commit # lint
- vagrant-validate # lint
.testcases: &testcases .testcases: &testcases
extends: .job-moderated <<: *job
retry: 1 retry: 1
interruptible: true interruptible: true
before_script: before_script:
@ -69,38 +61,23 @@ before_script:
script: script:
- ./tests/scripts/testcases_run.sh - ./tests/scripts/testcases_run.sh
after_script: after_script:
- ./tests/scripts/testcases_cleanup.sh - chronic ./tests/scripts/testcases_cleanup.sh
# For failfast, at least 1 job must be defined in .gitlab-ci.yml # For failfast, at least 1 job must be defined in .gitlab-ci.yml
# Premoderated with manual actions # Premoderated with manual actions
ci-not-authorized: ci-authorized:
stage: build extends: .job
before_script: [] stage: moderator
after_script: []
rules:
# LGTM or ok-to-test labels
- if: $PR_LABELS =~ /.*,(lgtm|approved|ok-to-test).*|^(lgtm|approved|ok-to-test).*/i
variables:
CI_OK_TO_TEST: '0'
when: always
- if: $CI_PIPELINE_SOURCE == "schedule" || $CI_PIPELINE_SOURCE == "trigger"
variables:
CI_OK_TO_TEST: '0'
- if: $CI_COMMIT_BRANCH == "master"
variables:
CI_OK_TO_TEST: '0'
- when: always
variables:
CI_OK_TO_TEST: '1'
script: script:
- exit $CI_OK_TO_TEST - /bin/sh scripts/premoderator.sh
tags: except: ['triggers', 'master']
- ffci # Disable ci moderator
needs: [] only: []
include: include:
- .gitlab-ci/build.yml - .gitlab-ci/build.yml
- .gitlab-ci/lint.yml - .gitlab-ci/lint.yml
- .gitlab-ci/shellcheck.yml
- .gitlab-ci/terraform.yml - .gitlab-ci/terraform.yml
- .gitlab-ci/packet.yml - .gitlab-ci/packet.yml
- .gitlab-ci/vagrant.yml - .gitlab-ci/vagrant.yml

View File

@ -1,32 +1,40 @@
--- ---
.build-container: .build:
cache:
key: $CI_COMMIT_REF_SLUG
paths:
- image-cache
tags:
- ffci
stage: build stage: build
image: image:
name: gcr.io/kaniko-project/executor:debug name: moby/buildkit:rootless
entrypoint: [''] entrypoint: [""]
variables: variables:
TAG: $CI_COMMIT_SHORT_SHA BUILDKITD_FLAGS: --oci-worker-no-process-sandbox
PROJECT_DIR: $CI_PROJECT_DIR
DOCKERFILE: Dockerfile
GODEBUG: "http2client=0"
before_script: before_script:
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"auth\":\"$(echo -n ${CI_REGISTRY_USER}:${CI_REGISTRY_PASSWORD} | base64)\"}}}" > /kaniko/.docker/config.json - mkdir ~/.docker
script: - echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > ~/.docker/config.json
- /kaniko/executor --cache=true
--cache-dir=image-cache
--context $PROJECT_DIR
--dockerfile $PROJECT_DIR/$DOCKERFILE
--label 'git-branch'=$CI_COMMIT_REF_SLUG
--label 'git-tag=$CI_COMMIT_TAG'
--destination $PIPELINE_IMAGE
pipeline-image: pipeline image:
extends: .build-container extends: .build
variables: script:
DOCKERFILE: pipeline.Dockerfile - |
buildctl-daemonless.sh build \
--frontend=dockerfile.v0 \
--local context=. \
--local dockerfile=. \
--opt filename=./pipeline.Dockerfile \
--output type=image,name=$PIPELINE_IMAGE,push=true \
--import-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache
rules:
- if: '$CI_COMMIT_REF_NAME != $CI_DEFAULT_BRANCH'
pipeline image and build cache:
extends: .build
script:
- |
buildctl-daemonless.sh build \
--frontend=dockerfile.v0 \
--local context=. \
--local dockerfile=. \
--opt filename=./pipeline.Dockerfile \
--output type=image,name=$PIPELINE_IMAGE,push=true \
--import-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache \
--export-cache type=registry,ref=$CI_REGISTRY_IMAGE/pipeline:cache,mode=max
rules:
- if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH'

View File

@ -1,35 +1,126 @@
--- ---
pre-commit: yamllint:
stage: test extends: .job
tags: stage: unit-tests
- ffci tags: [light]
image: 'ghcr.io/pre-commit-ci/runner-image@sha256:aaf2c7b38b22286f2d381c11673bec571c28f61dd086d11b43a1c9444a813cef'
variables: variables:
PRE_COMMIT_HOME: /pre-commit-cache LANG: C.UTF-8
script: script:
- pre-commit run --all-files - yamllint --strict .
cache: except: ['triggers', 'master']
key: pre-commit-all
paths:
- /pre-commit-cache
needs: []
vagrant-validate: vagrant-validate:
extends: .job extends: .job
stage: test stage: unit-tests
tags: [ffci] tags: [light]
variables: variables:
VAGRANT_VERSION: 2.3.7 VAGRANT_VERSION: 2.3.7
script: script:
- ./tests/scripts/vagrant-validate.sh - ./tests/scripts/vagrant-validate.sh
except: ['triggers', 'master'] except: ['triggers', 'master']
ansible-lint:
extends: .job
stage: unit-tests
tags: [light]
script:
- ansible-lint -v
except: ['triggers', 'master']
# TODO: convert to pre-commit hook jinja-syntax-check:
check-galaxy-version: extends: .job
needs: [] stage: unit-tests
stage: test tags: [light]
tags: [ffci] script:
- "find -name '*.j2' -exec tests/scripts/check-templates.py {} +"
except: ['triggers', 'master']
syntax-check:
extends: .job
stage: unit-tests
tags: [light]
variables:
ANSIBLE_INVENTORY: inventory/local-tests.cfg
ANSIBLE_REMOTE_USER: root
ANSIBLE_BECOME: "true"
ANSIBLE_BECOME_USER: root
ANSIBLE_VERBOSITY: "3"
script:
- ansible-playbook --syntax-check cluster.yml
- ansible-playbook --syntax-check playbooks/cluster.yml
- ansible-playbook --syntax-check upgrade-cluster.yml
- ansible-playbook --syntax-check playbooks/upgrade_cluster.yml
- ansible-playbook --syntax-check reset.yml
- ansible-playbook --syntax-check playbooks/reset.yml
- ansible-playbook --syntax-check extra_playbooks/upgrade-only-k8s.yml
except: ['triggers', 'master']
collection-build-install-sanity-check:
extends: .job
stage: unit-tests
tags: [light]
variables:
ANSIBLE_COLLECTIONS_PATH: "./ansible_collections"
script:
- ansible-galaxy collection build
- ansible-galaxy collection install kubernetes_sigs-kubespray-$(grep "^version:" galaxy.yml | awk '{print $2}').tar.gz
- ansible-galaxy collection list $(egrep -i '(name:\s+|namespace:\s+)' galaxy.yml | awk '{print $2}' | tr '\n' '.' | sed 's|\.$||g') | grep "^kubernetes_sigs.kubespray"
- test -f ansible_collections/kubernetes_sigs/kubespray/playbooks/cluster.yml
- test -f ansible_collections/kubernetes_sigs/kubespray/playbooks/reset.yml
except: ['triggers', 'master']
tox-inventory-builder:
stage: unit-tests
tags: [light]
extends: .job
before_script:
- ./tests/scripts/rebase.sh
script:
- pip3 install tox
- cd contrib/inventory_builder && tox
except: ['triggers', 'master']
markdownlint:
stage: unit-tests
tags: [light]
image: node
before_script:
- npm install -g markdownlint-cli@0.22.0
script:
- markdownlint $(find . -name '*.md' | grep -vF './.git') --ignore docs/_sidebar.md --ignore contrib/dind/README.md
generate-sidebar:
extends: .job
stage: unit-tests
tags: [light]
script:
- scripts/gen_docs_sidebar.sh
- git diff --exit-code
check-readme-versions:
stage: unit-tests
tags: [light]
image: python:3 image: python:3
script: script:
- tests/scripts/check_galaxy_version.sh - tests/scripts/check_readme_versions.sh
check-galaxy-version:
stage: unit-tests
tags: [light]
image: python:3
script:
- tests/scripts/check_galaxy_version.sh
check-typo:
stage: unit-tests
tags: [light]
image: python:3
script:
- tests/scripts/check_typo.sh
ci-matrix:
stage: unit-tests
tags: [light]
image: python:3
script:
- tests/scripts/md-table/test.sh

View File

@ -1,42 +1,30 @@
--- ---
.molecule: .molecule:
tags: [ffci-vm-med] tags: [c3.small.x86]
only: [/^pr-.*$/] only: [/^pr-.*$/]
except: ['triggers'] except: ['triggers']
image: quay.io/kubespray/vm-kubespray-ci:v6 image: $PIPELINE_IMAGE
services: [] services: []
stage: deploy-part1 stage: deploy-part1
needs: []
# - ci-not-authorized
variables:
VAGRANT_DEFAULT_PROVIDER: "libvirt"
before_script: before_script:
- groups - tests/scripts/rebase.sh
- python3 -m venv citest - ./tests/scripts/vagrant_clean.sh
- source citest/bin/activate
- vagrant plugin expunge --reinstall --force --no-tty
- vagrant plugin install vagrant-libvirt
- pip install --no-compile --no-cache-dir pip -U
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/requirements.txt
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/tests/requirements.txt
- ./tests/scripts/rebase.sh
- ./tests/scripts/vagrant_clean.sh
script: script:
- ./tests/scripts/molecule_run.sh - ./tests/scripts/molecule_run.sh
after_script: after_script:
- ./tests/scripts/molecule_logs.sh - chronic ./tests/scripts/molecule_logs.sh
artifacts: artifacts:
when: always when: always
paths: paths:
- molecule_logs/ - molecule_logs/
# CI template for periodic CI jobs # CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set # Enabled when PERIODIC_CI_ENABLED var is set
.molecule_periodic: .molecule_periodic:
only: only:
variables: variables:
- $PERIODIC_CI_ENABLED - $PERIODIC_CI_ENABLED
allow_failure: true allow_failure: true
extends: .molecule extends: .molecule
@ -46,50 +34,50 @@ molecule_full:
molecule_no_container_engines: molecule_no_container_engines:
extends: .molecule extends: .molecule
script: script:
- ./tests/scripts/molecule_run.sh -e container-engine - ./tests/scripts/molecule_run.sh -e container-engine
when: on_success when: on_success
molecule_docker: molecule_docker:
extends: .molecule extends: .molecule
script: script:
- ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd - ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
when: on_success when: on_success
molecule_containerd: molecule_containerd:
extends: .molecule extends: .molecule
script: script:
- ./tests/scripts/molecule_run.sh -i container-engine/containerd - ./tests/scripts/molecule_run.sh -i container-engine/containerd
when: on_success when: on_success
molecule_cri-o: molecule_cri-o:
extends: .molecule extends: .molecule
stage: deploy-part1 stage: deploy-part2
script: script:
- ./tests/scripts/molecule_run.sh -i container-engine/cri-o - ./tests/scripts/molecule_run.sh -i container-engine/cri-o
allow_failure: true allow_failure: true
when: on_success when: on_success
# # Stage 3 container engines don't get as much attention so allow them to fail # Stage 3 container engines don't get as much attention so allow them to fail
# molecule_kata: molecule_kata:
# extends: .molecule extends: .molecule
# stage: deploy-extended stage: deploy-part3
# script: script:
# - ./tests/scripts/molecule_run.sh -i container-engine/kata-containers - ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
# when: manual when: manual
# # FIXME: this test is broken (perma-failing) # FIXME: this test is broken (perma-failing)
molecule_gvisor: molecule_gvisor:
extends: .molecule extends: .molecule
stage: deploy-extended stage: deploy-part3
script: script:
- ./tests/scripts/molecule_run.sh -i container-engine/gvisor - ./tests/scripts/molecule_run.sh -i container-engine/gvisor
when: manual when: manual
# FIXME: this test is broken (perma-failing) # FIXME: this test is broken (perma-failing)
molecule_youki: molecule_youki:
extends: .molecule extends: .molecule
stage: deploy-extended stage: deploy-part3
script: script:
- ./tests/scripts/molecule_run.sh -i container-engine/youki - ./tests/scripts/molecule_run.sh -i container-engine/youki
when: manual when: manual
# FIXME: this test is broken (perma-failing) # FIXME: this test is broken (perma-failing)

View File

@ -6,56 +6,14 @@
CI_PLATFORM: packet CI_PLATFORM: packet
SSH_USER: kubespray SSH_USER: kubespray
tags: tags:
- ffci - packet
needs: except: [triggers]
- pipeline-image
- ci-not-authorized
# CI template for PRs # CI template for PRs
.packet_pr: .packet_pr:
stage: deploy-part1 only: [/^pr-.*$/]
rules:
- if: $PR_LABELS =~ /.*ci-short.*/
when: manual
allow_failure: true
- if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
when: on_success
- when: manual
allow_failure: true
extends: .packet extends: .packet
## Uncomment this to have multiple stages
# needs:
# - packet_ubuntu20-calico-all-in-one
.packet_pr_short:
stage: deploy-part1
extends: .packet
rules:
- if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
when: on_success
- when: manual
allow_failure: true
.packet_pr_manual:
extends: .packet_pr
stage: deploy-extended
rules:
- if: $PR_LABELS =~ /.*ci-full.*/
when: on_success
# Else run as manual
- when: manual
allow_failure: true
.packet_pr_extended:
extends: .packet_pr
stage: deploy-extended
rules:
- if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
when: on_success
- when: manual
allow_failure: true
# CI template for periodic CI jobs # CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set # Enabled when PERIODIC_CI_ENABLED var is set
.packet_periodic: .packet_periodic:
@ -76,172 +34,314 @@ packet_cleanup_old:
# The ubuntu20-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken # The ubuntu20-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
packet_ubuntu20-calico-all-in-one: packet_ubuntu20-calico-all-in-one:
stage: deploy-part1 stage: deploy-part1
extends: .packet_pr_short extends: .packet_pr
when: on_success
variables: variables:
RESET_CHECK: "true" RESET_CHECK: "true"
# ### PR JOBS PART2 # ### PR JOBS PART2
packet_ubuntu20-crio: packet_ubuntu20-all-in-one-docker:
extends: .packet_pr_manual stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu20-calico-all-in-one-hardening:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu22-all-in-one-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu22-calico-all-in-one: packet_ubuntu22-calico-all-in-one:
stage: deploy-part2
extends: .packet_pr extends: .packet_pr
when: on_success
packet_ubuntu24-all-in-one-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu24-calico-all-in-one:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_ubuntu24-calico-etcd-datastore: packet_ubuntu24-calico-etcd-datastore:
stage: deploy-part2
extends: .packet_pr extends: .packet_pr
when: on_success
packet_centos7-flannel-addons-ha:
extends: .packet_pr
stage: deploy-part2
when: on_success
packet_almalinux8-crio: packet_almalinux8-crio:
extends: .packet_pr extends: .packet_pr
stage: deploy-part2
when: on_success
allow_failure: true
packet_almalinux8-kube-ovn: packet_ubuntu20-crio:
extends: .packet_pr extends: .packet_pr
stage: deploy-part2
when: manual
packet_fedora37-crio:
extends: .packet_pr
stage: deploy-part2
when: manual
packet_ubuntu20-flannel-ha:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian10-cilium-svc-proxy:
stage: deploy-part2
extends: .packet_periodic
when: on_success
packet_debian10-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_debian10-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_debian11-calico: packet_debian11-calico:
stage: deploy-part2
extends: .packet_pr extends: .packet_pr
when: on_success
packet_debian11-macvlan: packet_debian11-docker:
stage: deploy-part2
extends: .packet_pr extends: .packet_pr
when: on_success
packet_debian12-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_debian12-docker:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_debian12-cilium: packet_debian12-cilium:
stage: deploy-part2
extends: .packet_periodic
when: on_success
packet_centos7-calico-ha-once-localhost:
stage: deploy-part2
extends: .packet_pr extends: .packet_pr
when: on_success
variables:
# This will instruct Docker not to start over TLS.
DOCKER_TLS_CERTDIR: ""
services:
- docker:19.03.9-dind
packet_almalinux8-kube-ovn:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_almalinux8-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_rockylinux8-calico: packet_rockylinux8-calico:
stage: deploy-part2
extends: .packet_pr extends: .packet_pr
when: on_success
packet_rockylinux9-calico:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_rockylinux9-cilium: packet_rockylinux9-cilium:
stage: deploy-part2
extends: .packet_pr extends: .packet_pr
when: on_success
variables: variables:
RESET_CHECK: "true" RESET_CHECK: "true"
packet_amazon-linux-2-all-in-one: packet_almalinux8-docker:
stage: deploy-part2
extends: .packet_pr extends: .packet_pr
when: on_success
packet_amazon-linux-2-all-in-one:
stage: deploy-part2
extends: .packet_pr
when: on_success
packet_fedora38-docker-weave:
stage: deploy-part2
extends: .packet_pr
when: on_success
allow_failure: true
packet_opensuse-docker-cilium: packet_opensuse-docker-cilium:
stage: deploy-part2
extends: .packet_pr extends: .packet_pr
when: on_success
packet_ubuntu20-cilium-sep:
extends: .packet_pr
## Extended
packet_debian11-docker:
extends: .packet_pr_extended
packet_debian12-docker:
extends: .packet_pr_extended
packet_debian12-calico:
extends: .packet_pr_extended
packet_almalinux8-calico-remove-node:
extends: .packet_pr_extended
variables:
REMOVE_NODE_CHECK: "true"
REMOVE_NODE_NAME: "instance-3"
packet_rockylinux9-calico:
extends: .packet_pr_extended
packet_almalinux8-calico:
extends: .packet_pr_extended
packet_almalinux8-docker:
extends: .packet_pr_extended
packet_ubuntu20-calico-all-in-one-hardening:
extends: .packet_pr_extended
packet_ubuntu24-calico-all-in-one:
extends: .packet_pr_extended
packet_ubuntu20-calico-etcd-kubeadm:
extends: .packet_pr_extended
packet_ubuntu24-all-in-one-docker:
extends: .packet_pr_extended
packet_ubuntu22-all-in-one-docker:
extends: .packet_pr_extended
# ### MANUAL JOBS # ### MANUAL JOBS
packet_fedora37-crio:
extends: .packet_pr_manual
packet_ubuntu20-flannel-ha: packet_ubuntu20-docker-weave-sep:
extends: .packet_pr_manual stage: deploy-part2
extends: .packet_pr
when: manual
packet_ubuntu20-all-in-one-docker: packet_ubuntu20-cilium-sep:
extends: .packet_pr_manual stage: deploy-special
extends: .packet_pr
when: manual
packet_ubuntu20-flannel-ha-once: packet_ubuntu20-flannel-ha-once:
extends: .packet_pr_manual stage: deploy-part2
extends: .packet_pr
packet_fedora37-calico-swap-selinux: when: manual
extends: .packet_pr_manual
# Calico HA eBPF
packet_almalinux8-calico-ha-ebpf: packet_almalinux8-calico-ha-ebpf:
extends: .packet_pr_manual stage: deploy-part2
extends: .packet_pr
when: manual
packet_almalinux8-calico-nodelocaldns-secondary: packet_debian10-macvlan:
extends: .packet_pr_manual stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian11-custom-cni: packet_centos7-calico-ha:
extends: .packet_pr_manual stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian11-kubelet-csr-approver: packet_centos7-multus-calico:
extends: .packet_pr_manual stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian12-custom-cni-helm:
extends: .packet_pr_manual
packet_ubuntu20-calico-ha-wireguard:
extends: .packet_pr_manual
# PERIODIC
packet_fedora38-docker-calico: packet_fedora38-docker-calico:
stage: deploy-extended stage: deploy-part2
extends: .packet_periodic extends: .packet_periodic
when: on_success
variables: variables:
RESET_CHECK: "true" RESET_CHECK: "true"
packet_fedora37-calico-selinux: packet_fedora37-calico-selinux:
stage: deploy-extended stage: deploy-part2
extends: .packet_periodic extends: .packet_periodic
when: on_success
packet_fedora37-calico-swap-selinux:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha: packet_almalinux8-calico-nodelocaldns-secondary:
stage: deploy-extended stage: deploy-part2
extends: .packet_pr
when: manual
packet_fedora38-kube-ovn:
stage: deploy-part2
extends: .packet_periodic extends: .packet_periodic
when: on_success
packet_debian11-custom-cni:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian11-kubelet-csr-approver:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian12-custom-cni-helm:
stage: deploy-part2
extends: .packet_pr
when: manual
# ### PR JOBS PART3
# Long jobs (45min+)
packet_centos7-weave-upgrade-ha:
stage: deploy-part3
extends: .packet_periodic
when: on_success
variables: variables:
UPGRADE_TEST: basic UPGRADE_TEST: basic
packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha:
stage: deploy-part3
extends: .packet_periodic
when: on_success
variables:
UPGRADE_TEST: basic
# Calico HA Wireguard
packet_ubuntu20-calico-ha-wireguard:
stage: deploy-part2
extends: .packet_pr
when: manual
packet_debian11-calico-upgrade:
stage: deploy-part3
extends: .packet_pr
when: on_success
variables:
UPGRADE_TEST: graceful
packet_almalinux8-calico-remove-node:
stage: deploy-part3
extends: .packet_pr
when: on_success
variables:
REMOVE_NODE_CHECK: "true"
REMOVE_NODE_NAME: "instance-3"
packet_ubuntu20-calico-etcd-kubeadm:
stage: deploy-part3
extends: .packet_pr
when: on_success
packet_debian11-calico-upgrade-once: packet_debian11-calico-upgrade-once:
stage: deploy-extended stage: deploy-part3
extends: .packet_periodic extends: .packet_periodic
when: on_success
variables: variables:
UPGRADE_TEST: graceful UPGRADE_TEST: graceful
packet_ubuntu20-calico-ha-recover: packet_ubuntu20-calico-ha-recover:
stage: deploy-extended stage: deploy-part3
extends: .packet_periodic extends: .packet_periodic
when: on_success
variables: variables:
RECOVER_CONTROL_PLANE_TEST: "true" RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]" RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
packet_ubuntu20-calico-ha-recover-noquorum: packet_ubuntu20-calico-ha-recover-noquorum:
stage: deploy-extended stage: deploy-part3
extends: .packet_periodic extends: .packet_periodic
when: on_success
variables: variables:
RECOVER_CONTROL_PLANE_TEST: "true" RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:]:kube_control_plane[1:]" RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:]:kube_control_plane[1:]"
packet_debian11-calico-upgrade:
stage: deploy-extended
extends: .packet_periodic
variables:
UPGRADE_TEST: graceful
packet_debian12-cilium-svc-proxy:
stage: deploy-extended
extends: .packet_periodic

View File

@ -1,17 +0,0 @@
---
# stub pipeline for dynamic generation
pre-commit:
tags:
- light
image: 'ghcr.io/pre-commit-ci/runner-image@sha256:aaf2c7b38b22286f2d381c11673bec571c28f61dd086d11b43a1c9444a813cef'
variables:
PRE_COMMIT_HOME: /pre-commit-cache
script:
- pre-commit run --all-files
cache:
key: pre-commit-$HOOK_ID
paths:
- /pre-commit-cache
parallel:
matrix:
- HOOK_ID:

16
.gitlab-ci/shellcheck.yml Normal file
View File

@ -0,0 +1,16 @@
---
shellcheck:
extends: .job
stage: unit-tests
tags: [light]
variables:
SHELLCHECK_VERSION: v0.7.1
before_script:
- ./tests/scripts/rebase.sh
- curl --silent --location "https://github.com/koalaman/shellcheck/releases/download/"${SHELLCHECK_VERSION}"/shellcheck-"${SHELLCHECK_VERSION}".linux.x86_64.tar.xz" | tar -xJv
- cp shellcheck-"${SHELLCHECK_VERSION}"/shellcheck /usr/bin/
- shellcheck --version
script:
# Run shellcheck for all *.sh
- find . -name '*.sh' -not -path './.git/*' | xargs shellcheck --severity error
except: ['triggers', 'master']

View File

@ -2,10 +2,6 @@
# Tests for contrib/terraform/ # Tests for contrib/terraform/
.terraform_install: .terraform_install:
extends: .job extends: .job
needs:
- ci-not-authorized
- pipeline-image
stage: deploy-part1
before_script: before_script:
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1 - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
- ./tests/scripts/rebase.sh - ./tests/scripts/rebase.sh
@ -28,19 +24,17 @@
.terraform_validate: .terraform_validate:
extends: .terraform_install extends: .terraform_install
tags: [ffci] stage: unit-tests
tags: [light]
only: ['master', /^pr-.*$/] only: ['master', /^pr-.*$/]
script: script:
- terraform -chdir="contrib/terraform/$PROVIDER" validate - terraform -chdir="contrib/terraform/$PROVIDER" validate
- terraform -chdir="contrib/terraform/$PROVIDER" fmt -check -diff - terraform -chdir="contrib/terraform/$PROVIDER" fmt -check -diff
stage: test
needs:
- pipeline-image
.terraform_apply: .terraform_apply:
extends: .terraform_install extends: .terraform_install
tags: [ffci] tags: [light]
stage: deploy-extended stage: deploy-part3
when: manual when: manual
only: [/^pr-.*$/] only: [/^pr-.*$/]
artifacts: artifacts:
@ -57,7 +51,7 @@
- tests/scripts/testcases_run.sh - tests/scripts/testcases_run.sh
after_script: after_script:
# Cleanup regardless of exit code # Cleanup regardless of exit code
- ./tests/scripts/testcases_cleanup.sh - chronic ./tests/scripts/testcases_cleanup.sh
tf-validate-openstack: tf-validate-openstack:
extends: .terraform_validate extends: .terraform_validate
@ -152,7 +146,8 @@ tf-validate-nifcloud:
TF_VAR_router_id: "ab95917c-41fb-4881-b507-3a6dfe9403df" TF_VAR_router_id: "ab95917c-41fb-4881-b507-3a6dfe9403df"
tf-elastx_cleanup: tf-elastx_cleanup:
tags: [ffci] stage: unit-tests
tags: [light]
image: python image: python
variables: variables:
<<: *elastx_variables <<: *elastx_variables
@ -160,11 +155,10 @@ tf-elastx_cleanup:
- pip install -r scripts/openstack-cleanup/requirements.txt - pip install -r scripts/openstack-cleanup/requirements.txt
script: script:
- ./scripts/openstack-cleanup/main.py - ./scripts/openstack-cleanup/main.py
allow_failure: true
tf-elastx_ubuntu20-calico: tf-elastx_ubuntu20-calico:
extends: .terraform_apply extends: .terraform_apply
stage: deploy-part1 stage: deploy-part3
when: on_success when: on_success
allow_failure: true allow_failure: true
variables: variables:

View File

@ -1,63 +1,64 @@
--- ---
.vagrant: .vagrant:
extends: .testcases extends: .testcases
needs:
- ci-not-authorized
variables: variables:
CI_PLATFORM: "vagrant" CI_PLATFORM: "vagrant"
SSH_USER: "vagrant" SSH_USER: "vagrant"
VAGRANT_DEFAULT_PROVIDER: "libvirt" VAGRANT_DEFAULT_PROVIDER: "libvirt"
KUBESPRAY_VAGRANT_CONFIG: tests/files/${CI_JOB_NAME}.rb KUBESPRAY_VAGRANT_CONFIG: tests/files/${CI_JOB_NAME}.rb
DOCKER_NAME: vagrant tags: [c3.small.x86]
VAGRANT_ANSIBLE_TAGS: facts only: [/^pr-.*$/]
tags: [ffci-vm-large] except: ['triggers']
# only: [/^pr-.*$/] image: $PIPELINE_IMAGE
# except: ['triggers']
image: quay.io/kubespray/vm-kubespray-ci:v6
services: [] services: []
before_script: before_script:
- echo $USER
- python3 -m venv citest
- source citest/bin/activate
- vagrant plugin expunge --reinstall --force --no-tty
- vagrant plugin install vagrant-libvirt
- pip install --no-compile --no-cache-dir pip -U
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/requirements.txt
- pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/tests/requirements.txt
- ./tests/scripts/vagrant_clean.sh - ./tests/scripts/vagrant_clean.sh
script: script:
- ./tests/scripts/testcases_run.sh - ./tests/scripts/testcases_run.sh
after_script:
- chronic ./tests/scripts/testcases_cleanup.sh
vagrant_ubuntu20-calico-dual-stack: vagrant_ubuntu20-calico-dual-stack:
stage: deploy-extended stage: deploy-part2
extends: .vagrant extends: .vagrant
when: manual when: manual
# FIXME: this test if broken (perma-failing) # FIXME: this test if broken (perma-failing)
vagrant_ubuntu20-weave-medium:
stage: deploy-part2
extends: .vagrant
when: manual
vagrant_ubuntu20-flannel: vagrant_ubuntu20-flannel:
stage: deploy-part1 stage: deploy-part2
extends: .vagrant extends: .vagrant
when: on_success when: on_success
allow_failure: false allow_failure: false
vagrant_ubuntu20-flannel-collection: vagrant_ubuntu20-flannel-collection:
stage: deploy-extended stage: deploy-part2
extends: .vagrant extends: .vagrant
when: manual when: on_success
vagrant_ubuntu20-kube-router-sep: vagrant_ubuntu20-kube-router-sep:
stage: deploy-extended stage: deploy-part2
extends: .vagrant extends: .vagrant
when: manual when: manual
# Service proxy test fails connectivity testing # Service proxy test fails connectivity testing
vagrant_ubuntu20-kube-router-svc-proxy: vagrant_ubuntu20-kube-router-svc-proxy:
stage: deploy-extended stage: deploy-part2
extends: .vagrant extends: .vagrant
when: manual when: manual
vagrant_fedora37-kube-router: vagrant_fedora37-kube-router:
stage: deploy-extended stage: deploy-part2
extends: .vagrant extends: .vagrant
when: manual when: manual
# FIXME: this test if broken (perma-failing) # FIXME: this test if broken (perma-failing)
vagrant_centos7-kube-router:
stage: deploy-part2
extends: .vagrant
when: manual

3
.markdownlint.yaml Normal file
View File

@ -0,0 +1,3 @@
---
MD013: false
MD029: false

View File

@ -1,4 +0,0 @@
all
exclude_rule 'MD013'
exclude_rule 'MD029'
rule 'MD007', :indent => 2

1
.mdlrc
View File

@ -1 +0,0 @@
style "#{File.dirname(__FILE__)}/.md_style.rb"

View File

@ -1,7 +1,7 @@
--- ---
repos: repos:
- repo: https://github.com/pre-commit/pre-commit-hooks - repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0 rev: v3.4.0
hooks: hooks:
- id: check-added-large-files - id: check-added-large-files
- id: check-case-conflict - id: check-case-conflict
@ -15,59 +15,47 @@ repos:
- id: trailing-whitespace - id: trailing-whitespace
- repo: https://github.com/adrienverge/yamllint.git - repo: https://github.com/adrienverge/yamllint.git
rev: v1.35.1 rev: v1.27.1
hooks: hooks:
- id: yamllint - id: yamllint
args: [--strict] args: [--strict]
- repo: https://github.com/markdownlint/markdownlint - repo: https://github.com/markdownlint/markdownlint
rev: v0.12.0 rev: v0.11.0
hooks: hooks:
- id: markdownlint - id: markdownlint
exclude: "^.github|(^docs/_sidebar\\.md$)" args: [-r, "~MD013,~MD029"]
exclude: "^.git"
- repo: https://github.com/shellcheck-py/shellcheck-py - repo: https://github.com/jumanjihouse/pre-commit-hooks
rev: v0.10.0.1 rev: 3.0.0
hooks: hooks:
- id: shellcheck - id: shellcheck
args: ["--severity=error"] args: [--severity, "error"]
exclude: "^.git" exclude: "^.git"
files: "\\.sh$" files: "\\.sh$"
- repo: https://github.com/ansible/ansible-lint
rev: v24.5.0
hooks:
- id: ansible-lint
additional_dependencies:
- ansible==9.8.0
- jsonschema==4.22.0
- jmespath==1.0.1
- netaddr==1.3.0
- distlib
- repo: https://github.com/golangci/misspell
rev: v0.6.0
hooks:
- id: misspell
exclude: "OWNERS_ALIASES$"
- repo: local - repo: local
hooks: hooks:
- id: ansible-lint
name: ansible-lint
entry: ansible-lint -v
language: python
pass_filenames: false
additional_dependencies:
- .[community]
- id: ansible-syntax-check - id: ansible-syntax-check
name: ansible-syntax-check name: ansible-syntax-check
entry: env ANSIBLE_INVENTORY=inventory/local-tests.cfg ANSIBLE_REMOTE_USER=root ANSIBLE_BECOME="true" ANSIBLE_BECOME_USER=root ANSIBLE_VERBOSITY="3" ansible-playbook --syntax-check entry: env ANSIBLE_INVENTORY=inventory/local-tests.cfg ANSIBLE_REMOTE_USER=root ANSIBLE_BECOME="true" ANSIBLE_BECOME_USER=root ANSIBLE_VERBOSITY="3" ansible-playbook --syntax-check
language: python language: python
files: "^cluster.yml|^upgrade-cluster.yml|^reset.yml|^extra_playbooks/upgrade-only-k8s.yml" files: "^cluster.yml|^upgrade-cluster.yml|^reset.yml|^extra_playbooks/upgrade-only-k8s.yml"
additional_dependencies:
- ansible==9.5.1
- id: tox-inventory-builder - id: tox-inventory-builder
name: tox-inventory-builder name: tox-inventory-builder
entry: bash -c "cd contrib/inventory_builder && tox" entry: bash -c "cd contrib/inventory_builder && tox"
language: python language: python
pass_filenames: false pass_filenames: false
additional_dependencies:
- tox==4.15.0
- id: check-readme-versions - id: check-readme-versions
name: check-readme-versions name: check-readme-versions
@ -75,15 +63,6 @@ repos:
language: script language: script
pass_filenames: false pass_filenames: false
- id: collection-build-install
name: Build and install kubernetes-sigs.kubespray Ansible collection
language: python
additional_dependencies:
- ansible-core>=2.16.4
- distlib
entry: tests/scripts/collection-build-install.sh
pass_filenames: false
- id: generate-docs-sidebar - id: generate-docs-sidebar
name: generate-docs-sidebar name: generate-docs-sidebar
entry: scripts/gen_docs_sidebar.sh entry: scripts/gen_docs_sidebar.sh
@ -92,13 +71,9 @@ repos:
- id: ci-matrix - id: ci-matrix
name: ci-matrix name: ci-matrix
entry: tests/scripts/md-table/main.py entry: tests/scripts/md-table/test.sh
language: python language: script
pass_filenames: false pass_filenames: false
additional_dependencies:
- jinja2
- pathlib
- pyaml
- id: jinja-syntax-check - id: jinja-syntax-check
name: jinja-syntax-check name: jinja-syntax-check
@ -107,4 +82,4 @@ repos:
types: types:
- jinja - jinja
additional_dependencies: additional_dependencies:
- jinja2 - Jinja2

View File

@ -6,7 +6,7 @@ ignore: |
.github/ .github/
# Generated file # Generated file
tests/files/custom_cni/cilium.yaml tests/files/custom_cni/cilium.yaml
# https://ansible.readthedocs.io/projects/lint/rules/yaml/
rules: rules:
braces: braces:
min-spaces-inside: 0 min-spaces-inside: 0
@ -14,16 +14,9 @@ rules:
brackets: brackets:
min-spaces-inside: 0 min-spaces-inside: 0
max-spaces-inside: 1 max-spaces-inside: 1
comments:
min-spaces-from-content: 1
# https://github.com/adrienverge/yamllint/issues/384
comments-indentation: false
indentation: indentation:
spaces: 2 spaces: 2
indent-sequences: consistent indent-sequences: consistent
line-length: disable line-length: disable
new-line-at-end-of-file: disable new-line-at-end-of-file: disable
octal-values:
forbid-implicit-octal: true # yamllint defaults to false
forbid-explicit-octal: true # yamllint defaults to false
truthy: disable truthy: disable

View File

@ -1,4 +1,4 @@
# Contributing guidelines test # Contributing guidelines
## How to become a contributor and submit your own code ## How to become a contributor and submit your own code

View File

@ -6,7 +6,6 @@ aliases:
- mzaian - mzaian
- oomichi - oomichi
- yankay - yankay
- ant31
kubespray-reviewers: kubespray-reviewers:
- cyclinder - cyclinder
- erikjiang - erikjiang
@ -15,6 +14,7 @@ aliases:
- vannten - vannten
- yankay - yankay
kubespray-emeritus_approvers: kubespray-emeritus_approvers:
- ant31
- atoms - atoms
- chadswen - chadswen
- luckysb - luckysb

View File

@ -141,7 +141,7 @@ vagrant up
## Supported Linux Distributions ## Supported Linux Distributions
- **Flatcar Container Linux by Kinvolk** - **Flatcar Container Linux by Kinvolk**
- **Debian** Bookworm, Bullseye - **Debian** Bookworm, Bullseye, Buster
- **Ubuntu** 20.04, 22.04, 24.04 - **Ubuntu** 20.04, 22.04, 24.04
- **CentOS/RHEL** 7, [8, 9](docs/operating_systems/centos.md#centos-8) - **CentOS/RHEL** 7, [8, 9](docs/operating_systems/centos.md#centos-8)
- **Fedora** 37, 38 - **Fedora** 37, 38
@ -160,11 +160,11 @@ Note: Upstart/SysV init based OS types are not supported.
## Supported Components ## Supported Components
- Core - Core
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.30.3 - [kubernetes](https://github.com/kubernetes/kubernetes) v1.29.5
- [etcd](https://github.com/etcd-io/etcd) v3.5.12 - [etcd](https://github.com/etcd-io/etcd) v3.5.12
- [docker](https://www.docker.com/) v26.1 - [docker](https://www.docker.com/) v26.1
- [containerd](https://containerd.io/) v1.7.20 - [containerd](https://containerd.io/) v1.7.16
- [cri-o](http://cri-o.io/) v1.30.3 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS) - [cri-o](http://cri-o.io/) v1.29.1 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin - Network Plugin
- [cni-plugins](https://github.com/containernetworking/plugins) v1.2.0 - [cni-plugins](https://github.com/containernetworking/plugins) v1.2.0
- [calico](https://github.com/projectcalico/calico) v3.27.3 - [calico](https://github.com/projectcalico/calico) v3.27.3
@ -173,10 +173,10 @@ Note: Upstart/SysV init based OS types are not supported.
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.11.5 - [kube-ovn](https://github.com/alauda/kube-ovn) v1.11.5
- [kube-router](https://github.com/cloudnativelabs/kube-router) v2.0.0 - [kube-router](https://github.com/cloudnativelabs/kube-router) v2.0.0
- [multus](https://github.com/k8snetworkplumbingwg/multus-cni) v3.8 - [multus](https://github.com/k8snetworkplumbingwg/multus-cni) v3.8
- [weave](https://github.com/rajch/weave) v2.8.7 - [weave](https://github.com/weaveworks/weave) v2.8.1
- [kube-vip](https://github.com/kube-vip/kube-vip) v0.8.0 - [kube-vip](https://github.com/kube-vip/kube-vip) v0.8.0
- Application - Application
- [cert-manager](https://github.com/jetstack/cert-manager) v1.14.7 - [cert-manager](https://github.com/jetstack/cert-manager) v1.13.2
- [coredns](https://github.com/coredns/coredns) v1.11.1 - [coredns](https://github.com/coredns/coredns) v1.11.1
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.10.1 - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.10.1
- [krew](https://github.com/kubernetes-sigs/krew) v0.4.4 - [krew](https://github.com/kubernetes-sigs/krew) v0.4.4
@ -189,7 +189,7 @@ Note: Upstart/SysV init based OS types are not supported.
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11 - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
- [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0 - [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
- [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0 - [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
- [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.30.0 - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.29.0
- [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.9.2 - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.9.2
- [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.24 - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.24
- [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0 - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0

View File

@ -16,7 +16,6 @@ The Kubespray Project is released on an as-needed basis. The process is as follo
1. The release issue is closed 1. The release issue is closed
1. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released` 1. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
1. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...` 1. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
1. Create/Update Issue for upgradeing kubernetes and [k8s-conformance](https://github.com/cncf/k8s-conformance)
## Major/minor releases and milestones ## Major/minor releases and milestones

3
Vagrantfile vendored
View File

@ -1,7 +1,7 @@
# -*- mode: ruby -*- # -*- mode: ruby -*-
# # vi: set ft=ruby : # # vi: set ft=ruby :
# For help on using kubespray with vagrant, check out docs/developers/vagrant.md # For help on using kubespray with vagrant, check out docs/vagrant.md
require 'fileutils' require 'fileutils'
@ -278,7 +278,6 @@ Vagrant.configure("2") do |config|
"local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}", "local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}",
"local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}", "local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}",
"ansible_ssh_user": SUPPORTED_OS[$os][:user], "ansible_ssh_user": SUPPORTED_OS[$os][:user],
"ansible_ssh_private_key_file": File.join(Dir.home, ".vagrant.d", "insecure_private_key"),
"unsafe_show_logs": "True" "unsafe_show_logs": "True"
} }

View File

@ -11,7 +11,6 @@ gathering = smart
fact_caching = jsonfile fact_caching = jsonfile
fact_caching_connection = /tmp fact_caching_connection = /tmp
fact_caching_timeout = 86400 fact_caching_timeout = 86400
timeout = 300
stdout_callback = default stdout_callback = default
display_skipped_hosts = no display_skipped_hosts = no
library = ./library library = ./library

View File

@ -12,4 +12,4 @@
template: template:
src: inventory.j2 src: inventory.j2
dest: "{{ playbook_dir }}/inventory" dest: "{{ playbook_dir }}/inventory"
mode: "0644" mode: 0644

View File

@ -22,10 +22,10 @@
template: template:
src: inventory.j2 src: inventory.j2
dest: "{{ playbook_dir }}/inventory" dest: "{{ playbook_dir }}/inventory"
mode: "0644" mode: 0644
- name: Generate Load Balancer variables - name: Generate Load Balancer variables
template: template:
src: loadbalancer_vars.j2 src: loadbalancer_vars.j2
dest: "{{ playbook_dir }}/loadbalancer_vars.yml" dest: "{{ playbook_dir }}/loadbalancer_vars.yml"
mode: "0644" mode: 0644

View File

@ -8,13 +8,13 @@
path: "{{ base_dir }}" path: "{{ base_dir }}"
state: directory state: directory
recurse: true recurse: true
mode: "0755" mode: 0755
- name: Store json files in base_dir - name: Store json files in base_dir
template: template:
src: "{{ item }}" src: "{{ item }}"
dest: "{{ base_dir }}/{{ item }}" dest: "{{ base_dir }}/{{ item }}"
mode: "0644" mode: 0644
with_items: with_items:
- network.json - network.json
- storage.json - storage.json

View File

@ -35,7 +35,7 @@
path-exclude=/usr/share/doc/* path-exclude=/usr/share/doc/*
path-include=/usr/share/doc/*/copyright path-include=/usr/share/doc/*/copyright
dest: /etc/dpkg/dpkg.cfg.d/01_nodoc dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
mode: "0644" mode: 0644
when: when:
- ansible_os_family == 'Debian' - ansible_os_family == 'Debian'
@ -64,7 +64,7 @@
copy: copy:
content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL" content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL"
dest: "/etc/sudoers.d/{{ distro_user }}" dest: "/etc/sudoers.d/{{ distro_user }}"
mode: "0640" mode: 0640
- name: "Add my pubkey to {{ distro_user }} user authorized keys" - name: "Add my pubkey to {{ distro_user }} user authorized keys"
ansible.posix.authorized_key: ansible.posix.authorized_key:

View File

@ -42,7 +42,7 @@
template: template:
src: inventory_builder.sh.j2 src: inventory_builder.sh.j2
dest: /tmp/kubespray.dind.inventory_builder.sh dest: /tmp/kubespray.dind.inventory_builder.sh
mode: "0755" mode: 0755
tags: tags:
- addresses - addresses

View File

@ -20,7 +20,7 @@
br-netfilter br-netfilter
owner: root owner: root
group: root group: root
mode: "0644" mode: 0644
when: br_netfilter is defined when: br_netfilter is defined

View File

@ -11,7 +11,7 @@
state: directory state: directory
owner: "{{ k8s_deployment_user }}" owner: "{{ k8s_deployment_user }}"
group: "{{ k8s_deployment_user }}" group: "{{ k8s_deployment_user }}"
mode: "0700" mode: 0700
- name: Configure sudo for deployment user - name: Configure sudo for deployment user
copy: copy:
@ -20,13 +20,13 @@
dest: "/etc/sudoers.d/55-k8s-deployment" dest: "/etc/sudoers.d/55-k8s-deployment"
owner: root owner: root
group: root group: root
mode: "0644" mode: 0644
- name: Write private SSH key - name: Write private SSH key
copy: copy:
src: "{{ k8s_deployment_user_pkey_path }}" src: "{{ k8s_deployment_user_pkey_path }}"
dest: "/home/{{ k8s_deployment_user }}/.ssh/id_rsa" dest: "/home/{{ k8s_deployment_user }}/.ssh/id_rsa"
mode: "0400" mode: 0400
owner: "{{ k8s_deployment_user }}" owner: "{{ k8s_deployment_user }}"
group: "{{ k8s_deployment_user }}" group: "{{ k8s_deployment_user }}"
when: k8s_deployment_user_pkey_path is defined when: k8s_deployment_user_pkey_path is defined
@ -41,7 +41,7 @@
- name: Fix ssh-pub-key permissions - name: Fix ssh-pub-key permissions
file: file:
path: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys" path: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
mode: "0600" mode: 0600
owner: "{{ k8s_deployment_user }}" owner: "{{ k8s_deployment_user }}"
group: "{{ k8s_deployment_user }}" group: "{{ k8s_deployment_user }}"
when: k8s_deployment_user_pkey_path is defined when: k8s_deployment_user_pkey_path is defined

View File

@ -14,7 +14,7 @@
file: file:
path: "{{ item }}" path: "{{ item }}"
state: directory state: directory
mode: "0755" mode: 0755
become: false become: false
loop: loop:
- "{{ playbook_dir }}/plugins/mitogen" - "{{ playbook_dir }}/plugins/mitogen"
@ -25,7 +25,7 @@
url: "{{ mitogen_url }}" url: "{{ mitogen_url }}"
dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz" dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
validate_certs: true validate_certs: true
mode: "0644" mode: 0644
- name: Extract archive - name: Extract archive
unarchive: unarchive:
@ -40,7 +40,7 @@
- name: Add strategy to ansible.cfg - name: Add strategy to ansible.cfg
community.general.ini_file: community.general.ini_file:
path: ansible.cfg path: ansible.cfg
mode: "0644" mode: 0644
section: "{{ item.section | d('defaults') }}" section: "{{ item.section | d('defaults') }}"
option: "{{ item.option }}" option: "{{ item.option }}"
value: "{{ item.value }}" value: "{{ item.value }}"

View File

@ -15,7 +15,7 @@
file: file:
path: "{{ item }}" path: "{{ item }}"
state: directory state: directory
mode: "0775" mode: 0775
with_items: with_items:
- "{{ gluster_mount_dir }}" - "{{ gluster_mount_dir }}"
when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined

View File

@ -49,7 +49,7 @@
file: file:
path: "{{ item }}" path: "{{ item }}"
state: directory state: directory
mode: "0775" mode: 0775
with_items: with_items:
- "{{ gluster_brick_dir }}" - "{{ gluster_brick_dir }}"
- "{{ gluster_mount_dir }}" - "{{ gluster_mount_dir }}"
@ -101,7 +101,7 @@
template: template:
dest: "{{ gluster_mount_dir }}/.test-file.txt" dest: "{{ gluster_mount_dir }}/.test-file.txt"
src: test-file.txt src: test-file.txt
mode: "0644" mode: 0644
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0] when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
- name: Unmount glusterfs - name: Unmount glusterfs

View File

@ -3,7 +3,7 @@
template: template:
src: "{{ item.file }}" src: "{{ item.file }}"
dest: "{{ kube_config_dir }}/{{ item.dest }}" dest: "{{ kube_config_dir }}/{{ item.dest }}"
mode: "0644" mode: 0644
with_items: with_items:
- { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json} - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml} - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}

View File

@ -4,7 +4,7 @@
template: template:
src: "heketi-bootstrap.json.j2" src: "heketi-bootstrap.json.j2"
dest: "{{ kube_config_dir }}/heketi-bootstrap.json" dest: "{{ kube_config_dir }}/heketi-bootstrap.json"
mode: "0640" mode: 0640
register: "rendering" register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Bootstrap" - name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
kube: kube:

View File

@ -10,7 +10,7 @@
template: template:
src: "topology.json.j2" src: "topology.json.j2"
dest: "{{ kube_config_dir }}/topology.json" dest: "{{ kube_config_dir }}/topology.json"
mode: "0644" mode: 0644
- name: "Copy topology configuration into container." - name: "Copy topology configuration into container."
changed_when: false changed_when: false
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json" command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"

View File

@ -3,7 +3,7 @@
template: template:
src: "glusterfs-daemonset.json.j2" src: "glusterfs-daemonset.json.j2"
dest: "{{ kube_config_dir }}/glusterfs-daemonset.json" dest: "{{ kube_config_dir }}/glusterfs-daemonset.json"
mode: "0644" mode: 0644
become: true become: true
register: "rendering" register: "rendering"
- name: "Kubernetes Apps | Install and configure GlusterFS daemonset" - name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
@ -33,7 +33,7 @@
template: template:
src: "heketi-service-account.json.j2" src: "heketi-service-account.json.j2"
dest: "{{ kube_config_dir }}/heketi-service-account.json" dest: "{{ kube_config_dir }}/heketi-service-account.json"
mode: "0644" mode: 0644
become: true become: true
register: "rendering" register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Service Account" - name: "Kubernetes Apps | Install and configure Heketi Service Account"

View File

@ -4,7 +4,7 @@
template: template:
src: "heketi-deployment.json.j2" src: "heketi-deployment.json.j2"
dest: "{{ kube_config_dir }}/heketi-deployment.json" dest: "{{ kube_config_dir }}/heketi-deployment.json"
mode: "0644" mode: 0644
register: "rendering" register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi" - name: "Kubernetes Apps | Install and configure Heketi"

View File

@ -28,7 +28,7 @@
template: template:
src: "heketi.json.j2" src: "heketi.json.j2"
dest: "{{ kube_config_dir }}/heketi.json" dest: "{{ kube_config_dir }}/heketi.json"
mode: "0644" mode: 0644
- name: "Deploy Heketi config secret" - name: "Deploy Heketi config secret"
when: "secret_state.stdout | length == 0" when: "secret_state.stdout | length == 0"

View File

@ -5,7 +5,7 @@
template: template:
src: "heketi-storage.json.j2" src: "heketi-storage.json.j2"
dest: "{{ kube_config_dir }}/heketi-storage.json" dest: "{{ kube_config_dir }}/heketi-storage.json"
mode: "0644" mode: 0644
register: "rendering" register: "rendering"
- name: "Kubernetes Apps | Install and configure Heketi Storage" - name: "Kubernetes Apps | Install and configure Heketi Storage"
kube: kube:

View File

@ -16,7 +16,7 @@
template: template:
src: "storageclass.yml.j2" src: "storageclass.yml.j2"
dest: "{{ kube_config_dir }}/storageclass.yml" dest: "{{ kube_config_dir }}/storageclass.yml"
mode: "0644" mode: 0644
register: "rendering" register: "rendering"
- name: "Kubernetes Apps | Install and configure Storace Class" - name: "Kubernetes Apps | Install and configure Storace Class"
kube: kube:

View File

@ -10,7 +10,7 @@
template: template:
src: "topology.json.j2" src: "topology.json.j2"
dest: "{{ kube_config_dir }}/topology.json" dest: "{{ kube_config_dir }}/topology.json"
mode: "0644" mode: 0644
- name: "Copy topology configuration into container." # noqa no-handler - name: "Copy topology configuration into container." # noqa no-handler
when: "rendering.changed" when: "rendering.changed"
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json" command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"

View File

@ -16,7 +16,7 @@
template: template:
src: ./contrib/offline/temp/{{ item }}.list.template src: ./contrib/offline/temp/{{ item }}.list.template
dest: ./contrib/offline/temp/{{ item }}.list dest: ./contrib/offline/temp/{{ item }}.list
mode: "0644" mode: 0644
with_items: with_items:
- files - files
- images - images

View File

@ -7,7 +7,7 @@
service_facts: service_facts:
- name: Disable service firewalld - name: Disable service firewalld
systemd_service: systemd:
name: firewalld name: firewalld
state: stopped state: stopped
enabled: no enabled: no
@ -15,7 +15,7 @@
"'firewalld.service' in services and services['firewalld.service'].status != 'not-found'" "'firewalld.service' in services and services['firewalld.service'].status != 'not-found'"
- name: Disable service ufw - name: Disable service ufw
systemd_service: systemd:
name: ufw name: ufw
state: stopped state: stopped
enabled: no enabled: no

View File

@ -12,8 +12,8 @@ ${list_master}
${list_worker} ${list_worker}
[k8s_cluster:children] [k8s_cluster:children]
kube_control_plane kube-master
kube_node kube-node
[k8s_cluster:vars] [k8s_cluster:vars]
network_id=${network_id} network_id=${network_id}

View File

@ -72,7 +72,6 @@ The setup looks like following
```bash ```bash
./generate-inventory.sh > sample-inventory/inventory.ini ./generate-inventory.sh > sample-inventory/inventory.ini
```
* Export Variables: * Export Variables:

View File

@ -1,11 +1,5 @@
# See: https://developers.upcloud.com/1.3/5-zones/ # See: https://developers.upcloud.com/1.3/5-zones/
zone = "fi-hel1" zone = "fi-hel1"
private_cloud = false
# Only used if private_cloud = true, public zone equivalent
# For example use finnish public zone for finnish private zone
public_zone = "fi-hel2"
username = "ubuntu" username = "ubuntu"
# Prefix to use for all resources to separate them from other resources # Prefix to use for all resources to separate them from other resources

View File

@ -11,10 +11,8 @@ provider "upcloud" {
module "kubernetes" { module "kubernetes" {
source = "./modules/kubernetes-cluster" source = "./modules/kubernetes-cluster"
prefix = var.prefix prefix = var.prefix
zone = var.zone zone = var.zone
private_cloud = var.private_cloud
public_zone = var.public_zone
template_name = var.template_name template_name = var.template_name
username = var.username username = var.username

View File

@ -54,12 +54,11 @@ resource "upcloud_server" "master" {
if machine.node_type == "master" if machine.node_type == "master"
} }
hostname = "${local.resource-prefix}${each.key}" hostname = "${local.resource-prefix}${each.key}"
plan = each.value.plan plan = each.value.plan
cpu = each.value.plan == null ? null : each.value.cpu cpu = each.value.plan == null ? each.value.cpu : null
mem = each.value.plan == null ? null : each.value.mem mem = each.value.plan == null ? each.value.mem : null
zone = var.zone zone = var.zone
server_group = each.value.server_group == null ? null : upcloud_server_group.server_groups[each.value.server_group].id
template { template {
storage = var.template_name storage = var.template_name
@ -112,13 +111,11 @@ resource "upcloud_server" "worker" {
if machine.node_type == "worker" if machine.node_type == "worker"
} }
hostname = "${local.resource-prefix}${each.key}" hostname = "${local.resource-prefix}${each.key}"
plan = each.value.plan plan = each.value.plan
cpu = each.value.plan == null ? null : each.value.cpu cpu = each.value.plan == null ? each.value.cpu : null
mem = each.value.plan == null ? null : each.value.mem mem = each.value.plan == null ? each.value.mem : null
zone = var.zone zone = var.zone
server_group = each.value.server_group == null ? null : upcloud_server_group.server_groups[each.value.server_group].id
template { template {
storage = var.template_name storage = var.template_name
@ -515,18 +512,8 @@ resource "upcloud_loadbalancer" "lb" {
configured_status = "started" configured_status = "started"
name = "${local.resource-prefix}lb" name = "${local.resource-prefix}lb"
plan = var.loadbalancer_plan plan = var.loadbalancer_plan
zone = var.private_cloud ? var.public_zone : var.zone zone = var.zone
networks { network = upcloud_network.private.id
name = "Private-Net"
type = "private"
family = "IPv4"
network = upcloud_network.private.id
}
networks {
name = "Public-Net"
type = "public"
family = "IPv4"
}
} }
resource "upcloud_loadbalancer_backend" "lb_backend" { resource "upcloud_loadbalancer_backend" "lb_backend" {
@ -547,9 +534,6 @@ resource "upcloud_loadbalancer_frontend" "lb_frontend" {
mode = "tcp" mode = "tcp"
port = each.value.port port = each.value.port
default_backend_name = upcloud_loadbalancer_backend.lb_backend[each.key].name default_backend_name = upcloud_loadbalancer_backend.lb_backend[each.key].name
networks {
name = "Public-Net"
}
} }
resource "upcloud_loadbalancer_static_backend_member" "lb_backend_member" { resource "upcloud_loadbalancer_static_backend_member" "lb_backend_member" {
@ -573,9 +557,5 @@ resource "upcloud_server_group" "server_groups" {
title = each.key title = each.key
anti_affinity_policy = each.value.anti_affinity_policy anti_affinity_policy = each.value.anti_affinity_policy
labels = {} labels = {}
# Managed upstream via upcloud_server resource members = [for server in each.value.servers : merge(upcloud_server.master, upcloud_server.worker)[server].id]
members = []
lifecycle {
ignore_changes = [members]
}
} }

View File

@ -6,14 +6,6 @@ variable "zone" {
type = string type = string
} }
variable "private_cloud" {
type = bool
}
variable "public_zone" {
type = string
}
variable "template_name" {} variable "template_name" {}
variable "username" {} variable "username" {}
@ -28,7 +20,6 @@ variable "machines" {
cpu = string cpu = string
mem = string mem = string
disk_size = number disk_size = number
server_group : string
additional_disks = map(object({ additional_disks = map(object({
size = number size = number
tier = string tier = string
@ -113,5 +104,6 @@ variable "server_groups" {
type = map(object({ type = map(object({
anti_affinity_policy = string anti_affinity_policy = string
servers = list(string)
})) }))
} }

View File

@ -3,7 +3,7 @@ terraform {
required_providers { required_providers {
upcloud = { upcloud = {
source = "UpCloudLtd/upcloud" source = "UpCloudLtd/upcloud"
version = "~>5.6.0" version = "~>2.12.0"
} }
} }
required_version = ">= 0.13" required_version = ">= 0.13"

View File

@ -9,15 +9,6 @@ variable "zone" {
description = "The zone where to run the cluster" description = "The zone where to run the cluster"
} }
variable "private_cloud" {
description = "Whether the environment is in the private cloud region"
default = false
}
variable "public_zone" {
description = "The public zone equivalent if the cluster is running in a private cloud zone"
}
variable "template_name" { variable "template_name" {
description = "Block describing the preconfigured operating system" description = "Block describing the preconfigured operating system"
} }
@ -41,7 +32,6 @@ variable "machines" {
cpu = string cpu = string
mem = string mem = string
disk_size = number disk_size = number
server_group : string
additional_disks = map(object({ additional_disks = map(object({
size = number size = number
tier = string tier = string
@ -152,6 +142,7 @@ variable "server_groups" {
type = map(object({ type = map(object({
anti_affinity_policy = string anti_affinity_policy = string
servers = list(string)
})) }))
default = {} default = {}

View File

@ -3,7 +3,7 @@ terraform {
required_providers { required_providers {
upcloud = { upcloud = {
source = "UpCloudLtd/upcloud" source = "UpCloudLtd/upcloud"
version = "~>5.6.0" version = "~>2.12.0"
} }
} }
required_version = ">= 0.13" required_version = ">= 0.13"

View File

@ -132,7 +132,7 @@ Wireguard option is only available in Cilium 1.10.0 and newer.
### IPsec Encryption ### IPsec Encryption
For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/stable/security/network/encryption-ipsec/) For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/stable/gettingstarted/encryption-ipsec/)
To enable IPsec encryption, you just need to set three variables. To enable IPsec encryption, you just need to set three variables.
@ -157,7 +157,7 @@ echo "cilium_ipsec_key: "$(echo -n "3 rfc4106(gcm(aes)) $(echo $(dd if=/dev/uran
### Wireguard Encryption ### Wireguard Encryption
For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/stable/security/network/encryption-wireguard/) For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/stable/gettingstarted/encryption-wireguard/)
To enable Wireguard encryption, you just need to set two variables. To enable Wireguard encryption, you just need to set two variables.

View File

@ -231,7 +231,6 @@ The following tags are defined in playbooks:
| services | Remove services (etcd, kubelet etc...) when resetting | | services | Remove services (etcd, kubelet etc...) when resetting |
| snapshot | Enabling csi snapshot | | snapshot | Enabling csi snapshot |
| snapshot-controller | Configuring csi snapshot controller | | snapshot-controller | Configuring csi snapshot controller |
| system-packages | Install packages using OS package manager |
| upgrade | Upgrading, f.e. container images/binaries | | upgrade | Upgrading, f.e. container images/binaries |
| upload | Distributing images/binaries across hosts | | upload | Distributing images/binaries across hosts |
| vsphere-csi-driver | Configuring csi driver: vsphere | | vsphere-csi-driver | Configuring csi driver: vsphere |

View File

@ -216,8 +216,6 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
The percent is calculated by dividing this field value by 100, so the field value must be between 0 and 100, inclusive. The percent is calculated by dividing this field value by 100, so the field value must be between 0 and 100, inclusive.
When specified, the value must be less than imageGCHighThresholdPercent. Default: 80 When specified, the value must be less than imageGCHighThresholdPercent. Default: 80
* *kubelet_max_parallel_image_pulls* - Sets the maximum number of image pulls in parallel. The value is `1` by default which means the default is serial image pulling, set it to a integer great than `1` to enable image pulling in parallel.
* *kubelet_make_iptables_util_chains* - If `true`, causes the kubelet ensures a set of `iptables` rules are present on host. * *kubelet_make_iptables_util_chains* - If `true`, causes the kubelet ensures a set of `iptables` rules are present on host.
* *kubelet_cpu_manager_policy* - If set to `static`, allows pods with certain resource characteristics to be granted increased CPU affinity and exclusivity on the node. And it should be set with `kube_reserved` or `system-reserved`, enable this with the following guide:[Control CPU Management Policies on the Node](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/) * *kubelet_cpu_manager_policy* - If set to `static`, allows pods with certain resource characteristics to be granted increased CPU affinity and exclusivity on the node. And it should be set with `kube_reserved` or `system-reserved`, enable this with the following guide:[Control CPU Management Policies on the Node](https://kubernetes.io/docs/tasks/administer-cluster/cpu-management-policies/)
@ -245,10 +243,6 @@ kubelet_cpu_manager_policy_options:
By default the `kubelet_secure_addresses` is set with the `10.0.0.110` the ansible control host uses `eth0` to connect to the machine. In case you want to use `eth1` as the outgoing interface on which `kube-apiserver` connects to the `kubelet`s, you should override the variable in this way: `kubelet_secure_addresses: "192.168.1.110"`. By default the `kubelet_secure_addresses` is set with the `10.0.0.110` the ansible control host uses `eth0` to connect to the machine. In case you want to use `eth1` as the outgoing interface on which `kube-apiserver` connects to the `kubelet`s, you should override the variable in this way: `kubelet_secure_addresses: "192.168.1.110"`.
* *kubelet_systemd_wants_dependencies* - List of kubelet service dependencies, other than container runtime.
If you use nfs dynamically mounted volumes, sometimes rpc-statd does not start within the kubelet. You can fix it with this parameter : `kubelet_systemd_wants_dependencies: ["rpc-statd.service"]` This will add `Wants=rpc-statd.service` in `[Unit]` section of /etc/systemd/system/kubelet.service
* *node_labels* - Labels applied to nodes via `kubectl label node`. * *node_labels* - Labels applied to nodes via `kubectl label node`.
For example, labels can be set in the inventory as variables or more widely in group_vars. For example, labels can be set in the inventory as variables or more widely in group_vars.
*node_labels* can only be defined as a dict: *node_labels* can only be defined as a dict:

View File

@ -1,3 +1,4 @@
# OpenStack # OpenStack
## Known compatible public clouds ## Known compatible public clouds

View File

@ -5,8 +5,8 @@
1. build: build a docker image to be used in the pipeline 1. build: build a docker image to be used in the pipeline
2. unit-tests: fast jobs for fast feedback (linting, etc...) 2. unit-tests: fast jobs for fast feedback (linting, etc...)
3. deploy-part1: small number of jobs to test if the PR works with default settings 3. deploy-part1: small number of jobs to test if the PR works with default settings
4. deploy-extended: slow jobs testing different platforms, OS, settings, CNI, etc... 4. deploy-part2: slow jobs testing different platforms, OS, settings, CNI, etc...
5. deploy-extended: very slow jobs (upgrades, etc...) 5. deploy-part3: very slow jobs (upgrades, etc...)
## Runners ## Runners

View File

@ -9,7 +9,8 @@ To generate this Matrix run `./tests/scripts/md-table/main.py`
almalinux8 | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | almalinux8 | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | centos7 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: |
debian11 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | debian10 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
debian11 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
debian12 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | debian12 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
fedora37 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | fedora37 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
fedora38 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | fedora38 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
@ -27,6 +28,7 @@ ubuntu24 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
almalinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | almalinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | centos7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian11 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | debian11 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian12 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | debian12 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora37 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | fedora37 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
@ -45,6 +47,7 @@ ubuntu24 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
almalinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | almalinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian12 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | debian12 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora37 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | fedora37 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |

View File

@ -1,6 +1,6 @@
# cgroups # cgroups
To avoid resource contention between containers and host daemons in Kubernetes, the kubelet components can use cgroups to limit resource usage. To avoid the rivals for resources between containers or the impact on the host in Kubernetes, the kubelet components will rely on cgroups to limit the containers resources usage.
## Enforcing Node Allocatable ## Enforcing Node Allocatable
@ -20,9 +20,8 @@ Here is an example:
```yaml ```yaml
kubelet_enforce_node_allocatable: "pods,kube-reserved,system-reserved" kubelet_enforce_node_allocatable: "pods,kube-reserved,system-reserved"
# Set kube_reserved to true to run kubelet and container-engine daemons in a dedicated cgroup. # Reserve this space for kube resources
# This is required if you want to enforce limits on the resource usage of these daemons. # Set to true to reserve resources for kube daemons
# It is not required if you just want to make resource reservations (kube_memory_reserved, kube_cpu_reserved, etc.)
kube_reserved: true kube_reserved: true
kube_reserved_cgroups_for_service_slice: kube.slice kube_reserved_cgroups_for_service_slice: kube.slice
kube_reserved_cgroups: "/{{ kube_reserved_cgroups_for_service_slice }}" kube_reserved_cgroups: "/{{ kube_reserved_cgroups_for_service_slice }}"

View File

@ -30,12 +30,12 @@ loadbalancer. If you wish to control the name of the loadbalancer container,
you can set the variable `loadbalancer_apiserver_pod_name`. you can set the variable `loadbalancer_apiserver_pod_name`.
If you choose to NOT use the local internal loadbalancer, you will need to If you choose to NOT use the local internal loadbalancer, you will need to
use the [kube-vip](/docs/ingress/kube-vip.md) ansible role or configure your own loadbalancer to achieve HA. By default, it only configures a non-HA endpoint, which points to the use the [kube-vip](kube-vip.md) ansible role or configure your own loadbalancer to achieve HA. By default, it only configures a non-HA endpoint, which points to the
`access_ip` or IP address of the first server node in the `kube_control_plane` group. `access_ip` or IP address of the first server node in the `kube_control_plane` group.
It can also configure clients to use endpoints for a given loadbalancer type. It can also configure clients to use endpoints for a given loadbalancer type.
The following diagram shows how traffic to the apiserver is directed. The following diagram shows how traffic to the apiserver is directed.
![Image](/docs/figures/loadbalancer_localhost.png?raw=true) ![Image](figures/loadbalancer_localhost.png?raw=true)
A user may opt to use an external loadbalancer (LB) instead. An external LB A user may opt to use an external loadbalancer (LB) instead. An external LB
provides access for external clients, while the internal LB accepts client provides access for external clients, while the internal LB accepts client

View File

@ -103,9 +103,7 @@ If you use the settings like the one above, you'll need to define in your invent
can store them anywhere as long as it's accessible by kubespray. It's recommended to use `*_version` in the path so can store them anywhere as long as it's accessible by kubespray. It's recommended to use `*_version` in the path so
that you don't need to modify this setting everytime kubespray upgrades one of these components. that you don't need to modify this setting everytime kubespray upgrades one of these components.
* `yum_repo`/`debian_repo`/`ubuntu_repo`: OS package repository depending on your OS, should point to your internal * `yum_repo`/`debian_repo`/`ubuntu_repo`: OS package repository depending on your OS, should point to your internal
repository. Adjust the path accordingly. Used only for Docker/Containerd packages (if needed); other packages might repository. Adjust the path accordingly.
be installed from other repositories. You might disable installing packages from other repositories by skipping
the `system-packages` tag
## Install Kubespray Python Packages ## Install Kubespray Python Packages

View File

@ -1,3 +1,4 @@
# Recovering the control plane # Recovering the control plane
To recover from broken nodes in the control plane use the "recover\-control\-plane.yml" playbook. To recover from broken nodes in the control plane use the "recover\-control\-plane.yml" playbook.
@ -7,6 +8,7 @@ Examples of what broken means in this context:
* One or more bare metal node(s) suffer from unrecoverable hardware failure * One or more bare metal node(s) suffer from unrecoverable hardware failure
* One or more node(s) fail during patching or upgrading * One or more node(s) fail during patching or upgrading
* Etcd database corruption * Etcd database corruption
* Other node related failures leaving your control plane degraded or nonfunctional * Other node related failures leaving your control plane degraded or nonfunctional
__Note that you need at least one functional node to be able to recover using this method.__ __Note that you need at least one functional node to be able to recover using this method.__

View File

@ -9,16 +9,42 @@ authors:
tags: tags:
- infrastructure - infrastructure
repository: https://github.com/kubernetes-sigs/kubespray repository: https://github.com/kubernetes-sigs/kubespray
issues: https://github.com/kubernetes-sigs/kubespray/issues
documentation: https://kubespray.io
license_file: LICENSE license_file: LICENSE
dependencies: dependencies:
ansible.utils: '>=2.5.0' ansible.utils: '>=2.5.0'
community.general: '>=3.0.0' community.general: '>=3.0.0'
ansible.netcommon: '>=5.3.0' build_ignore:
ansible.posix: '>=1.5.4' - .github
community.docker: '>=3.11.0' - '*.tar.gz'
kubernetes.core: '>=2.4.2' - extra_playbooks
manifest: - inventory
directives: - scripts
- recursive-exclude tests ** - test-infra
- .ansible-lint
- .editorconfig
- .gitignore
- .gitlab-ci
- .gitlab-ci.yml
- .gitmodules
- .markdownlint.yaml
- .nojekyll
- .pre-commit-config.yaml
- .yamllint
- Dockerfile
- FILES.json
- MANIFEST.json
- Makefile
- Vagrantfile
- _config.yml
- ansible.cfg
- requirements*txt
- setup.cfg
- setup.py
- index.html
- reset.yml
- cluster.yml
- scale.yml
- recover-control-plane.yml
- remove-node.yml
- upgrade-cluster.yml
- library

View File

@ -100,8 +100,6 @@ rbd_provisioner_enabled: false
ingress_nginx_enabled: false ingress_nginx_enabled: false
# ingress_nginx_host_network: false # ingress_nginx_host_network: false
# ingress_nginx_service_type: LoadBalancer # ingress_nginx_service_type: LoadBalancer
# ingress_nginx_service_nodeport_http: 30080
# ingress_nginx_service_nodeport_https: 30081
ingress_publish_status_address: "" ingress_publish_status_address: ""
# ingress_nginx_nodeselector: # ingress_nginx_nodeselector:
# kubernetes.io/os: "linux" # kubernetes.io/os: "linux"

View File

@ -17,7 +17,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
kube_api_anonymous_auth: true kube_api_anonymous_auth: true
## Change this to use another Kubernetes version, e.g. a current beta release ## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.30.3 kube_version: v1.29.5
# Where the binaries will be downloaded. # Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G) # Note: ensure that you've enough disk space (about 1G)
@ -262,7 +262,7 @@ default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
# kubelet_runtime_cgroups_cgroupfs: "/system.slice/{{ container_manager }}.service" # kubelet_runtime_cgroups_cgroupfs: "/system.slice/{{ container_manager }}.service"
# kubelet_kubelet_cgroups_cgroupfs: "/system.slice/kubelet.service" # kubelet_kubelet_cgroups_cgroupfs: "/system.slice/kubelet.service"
# Whether to run kubelet and container-engine daemons in a dedicated cgroup. # Optionally reserve this space for kube daemons.
# kube_reserved: false # kube_reserved: false
## Uncomment to override default values ## Uncomment to override default values
## The following two items need to be set when kube_reserved is true ## The following two items need to be set when kube_reserved is true

View File

@ -163,13 +163,6 @@ cilium_l2announcements: false
### Enable auto generate certs if cilium_hubble_install: true ### Enable auto generate certs if cilium_hubble_install: true
# cilium_hubble_tls_generate: false # cilium_hubble_tls_generate: false
### Tune cilium_hubble_event_buffer_capacity & cilium_hubble_event_queue_size values to avoid dropping events when hubble is under heavy load
### Capacity of Hubble events buffer. The provided value must be one less than an integer power of two and no larger than 65535
### (ie: 1, 3, ..., 2047, 4095, ..., 65535) (default 4095)
# cilium_hubble_event_buffer_capacity: 4095
### Buffer size of the channel to receive monitor events.
# cilium_hubble_event_queue_size: 50
# IP address management mode for v1.9+. # IP address management mode for v1.9+.
# https://docs.cilium.io/en/v1.9/concepts/networking/ipam/ # https://docs.cilium.io/en/v1.9/concepts/networking/ipam/
# cilium_ipam_mode: kubernetes # cilium_ipam_mode: kubernetes

View File

@ -4,7 +4,7 @@ FROM ubuntu:jammy-20230308
# Pip needs this as well at the moment to install ansible # Pip needs this as well at the moment to install ansible
# (and potentially other packages) # (and potentially other packages)
# See: https://github.com/pypa/pip/issues/10219 # See: https://github.com/pypa/pip/issues/10219
ENV VAGRANT_VERSION=2.4.1 \ ENV VAGRANT_VERSION=2.3.7 \
VAGRANT_DEFAULT_PROVIDER=libvirt \ VAGRANT_DEFAULT_PROVIDER=libvirt \
VAGRANT_ANSIBLE_TAGS=facts \ VAGRANT_ANSIBLE_TAGS=facts \
LANG=C.UTF-8 \ LANG=C.UTF-8 \
@ -30,9 +30,6 @@ RUN apt update -q \
software-properties-common \ software-properties-common \
unzip \ unzip \
libvirt-clients \ libvirt-clients \
qemu-utils \
qemu-kvm \
dnsmasq \
&& curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \ && curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
&& add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" \ && add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" \
&& apt update -q \ && apt update -q \
@ -40,15 +37,13 @@ RUN apt update -q \
&& apt autoremove -yqq --purge && apt clean && rm -rf /var/lib/apt/lists/* /var/log/* && apt autoremove -yqq --purge && apt clean && rm -rf /var/lib/apt/lists/* /var/log/*
WORKDIR /kubespray WORKDIR /kubespray
ADD ./requirements.txt /kubespray/requirements.txt
ADD ./tests/requirements.txt /kubespray/tests/requirements.txt
ADD ./roles/kubespray-defaults/defaults/main/main.yml /kubespray/roles/kubespray-defaults/defaults/main/main.yml
RUN --mount=type=bind,target=./requirements.txt,src=./requirements.txt \
RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \ --mount=type=bind,target=./tests/requirements.txt,src=./tests/requirements.txt \
--mount=type=bind,target=./roles/kubespray-defaults/defaults/main/main.yml,src=./roles/kubespray-defaults/defaults/main/main.yml \
update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
&& pip install --no-compile --no-cache-dir pip -U \ && pip install --no-compile --no-cache-dir pip -U \
&& pip install --no-compile --no-cache-dir -r tests/requirements.txt \ && pip install --no-compile --no-cache-dir -r tests/requirements.txt \
&& pip install --no-compile --no-cache-dir -r requirements.txt \
&& KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main/main.yml) \ && KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main/main.yml) \
&& curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \ && curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl -o /usr/local/bin/kubectl \
&& echo $(curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \ && echo $(curl -L https://dl.k8s.io/release/$KUBE_VERSION/bin/linux/$(dpkg --print-architecture)/kubectl.sha256) /usr/local/bin/kubectl | sha256sum --check \

View File

@ -1,7 +1,10 @@
ansible==9.8.0 ansible==9.5.1
# Needed for jinja2 json_query templating cryptography==42.0.7
jinja2==3.1.4
jmespath==1.0.1 jmespath==1.0.1
# Needed for ansible.utils.validate module MarkupSafe==2.1.5
jsonschema==4.23.0 netaddr==1.2.1
# Needed for ansible.utils.ipaddr pbr==6.0.0
netaddr==1.3.0 ruamel.yaml==0.18.6
ruamel.yaml.clib==0.2.8
jsonschema==4.22.0

View File

@ -12,4 +12,4 @@
dest: "{{ ssh_bastion_confing__name }}" dest: "{{ ssh_bastion_confing__name }}"
owner: "{{ ansible_user }}" owner: "{{ ansible_user }}"
group: "{{ ansible_user }}" group: "{{ ansible_user }}"
mode: "0644" mode: 0644

View File

@ -19,4 +19,4 @@
template: template:
src: "{{ ssh_bastion_confing__name }}.j2" src: "{{ ssh_bastion_confing__name }}.j2"
dest: "{{ playbook_dir }}/{{ ssh_bastion_confing__name }}" dest: "{{ playbook_dir }}/{{ ssh_bastion_confing__name }}"
mode: "0640" mode: 0640

View File

@ -11,10 +11,6 @@ coreos_locksmithd_disable: false
# Install public repo on Oracle Linux # Install public repo on Oracle Linux
use_oracle_public_repo: true use_oracle_public_repo: true
## Ubuntu specific variables
# Disable unattended-upgrades for Linux kernel and all packages start with linux- on Ubuntu
ubuntu_kernel_unattended_upgrades_disabled: false
fedora_coreos_packages: fedora_coreos_packages:
- python - python
- python3-libselinux - python3-libselinux

View File

@ -12,7 +12,7 @@
value: "{{ http_proxy | default(omit) }}" value: "{{ http_proxy | default(omit) }}"
state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}" state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}"
no_extra_spaces: true no_extra_spaces: true
mode: "0644" mode: 0644
become: true become: true
when: not skip_http_proxy_on_os_packages when: not skip_http_proxy_on_os_packages
@ -21,7 +21,7 @@
get_url: get_url:
url: https://yum.oracle.com/public-yum-ol7.repo url: https://yum.oracle.com/public-yum-ol7.repo
dest: /etc/yum.repos.d/public-yum-ol7.repo dest: /etc/yum.repos.d/public-yum-ol7.repo
mode: "0644" mode: 0644
when: when:
- use_oracle_public_repo | default(true) - use_oracle_public_repo | default(true)
- '''ID="ol"'' in os_release.stdout_lines' - '''ID="ol"'' in os_release.stdout_lines'
@ -34,7 +34,7 @@
section: "{{ item }}" section: "{{ item }}"
option: enabled option: enabled
value: "1" value: "1"
mode: "0644" mode: 0644
with_items: with_items:
- ol7_latest - ol7_latest
- ol7_addons - ol7_addons
@ -59,7 +59,7 @@
section: "ol{{ ansible_distribution_major_version }}_addons" section: "ol{{ ansible_distribution_major_version }}_addons"
option: "{{ item.option }}" option: "{{ item.option }}"
value: "{{ item.value }}" value: "{{ item.value }}"
mode: "0644" mode: 0644
with_items: with_items:
- { option: "name", value: "ol{{ ansible_distribution_major_version }}_addons" } - { option: "name", value: "ol{{ ansible_distribution_major_version }}_addons" }
- { option: "enabled", value: "1" } - { option: "enabled", value: "1" }
@ -75,45 +75,18 @@
section: "extras" section: "extras"
option: "{{ item.option }}" option: "{{ item.option }}"
value: "{{ item.value }}" value: "{{ item.value }}"
mode: "0644" mode: 0644
with_items: with_items:
- { option: "name", value: "CentOS-{{ ansible_distribution_major_version }} - Extras" } - { option: "name", value: "CentOS-{{ ansible_distribution_major_version }} - Extras" }
- { option: "enabled", value: "1" } - { option: "enabled", value: "1" }
- { option: "gpgcheck", value: "0" } - { option: "gpgcheck", value: "0" }
- { option: "baseurl", value: "http://vault.centos.org/{{ 'altarch' if (ansible_distribution_major_version | int) <= 7 and ansible_architecture == 'aarch64' else 'centos' }}/{{ ansible_distribution_major_version }}/extras/$basearch/{% if ansible_distribution_major_version | int > 7 %}os/{% endif %}" } - { option: "baseurl", value: "http://mirror.centos.org/{{ 'altarch' if (ansible_distribution_major_version | int) <= 7 and ansible_architecture == 'aarch64' else 'centos' }}/{{ ansible_distribution_major_version }}/extras/$basearch/{% if ansible_distribution_major_version | int > 7 %}os/{% endif %}" }
when: when:
- use_oracle_public_repo | default(true) - use_oracle_public_repo | default(true)
- '''ID="ol"'' in os_release.stdout_lines' - '''ID="ol"'' in os_release.stdout_lines'
- (ansible_distribution_version | float) >= 7.6 - (ansible_distribution_version | float) >= 7.6
- (ansible_distribution_version | float) < 9 - (ansible_distribution_version | float) < 9
# CentOS 7 EOL at July 1, 2024.
- name: Check CentOS-Base.repo exists for CentOS 7
stat:
path: /etc/yum.repos.d/CentOS-Base.repo
register: centos_base_repo_stat
when:
- ansible_distribution_major_version == "7"
# CentOS 7 EOL at July 1, 2024.
- name: Update CentOS 7 CentOS-Base.repo
when:
- ansible_distribution_major_version == "7"
- centos_base_repo_stat.stat.exists
become: true
block:
- name: Disable CentOS 7 mirrorlist in CentOS-Base.repo
replace:
path: "{{ centos_base_repo_stat.stat.path }}"
regexp: '^mirrorlist='
replace: '#mirrorlist='
- name: Update CentOS 7 baseurl in CentOS-Base.repo
replace:
path: "{{ centos_base_repo_stat.stat.path }}"
regexp: '^#baseurl=http:\/\/mirror.centos.org'
        replace: 'baseurl=http://vault.centos.org'
# CentOS ships with python installed # CentOS ships with python installed
- name: Check presence of fastestmirror.conf - name: Check presence of fastestmirror.conf

View File

@ -7,7 +7,7 @@
state: present state: present
- name: Make sure docker service is enabled - name: Make sure docker service is enabled
systemd_service: systemd:
name: docker name: docker
masked: false masked: false
enabled: true enabled: true

View File

@ -62,14 +62,3 @@
- '"changed its" in bootstrap_update_apt_result.stdout' - '"changed its" in bootstrap_update_apt_result.stdout'
- '"value from" in bootstrap_update_apt_result.stdout' - '"value from" in bootstrap_update_apt_result.stdout'
ignore_errors: true ignore_errors: true
- name: Disable kernel unattended-upgrades
lineinfile:
path: /etc/apt/apt.conf.d/50unattended-upgrades
insertafter: "Unattended-Upgrade::Package-Blacklist"
line: '"linux-";'
state: present
become: true
when:
- os_release_dict['ID'] == 'ubuntu'
- ubuntu_kernel_unattended_upgrades_disabled

View File

@ -17,7 +17,7 @@
value: "{{ http_proxy | default(omit) }}" value: "{{ http_proxy | default(omit) }}"
state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}" state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}"
no_extra_spaces: true no_extra_spaces: true
mode: "0644" mode: 0644
become: true become: true
when: not skip_http_proxy_on_os_packages when: not skip_http_proxy_on_os_packages

View File

@ -26,7 +26,7 @@
ansible_interpreter_python_fallback: "{{ ansible_interpreter_python_fallback + [ '/opt/bin/python' ] }}" ansible_interpreter_python_fallback: "{{ ansible_interpreter_python_fallback + [ '/opt/bin/python' ] }}"
- name: Disable auto-upgrade - name: Disable auto-upgrade
systemd_service: systemd:
name: locksmithd.service name: locksmithd.service
masked: true masked: true
state: stopped state: stopped

View File

@ -36,7 +36,7 @@
file: file:
path: "{{ ansible_remote_tmp | default('~/.ansible/tmp') }}" path: "{{ ansible_remote_tmp | default('~/.ansible/tmp') }}"
state: directory state: directory
mode: "0700" mode: 0700
- name: Gather facts - name: Gather facts
setup: setup:
@ -61,4 +61,4 @@
state: directory state: directory
owner: root owner: root
group: root group: root
mode: "0755" mode: 0755

View File

@ -12,7 +12,7 @@
value: "{{ http_proxy | default(omit) }}" value: "{{ http_proxy | default(omit) }}"
state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}" state: "{{ http_proxy | default(False) | ternary('present', 'absent') }}"
no_extra_spaces: true no_extra_spaces: true
mode: "0644" mode: 0644
become: true become: true
when: not skip_http_proxy_on_os_packages when: not skip_http_proxy_on_os_packages

View File

@ -1,6 +1,6 @@
--- ---
- name: Containerd | restart containerd - name: Containerd | restart containerd
systemd_service: systemd:
name: containerd name: containerd
state: restarted state: restarted
enabled: yes enabled: yes

View File

@ -35,7 +35,7 @@
unarchive: unarchive:
src: "{{ downloads.containerd.dest }}" src: "{{ downloads.containerd.dest }}"
dest: "{{ containerd_bin_dir }}" dest: "{{ containerd_bin_dir }}"
mode: "0755" mode: 0755
remote_src: yes remote_src: yes
extra_opts: extra_opts:
- --strip-components=1 - --strip-components=1
@ -60,7 +60,7 @@
template: template:
src: containerd.service.j2 src: containerd.service.j2
dest: /etc/systemd/system/containerd.service dest: /etc/systemd/system/containerd.service
mode: "0644" mode: 0644
validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:containerd.service'" validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:containerd.service'"
# FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release) # FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release)
# Remove once we drop support for systemd < 250 # Remove once we drop support for systemd < 250
@ -70,7 +70,7 @@
file: file:
dest: "{{ item }}" dest: "{{ item }}"
state: directory state: directory
mode: "0755" mode: 0755
owner: root owner: root
group: root group: root
with_items: with_items:
@ -83,7 +83,7 @@
template: template:
src: http-proxy.conf.j2 src: http-proxy.conf.j2
dest: "{{ containerd_systemd_dir }}/http-proxy.conf" dest: "{{ containerd_systemd_dir }}/http-proxy.conf"
mode: "0644" mode: 0644
notify: Restart containerd notify: Restart containerd
when: http_proxy is defined or https_proxy is defined when: http_proxy is defined or https_proxy is defined
@ -102,7 +102,7 @@
content: "{{ item.value }}" content: "{{ item.value }}"
dest: "{{ containerd_cfg_dir }}/{{ item.key }}" dest: "{{ containerd_cfg_dir }}/{{ item.key }}"
owner: "root" owner: "root"
mode: "0644" mode: 0644
with_dict: "{{ containerd_base_runtime_specs | default({}) }}" with_dict: "{{ containerd_base_runtime_specs | default({}) }}"
notify: Restart containerd notify: Restart containerd
@ -111,7 +111,7 @@
src: config.toml.j2 src: config.toml.j2
dest: "{{ containerd_cfg_dir }}/config.toml" dest: "{{ containerd_cfg_dir }}/config.toml"
owner: "root" owner: "root"
mode: "0640" mode: 0640
notify: Restart containerd notify: Restart containerd
- name: Containerd | Configure containerd registries - name: Containerd | Configure containerd registries
@ -121,13 +121,13 @@
file: file:
path: "{{ containerd_cfg_dir }}/certs.d/{{ item.prefix }}" path: "{{ containerd_cfg_dir }}/certs.d/{{ item.prefix }}"
state: directory state: directory
mode: "0755" mode: 0755
loop: "{{ containerd_registries_mirrors }}" loop: "{{ containerd_registries_mirrors }}"
- name: Containerd | Write hosts.toml file - name: Containerd | Write hosts.toml file
template: template:
src: hosts.toml.j2 src: hosts.toml.j2
dest: "{{ containerd_cfg_dir }}/certs.d/{{ item.prefix }}/hosts.toml" dest: "{{ containerd_cfg_dir }}/certs.d/{{ item.prefix }}/hosts.toml"
mode: "0640" mode: 0640
loop: "{{ containerd_registries_mirrors }}" loop: "{{ containerd_registries_mirrors }}"
# you can sometimes end up in a state where everything is installed # you can sometimes end up in a state where everything is installed
@ -136,7 +136,7 @@
meta: flush_handlers meta: flush_handlers
- name: Containerd | Ensure containerd is started and enabled - name: Containerd | Ensure containerd is started and enabled
systemd_service: systemd:
name: containerd name: containerd
daemon_reload: yes daemon_reload: yes
enabled: yes enabled: yes

View File

@ -100,10 +100,11 @@ oom_score = {{ containerd_oom_score }}
[plugins."io.containerd.tracing.processor.v1.otlp"] [plugins."io.containerd.tracing.processor.v1.otlp"]
endpoint = "{{ containerd_tracing_endpoint }}" endpoint = "{{ containerd_tracing_endpoint }}"
protocol = "{{ containerd_tracing_protocol }}" protocol = "{{ containerd_tracing_protocol }}"
{% if containerd_tracing_protocol == "grpc" %} {% if containerd_tracing_protocol == "grpc" %}
insecure = false insecure = false
{% endif %} {% endif %}
[plugins."io.containerd.internal.v1.tracing"] [plugins."io.containerd.internal.v1.tracing"]
sampling_ratio = {{ containerd_tracing_sampling_ratio }} sampling_ratio = {{ containerd_tracing_sampling_ratio }}
service_name = "{{ containerd_tracing_service_name }}" service_name = "{{ containerd_tracing_service_name }}"
{% endif %} {% endif %}

View File

@ -1,6 +1,6 @@
--- ---
- name: Cri-dockerd | reload systemd - name: Cri-dockerd | reload systemd
systemd_service: systemd:
name: cri-dockerd name: cri-dockerd
daemon_reload: true daemon_reload: true
masked: no masked: no

View File

@ -28,7 +28,7 @@
src: "{{ item }}" src: "{{ item }}"
dest: "/tmp/{{ item }}" dest: "/tmp/{{ item }}"
owner: root owner: root
mode: "0644" mode: 0644
with_items: with_items:
- container.json - container.json
- sandbox.json - sandbox.json
@ -37,12 +37,12 @@
path: /etc/cni/net.d path: /etc/cni/net.d
state: directory state: directory
owner: "{{ kube_owner }}" owner: "{{ kube_owner }}"
mode: "0755" mode: 0755
- name: Setup CNI - name: Setup CNI
copy: copy:
src: "{{ item }}" src: "{{ item }}"
dest: "/etc/cni/net.d/{{ item }}" dest: "/etc/cni/net.d/{{ item }}"
owner: root owner: root
mode: "0644" mode: 0644
with_items: with_items:
- 10-mynet.conf - 10-mynet.conf

View File

@ -8,7 +8,7 @@
copy: copy:
src: "{{ local_release_dir }}/cri-dockerd" src: "{{ local_release_dir }}/cri-dockerd"
dest: "{{ bin_dir }}/cri-dockerd" dest: "{{ bin_dir }}/cri-dockerd"
mode: "0755" mode: 0755
remote_src: true remote_src: true
notify: notify:
- Restart and enable cri-dockerd - Restart and enable cri-dockerd
@ -17,7 +17,7 @@
template: template:
src: "{{ item }}.j2" src: "{{ item }}.j2"
dest: "/etc/systemd/system/{{ item }}" dest: "/etc/systemd/system/{{ item }}"
mode: "0644" mode: 0644
validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:{{ item }}'" validate: "sh -c '[ -f /usr/bin/systemd/system/factory-reset.target ] || exit 0 && systemd-analyze verify %s:{{ item }}'"
# FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release) # FIXME: check that systemd version >= 250 (factory-reset.target was introduced in that release)
# Remove once we drop support for systemd < 250 # Remove once we drop support for systemd < 250

View File

@ -1,6 +1,6 @@
--- ---
- name: CRI-O | reload systemd - name: CRI-O | reload systemd
systemd_service: systemd:
daemon_reload: true daemon_reload: true
listen: Restart crio listen: Restart crio

View File

@ -33,7 +33,7 @@
src: "{{ item }}" src: "{{ item }}"
dest: "/tmp/{{ item }}" dest: "/tmp/{{ item }}"
owner: root owner: root
mode: "0644" mode: 0644
with_items: with_items:
- container.json - container.json
- sandbox.json - sandbox.json
@ -42,12 +42,12 @@
path: /etc/cni/net.d path: /etc/cni/net.d
state: directory state: directory
owner: "{{ kube_owner }}" owner: "{{ kube_owner }}"
mode: "0755" mode: 0755
- name: Setup CNI - name: Setup CNI
copy: copy:
src: "{{ item }}" src: "{{ item }}"
dest: "/etc/cni/net.d/{{ item }}" dest: "/etc/cni/net.d/{{ item }}"
owner: root owner: root
mode: "0644" mode: 0644
with_items: with_items:
- 10-mynet.conf - 10-mynet.conf

View File

@ -56,27 +56,27 @@
file: file:
path: "{{ item }}" path: "{{ item }}"
state: directory state: directory
mode: "0755" mode: 0755
- name: Cri-o | install cri-o config - name: Cri-o | install cri-o config
template: template:
src: crio.conf.j2 src: crio.conf.j2
dest: /etc/crio/crio.conf dest: /etc/crio/crio.conf
mode: "0644" mode: 0644
register: config_install register: config_install
- name: Cri-o | install config.json - name: Cri-o | install config.json
template: template:
src: config.json.j2 src: config.json.j2
dest: /etc/crio/config.json dest: /etc/crio/config.json
mode: "0644" mode: 0644
register: reg_auth_install register: reg_auth_install
- name: Cri-o | copy binaries - name: Cri-o | copy binaries
copy: copy:
src: "{{ local_release_dir }}/cri-o/bin/{{ item }}" src: "{{ local_release_dir }}/cri-o/bin/{{ item }}"
dest: "{{ bin_dir }}/{{ item }}" dest: "{{ bin_dir }}/{{ item }}"
mode: "0755" mode: 0755
remote_src: true remote_src: true
with_items: with_items:
- "{{ crio_bin_files }}" - "{{ crio_bin_files }}"
@ -86,7 +86,7 @@
copy: copy:
src: "{{ local_release_dir }}/cri-o/contrib/crio.service" src: "{{ local_release_dir }}/cri-o/contrib/crio.service"
dest: /etc/systemd/system/crio.service dest: /etc/systemd/system/crio.service
mode: "0755" mode: 0755
remote_src: true remote_src: true
notify: Restart crio notify: Restart crio
@ -115,7 +115,7 @@
copy: copy:
src: "{{ local_release_dir }}/cri-o/contrib/policy.json" src: "{{ local_release_dir }}/cri-o/contrib/policy.json"
dest: /etc/containers/policy.json dest: /etc/containers/policy.json
mode: "0755" mode: 0755
remote_src: true remote_src: true
notify: Restart crio notify: Restart crio
@ -123,7 +123,7 @@
copy: copy:
src: mounts.conf src: mounts.conf
dest: /etc/containers/mounts.conf dest: /etc/containers/mounts.conf
mode: "0644" mode: 0644
when: when:
- ansible_os_family == 'RedHat' - ansible_os_family == 'RedHat'
notify: Restart crio notify: Restart crio
@ -133,7 +133,7 @@
path: /etc/containers/oci/hooks.d path: /etc/containers/oci/hooks.d
state: directory state: directory
owner: root owner: root
mode: "0755" mode: 0755
- name: Cri-o | set overlay driver - name: Cri-o | set overlay driver
community.general.ini_file: community.general.ini_file:
@ -141,7 +141,7 @@
section: storage section: storage
option: "{{ item.option }}" option: "{{ item.option }}"
value: "{{ item.value }}" value: "{{ item.value }}"
mode: "0644" mode: 0644
with_items: with_items:
- option: driver - option: driver
value: '"overlay"' value: '"overlay"'
@ -157,20 +157,20 @@
section: storage.options.overlay section: storage.options.overlay
option: mountopt option: mountopt
value: '{{ ''"nodev"'' if ansible_kernel is version_compare(("4.18" if ansible_os_family == "RedHat" else "4.19"), "<") else ''"nodev,metacopy=on"'' }}' value: '{{ ''"nodev"'' if ansible_kernel is version_compare(("4.18" if ansible_os_family == "RedHat" else "4.19"), "<") else ''"nodev,metacopy=on"'' }}'
mode: "0644" mode: 0644
- name: Cri-o | create directory registries configs - name: Cri-o | create directory registries configs
file: file:
path: /etc/containers/registries.conf.d path: /etc/containers/registries.conf.d
state: directory state: directory
owner: root owner: root
mode: "0755" mode: 0755
- name: Cri-o | write registries configs - name: Cri-o | write registries configs
template: template:
src: registry.conf.j2 src: registry.conf.j2
dest: "/etc/containers/registries.conf.d/10-{{ item.prefix | default(item.location) | regex_replace(':|/', '_') }}.conf" dest: "/etc/containers/registries.conf.d/10-{{ item.prefix | default(item.location) | regex_replace(':|/', '_') }}.conf"
mode: "0644" mode: 0644
loop: "{{ crio_registries }}" loop: "{{ crio_registries }}"
notify: Restart crio notify: Restart crio
@ -178,14 +178,14 @@
template: template:
src: unqualified.conf.j2 src: unqualified.conf.j2
dest: "/etc/containers/registries.conf.d/01-unqualified.conf" dest: "/etc/containers/registries.conf.d/01-unqualified.conf"
mode: "0644" mode: 0644
notify: Restart crio notify: Restart crio
- name: Cri-o | write cri-o proxy drop-in - name: Cri-o | write cri-o proxy drop-in
template: template:
src: http-proxy.conf.j2 src: http-proxy.conf.j2
dest: /etc/systemd/system/crio.service.d/http-proxy.conf dest: /etc/systemd/system/crio.service.d/http-proxy.conf
mode: "0644" mode: 0644
notify: Restart crio notify: Restart crio
when: http_proxy is defined or https_proxy is defined when: http_proxy is defined or https_proxy is defined

View File

@ -20,7 +20,7 @@
option: enabled option: enabled
value: "0" value: "0"
backup: yes backup: yes
mode: "0644" mode: 0644
when: when:
- amzn2_extras_file_stat.stat.exists - amzn2_extras_file_stat.stat.exists
- not amzn2_extras_docker_repo.changed - not amzn2_extras_docker_repo.changed

View File

@ -9,4 +9,4 @@
copy: copy:
dest: /etc/bash_completion.d/crictl dest: /etc/bash_completion.d/crictl
content: "{{ cri_completion.stdout }}" content: "{{ cri_completion.stdout }}"
mode: "0644" mode: 0644

View File

@ -9,13 +9,13 @@
src: crictl.yaml.j2 src: crictl.yaml.j2
dest: /etc/crictl.yaml dest: /etc/crictl.yaml
owner: root owner: root
mode: "0644" mode: 0644
- name: Copy crictl binary from download dir - name: Copy crictl binary from download dir
copy: copy:
src: "{{ local_release_dir }}/crictl" src: "{{ local_release_dir }}/crictl"
dest: "{{ bin_dir }}/crictl" dest: "{{ bin_dir }}/crictl"
mode: "0755" mode: 0755
remote_src: true remote_src: true
notify: notify:
- Get crictl completion - Get crictl completion

View File

@ -8,5 +8,5 @@
copy: copy:
src: "{{ downloads.crun.dest }}" src: "{{ downloads.crun.dest }}"
dest: "{{ bin_dir }}/crun" dest: "{{ bin_dir }}/crun"
mode: "0755" mode: 0755
remote_src: true remote_src: true

View File

@ -10,12 +10,12 @@
template: template:
src: docker-storage-setup.j2 src: docker-storage-setup.j2
dest: /etc/sysconfig/docker-storage-setup dest: /etc/sysconfig/docker-storage-setup
mode: "0644" mode: 0644
- name: Docker-storage-override-directory | docker service storage-setup override dir - name: Docker-storage-override-directory | docker service storage-setup override dir
file: file:
dest: /etc/systemd/system/docker.service.d dest: /etc/systemd/system/docker.service.d
mode: "0755" mode: 0755
owner: root owner: root
group: root group: root
state: directory state: directory
@ -30,7 +30,7 @@
owner: root owner: root
group: root group: root
mode: "0644" mode: 0644
# https://docs.docker.com/engine/installation/linux/docker-ce/centos/#install-using-the-repository # https://docs.docker.com/engine/installation/linux/docker-ce/centos/#install-using-the-repository
- name: Docker-storage-setup | install lvm2 - name: Docker-storage-setup | install lvm2

View File

@ -1,6 +1,6 @@
--- ---
- name: Docker | reload systemd - name: Docker | reload systemd
systemd_service: systemd:
name: docker name: docker
daemon_reload: true daemon_reload: true
masked: no masked: no

View File

@ -82,14 +82,14 @@
template: template:
src: "fedora_docker.repo.j2" src: "fedora_docker.repo.j2"
dest: "{{ yum_repo_dir }}/docker.repo" dest: "{{ yum_repo_dir }}/docker.repo"
mode: "0644" mode: 0644
when: ansible_distribution == "Fedora" and not is_ostree when: ansible_distribution == "Fedora" and not is_ostree
- name: Configure docker repository on RedHat/CentOS/OracleLinux/AlmaLinux/KylinLinux - name: Configure docker repository on RedHat/CentOS/OracleLinux/AlmaLinux/KylinLinux
template: template:
src: "rh_docker.repo.j2" src: "rh_docker.repo.j2"
dest: "{{ yum_repo_dir }}/docker-ce.repo" dest: "{{ yum_repo_dir }}/docker-ce.repo"
mode: "0644" mode: 0644
when: when:
- ansible_os_family == "RedHat" - ansible_os_family == "RedHat"
- ansible_distribution != "Fedora" - ansible_distribution != "Fedora"

View File

@ -102,5 +102,5 @@
ignore_errors: true # noqa ignore-errors ignore_errors: true # noqa ignore-errors
- name: Docker | systemctl daemon-reload # noqa no-handler - name: Docker | systemctl daemon-reload # noqa no-handler
systemd_service: systemd:
daemon_reload: true daemon_reload: true

View File

@ -3,13 +3,13 @@
file: file:
path: /etc/systemd/system/docker.service.d path: /etc/systemd/system/docker.service.d
state: directory state: directory
mode: "0755" mode: 0755
- name: Write docker proxy drop-in - name: Write docker proxy drop-in
template: template:
src: http-proxy.conf.j2 src: http-proxy.conf.j2
dest: /etc/systemd/system/docker.service.d/http-proxy.conf dest: /etc/systemd/system/docker.service.d/http-proxy.conf
mode: "0644" mode: 0644
notify: Restart docker notify: Restart docker
when: http_proxy is defined or https_proxy is defined when: http_proxy is defined or https_proxy is defined
@ -27,7 +27,7 @@
template: template:
src: docker.service.j2 src: docker.service.j2
dest: /etc/systemd/system/docker.service dest: /etc/systemd/system/docker.service
mode: "0644" mode: 0644
register: docker_service_file register: docker_service_file
notify: Restart docker notify: Restart docker
when: when:
@ -38,14 +38,14 @@
template: template:
src: docker-options.conf.j2 src: docker-options.conf.j2
dest: "/etc/systemd/system/docker.service.d/docker-options.conf" dest: "/etc/systemd/system/docker.service.d/docker-options.conf"
mode: "0644" mode: 0644
notify: Restart docker notify: Restart docker
- name: Write docker dns systemd drop-in - name: Write docker dns systemd drop-in
template: template:
src: docker-dns.conf.j2 src: docker-dns.conf.j2
dest: "/etc/systemd/system/docker.service.d/docker-dns.conf" dest: "/etc/systemd/system/docker.service.d/docker-dns.conf"
mode: "0644" mode: 0644
notify: Restart docker notify: Restart docker
when: dns_mode != 'none' and resolvconf_mode == 'docker_dns' when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'
@ -53,14 +53,14 @@
copy: copy:
src: cleanup-docker-orphans.sh src: cleanup-docker-orphans.sh
dest: "{{ bin_dir }}/cleanup-docker-orphans.sh" dest: "{{ bin_dir }}/cleanup-docker-orphans.sh"
mode: "0755" mode: 0755
when: docker_orphan_clean_up | bool when: docker_orphan_clean_up | bool
- name: Write docker orphan clean up systemd drop-in - name: Write docker orphan clean up systemd drop-in
template: template:
src: docker-orphan-cleanup.conf.j2 src: docker-orphan-cleanup.conf.j2
dest: "/etc/systemd/system/docker.service.d/docker-orphan-cleanup.conf" dest: "/etc/systemd/system/docker.service.d/docker-orphan-cleanup.conf"
mode: "0644" mode: 0644
notify: Restart docker notify: Restart docker
when: docker_orphan_clean_up | bool when: docker_orphan_clean_up | bool

View File

@ -24,9 +24,8 @@ containerd_versioned_pkg:
'1.6.18': "{{ containerd_package }}=1.6.18-1" '1.6.18': "{{ containerd_package }}=1.6.18-1"
'1.6.28': "{{ containerd_package }}=1.6.28-2" '1.6.28': "{{ containerd_package }}=1.6.28-2"
'1.6.31': "{{ containerd_package }}=1.6.31-1" '1.6.31': "{{ containerd_package }}=1.6.31-1"
'1.6.32': "{{ containerd_package }}=1.6.32-1" 'stable': "{{ containerd_package }}=1.6.31-1"
'stable': "{{ containerd_package }}=1.6.32-1" 'edge': "{{ containerd_package }}=1.6.31-1"
'edge': "{{ containerd_package }}=1.6.32-1"
# https://download.docker.com/linux/debian/ # https://download.docker.com/linux/debian/
docker_versioned_pkg: docker_versioned_pkg:

View File

@ -24,9 +24,8 @@ containerd_versioned_pkg:
'1.6.18': "{{ containerd_package }}-1.6.18-3.1.fc{{ ansible_distribution_major_version }}" '1.6.18': "{{ containerd_package }}-1.6.18-3.1.fc{{ ansible_distribution_major_version }}"
'1.6.28': "{{ containerd_package }}-1.6.28-3.2.fc{{ ansible_distribution_major_version }}" '1.6.28': "{{ containerd_package }}-1.6.28-3.2.fc{{ ansible_distribution_major_version }}"
'1.6.31': "{{ containerd_package }}-1.6.31-3.1.fc{{ ansible_distribution_major_version }}" '1.6.31': "{{ containerd_package }}-1.6.31-3.1.fc{{ ansible_distribution_major_version }}"
'1.6.32': "{{ containerd_package }}-1.6.32-3.1.fc{{ ansible_distribution_major_version }}" 'stable': "{{ containerd_package }}-1.6.31-3.1.fc{{ ansible_distribution_major_version }}"
'stable': "{{ containerd_package }}-1.6.32-3.1.fc{{ ansible_distribution_major_version }}" 'edge': "{{ containerd_package }}-1.6.31-3.1.fc{{ ansible_distribution_major_version }}"
'edge': "{{ containerd_package }}-1.6.32-3.1.fc{{ ansible_distribution_major_version }}"
# https://docs.docker.com/install/linux/docker-ce/fedora/ # https://docs.docker.com/install/linux/docker-ce/fedora/
# https://download.docker.com/linux/fedora/<fedora-version>/x86_64/stable/Packages/ # https://download.docker.com/linux/fedora/<fedora-version>/x86_64/stable/Packages/

View File

@ -29,9 +29,8 @@ containerd_versioned_pkg:
'1.6.18': "{{ containerd_package }}-1.6.18-3.1.el8" '1.6.18': "{{ containerd_package }}-1.6.18-3.1.el8"
'1.6.28': "{{ containerd_package }}-1.6.28-3.1.el8" '1.6.28': "{{ containerd_package }}-1.6.28-3.1.el8"
'1.6.31': "{{ containerd_package }}-1.6.31-3.1.el8" '1.6.31': "{{ containerd_package }}-1.6.31-3.1.el8"
'1.6.32': "{{ containerd_package }}-1.6.32-3.1.el8" 'stable': "{{ containerd_package }}-1.6.31-3.1.el8"
'stable': "{{ containerd_package }}-1.6.32-3.1.el8" 'edge': "{{ containerd_package }}-1.6.31-3.1.el8"
'edge': "{{ containerd_package }}-1.6.32-3.1.el8"
# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package # https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package
# https://download.docker.com/linux/centos/8/x86_64/stable/Packages/ # https://download.docker.com/linux/centos/8/x86_64/stable/Packages/

View File

@ -24,9 +24,8 @@ containerd_versioned_pkg:
'1.6.18': "{{ containerd_package }}-1.6.18-3.1.el7" '1.6.18': "{{ containerd_package }}-1.6.18-3.1.el7"
'1.6.28': "{{ containerd_package }}-1.6.28-3.1.el7" '1.6.28': "{{ containerd_package }}-1.6.28-3.1.el7"
'1.6.31': "{{ containerd_package }}-1.6.31-3.1.el7" '1.6.31': "{{ containerd_package }}-1.6.31-3.1.el7"
'1.6.32': "{{ containerd_package }}-1.6.32-3.1.el7" 'stable': "{{ containerd_package }}-1.6.31-3.1.el7"
'stable': "{{ containerd_package }}-1.6.32-3.1.el7" 'edge': "{{ containerd_package }}-1.6.31-3.1.el7"
'edge': "{{ containerd_package }}-1.6.32-3.1.el7"
# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package # https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package
# https://download.docker.com/linux/centos/<centos_version>>/x86_64/stable/Packages/ # https://download.docker.com/linux/centos/<centos_version>>/x86_64/stable/Packages/

View File

@ -24,9 +24,8 @@ containerd_versioned_pkg:
'1.6.18': "{{ containerd_package }}-1.6.18-3.1.el{{ ansible_distribution_major_version }}" '1.6.18': "{{ containerd_package }}-1.6.18-3.1.el{{ ansible_distribution_major_version }}"
'1.6.28': "{{ containerd_package }}-1.6.28-3.1.el{{ ansible_distribution_major_version }}" '1.6.28': "{{ containerd_package }}-1.6.28-3.1.el{{ ansible_distribution_major_version }}"
'1.6.31': "{{ containerd_package }}-1.6.31-3.1.el{{ ansible_distribution_major_version }}" '1.6.31': "{{ containerd_package }}-1.6.31-3.1.el{{ ansible_distribution_major_version }}"
'1.6.32': "{{ containerd_package }}-1.6.32-3.1.el{{ ansible_distribution_major_version }}" 'stable': "{{ containerd_package }}-1.6.31-3.1.el{{ ansible_distribution_major_version }}"
'stable': "{{ containerd_package }}-1.6.32-3.1.el{{ ansible_distribution_major_version }}" 'edge': "{{ containerd_package }}-1.6.31-3.1.el{{ ansible_distribution_major_version }}"
'edge': "{{ containerd_package }}-1.6.32-3.1.el{{ ansible_distribution_major_version }}"
# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package # https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package
# https://download.docker.com/linux/centos/<centos_version>>/x86_64/stable/Packages/ # https://download.docker.com/linux/centos/<centos_version>>/x86_64/stable/Packages/

Some files were not shown because too many files have changed in this diff Show More