Adds support for Ansible collections (#9582)

Author: Luke Simmons
Date: 2023-03-27 11:25:55 +02:00 (committed by GitHub)
Commit: acbf44a1b4 (parent: baed5f0b32)

28 changed files with 1104 additions and 899 deletions
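
For context, "support for Ansible collections" means the playbooks added below can be consumed from a published collection instead of a full repository checkout. The sketch below shows one hypothetical consumer setup; the Git source, the kubernetes_sigs.kubespray namespace, the branch and the wrapper file names are assumptions for illustration, not something this diff pins down.

# requirements.yml (hypothetical) -- pull Kubespray as a collection straight from Git
collections:
  - name: https://github.com/kubernetes-sigs/kubespray
    type: git
    version: master

# site.yml (hypothetical) -- a thin wrapper that delegates to the collection's cluster playbook,
# after installing it with: ansible-galaxy collection install -r requirements.yml
- name: Deploy a Kubernetes cluster via the Kubespray collection
  ansible.builtin.import_playbook: kubernetes_sigs.kubespray.cluster

Run it like any other playbook, e.g. ansible-playbook -i <your inventory> site.yml.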


@@ -0,0 +1,33 @@
---
- hosts: localhost
  gather_facts: false
  become: no
  vars:
    minimal_ansible_version: 2.11.0
    maximal_ansible_version: 2.13.0
    ansible_connection: local
  tags: always
  tasks:
    - name: "Check {{ minimal_ansible_version }} <= Ansible version < {{ maximal_ansible_version }}"
      assert:
        msg: "Ansible must be at least {{ minimal_ansible_version }} and less than {{ maximal_ansible_version }}"
        that:
          - ansible_version.string is version(minimal_ansible_version, ">=")
          - ansible_version.string is version(maximal_ansible_version, "<")
      tags:
        - check

    - name: "Check that python netaddr is installed"
      assert:
        msg: "Python netaddr is not present"
        that: "'127.0.0.1' | ipaddr"
      tags:
        - check

    # CentOS 7 provides a Jinja version that is too old
    - name: "Check that jinja is not too old (install via pip)"
      assert:
        msg: "Your Jinja version is too old, install via pip"
        that: "{% set test %}It works{% endset %}{{ test == 'It works' }}"
      tags:
        - check

playbooks/cluster.yml (new file)

@@ -0,0 +1,131 @@
---
- name: Check ansible version
  import_playbook: ansible_version.yml

- name: Ensure compatibility with old groups
  import_playbook: legacy_groups.yml

- hosts: bastion[0]
  gather_facts: False
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

- hosts: k8s_cluster:etcd
  strategy: linear
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  gather_facts: false
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bootstrap-os, tags: bootstrap-os}

- name: Gather facts
  tags: always
  import_playbook: facts.yml

- hosts: k8s_cluster:etcd
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: "container-engine", tags: "container-engine", when: deploy_container_engine }
    - { role: download, tags: download, when: "not skip_downloads" }

- hosts: etcd:kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: true
        etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
      when: etcd_deployment_type != "kubeadm"

- hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: false
        etcd_events_cluster_setup: false
      when:
        - etcd_deployment_type != "kubeadm"
        - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
        - kube_network_plugin != "calico" or calico_datastore == "etcd"

- hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/node, tags: node }

- hosts: kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/control-plane, tags: master }
    - { role: kubernetes/client, tags: client }
    - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }

- hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/kubeadm, tags: kubeadm}
    - { role: kubernetes/node-label, tags: node-label }
    - { role: network_plugin, tags: network }

- hosts: calico_rr
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }

- hosts: kube_control_plane[0]
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }

- hosts: kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
    - { role: kubernetes-apps/network_plugin, tags: network }
    - { role: kubernetes-apps/policy_controller, tags: policy-controller }
    - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
    - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
    - { role: kubernetes-apps, tags: apps }

- name: Apply resolv.conf changes now that cluster DNS is up
  hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }

playbooks/facts.yml (new file)

@@ -0,0 +1,27 @@
---
- name: Gather facts
  hosts: k8s_cluster:etcd:calico_rr
  gather_facts: False
  tags: always
  tasks:
    - name: Gather minimal facts
      setup:
        gather_subset: '!all'

    # filter matches the following variables:
    # ansible_default_ipv4
    # ansible_default_ipv6
    # ansible_all_ipv4_addresses
    # ansible_all_ipv6_addresses
    - name: Gather necessary facts (network)
      setup:
        gather_subset: '!all,!min,network'
        filter: "ansible_*_ipv[46]*"

    # filter matches the following variables:
    # ansible_memtotal_mb
    # ansible_swaptotal_mb
    - name: Gather necessary facts (hardware)
      setup:
        gather_subset: '!all,!min,hardware'
        filter: "ansible_*total_mb"


@@ -0,0 +1,47 @@
---
# This is an inventory compatibility playbook to ensure we keep compatibility with old-style group names

- name: Add kube-master nodes to kube_control_plane
  hosts: kube-master
  gather_facts: false
  tags: always
  tasks:
    - name: add nodes to kube_control_plane group
      group_by:
        key: 'kube_control_plane'

- name: Add kube-node nodes to kube_node
  hosts: kube-node
  gather_facts: false
  tags: always
  tasks:
    - name: add nodes to kube_node group
      group_by:
        key: 'kube_node'

- name: Add k8s-cluster nodes to k8s_cluster
  hosts: k8s-cluster
  gather_facts: false
  tags: always
  tasks:
    - name: add nodes to k8s_cluster group
      group_by:
        key: 'k8s_cluster'

- name: Add calico-rr nodes to calico_rr
  hosts: calico-rr
  gather_facts: false
  tags: always
  tasks:
    - name: add nodes to calico_rr group
      group_by:
        key: 'calico_rr'

- name: Add no-floating nodes to no_floating
  hosts: no-floating
  gather_facts: false
  tags: always
  tasks:
    - name: add nodes to no_floating group
      group_by:
        key: 'no_floating'
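
To make the mapping above concrete, here is a hypothetical YAML inventory that still uses the old dashed group names (host names and the file path are illustrative). After legacy_groups.yml runs, the same hosts are also members of kube_control_plane, kube_node and k8s_cluster, so the remaining playbooks only need the underscore names.

# inventory/legacy-hosts.yml (hypothetical) -- old-style dashed group names
all:
  children:
    kube-master:
      hosts:
        node1:
    kube-node:
      hosts:
        node1:
        node2:
    k8s-cluster:
      children:
        kube-master:
        kube-node: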


@@ -0,0 +1,34 @@
---
- name: Check ansible version
  import_playbook: ansible_version.yml

- name: Ensure compatibility with old groups
  import_playbook: legacy_groups.yml

- hosts: bastion[0]
  gather_facts: False
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults}
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}

- hosts: etcd[0]
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults}
    - role: recover_control_plane/etcd
      when: etcd_deployment_type != "kubeadm"

- hosts: kube_control_plane[0]
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults}
    - { role: recover_control_plane/control-plane }

- import_playbook: cluster.yml

- hosts: kube_control_plane
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults}
    - { role: recover_control_plane/post-recover }

playbooks/remove-node.yml (new file)

@@ -0,0 +1,50 @@
---
- name: Check ansible version
  import_playbook: ansible_version.yml

- name: Ensure compatibility with old groups
  import_playbook: legacy_groups.yml

- hosts: bastion[0]
  gather_facts: False
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

- hosts: "{{ node | default('etcd:k8s_cluster:calico_rr') }}"
  gather_facts: no
  tasks:
    - name: Confirm Execution
      pause:
        prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes."
      register: pause_result
      run_once: True
      when:
        - not (skip_confirmation | default(false) | bool)

    - name: Fail if user does not confirm deletion
      fail:
        msg: "Delete nodes confirmation failed"
      when: pause_result.user_input | default('yes') != 'yes'

- name: Gather facts
  import_playbook: facts.yml
  when: reset_nodes|default(True)|bool

- hosts: "{{ node | default('kube_node') }}"
  gather_facts: no
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults, when: reset_nodes|default(True)|bool }
    - { role: remove-node/pre-remove, tags: pre-remove }
    - { role: remove-node/remove-etcd-node }
    - { role: reset, tags: reset, when: reset_nodes|default(True)|bool }

# Currently cannot remove first master or etcd
- hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
  gather_facts: no
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults, when: reset_nodes|default(True)|bool }
    - { role: remove-node/post-remove, tags: post-remove }
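
The removal plays above are driven entirely by extra vars: node selects which hosts to remove, skip_confirmation suppresses the interactive pause, and reset_nodes decides whether the reset role runs on the removed hosts. A hypothetical vars file for a non-interactive removal (file and node names are illustrative), passed with ansible-playbook -i <your inventory> playbooks/remove-node.yml -e @remove-node-vars.yml:

# remove-node-vars.yml (hypothetical)
node: node5                # limit the plays to this host instead of the group defaults above
skip_confirmation: true    # skip the "Confirm Execution" pause task
reset_nodes: true          # also run the reset role on the removed host(s)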

playbooks/reset.yml (new file)

@@ -0,0 +1,39 @@
---
- name: Check ansible version
  import_playbook: ansible_version.yml

- name: Ensure compatibility with old groups
  import_playbook: legacy_groups.yml

- hosts: bastion[0]
  gather_facts: False
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults}
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}

- name: Gather facts
  import_playbook: facts.yml

- hosts: etcd:k8s_cluster:calico_rr
  gather_facts: False
  vars_prompt:
    name: "reset_confirmation"
    prompt: "Are you sure you want to reset cluster state? Type 'yes' to reset your cluster."
    default: "no"
    private: no
  pre_tasks:
    - name: check confirmation
      fail:
        msg: "Reset confirmation failed"
      when: reset_confirmation != "yes"

    - name: Gather information about installed services
      service_facts:
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults}
    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_early: true }
    - { role: reset, tags: reset }

playbooks/scale.yml (new file)

@@ -0,0 +1,124 @@
---
- name: Check ansible version
  import_playbook: ansible_version.yml

- name: Ensure compatibility with old groups
  import_playbook: legacy_groups.yml

- hosts: bastion[0]
  gather_facts: False
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

- name: Bootstrap any new workers
  hosts: kube_node
  strategy: linear
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  gather_facts: false
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bootstrap-os, tags: bootstrap-os }

- name: Gather facts
  tags: always
  import_playbook: facts.yml

- name: Generate the etcd certificates beforehand
  hosts: etcd:kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: false
        etcd_events_cluster_setup: false
      when:
        - etcd_deployment_type != "kubeadm"
        - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
        - kube_network_plugin != "calico" or calico_datastore == "etcd"

- name: Download images to ansible host cache via first kube_control_plane node
  hosts: kube_control_plane[0]
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults, when: "not skip_downloads and download_run_once and not download_localhost" }
    - { role: kubernetes/preinstall, tags: preinstall, when: "not skip_downloads and download_run_once and not download_localhost" }
    - { role: download, tags: download, when: "not skip_downloads and download_run_once and not download_localhost" }

- name: Target only workers to get kubelet installed and checking in on any new nodes (engine)
  hosts: kube_node
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: container-engine, tags: "container-engine", when: deploy_container_engine }
    - { role: download, tags: download, when: "not skip_downloads" }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: false
      when:
        - etcd_deployment_type != "kubeadm"
        - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
        - kube_network_plugin != "calico" or calico_datastore == "etcd"

- name: Target only workers to get kubelet installed and checking in on any new nodes (node)
  hosts: kube_node
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/node, tags: node }

- name: Upload control plane certs and retrieve encryption key
  hosts: kube_control_plane | first
  environment: "{{ proxy_disable_env }}"
  gather_facts: False
  tags: kubeadm
  roles:
    - { role: kubespray-defaults }
  tasks:
    - name: Upload control plane certificates
      command: >-
        {{ bin_dir }}/kubeadm init phase
        --config {{ kube_config_dir }}/kubeadm-config.yaml
        upload-certs
        --upload-certs
      environment: "{{ proxy_disable_env }}"
      register: kubeadm_upload_cert
      changed_when: false
    - name: set fact 'kubeadm_certificate_key' for later use
      set_fact:
        kubeadm_certificate_key: "{{ kubeadm_upload_cert.stdout_lines[-1] | trim }}"
      when: kubeadm_certificate_key is not defined

- name: Target only workers to get kubelet installed and checking in on any new nodes (network)
  hosts: kube_node
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/kubeadm, tags: kubeadm }
    - { role: kubernetes/node-label, tags: node-label }
    - { role: network_plugin, tags: network }

- name: Apply resolv.conf changes now that cluster DNS is up
  hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }


@@ -0,0 +1,170 @@
---
- name: Check ansible version
  import_playbook: ansible_version.yml

- name: Ensure compatibility with old groups
  import_playbook: legacy_groups.yml

- hosts: bastion[0]
  gather_facts: False
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

- hosts: k8s_cluster:etcd:calico_rr
  strategy: linear
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  gather_facts: false
  environment: "{{ proxy_disable_env }}"
  vars:
    # Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
    # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
    ansible_ssh_pipelining: false
  roles:
    - { role: kubespray-defaults }
    - { role: bootstrap-os, tags: bootstrap-os}

- name: Gather facts
  tags: always
  import_playbook: facts.yml

- name: Download images to ansible host cache via first kube_control_plane node
  hosts: kube_control_plane[0]
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults, when: "not skip_downloads and download_run_once and not download_localhost"}
    - { role: kubernetes/preinstall, tags: preinstall, when: "not skip_downloads and download_run_once and not download_localhost" }
    - { role: download, tags: download, when: "not skip_downloads and download_run_once and not download_localhost" }

- name: Prepare nodes for upgrade
  hosts: k8s_cluster:etcd:calico_rr
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: download, tags: download, when: "not skip_downloads" }

- name: Upgrade container engine on non-cluster nodes
  hosts: etcd:calico_rr:!k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  serial: "{{ serial | default('20%') }}"
  roles:
    - { role: kubespray-defaults }
    - { role: container-engine, tags: "container-engine", when: deploy_container_engine }

- hosts: etcd:kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: true
        etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
      when: etcd_deployment_type != "kubeadm"

- hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: false
        etcd_events_cluster_setup: false
      when:
        - etcd_deployment_type != "kubeadm"
        - kube_network_plugin in ["calico", "flannel", "canal", "cilium"] or cilium_deploy_additionally | default(false) | bool
        - kube_network_plugin != "calico" or calico_datastore == "etcd"

- name: Handle upgrades to master components first to maintain backwards compat.
  gather_facts: False
  hosts: kube_control_plane
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  serial: 1
  roles:
    - { role: kubespray-defaults }
    - { role: upgrade/pre-upgrade, tags: pre-upgrade }
    - { role: container-engine, tags: "container-engine", when: deploy_container_engine }
    - { role: kubernetes/node, tags: node }
    - { role: kubernetes/control-plane, tags: master, upgrade_cluster_setup: true }
    - { role: kubernetes/client, tags: client }
    - { role: kubernetes/node-label, tags: node-label }
    - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
    - { role: kubernetes-apps, tags: csi-driver }
    - { role: upgrade/post-upgrade, tags: post-upgrade }

- name: Upgrade calico and external cloud provider on all masters, calico-rrs, and nodes
  hosts: kube_control_plane:calico_rr:kube_node
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  serial: "{{ serial | default('20%') }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
    - { role: network_plugin, tags: network }
    - { role: kubernetes-apps/network_plugin, tags: network }
    - { role: kubernetes-apps/policy_controller, tags: policy-controller }

- name: Finally handle worker upgrades, based on given batch size
  hosts: kube_node:calico_rr:!kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  serial: "{{ serial | default('20%') }}"
  roles:
    - { role: kubespray-defaults }
    - { role: upgrade/pre-upgrade, tags: pre-upgrade }
    - { role: container-engine, tags: "container-engine", when: deploy_container_engine }
    - { role: kubernetes/node, tags: node }
    - { role: kubernetes/kubeadm, tags: kubeadm }
    - { role: kubernetes/node-label, tags: node-label }
    - { role: upgrade/post-upgrade, tags: post-upgrade }

- hosts: kube_control_plane[0]
  gather_facts: False
  any_errors_fatal: true
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }

- hosts: calico_rr
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: network_plugin/calico/rr, tags: network }

- hosts: kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
    - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
    - { role: kubernetes-apps, tags: apps }

- name: Apply resolv.conf changes now that cluster DNS is up
  hosts: k8s_cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
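
The worker-facing plays in the upgrade playbook above batch hosts with serial: "{{ serial | default('20%') }}", so the rollout pace can be tuned without editing the playbook. A hypothetical extra-vars file for a more conservative run (the file name is illustrative):

# upgrade-vars.yml (hypothetical)
serial: 1                 # upgrade one node per batch instead of the default 20%
any_errors_fatal: true    # stop the whole run as soon as any host fails (matches the playbook default)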