Compare commits
18 Commits
master ... release-2.

c3814bb258
90e6e19403
7e419310ce
11b72e2408
c267d427ce
6d37c3cde6
af84e56099
d3954a5590
75d648cae5
087d9c204f
775cadda62
19c000c127
b39a196cfb
9fc14b3e6c
f9a7dce7ca
fbbbd90732
9869b46432
6cd33700f5
@@ -7,33 +7,14 @@ skip_list:
  # These rules are intentionally skipped:
  #
  # [role-name] "meta/main.yml" Role name role-name does not match ``^+$`` pattern
  # Meta roles in Kubespray don't need proper names
  # (Disabled in June 2021)
  - 'role-name'
  # [E204]: "Lines should be no longer than 160 chars"
  # This could be re-enabled with a major rewrite in the future.
  # For now, there's not enough value gain from strictly limiting line length.
  # (Disabled in May 2019)
  - '204'

  # [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
  # In Kubespray we use variables that use camelCase to match their k8s counterparts
  # (Disabled in June 2021)
  - 'var-naming'

  # [fqcn-builtins]
  # Roles in kubespray don't need fully qualified collection names
  # (Disabled in Feb 2023)
  - 'fqcn-builtins'

  # We use template in names
  - 'name[template]'

  # No changed-when on commands
  # (Disabled in June 2023 after ansible upgrade; FIXME)
  - 'no-changed-when'

  # Disable run-once check with free strategy
  # (Disabled in June 2023 after ansible upgrade; FIXME)
  - 'run-once[task]'
exclude_paths:
  # Generated files
  - tests/files/custom_cni/cilium.yaml
  - venv
  - .github
  # [E701]: "meta/main.yml should contain relevant info"
  # Roles in Kubespray are not intended to be used/imported by Ansible Galaxy.
  # While it can be useful to have these metadata available, they are also available in the existing documentation.
  # (Disabled in May 2019)
  - '701'
@@ -1,8 +0,0 @@
# This file contains ignored rule violations for ansible-lint
inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml jinja[spacing]
roles/kubernetes/control-plane/defaults/main/kube-proxy.yml jinja[spacing]
roles/kubernetes/control-plane/defaults/main/main.yml jinja[spacing]
roles/kubernetes/kubeadm/defaults/main.yml jinja[spacing]
roles/kubernetes/node/defaults/main.yml jinja[spacing]
roles/kubernetes/preinstall/defaults/main.yml jinja[spacing]
roles/kubespray-defaults/defaults/main/main.yml jinja[spacing]
1 .gitattributes vendored
@@ -1 +0,0 @@
docs/_sidebar.md linguist-generated=true
44 .github/ISSUE_TEMPLATE/bug-report.md vendored Normal file
@@ -0,0 +1,44 @@
---
name: Bug Report
about: Report a bug encountered while operating Kubernetes
labels: kind/bug

---
<!--
Please, be ready for followup questions, and please respond in a timely
manner. If we can't reproduce a bug or think a feature already exists, we
might close your issue. If we're wrong, PLEASE feel free to reopen it and
explain why.
-->

**Environment**:
- **Cloud provider or hardware configuration:**

- **OS (`printf "$(uname -srm)\n$(cat /etc/os-release)\n"`):**

- **Version of Ansible** (`ansible --version`):

- **Version of Python** (`python --version`):


**Kubespray version (commit) (`git rev-parse --short HEAD`):**


**Network plugin used**:


**Full inventory with variables (`ansible -i inventory/sample/inventory.ini all -m debug -a "var=hostvars[inventory_hostname]"`):**
<!-- We recommend using snippets services like https://gist.github.com/ etc. -->

**Command used to invoke ansible**:


**Output of ansible run**:
<!-- We recommend using snippets services like https://gist.github.com/ etc. -->

**Anything else we need to know**:
<!-- By running scripts/collect-info.yaml you can get a lot of useful information.
Script can be started by:
ansible-playbook -i <inventory_file_path> -u <ssh_user> -e ansible_ssh_user=<ssh_user> -b --become-user=root -e dir=`pwd` scripts/collect-info.yaml
(If you are using CoreOS remember to add '-e ansible_python_interpreter=/opt/bin/python').
After running this command you can find logs in `pwd`/logs.tar.gz. You can even upload the entire file somewhere and paste the link here.-->
124 .github/ISSUE_TEMPLATE/bug-report.yaml vendored
@@ -1,124 +0,0 @@
---
name: Bug Report
description: Report a bug encountered while using Kubespray
labels: kind/bug
body:
  - type: markdown
    attributes:
      value: |
        Please, be ready for followup questions, and please respond in a timely
        manner. If we can't reproduce a bug or think a feature already exists, we
        might close your issue. If we're wrong, PLEASE feel free to reopen it and
        explain why.
  - type: textarea
    id: problem
    attributes:
      label: What happened?
      description: |
        Please provide as much info as possible. Not doing so may result in your bug not being addressed in a timely manner.
    validations:
      required: true
  - type: textarea
    id: expected
    attributes:
      label: What did you expect to happen?
    validations:
      required: true

  - type: textarea
    id: repro
    attributes:
      label: How can we reproduce it (as minimally and precisely as possible)?
    validations:
      required: true

  - type: markdown
    attributes:
      value: '### Environment'

  - type: textarea
    id: os
    attributes:
      label: OS
      placeholder: 'printf "$(uname -srm)\n$(cat /etc/os-release)\n"'
    validations:
      required: true

  - type: textarea
    id: ansible_version
    attributes:
      label: Version of Ansible
      placeholder: 'ansible --version'
    validations:
      required: true

  - type: input
    id: python_version
    attributes:
      label: Version of Python
      placeholder: 'python --version'
    validations:
      required: true

  - type: input
    id: kubespray_version
    attributes:
      label: Version of Kubespray (commit)
      placeholder: 'git rev-parse --short HEAD'
    validations:
      required: true

  - type: dropdown
    id: network_plugin
    attributes:
      label: Network plugin used
      options:
        - calico
        - cilium
        - cni
        - custom_cni
        - flannel
        - kube-ovn
        - kube-router
        - macvlan
        - meta
        - multus
        - ovn4nfv
        - weave
    validations:
      required: true

  - type: textarea
    id: inventory
    attributes:
      label: Full inventory with variables
      placeholder: 'ansible -i inventory/sample/inventory.ini all -m debug -a "var=hostvars[inventory_hostname]"'
      description: We recommend using snippets services like https://gist.github.com/ etc.
    validations:
      required: true

  - type: input
    id: ansible_command
    attributes:
      label: Command used to invoke ansible
    validations:
      required: true

  - type: textarea
    id: ansible_output
    attributes:
      label: Output of ansible run
      description: We recommend using snippets services like https://gist.github.com/ etc.
    validations:
      required: true

  - type: textarea
    id: anything_else
    attributes:
      label: Anything else we need to know
      description: |
        By running scripts/collect-info.yaml you can get a lot of useful information.
        Script can be started by:
        ansible-playbook -i <inventory_file_path> -u <ssh_user> -e ansible_ssh_user=<ssh_user> -b --become-user=root -e dir=`pwd` scripts/collect-info.yaml
        (If you are using CoreOS remember to add '-e ansible_python_interpreter=/opt/bin/python').
        After running this command you can find logs in `pwd`/logs.tar.gz. You can even upload the entire file somewhere and paste the link here
5 .github/ISSUE_TEMPLATE/config.yml vendored
@@ -1,5 +0,0 @@
---
contact_links:
  - name: Support Request
    url: https://kubernetes.slack.com/channels/kubespray
    about: Support request or question relating to Kubernetes
11 .github/ISSUE_TEMPLATE/enhancement.md vendored Normal file
@@ -0,0 +1,11 @@
---
name: Enhancement Request
about: Suggest an enhancement to the Kubespray project
labels: kind/feature

---
<!-- Please only use this template for submitting enhancement requests -->

**What would you like to be added**:

**Why is this needed**:
20 .github/ISSUE_TEMPLATE/enhancement.yaml vendored
@@ -1,20 +0,0 @@
---
name: Enhancement Request
description: Suggest an enhancement to the Kubespray project
labels: kind/feature
body:
  - type: markdown
    attributes:
      value: Please only use this template for submitting enhancement requests
  - type: textarea
    id: what
    attributes:
      label: What would you like to be added
    validations:
      required: true
  - type: textarea
    id: why
    attributes:
      label: Why is this needed
    validations:
      required: true
20 .github/ISSUE_TEMPLATE/failing-test.md vendored Normal file
@@ -0,0 +1,20 @@
---
name: Failing Test
about: Report test failures in Kubespray CI jobs
labels: kind/failing-test

---

<!-- Please only use this template for submitting reports about failing tests in Kubespray CI jobs -->

**Which jobs are failing**:

**Which test(s) are failing**:

**Since when has it been failing**:

**Testgrid link**:

**Reason for failure**:

**Anything else we need to know**:
41 .github/ISSUE_TEMPLATE/failing-test.yaml vendored
@@ -1,41 +0,0 @@
---
name: Failing Test
description: Report test failures in Kubespray CI jobs
labels: kind/failing-test
body:
  - type: markdown
    attributes:
      value: Please only use this template for submitting reports about failing tests in Kubespray CI jobs
  - type: textarea
    id: failing_jobs
    attributes:
      label: Which jobs are failing ?
    validations:
      required: true

  - type: textarea
    id: failing_tests
    attributes:
      label: Which tests are failing ?
    validations:
      required: true

  - type: input
    id: since_when
    attributes:
      label: Since when has it been failing ?
    validations:
      required: true

  - type: textarea
    id: failure_reason
    attributes:
      label: Reason for failure
      description: If you don't know and have no guess, just put "Unknown"
    validations:
      required: true

  - type: textarea
    id: anything_else
    attributes:
      label: Anything else we need to know
18 .github/ISSUE_TEMPLATE/support.md vendored Normal file
@@ -0,0 +1,18 @@
---
name: Support Request
about: Support request or question relating to Kubespray
labels: triage/support

---

<!--
STOP -- PLEASE READ!

GitHub is not the right place for support requests.

If you're looking for help, check [Stack Overflow](https://stackoverflow.com/questions/tagged/kubespray) and the [troubleshooting guide](https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/).

You can also post your question on the [Kubernetes Slack](http://slack.k8s.io/) or the [Discuss Kubernetes](https://discuss.kubernetes.io/) forum.

If the matter is security related, please disclose it privately via https://kubernetes.io/security/.
-->
9 .github/dependabot.yml vendored
@@ -1,9 +0,0 @@
version: 2
updates:
  - package-ecosystem: "pip"
    directory: "/"
    schedule:
      interval: "weekly"
    labels:
      - dependencies
      - release-note-none
24 .gitignore vendored
@@ -3,26 +3,18 @@
**/vagrant_ansible_inventory
*.iml
temp
contrib/offline/container-images
contrib/offline/container-images.tar.gz
contrib/offline/offline-files
contrib/offline/offline-files.tar.gz
.idea
.vscode
.tox
.cache
*.bak
*.tfstate
*.tfstate*backup
*.lock.hcl
*.tfstate.backup
.terraform/
contrib/terraform/aws/credentials.tfvars
.terraform.lock.hcl
/ssh-bastion.conf
**/*.sw[pon]
*~
vagrant/
plugins/mitogen

# Ansible inventory
inventory/*
@@ -106,17 +98,3 @@ target/
# virtualenv
venv/
ENV/

# molecule
roles/**/molecule/**/__pycache__/

# macOS
.DS_Store

# Temp location used by our scripts
scripts/tmp/
tmp.md

# Ansible collection files
kubernetes_sigs-kubespray*tar.gz
ansible_collections
@@ -1,20 +1,21 @@
---
stages:
  - build
  - test
  - unit-tests
  - deploy-part1
  - deploy-extended
  - moderator
  - deploy-part2
  - deploy-part3
  - deploy-special

variables:
  KUBESPRAY_VERSION: v2.25.0
  KUBESPRAY_VERSION: v2.13.3
  FAILFASTCI_NAMESPACE: 'kargo-ci'
  GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
  ANSIBLE_FORCE_COLOR: "true"
  MAGIC: "ci check this"
  TEST_ID: "$CI_PIPELINE_ID-$CI_JOB_ID"
  TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
  CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
  CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
  CI_TEST_SETTING: "./tests/common/_kubespray_test_settings.yml"
  GS_ACCESS_KEY_ID: $GS_KEY
  GS_SECRET_ACCESS_KEY: $GS_SECRET
  CONTAINER_ENGINE: docker
@@ -25,43 +26,29 @@ variables:
  ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
  IDEMPOT_CHECK: "false"
  RESET_CHECK: "false"
  REMOVE_NODE_CHECK: "false"
  UPGRADE_TEST: "false"
  MITOGEN_ENABLE: "false"
  ANSIBLE_LOG_LEVEL: "-vv"
  RECOVER_CONTROL_PLANE_TEST: "false"
  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"
  TERRAFORM_VERSION: 1.3.7
  PIPELINE_IMAGE: "$CI_REGISTRY_IMAGE/pipeline:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}"
  RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"

before_script:
  - ./tests/scripts/rebase.sh
  - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
  - python -m pip install -r tests/requirements.txt
  - mkdir -p /.ssh

.job: &job
  tags:
    - ffci
  image: $PIPELINE_IMAGE
    - packet
  image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION
  artifacts:
    when: always
    paths:
      - cluster-dump/
  needs:
    - pipeline-image

.job-moderated:
  extends: .job
  needs:
    - pipeline-image
    - ci-not-authorized
    - check-galaxy-version # lint
    - pre-commit # lint
    - vagrant-validate # lint

.testcases: &testcases
  extends: .job-moderated
  retry: 1
  interruptible: true
  <<: *job
  before_script:
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
    - ./tests/scripts/rebase.sh
@@ -69,39 +56,22 @@ before_script:
script:
  - ./tests/scripts/testcases_run.sh
after_script:
  - ./tests/scripts/testcases_cleanup.sh
  - chronic ./tests/scripts/testcases_cleanup.sh

# For failfast, at least 1 job must be defined in .gitlab-ci.yml
# Premoderated with manual actions
ci-not-authorized:
  stage: build
  before_script: []
  after_script: []
  rules:
    # LGTM or ok-to-test labels
    - if: $PR_LABELS =~ /.*,(lgtm|approved|ok-to-test).*|^(lgtm|approved|ok-to-test).*/i
      variables:
        CI_OK_TO_TEST: '0'
      when: always
    - if: $CI_PIPELINE_SOURCE == "schedule" || $CI_PIPELINE_SOURCE == "trigger"
      variables:
        CI_OK_TO_TEST: '0'
    - if: $CI_COMMIT_BRANCH == "master"
      variables:
        CI_OK_TO_TEST: '0'
    - when: always
      variables:
        CI_OK_TO_TEST: '1'
ci-authorized:
  extends: .job
  stage: moderator
  script:
    - exit $CI_OK_TO_TEST
  tags:
    - ffci
  needs: []
    - /bin/sh scripts/premoderator.sh
  except: ['triggers', 'master']
  # Disable ci moderator
  only: []

include:
  - .gitlab-ci/build.yml
  - .gitlab-ci/lint.yml
  - .gitlab-ci/shellcheck.yml
  - .gitlab-ci/terraform.yml
  - .gitlab-ci/packet.yml
  - .gitlab-ci/vagrant.yml
  - .gitlab-ci/molecule.yml
@@ -1,32 +0,0 @@
---
.build-container:
  cache:
    key: $CI_COMMIT_REF_SLUG
    paths:
      - image-cache
  tags:
    - ffci
  stage: build
  image:
    name: gcr.io/kaniko-project/executor:debug
    entrypoint: ['']
  variables:
    TAG: $CI_COMMIT_SHORT_SHA
    PROJECT_DIR: $CI_PROJECT_DIR
    DOCKERFILE: Dockerfile
    GODEBUG: "http2client=0"
  before_script:
    - echo "{\"auths\":{\"$CI_REGISTRY\":{\"auth\":\"$(echo -n ${CI_REGISTRY_USER}:${CI_REGISTRY_PASSWORD} | base64)\"}}}" > /kaniko/.docker/config.json
  script:
    - /kaniko/executor --cache=true
      --cache-dir=image-cache
      --context $PROJECT_DIR
      --dockerfile $PROJECT_DIR/$DOCKERFILE
      --label 'git-branch'=$CI_COMMIT_REF_SLUG
      --label 'git-tag=$CI_COMMIT_TAG'
      --destination $PIPELINE_IMAGE

pipeline-image:
  extends: .build-container
  variables:
    DOCKERFILE: pipeline.Dockerfile
@@ -1,35 +1,76 @@
---
pre-commit:
  stage: test
  tags:
    - ffci
  image: 'ghcr.io/pre-commit-ci/runner-image@sha256:aaf2c7b38b22286f2d381c11673bec571c28f61dd086d11b43a1c9444a813cef'
yamllint:
  extends: .job
  stage: unit-tests
  tags: [light]
  variables:
    PRE_COMMIT_HOME: /pre-commit-cache
    LANG: C.UTF-8
  script:
    - pre-commit run --all-files
  cache:
    key: pre-commit-all
    paths:
      - /pre-commit-cache
  needs: []
    - yamllint --strict .
  except: ['triggers', 'master']

vagrant-validate:
  extends: .job
  stage: test
  tags: [ffci]
  stage: unit-tests
  tags: [light]
  variables:
    VAGRANT_VERSION: 2.3.7
    VAGRANT_VERSION: 2.2.4
  script:
    - ./tests/scripts/vagrant-validate.sh
    - ./tests/scripts/vagrant-validate.sh
  except: ['triggers', 'master']

ansible-lint:
  extends: .job
  stage: unit-tests
  tags: [light]
  # lint every yml/yaml file that looks like it contains Ansible plays
  script: |-
    grep -Rl '^- hosts: \|^ hosts: ' --include \*.yml --include \*.yaml . | xargs -P 4 -n 25 ansible-lint -v
  except: ['triggers', 'master']

# TODO: convert to pre-commit hook
check-galaxy-version:
  needs: []
  stage: test
  tags: [ffci]
syntax-check:
  extends: .job
  stage: unit-tests
  tags: [light]
  variables:
    ANSIBLE_INVENTORY: inventory/local-tests.cfg
    ANSIBLE_REMOTE_USER: root
    ANSIBLE_BECOME: "true"
    ANSIBLE_BECOME_USER: root
    ANSIBLE_VERBOSITY: "3"
  script:
    - ansible-playbook --syntax-check cluster.yml
    - ansible-playbook --syntax-check upgrade-cluster.yml
    - ansible-playbook --syntax-check reset.yml
    - ansible-playbook --syntax-check extra_playbooks/upgrade-only-k8s.yml
  except: ['triggers', 'master']

tox-inventory-builder:
  stage: unit-tests
  tags: [light]
  extends: .job
  before_script:
    - ./tests/scripts/rebase.sh
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip install -r tests/requirements.txt
  script:
    - pip3 install tox
    - cd contrib/inventory_builder && tox
  except: ['triggers', 'master']

markdownlint:
  stage: unit-tests
  tags: [light]
  image: node
  before_script:
    - npm install -g markdownlint-cli
  script:
    - markdownlint README.md docs --ignore docs/_sidebar.md

ci-matrix:
  stage: unit-tests
  tags: [light]
  image: python:3
  script:
    - tests/scripts/check_galaxy_version.sh
    - tests/scripts/md-table/test.sh
@@ -1,95 +0,0 @@
---
.molecule:
  tags: [ffci-vm-med]
  only: [/^pr-.*$/]
  except: ['triggers']
  image: quay.io/kubespray/vm-kubespray-ci:v6
  services: []
  stage: deploy-part1
  needs: []
  # - ci-not-authorized
  variables:
    VAGRANT_DEFAULT_PROVIDER: "libvirt"
  before_script:
    - groups
    - python3 -m venv citest
    - source citest/bin/activate
    - vagrant plugin expunge --reinstall --force --no-tty
    - vagrant plugin install vagrant-libvirt
    - pip install --no-compile --no-cache-dir pip -U
    - pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/requirements.txt
    - pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/tests/requirements.txt
    - ./tests/scripts/rebase.sh
    - ./tests/scripts/vagrant_clean.sh
  script:
    - ./tests/scripts/molecule_run.sh
  after_script:
    - ./tests/scripts/molecule_logs.sh
  artifacts:
    when: always
    paths:
      - molecule_logs/

# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set

.molecule_periodic:
  only:
    variables:
      - $PERIODIC_CI_ENABLED
  allow_failure: true
  extends: .molecule

molecule_full:
  extends: .molecule_periodic

molecule_no_container_engines:
  extends: .molecule
  script:
    - ./tests/scripts/molecule_run.sh -e container-engine
  when: on_success

molecule_docker:
  extends: .molecule
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/cri-dockerd
  when: on_success

molecule_containerd:
  extends: .molecule
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/containerd
  when: on_success

molecule_cri-o:
  extends: .molecule
  stage: deploy-part1
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/cri-o
  allow_failure: true
  when: on_success

# # Stage 3 container engines don't get as much attention so allow them to fail
# molecule_kata:
#   extends: .molecule
#   stage: deploy-extended
#   script:
#     - ./tests/scripts/molecule_run.sh -i container-engine/kata-containers
#   when: manual
# # FIXME: this test is broken (perma-failing)

molecule_gvisor:
  extends: .molecule
  stage: deploy-extended
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/gvisor
  when: manual
  # FIXME: this test is broken (perma-failing)

molecule_youki:
  extends: .molecule
  stage: deploy-extended
  script:
    - ./tests/scripts/molecule_run.sh -i container-engine/youki
  when: manual
  # FIXME: this test is broken (perma-failing)
@@ -1,247 +1,229 @@
---
.packet:
.packet: &packet
  extends: .testcases
  variables:
    ANSIBLE_TIMEOUT: "120"
    CI_PLATFORM: packet
    SSH_USER: kubespray
    CI_PLATFORM: "packet"
    SSH_USER: "kubespray"
  tags:
    - ffci
  needs:
    - pipeline-image
    - ci-not-authorized
    - packet
  only: [/^pr-.*$/]
  except: ['triggers']

# CI template for PRs
.packet_pr:
  stage: deploy-part1
  rules:
    - if: $PR_LABELS =~ /.*ci-short.*/
      when: manual
      allow_failure: true
    - if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
      when: on_success
    - when: manual
      allow_failure: true
  extends: .packet

  ## Uncomment this to have multiple stages
  # needs:
  #   - packet_ubuntu20-calico-all-in-one

.packet_pr_short:
packet_ubuntu18-calico-aio:
  stage: deploy-part1
  extends: .packet
  rules:
    - if: $CI_COMMIT_BRANCH =~ /^pr-.*$/
      when: on_success
    - when: manual
      allow_failure: true
  when: on_success

.packet_pr_manual:
  extends: .packet_pr
  stage: deploy-extended
  rules:
    - if: $PR_LABELS =~ /.*ci-full.*/
      when: on_success
    # Else run as manual
    - when: manual
      allow_failure: true

.packet_pr_extended:
  extends: .packet_pr
  stage: deploy-extended
  rules:
    - if: $PR_LABELS =~ /.*(ci-extended|ci-full).*/
      when: on_success
    - when: manual
      allow_failure: true

# CI template for periodic CI jobs
# Enabled when PERIODIC_CI_ENABLED var is set
.packet_periodic:
  only:
    variables:
      - $PERIODIC_CI_ENABLED
  allow_failure: true
# Future AIO job
packet_ubuntu20-calico-aio:
  stage: deploy-part1
  extends: .packet

packet_cleanup_old:
  stage: deploy-part1
  extends: .packet_periodic
  script:
    - cd tests
    - make cleanup-packet
  after_script: []

# The ubuntu20-calico-all-in-one jobs are meant as early stages to prevent running the full CI if something is horribly broken
packet_ubuntu20-calico-all-in-one:
  stage: deploy-part1
  extends: .packet_pr_short
  variables:
    RESET_CHECK: "true"
  when: on_success

# ### PR JOBS PART2

packet_ubuntu20-crio:
  extends: .packet_pr_manual

packet_ubuntu22-calico-all-in-one:
  extends: .packet_pr

packet_ubuntu24-calico-etcd-datastore:
  extends: .packet_pr

packet_almalinux8-crio:
  extends: .packet_pr

packet_almalinux8-kube-ovn:
  extends: .packet_pr

packet_debian11-calico:
  extends: .packet_pr

packet_debian11-macvlan:
  extends: .packet_pr

packet_debian12-cilium:
  extends: .packet_pr

packet_rockylinux8-calico:
  extends: .packet_pr

packet_rockylinux9-cilium:
  extends: .packet_pr
packet_centos7-flannel-containerd-addons-ha:
  extends: .packet
  stage: deploy-part2
  when: on_success
  variables:
    RESET_CHECK: "true"
    MITOGEN_ENABLE: "true"

packet_amazon-linux-2-all-in-one:
  extends: .packet_pr

packet_opensuse-docker-cilium:
  extends: .packet_pr

packet_ubuntu20-cilium-sep:
  extends: .packet_pr

## Extended
packet_debian11-docker:
  extends: .packet_pr_extended

packet_debian12-docker:
  extends: .packet_pr_extended

packet_debian12-calico:
  extends: .packet_pr_extended

packet_almalinux8-calico-remove-node:
  extends: .packet_pr_extended
packet_centos7-crio:
  extends: .packet
  stage: deploy-part2
  when: on_success
  variables:
    REMOVE_NODE_CHECK: "true"
    REMOVE_NODE_NAME: "instance-3"
    MITOGEN_ENABLE: "true"

packet_rockylinux9-calico:
  extends: .packet_pr_extended
packet_ubuntu18-crio:
  extends: .packet
  stage: deploy-part2
  when: manual
  variables:
    MITOGEN_ENABLE: "true"

packet_almalinux8-calico:
  extends: .packet_pr_extended
packet_ubuntu16-canal-kubeadm-ha:
  stage: deploy-part2
  extends: .packet
  when: on_success

packet_almalinux8-docker:
  extends: .packet_pr_extended
packet_ubuntu16-canal-sep:
  stage: deploy-special
  extends: .packet
  when: manual

packet_ubuntu20-calico-all-in-one-hardening:
  extends: .packet_pr_extended
packet_ubuntu16-flannel-ha:
  stage: deploy-part2
  extends: .packet
  when: manual

packet_ubuntu24-calico-all-in-one:
  extends: .packet_pr_extended
packet_ubuntu16-kube-router-sep:
  stage: deploy-part2
  extends: .packet
  when: manual

packet_ubuntu20-calico-etcd-kubeadm:
  extends: .packet_pr_extended
packet_ubuntu16-kube-router-svc-proxy:
  stage: deploy-part2
  extends: .packet
  when: manual

packet_ubuntu24-all-in-one-docker:
  extends: .packet_pr_extended
packet_debian10-cilium-svc-proxy:
  stage: deploy-part2
  extends: .packet
  when: manual

packet_ubuntu22-all-in-one-docker:
  extends: .packet_pr_extended
packet_debian10-containerd:
  stage: deploy-part2
  extends: .packet
  when: on_success
  variables:
    MITOGEN_ENABLE: "true"

packet_centos7-calico-ha-once-localhost:
  stage: deploy-part2
  extends: .packet
  when: on_success
  variables:
    # This will instruct Docker not to start over TLS.
    DOCKER_TLS_CERTDIR: ""
  services:
    - docker:19.03.9-dind

packet_centos8-kube-ovn:
  stage: deploy-part2
  extends: .packet
  when: on_success

packet_centos8-calico:
  stage: deploy-part2
  extends: .packet
  when: on_success

packet_fedora32-weave:
  stage: deploy-part2
  extends: .packet
  when: on_success

packet_opensuse-canal:
  stage: deploy-part2
  extends: .packet
  when: on_success

packet_ubuntu18-ovn4nfv:
  stage: deploy-part2
  extends: .packet
  when: on_success

# Contiv does not work in k8s v1.16
# packet_ubuntu16-contiv-sep:
#   stage: deploy-part2
#   extends: .packet
#   when: on_success

# ### MANUAL JOBS
packet_fedora37-crio:
  extends: .packet_pr_manual

packet_ubuntu20-flannel-ha:
  extends: .packet_pr_manual
packet_ubuntu16-weave-sep:
  stage: deploy-part2
  extends: .packet
  when: manual

packet_ubuntu20-all-in-one-docker:
  extends: .packet_pr_manual
packet_ubuntu18-cilium-sep:
  stage: deploy-special
  extends: .packet
  when: manual

packet_ubuntu20-flannel-ha-once:
  extends: .packet_pr_manual
packet_ubuntu18-flannel-containerd-ha:
  stage: deploy-part2
  extends: .packet
  when: manual

packet_fedora37-calico-swap-selinux:
  extends: .packet_pr_manual
packet_ubuntu18-flannel-containerd-ha-once:
  stage: deploy-part2
  extends: .packet
  when: manual

packet_almalinux8-calico-ha-ebpf:
  extends: .packet_pr_manual
packet_debian9-macvlan:
  stage: deploy-part2
  extends: .packet
  when: manual

packet_almalinux8-calico-nodelocaldns-secondary:
  extends: .packet_pr_manual
packet_centos7-calico-ha:
  stage: deploy-part2
  extends: .packet
  when: manual

packet_debian11-custom-cni:
  extends: .packet_pr_manual
packet_centos7-kube-router:
  stage: deploy-part2
  extends: .packet
  when: manual

packet_debian11-kubelet-csr-approver:
  extends: .packet_pr_manual
packet_centos7-multus-calico:
  stage: deploy-part2
  extends: .packet
  when: manual

packet_debian12-custom-cni-helm:
  extends: .packet_pr_manual
packet_oracle7-canal-ha:
  stage: deploy-part2
  extends: .packet
  when: manual

packet_ubuntu20-calico-ha-wireguard:
  extends: .packet_pr_manual

# PERIODIC
packet_fedora38-docker-calico:
  stage: deploy-extended
  extends: .packet_periodic
packet_fedora31-flannel:
  stage: deploy-part2
  extends: .packet
  when: on_success
  variables:
    RESET_CHECK: "true"
    MITOGEN_ENABLE: "true"

packet_fedora37-calico-selinux:
  stage: deploy-extended
  extends: .packet_periodic
packet_amazon-linux-2-aio:
  stage: deploy-part2
  extends: .packet
  when: manual

packet_fedora32-kube-ovn-containerd:
  stage: deploy-part2
  extends: .packet
  when: on_success

packet_ubuntu20-calico-etcd-kubeadm-upgrade-ha:
  stage: deploy-extended
  extends: .packet_periodic
# ### PR JOBS PART3
# Long jobs (45min+)

packet_centos7-weave-upgrade-ha:
  stage: deploy-part3
  extends: .packet
  when: on_success
  variables:
    UPGRADE_TEST: basic
    MITOGEN_ENABLE: "false"


packet_debian11-calico-upgrade-once:
  stage: deploy-extended
  extends: .packet_periodic
packet_debian9-calico-upgrade:
  stage: deploy-part3
  extends: .packet
  when: on_success
  variables:
    UPGRADE_TEST: graceful
    MITOGEN_ENABLE: "false"

packet_ubuntu20-calico-ha-recover:
  stage: deploy-extended
  extends: .packet_periodic
  variables:
    RECOVER_CONTROL_PLANE_TEST: "true"
    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:]:kube_control_plane[1:]"

packet_ubuntu20-calico-ha-recover-noquorum:
  stage: deploy-extended
  extends: .packet_periodic
  variables:
    RECOVER_CONTROL_PLANE_TEST: "true"
    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:]:kube_control_plane[1:]"

packet_debian11-calico-upgrade:
  stage: deploy-extended
  extends: .packet_periodic
packet_debian9-calico-upgrade-once:
  stage: deploy-part3
  extends: .packet
  when: on_success
  variables:
    UPGRADE_TEST: graceful
    MITOGEN_ENABLE: "false"

packet_debian12-cilium-svc-proxy:
  stage: deploy-extended
  extends: .packet_periodic
packet_ubuntu18-calico-ha-recover:
  stage: deploy-part3
  extends: .packet
  when: on_success
  variables:
    RECOVER_CONTROL_PLANE_TEST: "true"
    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"

packet_ubuntu18-calico-ha-recover-noquorum:
  stage: deploy-part3
  extends: .packet
  when: on_success
  variables:
    RECOVER_CONTROL_PLANE_TEST: "true"
    RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube-master[1:]"
@@ -1,17 +0,0 @@
---
# stub pipeline for dynamic generation
pre-commit:
  tags:
    - light
  image: 'ghcr.io/pre-commit-ci/runner-image@sha256:aaf2c7b38b22286f2d381c11673bec571c28f61dd086d11b43a1c9444a813cef'
  variables:
    PRE_COMMIT_HOME: /pre-commit-cache
  script:
    - pre-commit run --all-files
  cache:
    key: pre-commit-$HOOK_ID
    paths:
      - /pre-commit-cache
  parallel:
    matrix:
      - HOOK_ID:
16 .gitlab-ci/shellcheck.yml Normal file
@@ -0,0 +1,16 @@
---
shellcheck:
  extends: .job
  stage: unit-tests
  tags: [light]
  variables:
    SHELLCHECK_VERSION: v0.6.0
  before_script:
    - ./tests/scripts/rebase.sh
    - curl --silent --location "https://github.com/koalaman/shellcheck/releases/download/"${SHELLCHECK_VERSION}"/shellcheck-"${SHELLCHECK_VERSION}".linux.x86_64.tar.xz" | tar -xJv
    - cp shellcheck-"${SHELLCHECK_VERSION}"/shellcheck /usr/bin/
    - shellcheck --version
  script:
    # Run shellcheck for all *.sh except contrib/
    - find . -name '*.sh' -not -path './contrib/*' -not -path './.git/*' | xargs shellcheck --severity error
  except: ['triggers', 'master']
@@ -2,10 +2,6 @@
# Tests for contrib/terraform/
.terraform_install:
  extends: .job
  needs:
    - ci-not-authorized
    - pipeline-image
  stage: deploy-part1
  before_script:
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 1
    - ./tests/scripts/rebase.sh
@@ -16,31 +12,29 @@
    # Prepare inventory
    - cp contrib/terraform/$PROVIDER/sample-inventory/cluster.tfvars .
    - ln -s contrib/terraform/$PROVIDER/hosts
    - terraform -chdir="contrib/terraform/$PROVIDER" init
    - terraform init contrib/terraform/$PROVIDER
    # Copy SSH keypair
    - mkdir -p ~/.ssh
    - echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
    - chmod 400 ~/.ssh/id_rsa
    - echo "$PACKET_PUBLIC_KEY" | base64 -d > ~/.ssh/id_rsa.pub
    - mkdir -p contrib/terraform/$PROVIDER/group_vars
    - mkdir -p group_vars
    # Random subnet to avoid routing conflicts
    - export TF_VAR_subnet_cidr="10.$(( $RANDOM % 256 )).$(( $RANDOM % 256 )).0/24"

.terraform_validate:
  extends: .terraform_install
  tags: [ffci]
  stage: unit-tests
  tags: [light]
  only: ['master', /^pr-.*$/]
  script:
    - terraform -chdir="contrib/terraform/$PROVIDER" validate
    - terraform -chdir="contrib/terraform/$PROVIDER" fmt -check -diff
  stage: test
  needs:
    - pipeline-image
    - terraform validate -var-file=cluster.tfvars contrib/terraform/$PROVIDER
    - terraform fmt -check -diff contrib/terraform/$PROVIDER

.terraform_apply:
  extends: .terraform_install
  tags: [ffci]
  stage: deploy-extended
  tags: [light]
  stage: deploy-part3
  when: manual
  only: [/^pr-.*$/]
  artifacts:
@@ -57,74 +51,56 @@
    - tests/scripts/testcases_run.sh
  after_script:
    # Cleanup regardless of exit code
    - ./tests/scripts/testcases_cleanup.sh
    - chronic ./tests/scripts/testcases_cleanup.sh

tf-validate-openstack:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    TF_VERSION: 0.12.24
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-equinix:
tf-validate-packet:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: equinix
    TF_VERSION: 0.12.24
    PROVIDER: packet
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-aws:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    TF_VERSION: 0.12.24
    PROVIDER: aws
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-exoscale:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: exoscale

tf-validate-hetzner:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: hetzner

tf-validate-vsphere:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: vsphere
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-upcloud:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: upcloud
    CLUSTER: $CI_COMMIT_REF_NAME

tf-validate-nifcloud:
  extends: .terraform_validate
  variables:
    TF_VERSION: $TERRAFORM_VERSION
    PROVIDER: nifcloud

# tf-packet-ubuntu20-default:
# tf-packet-ubuntu16-default:
#   extends: .terraform_apply
#   variables:
#     TF_VERSION: $TERRAFORM_VERSION
#     TF_VERSION: 0.12.24
#     PROVIDER: packet
#     CLUSTER: $CI_COMMIT_REF_NAME
#     TF_VAR_number_of_k8s_masters: "1"
#     TF_VAR_number_of_k8s_nodes: "1"
#     TF_VAR_plan_k8s_masters: t1.small.x86
#     TF_VAR_plan_k8s_nodes: t1.small.x86
#     TF_VAR_metro: am
#     TF_VAR_facility: ewr1
#     TF_VAR_public_key_path: ""
#     TF_VAR_operating_system: ubuntu_20_04
#     TF_VAR_operating_system: ubuntu_16_04
#
# tf-packet-ubuntu18-default:
#   extends: .terraform_apply
#   variables:
#     TF_VERSION: 0.12.24
#     PROVIDER: packet
#     CLUSTER: $CI_COMMIT_REF_NAME
#     TF_VAR_number_of_k8s_masters: "1"
#     TF_VAR_number_of_k8s_nodes: "1"
#     TF_VAR_plan_k8s_masters: t1.small.x86
#     TF_VAR_plan_k8s_nodes: t1.small.x86
#     TF_VAR_facility: ams1
#     TF_VAR_public_key_path: ""
#     TF_VAR_operating_system: ubuntu_18_04

.ovh_variables: &ovh_variables
  OS_AUTH_URL: https://auth.cloud.ovh.net/v3
@@ -150,9 +126,14 @@ tf-validate-nifcloud:
  OS_INTERFACE: public
  OS_IDENTITY_API_VERSION: "3"
  TF_VAR_router_id: "ab95917c-41fb-4881-b507-3a6dfe9403df"
  # Since ELASTX is in Stockholm, Mitogen helps with latency
  MITOGEN_ENABLE: "false"
  # Mitogen doesn't support interpreter discovery yet
  ANSIBLE_PYTHON_INTERPRETER: "/usr/bin/python3"

tf-elastx_cleanup:
  tags: [ffci]
  stage: unit-tests
  tags: [light]
  image: python
  variables:
    <<: *elastx_variables
@@ -160,16 +141,14 @@ tf-elastx_cleanup:
    - pip install -r scripts/openstack-cleanup/requirements.txt
  script:
    - ./scripts/openstack-cleanup/main.py
  allow_failure: true

tf-elastx_ubuntu20-calico:
tf-elastx_ubuntu18-calico:
  extends: .terraform_apply
  stage: deploy-part1
  stage: deploy-part3
  when: on_success
  allow_failure: true
  variables:
    <<: *elastx_variables
    TF_VERSION: $TERRAFORM_VERSION
    TF_VERSION: 0.12.24
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME
    ANSIBLE_TIMEOUT: "60"
@@ -192,48 +171,47 @@ tf-elastx_ubuntu20-calico:
    TF_VAR_az_list_node: '["sto1"]'
    TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
    TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
    TF_VAR_image: ubuntu-20.04-server-latest
    TF_VAR_image: ubuntu-18.04-server-latest
    TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

# OVH voucher expired, commenting job until things are sorted out

# tf-ovh_cleanup:
#   stage: unit-tests
#   tags: [light]
#   image: python
#   environment: ovh
#   variables:
#     <<: *ovh_variables
#   before_script:
#     - pip install -r scripts/openstack-cleanup/requirements.txt
#   script:
#     - ./scripts/openstack-cleanup/main.py
tf-ovh_cleanup:
  stage: unit-tests
  tags: [light]
  image: python
  environment: ovh
  variables:
    <<: *ovh_variables
  before_script:
    - pip install -r scripts/openstack-cleanup/requirements.txt
  script:
    - ./scripts/openstack-cleanup/main.py

# tf-ovh_ubuntu20-calico:
#   extends: .terraform_apply
#   when: on_success
#   environment: ovh
#   variables:
#     <<: *ovh_variables
#     TF_VERSION: $TERRAFORM_VERSION
#     PROVIDER: openstack
#     CLUSTER: $CI_COMMIT_REF_NAME
#     ANSIBLE_TIMEOUT: "60"
#     SSH_USER: ubuntu
#     TF_VAR_number_of_k8s_masters: "0"
#     TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
#     TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
#     TF_VAR_number_of_etcd: "0"
#     TF_VAR_number_of_k8s_nodes: "0"
#     TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
#     TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
#     TF_VAR_number_of_bastions: "0"
#     TF_VAR_number_of_k8s_masters_no_etcd: "0"
#     TF_VAR_use_neutron: "0"
#     TF_VAR_floatingip_pool: "Ext-Net"
#     TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
#     TF_VAR_network_name: "Ext-Net"
#     TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
#     TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
#     TF_VAR_image: "Ubuntu 20.04"
#     TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
tf-ovh_ubuntu18-calico:
  extends: .terraform_apply
  when: on_success
  environment: ovh
  variables:
    <<: *ovh_variables
    TF_VERSION: 0.12.24
    PROVIDER: openstack
    CLUSTER: $CI_COMMIT_REF_NAME
    ANSIBLE_TIMEOUT: "60"
    SSH_USER: ubuntu
    TF_VAR_number_of_k8s_masters: "0"
    TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
    TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
    TF_VAR_number_of_etcd: "0"
    TF_VAR_number_of_k8s_nodes: "0"
    TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
    TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
    TF_VAR_number_of_bastions: "0"
    TF_VAR_number_of_k8s_masters_no_etcd: "0"
    TF_VAR_use_neutron: "0"
    TF_VAR_floatingip_pool: "Ext-Net"
    TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
    TF_VAR_network_name: "Ext-Net"
    TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
    TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
    TF_VAR_image: "Ubuntu 18.04"
    TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
@@ -1,63 +1,54 @@
---

molecule_tests:
  tags: [c3.small.x86]
  only: [/^pr-.*$/]
  except: ['triggers']
  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
  services: []
  stage: deploy-part1
  before_script:
    - tests/scripts/rebase.sh
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip install -r tests/requirements.txt
    - ./tests/scripts/vagrant_clean.sh
  script:
    - ./tests/scripts/molecule_run.sh

.vagrant:
  extends: .testcases
  needs:
    - ci-not-authorized
  variables:
    CI_PLATFORM: "vagrant"
    SSH_USER: "vagrant"
    VAGRANT_DEFAULT_PROVIDER: "libvirt"
    KUBESPRAY_VAGRANT_CONFIG: tests/files/${CI_JOB_NAME}.rb
    DOCKER_NAME: vagrant
    VAGRANT_ANSIBLE_TAGS: facts
  tags: [ffci-vm-large]
  # only: [/^pr-.*$/]
  # except: ['triggers']
  image: quay.io/kubespray/vm-kubespray-ci:v6
  tags: [c3.small.x86]
  only: [/^pr-.*$/]
  except: ['triggers']
  image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
  services: []
  before_script:
    - echo $USER
    - python3 -m venv citest
    - source citest/bin/activate
    - vagrant plugin expunge --reinstall --force --no-tty
    - vagrant plugin install vagrant-libvirt
    - pip install --no-compile --no-cache-dir pip -U
    - pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/requirements.txt
    - pip install --no-compile --no-cache-dir -r $CI_PROJECT_DIR/tests/requirements.txt
    - apt-get update && apt-get install -y python3-pip
    - update-alternatives --install /usr/bin/python python /usr/bin/python3 10
    - python -m pip install -r tests/requirements.txt
    - ./tests/scripts/vagrant_clean.sh
  script:
    - ./tests/scripts/testcases_run.sh
  after_script:
    - chronic ./tests/scripts/testcases_cleanup.sh

vagrant_ubuntu20-calico-dual-stack:
  stage: deploy-extended
  extends: .vagrant
  when: manual
  # FIXME: this test is broken (perma-failing)

vagrant_ubuntu20-flannel:
  stage: deploy-part1
vagrant_ubuntu18-flannel:
  stage: deploy-part2
  extends: .vagrant
  when: on_success
  allow_failure: false

vagrant_ubuntu20-flannel-collection:
  stage: deploy-extended
vagrant_ubuntu18-weave-medium:
  stage: deploy-part2
  extends: .vagrant
  when: manual

vagrant_ubuntu20-kube-router-sep:
  stage: deploy-extended
vagrant_ubuntu20-flannel:
  stage: deploy-part2
  extends: .vagrant
  when: manual

# Service proxy test fails connectivity testing
vagrant_ubuntu20-kube-router-svc-proxy:
  stage: deploy-extended
  extends: .vagrant
  when: manual

vagrant_fedora37-kube-router:
  stage: deploy-extended
  extends: .vagrant
  when: manual
  # FIXME: this test is broken (perma-failing)
  when: on_success
2 .markdownlint.yaml Normal file
@@ -0,0 +1,2 @@
---
MD013: false
@@ -1,4 +0,0 @@
all
exclude_rule 'MD013'
exclude_rule 'MD029'
rule 'MD007', :indent => 2
@@ -1,110 +0,0 @@
---
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.6.0
    hooks:
      - id: check-added-large-files
      - id: check-case-conflict
      - id: check-executables-have-shebangs
      - id: check-xml
      - id: check-merge-conflict
      - id: detect-private-key
      - id: end-of-file-fixer
      - id: forbid-new-submodules
      - id: requirements-txt-fixer
      - id: trailing-whitespace

  - repo: https://github.com/adrienverge/yamllint.git
    rev: v1.35.1
    hooks:
      - id: yamllint
        args: [--strict]

  - repo: https://github.com/markdownlint/markdownlint
    rev: v0.12.0
    hooks:
      - id: markdownlint
        exclude: "^.github|(^docs/_sidebar\\.md$)"

  - repo: https://github.com/shellcheck-py/shellcheck-py
    rev: v0.10.0.1
    hooks:
      - id: shellcheck
        args: ["--severity=error"]
        exclude: "^.git"
        files: "\\.sh$"

  - repo: https://github.com/ansible/ansible-lint
    rev: v24.5.0
    hooks:
      - id: ansible-lint
        additional_dependencies:
          - ansible==9.8.0
          - jsonschema==4.22.0
          - jmespath==1.0.1
          - netaddr==1.3.0
          - distlib

  - repo: https://github.com/golangci/misspell
    rev: v0.6.0
    hooks:
      - id: misspell
        exclude: "OWNERS_ALIASES$"

  - repo: local
    hooks:
      - id: ansible-syntax-check
        name: ansible-syntax-check
        entry: env ANSIBLE_INVENTORY=inventory/local-tests.cfg ANSIBLE_REMOTE_USER=root ANSIBLE_BECOME="true" ANSIBLE_BECOME_USER=root ANSIBLE_VERBOSITY="3" ansible-playbook --syntax-check
        language: python
        files: "^cluster.yml|^upgrade-cluster.yml|^reset.yml|^extra_playbooks/upgrade-only-k8s.yml"
        additional_dependencies:
          - ansible==9.5.1

      - id: tox-inventory-builder
        name: tox-inventory-builder
        entry: bash -c "cd contrib/inventory_builder && tox"
        language: python
        pass_filenames: false
        additional_dependencies:
          - tox==4.15.0

      - id: check-readme-versions
        name: check-readme-versions
        entry: tests/scripts/check_readme_versions.sh
        language: script
        pass_filenames: false

      - id: collection-build-install
        name: Build and install kubernetes-sigs.kubespray Ansible collection
        language: python
        additional_dependencies:
          - ansible-core>=2.16.4
          - distlib
        entry: tests/scripts/collection-build-install.sh
        pass_filenames: false

      - id: generate-docs-sidebar
        name: generate-docs-sidebar
        entry: scripts/gen_docs_sidebar.sh
        language: script
        pass_filenames: false

      - id: ci-matrix
        name: ci-matrix
        entry: tests/scripts/md-table/main.py
        language: python
        pass_filenames: false
        additional_dependencies:
          - jinja2
          - pathlib
          - pyaml

      - id: jinja-syntax-check
        name: jinja-syntax-check
        entry: tests/scripts/check-templates.py
        language: python
        types:
          - jinja
        additional_dependencies:
          - jinja2
13 .yamllint
@@ -1,12 +1,6 @@
---
extends: default

ignore: |
  .git/
  .github/
  # Generated file
  tests/files/custom_cni/cilium.yaml
# https://ansible.readthedocs.io/projects/lint/rules/yaml/
rules:
  braces:
    min-spaces-inside: 0
@@ -14,16 +8,9 @@ rules:
  brackets:
    min-spaces-inside: 0
    max-spaces-inside: 1
  comments:
    min-spaces-from-content: 1
  # https://github.com/adrienverge/yamllint/issues/384
  comments-indentation: false
  indentation:
    spaces: 2
    indent-sequences: consistent
  line-length: disable
  new-line-at-end-of-file: disable
  octal-values:
    forbid-implicit-octal: true # yamllint defaults to false
    forbid-explicit-octal: true # yamllint defaults to false
  truthy: disable
@@ -1 +0,0 @@
# See our release notes on [GitHub](https://github.com/kubernetes-sigs/kubespray/releases)
@@ -6,23 +6,11 @@

It is recommended to use filters to manage the GitHub email notification, see [examples for setting filters to Kubernetes Github notifications](https://github.com/kubernetes/community/blob/master/communication/best-practices.md#examples-for-setting-filters-to-kubernetes-github-notifications)

To install development dependencies you can set up a python virtual env with the necessary dependencies:

```ShellSession
virtualenv venv
source venv/bin/activate
pip install -r tests/requirements.txt
ansible-galaxy install -r tests/requirements.yml
```
To install development dependencies you can use `pip install -r tests/requirements.txt`

#### Linting

Kubespray uses [pre-commit](https://pre-commit.com) hook configuration to run several linters, please install this tool and use it to run validation tests before submitting a PR.

```ShellSession
pre-commit install
pre-commit run -a # To run pre-commit hook on all files in the repository, even if they were not modified
```
Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `./tests/scripts/ansible-lint.sh`

#### Molecule

@@ -39,9 +27,5 @@ Vagrant with VirtualBox or libvirt driver helps you to quickly spin test cluster
1. Submit an issue describing your proposed change to the repo in question.
2. The [repo owners](OWNERS) will respond to your issue promptly.
3. Fork the desired repo, develop and test your code changes.
4. Install [pre-commit](https://pre-commit.com) and enable it in your development repo.
5. Address any pre-commit validation failures.
6. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
7. Submit a pull request.
8. Work with the reviewers on their suggestions.
9. Ensure to rebase to the HEAD of your target branch and squash unnecessary commits (<https://blog.carbonfive.com/always-squash-and-rebase-your-git-commits/>) before final merger of your contribution.
4. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
5. Submit a pull request.
67 Dockerfile
@@ -1,52 +1,21 @@
# syntax=docker/dockerfile:1
FROM ubuntu:18.04

# Use immutable image tags rather than mutable tags (like ubuntu:22.04)
FROM ubuntu:22.04@sha256:149d67e29f765f4db62aa52161009e99e389544e25a8f43c8c89d4a445a7ca37
RUN mkdir /kubespray
WORKDIR /kubespray
RUN apt update -y && \
    apt install -y \
    libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
    ca-certificates curl gnupg2 software-properties-common python3-pip rsync
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
    add-apt-repository \
    "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
    $(lsb_release -cs) \
    stable" \
    && apt update -y && apt-get install docker-ce -y
COPY . .
RUN /usr/bin/python3 -m pip install pip -U && /usr/bin/python3 -m pip install -r tests/requirements.txt && python3 -m pip install -r requirements.txt && update-alternatives --install /usr/bin/python python /usr/bin/python3 1
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.17.5/bin/linux/amd64/kubectl \
    && chmod a+x kubectl && cp kubectl /usr/local/bin/kubectl

# Some tools like yamllint need this
# Pip needs this as well at the moment to install ansible
# (and potentially other packages)
# See: https://github.com/pypa/pip/issues/10219
ENV LANG=C.UTF-8 \
    DEBIAN_FRONTEND=noninteractive \
    PYTHONDONTWRITEBYTECODE=1

WORKDIR /kubespray

# hadolint ignore=DL3008
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    apt-get update -q \
    && apt-get install -yq --no-install-recommends \
       curl \
       python3 \
       python3-pip \
       sshpass \
       vim \
       rsync \
       openssh-client \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/log/*

RUN --mount=type=bind,source=requirements.txt,target=requirements.txt \
    --mount=type=cache,sharing=locked,id=pipcache,mode=0777,target=/root/.cache/pip \
    pip install --no-compile --no-cache-dir -r requirements.txt \
    && find /usr -type d -name '*__pycache__' -prune -exec rm -rf {} \;

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

RUN --mount=type=bind,source=roles/kubespray-defaults/defaults/main/main.yml,target=roles/kubespray-defaults/defaults/main/main.yml \
    KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main/main.yml) \
    OS_ARCHITECTURE=$(dpkg --print-architecture) \
    && curl -L "https://dl.k8s.io/release/${KUBE_VERSION}/bin/linux/${OS_ARCHITECTURE}/kubectl" -o /usr/local/bin/kubectl \
    && echo "$(curl -L "https://dl.k8s.io/release/${KUBE_VERSION}/bin/linux/${OS_ARCHITECTURE}/kubectl.sha256")" /usr/local/bin/kubectl | sha256sum --check \
    && chmod a+x /usr/local/bin/kubectl

COPY *.yml ./
COPY *.cfg ./
COPY roles ./roles
COPY contrib ./contrib
COPY inventory ./inventory
COPY library ./library
COPY extra_playbooks ./extra_playbooks
COPY playbooks ./playbooks
COPY plugins ./plugins
ENV LANG=C.UTF-8
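A quick smoke test of the Dockerfile above; the image tag is illustrative:

```ShellSession
docker build -t kubespray:local .
# kubectl is baked into the image, so this verifies the build end to end
docker run --rm kubespray:local kubectl version --client
```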
2 LICENSE
@@ -187,7 +187,7 @@
   identification within third-party archives.

   Copyright 2016 Kubespray

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
4 Makefile
@@ -1,7 +1,5 @@
mitogen:
	@echo Mitogen support is deprecated.
	@echo Please run the following command manually:
	@echo ansible-playbook -c local mitogen.yml -vv
	ansible-playbook -c local mitogen.yml -vv
clean:
	rm -rf dist/
	rm *.retry
2 OWNERS
@@ -4,5 +4,3 @@ approvers:
  - kubespray-approvers
reviewers:
  - kubespray-reviewers
emeritus_approvers:
  - kubespray-emeritus_approvers
@@ -1,24 +1,19 @@
aliases:
  kubespray-approvers:
    - cristicalin
    - floryut
    - liupeng0518
    - mzaian
    - oomichi
    - yankay
    - ant31
  kubespray-reviewers:
    - cyclinder
    - erikjiang
    - mrfreezeex
    - mzaian
    - vannten
    - yankay
  kubespray-emeritus_approvers:
    - mattymo
    - atoms
    - chadswen
    - luckysb
    - mattymo
    - mirwan
    - miouge1
    - riverzhang
    - verwilst
    - woopstar
    - luckysb
  kubespray-reviewers:
    - jjungnickel
    - archifleks
    - holmsten
    - bozzo
    - floryut
    - eppo
245 README.md
@@ -5,7 +5,7 @@
If you have questions, check the documentation at [kubespray.io](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
You can get your invite [here](http://slack.k8s.io/)

- Can be deployed on **[AWS](docs/cloud_providers/aws.md), GCE, [Azure](docs/cloud_providers/azure.md), [OpenStack](docs/cloud_providers/openstack.md), [vSphere](docs/cloud_providers/vsphere.md), [Equinix Metal](docs/cloud_providers/equinix-metal.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
- Can be deployed on **[AWS](docs/aws.md), GCE, [Azure](docs/azure.md), [OpenStack](docs/openstack.md), [vSphere](docs/vsphere.md), [Packet](docs/packet.md) (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**
- **Highly available** cluster
- **Composable** (Choice of the network plugin for instance)
- Supports most popular **Linux distributions**
@@ -13,16 +13,16 @@ You can get your invite [here](http://slack.k8s.io/)

## Quick Start

Below are several ways to use Kubespray to deploy a Kubernetes cluster.
To deploy the cluster you can use:

### Ansible

#### Usage

Install Ansible according to [Ansible installation guide](/docs/ansible/ansible.md#installing-ansible)
then run the following steps:

```ShellSession
# Install dependencies from ``requirements.txt``
sudo pip3 install -r requirements.txt

# Copy ``inventory/sample`` as ``inventory/mycluster``
cp -rfp inventory/sample inventory/mycluster

@@ -32,14 +32,7 @@ CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inv

# Review and change parameters under ``inventory/mycluster/group_vars``
cat inventory/mycluster/group_vars/all/all.yml
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml

# Clean up old Kubernetes cluster with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example cleaning up SSL keys in /etc/,
# uninstalling old packages and interacting with various systemd daemons.
# Without --become the playbook will fail to run!
# Be aware that it will remove the current kubernetes cluster (if it's running)!
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root reset.yml
cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml

# Deploy Kubespray with Ansible Playbook - run the playbook as root
# The option `--become` is required, as for example writing SSL keys in /etc/,
@@ -48,172 +41,123 @@ ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```

Note: When Ansible is already installed via system packages on the control node,
Python packages installed via `sudo pip install -r requirements.txt` will go to
a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on
Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on
Ubuntu). As a consequence, the `ansible-playbook` command will fail with:
Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu).
As a consequence, the `ansible-playbook` command will fail with:

```raw
ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
```

This likely indicates that a task depends on a module present in ``requirements.txt``.
probably pointing at a task depending on a module present in requirements.txt (e.g. "unseal vault").

One way of addressing this is to uninstall the system Ansible package then
reinstall Ansible via ``pip``, but this is not always possible and one must
take care regarding package versions.
A workaround consists of setting the `ANSIBLE_LIBRARY`
and `ANSIBLE_MODULE_UTILS` environment variables respectively to
the `ansible/modules` and `ansible/module_utils` subdirectories of the ``pip``
installation location, which is the ``Location`` shown by running
`pip show [package]` before executing `ansible-playbook`.
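A minimal sketch of that workaround, assuming Ansible was installed with `pip` (the `Location` field comes from `pip show ansible`):

```ShellSession
# Point Ansible at the pip-installed modules rather than the system package's
ANSIBLE_LOCATION="$(pip show ansible | sed -n 's/^Location: //p')"
export ANSIBLE_LIBRARY="${ANSIBLE_LOCATION}/ansible/modules"
export ANSIBLE_MODULE_UTILS="${ANSIBLE_LOCATION}/ansible/module_utils"
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```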
A simple way to ensure you get the correct version of Ansible is to use
the [pre-built docker image from Quay](https://quay.io/repository/kubespray/kubespray?tab=tags).
You will then need to use [bind mounts](https://docs.docker.com/storage/bind-mounts/)
to access the inventory and SSH key in the container, like this:

```ShellSession
git checkout v2.25.0
docker pull quay.io/kubespray/kubespray:v2.25.0
docker run --rm -it --mount type=bind,source="$(pwd)"/inventory/sample,dst=/inventory \
  --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa \
  quay.io/kubespray/kubespray:v2.25.0 bash
# Inside the container you may now run the kubespray playbooks:
ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml
```

#### Collection

See [here](docs/ansible/ansible_collection.md) if you wish to use this repository as an Ansible collection
One way of solving this would be to uninstall the Ansible package and then install it via pip, but that is not always possible.
A workaround consists of setting the `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` environment variables respectively to the `ansible/modules` and `ansible/module_utils` subdirectories of the pip packages installation location, which can be found in the Location field of the output of `pip show [package]`, before executing `ansible-playbook`.

### Vagrant

For Vagrant we need to install Python dependencies for provisioning tasks.
Check that ``Python`` and ``pip`` are installed:
For Vagrant we need to install python dependencies for provisioning tasks.
Check if Python and pip are installed:

```ShellSession
python -V && pip -V
```

If this returns the version of the software, you're good to go. If not, download and install Python from here <https://www.python.org/downloads/source/>

Install Ansible according to [Ansible installation guide](/docs/ansible/ansible.md#installing-ansible)
then run the following step:
Install the necessary requirements

```ShellSession
sudo pip install -r requirements.txt
vagrant up
```

## Documents

- [Requirements](#requirements)
- [Kubespray vs ...](docs/getting_started/comparisons.md)
- [Getting started](docs/getting_started/getting-started.md)
- [Setting up your first cluster](docs/getting_started/setting-up-your-first-cluster.md)
- [Ansible inventory and tags](docs/ansible/ansible.md)
- [Integration with existing ansible repo](docs/operations/integration.md)
- [Deployment data variables](docs/ansible/vars.md)
- [DNS stack](docs/advanced/dns-stack.md)
- [HA mode](docs/operations/ha-mode.md)
- [Kubespray vs ...](docs/comparisons.md)
- [Getting started](docs/getting-started.md)
- [Setting up your first cluster](docs/setting-up-your-first-cluster.md)
- [Ansible inventory and tags](docs/ansible.md)
- [Integration with existing ansible repo](docs/integration.md)
- [Deployment data variables](docs/vars.md)
- [DNS stack](docs/dns-stack.md)
- [HA mode](docs/ha-mode.md)
- [Network plugins](#network-plugins)
- [Vagrant install](docs/developers/vagrant.md)
- [Flatcar Container Linux bootstrap](docs/operating_systems/flatcar.md)
- [Fedora CoreOS bootstrap](docs/operating_systems/fcos.md)
- [openSUSE setup](docs/operating_systems/opensuse.md)
- [Downloaded artifacts](docs/advanced/downloads.md)
- [Cloud providers](docs/cloud_providers/cloud.md)
- [OpenStack](docs/cloud_providers/openstack.md)
- [AWS](docs/cloud_providers/aws.md)
- [Azure](docs/cloud_providers/azure.md)
- [vSphere](docs/cloud_providers/vsphere.md)
- [Equinix Metal](docs/cloud_providers/equinix-metal.md)
- [Large deployments](docs/operations/large-deployments.md)
- [Adding/replacing a node](docs/operations/nodes.md)
- [Upgrades basics](docs/operations/upgrades.md)
- [Air-Gap installation](docs/operations/offline-environment.md)
- [NTP](docs/advanced/ntp.md)
- [Hardening](docs/operations/hardening.md)
- [Mirror](docs/operations/mirror.md)
- [Roadmap](docs/roadmap/roadmap.md)
- [Vagrant install](docs/vagrant.md)
- [Flatcar Container Linux bootstrap](docs/flatcar.md)
- [Fedora CoreOS bootstrap](docs/fcos.md)
- [Debian Jessie setup](docs/debian.md)
- [openSUSE setup](docs/opensuse.md)
- [Downloaded artifacts](docs/downloads.md)
- [Cloud providers](docs/cloud.md)
- [OpenStack](docs/openstack.md)
- [AWS](docs/aws.md)
- [Azure](docs/azure.md)
- [vSphere](docs/vsphere.md)
- [Packet Host](docs/packet.md)
- [Large deployments](docs/large-deployments.md)
- [Adding/replacing a node](docs/nodes.md)
- [Upgrades basics](docs/upgrades.md)
- [Air-Gap installation](docs/offline-environment.md)
- [Roadmap](docs/roadmap.md)

## Supported Linux Distributions

- **Flatcar Container Linux by Kinvolk**
- **Debian** Bookworm, Bullseye
- **Ubuntu** 20.04, 22.04, 24.04
- **CentOS/RHEL** 7, [8, 9](docs/operating_systems/centos.md#centos-8)
- **Fedora** 37, 38
- **Fedora CoreOS** (see [fcos Note](docs/operating_systems/fcos.md))
- **openSUSE** Leap 15.x/Tumbleweed
- **Oracle Linux** 7, [8, 9](docs/operating_systems/centos.md#centos-8)
- **Alma Linux** [8, 9](docs/operating_systems/centos.md#centos-8)
- **Rocky Linux** [8, 9](docs/operating_systems/centos.md#centos-8)
- **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/operating_systems/kylinlinux.md))
- **Amazon Linux 2** (experimental: see [amazon linux notes](docs/operating_systems/amazonlinux.md))
- **UOS Linux** (experimental: see [uos linux notes](docs/operating_systems/uoslinux.md))
- **openEuler** (experimental: see [openEuler notes](docs/operating_systems/openeuler.md))
- **Debian** Buster, Jessie, Stretch, Wheezy
- **Ubuntu** 16.04, 18.04, 20.04
- **CentOS/RHEL** 7, 8 (experimental: see [centos 8 notes](docs/centos8.md))
- **Fedora** 31, 32
- **Fedora CoreOS** (experimental: see [fcos Note](docs/fcos.md))
- **openSUSE** Leap 42.3/Tumbleweed
- **Oracle Linux** 7, 8 (experimental: [centos 8 notes](docs/centos8.md) apply)

Note: Upstart/SysV init based OS types are not supported.

## Supported Components

- Core
  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.30.3
  - [etcd](https://github.com/etcd-io/etcd) v3.5.12
  - [docker](https://www.docker.com/) v26.1
  - [containerd](https://containerd.io/) v1.7.20
  - [cri-o](http://cri-o.io/) v1.30.3 (experimental: see [CRI-O Note](docs/CRI/cri-o.md). Only on fedora, ubuntu and centos based OS)
  - [kubernetes](https://github.com/kubernetes/kubernetes) v1.18.10
  - [etcd](https://github.com/coreos/etcd) v3.4.3
  - [docker](https://www.docker.com/) v19.03 (see note)
  - [containerd](https://containerd.io/) v1.2.13
  - [cri-o](http://cri-o.io/) v1.17 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
  - [cni-plugins](https://github.com/containernetworking/plugins) v1.2.0
  - [calico](https://github.com/projectcalico/calico) v3.27.3
  - [cilium](https://github.com/cilium/cilium) v1.15.4
  - [flannel](https://github.com/flannel-io/flannel) v0.22.0
  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.11.5
  - [kube-router](https://github.com/cloudnativelabs/kube-router) v2.0.0
  - [multus](https://github.com/k8snetworkplumbingwg/multus-cni) v3.8
  - [weave](https://github.com/rajch/weave) v2.8.7
  - [kube-vip](https://github.com/kube-vip/kube-vip) v0.8.0
  - [cni-plugins](https://github.com/containernetworking/plugins) v0.8.6
  - [calico](https://github.com/projectcalico/calico) v3.15.2
  - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
  - [cilium](https://github.com/cilium/cilium) v1.8.3
  - [contiv](https://github.com/contiv/install) v1.2.1
  - [flanneld](https://github.com/coreos/flannel) v0.12.0
  - [kube-ovn](https://github.com/alauda/kube-ovn) v1.3.0
  - [kube-router](https://github.com/cloudnativelabs/kube-router) v1.0.1
  - [multus](https://github.com/intel/multus-cni) v3.6.0
  - [ovn4nfv](https://github.com/opnfv/ovn4nfv-k8s-plugin) v1.1.0
  - [weave](https://github.com/weaveworks/weave) v2.7.0
- Application
  - [cert-manager](https://github.com/jetstack/cert-manager) v1.14.7
  - [coredns](https://github.com/coredns/coredns) v1.11.1
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.10.1
  - [krew](https://github.com/kubernetes-sigs/krew) v0.4.4
  - [argocd](https://argoproj.github.io/) v2.11.0
  - [helm](https://helm.sh/) v3.14.2
  - [metallb](https://metallb.universe.tf/) v0.13.9
  - [registry](https://github.com/distribution/distribution) v2.8.1
- Storage Plugin
  - [ambassador](https://github.com/datawire/ambassador): v1.5
  - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
  - [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
  - [aws-ebs-csi-plugin](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) v0.5.0
  - [azure-csi-plugin](https://github.com/kubernetes-sigs/azuredisk-csi-driver) v1.10.0
  - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.30.0
  - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.9.2
  - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.24
  - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0
  - [node-feature-discovery](https://github.com/kubernetes-sigs/node-feature-discovery) v0.14.2
  - [cert-manager](https://github.com/jetstack/cert-manager) v0.16.1
  - [coredns](https://github.com/coredns/coredns) v1.6.7
  - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.35.0

## Container Runtime Notes

- The cri-o version should be aligned with the respective kubernetes version (e.g. kube_version=1.20.x, crio_version=1.20)
Note: The list of validated [docker versions](https://kubernetes.io/docs/setup/production-environment/container-runtimes/#docker) is 1.13.1, 17.03, 17.06, 17.09, 18.06, 18.09 and 19.03. The recommended docker version is 19.03. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pinning.
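One way to apply the version pinning suggested above on Debian/Ubuntu (package names depend on how docker was installed):

```ShellSession
# Hold the container runtime packages so unattended upgrades can't move them
sudo apt-mark hold docker-ce docker-ce-cli containerd.io
```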
## Requirements

- **Minimum required version of Kubernetes is v1.28**
- **Ansible v2.14+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/operations/offline-environment.md))
- **Minimum required version of Kubernetes is v1.17**
- **Ansible v2.9+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](docs/offline-environment.md))
- The target servers are configured to allow **IPv4 forwarding**.
- If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**.
- **Your ssh key must be copied** to all the servers that are part of your inventory.
- The **firewalls are not managed**: you'll need to implement your own rules the way you used to.
  In order to avoid any issue during deployment, you should disable your firewall.
- If kubespray is run from non-root user account, correct privilege escalation method
- If kubespray is ran from non-root user account, correct privilege escalation method
  should be configured in the target servers. Then the `ansible_become` flag
  or command parameters `--become or -b` should be specified.

Hardware:
These limits are safeguarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.
These limits are safe guarded by Kubespray. Actual requirements for your workload can differ. For a sizing guide go to the [Building Large Clusters](https://kubernetes.io/docs/setup/cluster-large/#size-of-master-and-master-components) guide.

- Master
  - Memory: 1500 MB
@@ -222,43 +166,47 @@ These limits are safeguarded by Kubespray. Actual requirements for your workload

## Network Plugins

You can choose among ten network plugins. (default: `calico`, except Vagrant uses `flannel`)
You can choose between 10 network plugins. (default: `calico`, except Vagrant uses `flannel`)

- [flannel](docs/CNI/flannel.md): gre/vxlan (layer 2) networking.
- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.

- [Calico](https://docs.tigera.io/calico/latest/about/) is a networking and network policy provider. Calico supports a flexible set of networking options
- [Calico](https://docs.projectcalico.org/latest/introduction/) is a networking and network policy provider. Calico supports a flexible set of networking options
  designed to give you the most efficient networking across a range of situations, including non-overlay
  and overlay networks, with or without BGP. Calico uses the same engine to enforce network policy for hosts,
  pods, and (if using Istio and Envoy) applications at the service mesh layer.

- [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.

- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.

- [weave](docs/CNI/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
- [contiv](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
  apply firewall policies, segregate containers in multiple networks and bridge pods onto physical networks.

- [ovn4nfv](docs/ovn4nfv.md): [ovn4nfv-k8s-plugins](https://github.com/opnfv/ovn4nfv-k8s-plugin) is the network controller, OVS agent and CNI server to offer basic SFC and OVN overlay networking.

- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
  (Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).

- [kube-ovn](docs/CNI/kube-ovn.md): Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises.
- [kube-ovn](docs/kube-ovn.md): Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises.

- [kube-router](docs/CNI/kube-router.md): Kube-router is a L3 CNI for Kubernetes networking aiming to provide operational
- [kube-router](docs/kube-router.md): Kube-router is a L3 CNI for Kubernetes networking aiming to provide operational
  simplicity and high performance: it uses IPVS to provide Kube Services Proxy (if setup to replace kube-proxy),
  iptables for network policies, and BGP for pods' L3 networking (optionally with BGP peering with out-of-cluster BGP peers).
  It can also optionally advertise routes to Kubernetes cluster Pods CIDRs, ClusterIPs, ExternalIPs and LoadBalancerIPs.

- [macvlan](docs/CNI/macvlan.md): Macvlan is a Linux network driver. Pods have their own unique MAC and IP address, connected directly to the physical (layer 2) network.
- [macvlan](docs/macvlan.md): Macvlan is a Linux network driver. Pods have their own unique MAC and IP address, connected directly to the physical (layer 2) network.

- [multus](docs/CNI/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.
- [multus](docs/multus.md): Multus is a meta CNI plugin that provides multiple network interface support to pods. For each interface Multus delegates CNI calls to secondary CNI plugins such as Calico, macvlan, etc.

- [custom_cni](roles/network-plugin/custom_cni/): You can specify some manifests that will be applied to the clusters to bring your own CNI and use ones not supported by Kubespray.
  See `tests/files/custom_cni/README.md` and `tests/files/custom_cni/values.yaml` for an example with a CNI provided by a Helm Chart.

The network plugin to use is defined by the variable `kube_network_plugin`. There is also an
The choice is defined with the variable `kube_network_plugin`. There is also an
option to leverage built-in cloud provider networking instead.
See also [Network checker](docs/advanced/netcheck.md).
See also [Network checker](docs/netcheck.md).
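A minimal sketch of switching plugins via that variable (inventory path follows the Quick Start above; older branches use a `k8s-cluster` group_vars directory instead):

```ShellSession
# Show the current setting, then switch the cluster definition to cilium
grep kube_network_plugin inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
sed -i 's/^kube_network_plugin: .*/kube_network_plugin: cilium/' \
  inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
```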
## Ingress Plugins

- [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller.
- [ambassador](docs/ambassador.md): the Ambassador Ingress Controller and API gateway.

- [metallb](docs/ingress/metallb.md): the MetalLB bare-metal service LoadBalancer provider.
- [nginx](https://kubernetes.github.io/ingress-nginx): the NGINX Ingress Controller.

## Community docs and resources

@@ -271,12 +219,11 @@ See also [Network checker](docs/advanced/netcheck.md).

- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/v4/doc/integrations/ansible.rst)
- [Terraform Contrib](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform)
- [Kubean](https://github.com/kubean-io/kubean)

## CI Tests

[](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/-/pipelines)
[](https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/pipelines)

CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Equinix Metal](https://metal.equinix.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/).
CI/end-to-end tests sponsored by: [CNCF](https://cncf.io), [Packet](https://www.packet.com/), [OVHcloud](https://www.ovhcloud.com/), [ELASTX](https://elastx.se/).

See the [test matrix](docs/developers/test_cases.md) for details.
See the [test matrix](docs/test_cases.md) for details.
60 RELEASE.md
@@ -2,21 +2,17 @@

The Kubespray Project is released on an as-needed basis. The process is as follows:

1. An issue is proposing a new release with a changelog since the last release. Please see [a good sample issue](https://github.com/kubernetes-sigs/kubespray/issues/8325)
1. At least one of the [approvers](OWNERS_ALIASES) must approve this release
1. (Only for major releases) The `kube_version_min_required` variable is set to `n-1`
1. (Only for major releases) Remove hashes for [EOL versions](https://github.com/kubernetes/website/blob/main/content/en/releases/patch-releases.md) of kubernetes from `*_checksums` variables.
1. Create the release note with [Kubernetes Release Notes Generator](https://github.com/kubernetes/release/blob/master/cmd/release-notes/README.md). See the following `Release note creation` section for the details.
1. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
1. (Only for major releases) An approver creates a release branch in the form `release-X.Y`
1. (For major releases) On the `master` branch: bump the version in `galaxy.yml` to the next expected major release (X.y.0 with y = Y + 1), make a Pull Request (see the sketch after this list).
1. (For minor releases) On the `release-X.Y` branch: bump the version in `galaxy.yml` to the next expected minor release (X.Y.z with z = Z + 1), make a Pull Request.
1. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) container images are built and tagged. See the following `Container image creation` section for the details.
1. (Only for major releases) The `KUBESPRAY_VERSION` in `.gitlab-ci.yml` is upgraded to the version we just released # TODO clarify this, this variable is for testing upgrades.
1. The release issue is closed
1. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
1. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
1. Create/Update Issue for upgrading kubernetes and [k8s-conformance](https://github.com/cncf/k8s-conformance)
1. An issue is proposing a new release with a changelog since the last release
2. At least one of the [approvers](OWNERS_ALIASES) must approve this release
3. The `kube_version_min_required` variable is set to `n-1`
4. Remove hashes for [EOL versions](https://github.com/kubernetes/sig-release/blob/master/releases/patch-releases.md) of kubernetes from `*_checksums` variables.
5. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
6. An approver creates a release branch in the form `release-X.Y`
7. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) docker images are built and tagged
8. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
9. The release issue is closed
10. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
11. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`
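A sketch of the `galaxy.yml` bump described in the steps above (branch names and version numbers are illustrative):

```ShellSession
# On master, after cutting release-2.26: point galaxy.yml at the next major release
sed -i 's/^version: .*/version: 2.27.0/' galaxy.yml
git switch -c bump-galaxy-yml
git commit -am "Bump galaxy.yml version to 2.27.0"
```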
## Major/minor releases and milestones

@@ -50,37 +46,3 @@ The Kubespray Project is released on an as-needed basis. The process is as follo
then Kubespray v2.1.0 may be bound to only minor changes to `kube_version`, like v1.5.1
and *any* changes to other components, like etcd v4, or calico 1.2.3.
And Kubespray v3.x.x shall be bound to `kube_version: 2.x.x` respectively.

## Release note creation

You can create a release note with:

```shell
export GITHUB_TOKEN=<your-github-token>
export ORG=kubernetes-sigs
export REPO=kubespray
release-notes --start-sha <The start commit-id> --end-sha <The end commit-id> --dependencies=false --output=/tmp/kubespray-release-note --required-author=""
```

If the release note file (/tmp/kubespray-release-note) contains "### Uncategorized" pull requests, those pull requests don't have a valid kind label (`kind/feature`, etc.).
It is necessary to put a valid label on each pull request and run the above release-notes command again to get a better release note.

## Container image creation

The container image `quay.io/kubespray/kubespray:vX.Y.Z` can be created from the Dockerfile in the kubespray root directory:

```shell
cd kubespray/
nerdctl build -t quay.io/kubespray/kubespray:vX.Y.Z .
nerdctl push quay.io/kubespray/kubespray:vX.Y.Z
```

The container image `quay.io/kubespray/vagrant:vX.Y.Z` can be created from build.sh in test-infra/vagrant-docker/:

```shell
cd kubespray/test-infra/vagrant-docker/
./build vX.Y.Z
```

Please note that the above operations require permission to push container images to quay.io/kubespray/.
If you don't have the permission, please ask for it on the #kubespray-dev channel.
|
@ -9,7 +9,5 @@
|
||||
#
|
||||
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
|
||||
# INSTRUCTIONS AT https://kubernetes.io/security/
|
||||
atoms
|
||||
mattymo
|
||||
floryut
|
||||
oomichi
|
||||
cristicalin
|
||||
|
141 Vagrantfile vendored
@@ -1,7 +1,7 @@
# -*- mode: ruby -*-
# # vi: set ft=ruby :

# For help on using kubespray with vagrant, check out docs/developers/vagrant.md
# For help on using kubespray with vagrant, check out docs/vagrant.md

require 'fileutils'

@@ -19,27 +19,19 @@ SUPPORTED_OS = {
  "flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]},
  "flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]},
  "flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]},
  "ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
  "ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
  "ubuntu2004" => {box: "generic/ubuntu2004", user: "vagrant"},
  "ubuntu2204" => {box: "generic/ubuntu2204", user: "vagrant"},
  "ubuntu2404" => {box: "bento/ubuntu-24.04", user: "vagrant"},
  "centos" => {box: "centos/7", user: "vagrant"},
  "centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
  "centos8" => {box: "centos/8", user: "vagrant"},
  "centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
  "almalinux8" => {box: "almalinux/8", user: "vagrant"},
  "almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
  "rockylinux8" => {box: "rockylinux/8", user: "vagrant"},
  "rockylinux9" => {box: "rockylinux/9", user: "vagrant"},
  "fedora37" => {box: "fedora/37-cloud-base", user: "vagrant"},
  "fedora38" => {box: "fedora/38-cloud-base", user: "vagrant"},
  "opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
  "fedora31" => {box: "fedora/31-cloud-base", user: "vagrant"},
  "fedora32" => {box: "fedora/32-cloud-base", user: "vagrant"},
  "opensuse" => {box: "bento/opensuse-leap-15.1", user: "vagrant"},
  "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
  "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
  "oraclelinux8" => {box: "generic/oracle8", user: "vagrant"},
  "rhel7" => {box: "generic/rhel7", user: "vagrant"},
  "rhel8" => {box: "generic/rhel8", user: "vagrant"},
  "debian11" => {box: "debian/bullseye64", user: "vagrant"},
  "debian12" => {box: "debian/bookworm64", user: "vagrant"},
}

if File.exist?(CONFIG)
@@ -55,17 +47,16 @@ $vm_cpus ||= 2
$shared_folders ||= {}
$forwarded_ports ||= {}
$subnet ||= "172.18.8"
$subnet_ipv6 ||= "fd3c:b398:0698:0756"
$os ||= "ubuntu2004"
$os ||= "ubuntu1804"
$network_plugin ||= "flannel"
# Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni
$multi_networking ||= "False"
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
$multi_networking ||= false
$download_run_once ||= "True"
$download_force_cache ||= "False"
$download_force_cache ||= "True"
# The first three nodes are etcd servers
$etcd_instances ||= [$num_instances, 3].min
$etcd_instances ||= $num_instances
# The first two nodes are kube masters
$kube_master_instances ||= [$num_instances, 2].min
$kube_master_instances ||= $num_instances == 1 ? $num_instances : ($num_instances - 1)
# All nodes are kube nodes
$kube_node_instances ||= $num_instances
# The following only works when using the libvirt provider
@@ -74,27 +65,14 @@ $kube_node_instances_with_disks_size ||= "20G"
$kube_node_instances_with_disks_number ||= 2
$override_disk_size ||= false
$disk_size ||= "20GB"
$local_path_provisioner_enabled ||= "False"
$local_path_provisioner_enabled ||= false
$local_path_provisioner_claim_root ||= "/opt/local-path-provisioner/"
$libvirt_nested ||= false
# boolean or string (e.g. "-vvv")
$ansible_verbosity ||= false
$ansible_tags ||= ENV['VAGRANT_ANSIBLE_TAGS'] || ""

$vagrant_dir ||= File.join(File.dirname(__FILE__), ".vagrant")

$playbook ||= "cluster.yml"
$extra_vars ||= {}

host_vars = {}

# throw error if os is not supported
if ! SUPPORTED_OS.key?($os)
  puts "Unsupported OS: #{$os}"
  puts "Supported OS are: #{SUPPORTED_OS.keys.join(', ')}"
  exit 1
end

$box = SUPPORTED_OS[$os][:box]
# if $inventory is not set, try to use example
$inventory = "inventory/sample" if ! $inventory
@@ -103,18 +81,18 @@ $inventory = File.absolute_path($inventory, File.dirname(__FILE__))
# if $inventory has a hosts.ini file use it, otherwise copy over
# vars etc to where vagrant expects dynamic inventory to be
if ! File.exist?(File.join(File.dirname($inventory), "hosts.ini"))
  $vagrant_ansible = File.join(File.absolute_path($vagrant_dir), "provisioners", "ansible")
  $vagrant_ansible = File.join(File.dirname(__FILE__), ".vagrant", "provisioners", "ansible")
  FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
  $vagrant_inventory = File.join($vagrant_ansible,"inventory")
  FileUtils.rm_f($vagrant_inventory)
  FileUtils.ln_s($inventory, $vagrant_inventory)
  if ! File.exist?(File.join($vagrant_ansible,"inventory"))
    FileUtils.ln_s($inventory, File.join($vagrant_ansible,"inventory"))
  end
end

if Vagrant.has_plugin?("vagrant-proxyconf")
  $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
  (1..$num_instances).each do |i|
    $no_proxy += ",#{$subnet}.#{i+100}"
  end
  $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
  (1..$num_instances).each do |i|
    $no_proxy += ",#{$subnet}.#{i+100}"
  end
end

Vagrant.configure("2") do |config|
@@ -164,7 +142,6 @@ Vagrant.configure("2") do |config|
      vb.gui = $vm_gui
      vb.linked_clone = true
      vb.customize ["modifyvm", :id, "--vram", "8"] # ubuntu defaults to 256 MB which is a waste of precious RAM
      vb.customize ["modifyvm", :id, "--audio", "none"]
    end

    node.vm.provider :libvirt do |lv|
@@ -186,15 +163,7 @@ Vagrant.configure("2") do |config|
        # always make /dev/sd{a/b/c} so that CI can ensure that
        # virtualbox and libvirt will have the same devices to use for OSDs
        (1..$kube_node_instances_with_disks_number).each do |d|
          lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "scsi"
        end
      end
      node.vm.provider :virtualbox do |vb|
        # always make /dev/sd{a/b/c} so that CI can ensure that
        # virtualbox and libvirt will have the same devices to use for OSDs
        (1..$kube_node_instances_with_disks_number).each do |d|
          vb.customize ['createhd', '--filename', "disk-#{i}-#{driverletters[d]}-#{DISK_UUID}.disk", '--size', $kube_node_instances_with_disks_size] # 10GB disk
          vb.customize ['storageattach', :id, '--storagectl', 'SATA Controller', '--port', d, '--device', 0, '--type', 'hdd', '--medium', "disk-#{i}-#{driverletters[d]}-#{DISK_UUID}.disk", '--nonrotational', 'on', '--mtype', 'normal']
          lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "ide"
        end
      end
    end
@@ -207,55 +176,19 @@ Vagrant.configure("2") do |config|
      node.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true
    end

    if ["rhel7","rhel8"].include? $os
      # Vagrant synced_folder rsync options cannot be used for RHEL boxes as Rsync package cannot
      # be installed until the host is registered with a valid Red Hat support subscription
      node.vm.synced_folder ".", "/vagrant", disabled: false
      $shared_folders.each do |src, dst|
        node.vm.synced_folder src, dst
      end
    else
      node.vm.synced_folder ".", "/vagrant", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] , rsync__exclude: ['.git','venv']
      $shared_folders.each do |src, dst|
        node.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
      end
    node.vm.synced_folder ".", "/vagrant", disabled: false, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z'] , rsync__exclude: ['.git','venv']
    $shared_folders.each do |src, dst|
      node.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
    end

    ip = "#{$subnet}.#{i+100}"
    node.vm.network :private_network,
      :ip => ip,
      :libvirt__guest_ipv6 => 'yes',
      :libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
      :libvirt__ipv6_prefix => "64",
      :libvirt__forward_mode => "none",
      :libvirt__dhcp_enabled => false
    node.vm.network :private_network, ip: ip

    # Disable swap for each vm
    node.vm.provision "shell", inline: "swapoff -a"

    # ubuntu2004 and ubuntu2204 have IPv6 explicitly disabled. This undoes that.
    if ["ubuntu2004", "ubuntu2204"].include? $os
      node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
      node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
    end
    # Hack for fedora37/38 to get the IP address of the second interface
    if ["fedora37", "fedora38"].include? $os
      config.vm.provision "shell", inline: <<-SHELL
        nmcli conn modify 'Wired connection 2' ipv4.addresses $(cat /etc/sysconfig/network-scripts/ifcfg-eth1 | grep IPADDR | cut -d "=" -f2)
        nmcli conn modify 'Wired connection 2' ipv4.method manual
        service NetworkManager restart
      SHELL
    end

    # Rockylinux boxes need UEFI
    if ["rockylinux8", "rockylinux9"].include? $os
      config.vm.provider "libvirt" do |domain|
        domain.loader = "/usr/share/OVMF/x64/OVMF_CODE.fd"
      end
    end

    # Disable firewalld on oraclelinux/redhat vms
    if ["oraclelinux","oraclelinux8","rhel7","rhel8","rockylinux8"].include? $os
    # Disable firewalld on oraclelinux vms
    if ["oraclelinux","oraclelinux8"].include? $os
      node.vm.provision "shell", inline: "systemctl stop firewalld; systemctl disable firewalld"
    end

@@ -277,18 +210,13 @@ Vagrant.configure("2") do |config|
      "kubectl_localhost": "True",
      "local_path_provisioner_enabled": "#{$local_path_provisioner_enabled}",
      "local_path_provisioner_claim_root": "#{$local_path_provisioner_claim_root}",
      "ansible_ssh_user": SUPPORTED_OS[$os][:user],
      "ansible_ssh_private_key_file": File.join(Dir.home, ".vagrant.d", "insecure_private_key"),
      "unsafe_show_logs": "True"
      "ansible_ssh_user": SUPPORTED_OS[$os][:user]
    }

    # Only execute the Ansible provisioner once, when all the machines are up and ready.
    # And limit the action to gathering facts, the full playbook is going to be run by testcases_run.sh
    if i == $num_instances
      node.vm.provision "ansible" do |ansible|
        ansible.playbook = $playbook
        ansible.compatibility_mode = "2.0"
        ansible.verbose = $ansible_verbosity
        $ansible_inventory_path = File.join( $inventory, "hosts.ini")
        if File.exist?($ansible_inventory_path)
          ansible.inventory_path = $ansible_inventory_path
@@ -298,15 +226,12 @@ Vagrant.configure("2") do |config|
        ansible.host_key_checking = false
        ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache", "-e ansible_become_pass=vagrant"]
        ansible.host_vars = host_vars
        ansible.extra_vars = $extra_vars
        if $ansible_tags != ""
          ansible.tags = [$ansible_tags]
        end
        #ansible.tags = ['download']
        ansible.groups = {
          "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
          "kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
          "kube_node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
          "k8s_cluster:children" => ["kube_control_plane", "kube_node"],
          "kube-master" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
          "kube-node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
          "k8s-cluster:children" => ["kube-master", "kube-node"],
        }
      end
    end
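The `$os`, `$num_instances` and similar defaults above can be overridden without editing the Vagrantfile; a sketch, assuming the `CONFIG` file checked near the top resolves to `vagrant/config.rb`:

```ShellSession
mkdir -p vagrant
cat > vagrant/config.rb <<'EOF'
$os = "ubuntu2004"
$num_instances = 3
$network_plugin = "calico"
EOF
vagrant up
```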
|
@ -3,6 +3,7 @@ pipelining=True
|
||||
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
|
||||
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
|
||||
[defaults]
|
||||
strategy_plugins = plugins/mitogen/ansible_mitogen/plugins/strategy
|
||||
# https://github.com/ansible/ansible/issues/56930 (to ignore group names with - and .)
|
||||
force_valid_group_names = ignore
|
||||
|
||||
@ -10,12 +11,11 @@ host_key_checking=False
|
||||
gathering = smart
|
||||
fact_caching = jsonfile
|
||||
fact_caching_connection = /tmp
|
||||
fact_caching_timeout = 86400
|
||||
timeout = 300
|
||||
fact_caching_timeout = 7200
|
||||
stdout_callback = default
|
||||
display_skipped_hosts = no
|
||||
library = ./library
|
||||
callbacks_enabled = profile_tasks,ara_default
|
||||
callback_whitelist = profile_tasks
|
||||
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
|
||||
deprecation_warnings=False
|
||||
inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds, .gpg
|
||||
|
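To verify which of the settings above Ansible actually picked up, run from the repository root:

```ShellSession
ansible-config dump --only-changed | grep -iE 'pipelining|fact_caching|callback'
```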
15 ansible_version.yml Normal file
@@ -0,0 +1,15 @@
---
- hosts: localhost
  gather_facts: false
  become: no
  vars:
    minimal_ansible_version: 2.8.0
    ansible_connection: local
  tasks:
    - name: "Check ansible version >={{ minimal_ansible_version }}"
      assert:
        msg: "Ansible must be {{ minimal_ansible_version }} or higher"
        that:
          - ansible_version.string is version(minimal_ansible_version, ">=")
      tags:
        - check
138 cluster.yml
@@ -1,3 +1,137 @@
---
- name: Install Kubernetes
  ansible.builtin.import_playbook: playbooks/cluster.yml
- name: Check ansible version
  import_playbook: ansible_version.yml

- hosts: all
  gather_facts: false
  tags: always
  tasks:
    - name: "Set up proxy environment"
      set_fact:
        proxy_env:
          http_proxy: "{{ http_proxy | default ('') }}"
          HTTP_PROXY: "{{ http_proxy | default ('') }}"
          https_proxy: "{{ https_proxy | default ('') }}"
          HTTPS_PROXY: "{{ https_proxy | default ('') }}"
          no_proxy: "{{ no_proxy | default ('') }}"
          NO_PROXY: "{{ no_proxy | default ('') }}"
      no_log: true

- hosts: bastion[0]
  gather_facts: False
  roles:
    - { role: kubespray-defaults }
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"] }

- hosts: k8s-cluster:etcd
  strategy: linear
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  gather_facts: false
  roles:
    - { role: kubespray-defaults }
    - { role: bootstrap-os, tags: bootstrap-os}

- name: Gather facts
  tags: always
  import_playbook: facts.yml

- hosts: k8s-cluster:etcd
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) }
    - { role: download, tags: download, when: "not skip_downloads" }
  environment: "{{ proxy_env }}"

- hosts: etcd
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: true
        etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}"
      when: not etcd_kubeadm_enabled| default(false)

- hosts: k8s-cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - role: etcd
      tags: etcd
      vars:
        etcd_cluster_setup: false
        etcd_events_cluster_setup: false
      when: not etcd_kubeadm_enabled| default(false)

- hosts: k8s-cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/node, tags: node }
  environment: "{{ proxy_env }}"

- hosts: kube-master
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/master, tags: master }
    - { role: kubernetes/client, tags: client }
    - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }

- hosts: k8s-cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/kubeadm, tags: kubeadm}
    - { role: network_plugin, tags: network }
    - { role: kubernetes/node-label, tags: node-label }

- hosts: calico-rr
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }

- hosts: kube-master[0]
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }

- hosts: kube-master
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
    - { role: kubernetes-apps/network_plugin, tags: network }
    - { role: kubernetes-apps/policy_controller, tags: policy-controller }
    - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
    - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }

- hosts: kube-master
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes-apps, tags: apps }
  environment: "{{ proxy_env }}"

- hosts: k8s-cluster
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
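The `proxy_env` wiring above picks up `http_proxy`/`https_proxy`/`no_proxy` when they are passed as extra vars; a hedged example (proxy address illustrative):

```ShellSession
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root \
  -e http_proxy=http://proxy.example.com:3128 \
  -e no_proxy=localhost,127.0.0.1 \
  cluster.yml
```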
@ -35,11 +35,11 @@ class SearchEC2Tags(object):
  hosts['_meta'] = { 'hostvars': {} }

  ##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
  for group in ["kube_control_plane", "kube_node", "etcd"]:
  for group in ["kube-master", "kube-node", "etcd"]:
    hosts[group] = []
    tag_key = "kubespray-role"
    tag_value = ["*"+group+"*"]
    region = os.environ['AWS_REGION']
    region = os.environ['REGION']

    ec2 = boto3.resource('ec2', region)
    filters = [{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}]
@ -67,15 +67,10 @@ class SearchEC2Tags(object):
        if node_labels_tag:
          ansible_host['node_labels'] = dict([ label.strip().split('=') for label in node_labels_tag[0]['Value'].split(',') ])

        ##Set when instance actually has node_taints
        node_taints_tag = list(filter(lambda t: t['Key'] == 'kubespray-node-taints', instance.tags))
        if node_taints_tag:
          ansible_host['node_taints'] = list([ taint.strip() for taint in node_taints_tag[0]['Value'].split(',') ])

        hosts[group].append(dns_name)
        hosts['_meta']['hostvars'][dns_name] = ansible_host

    hosts['k8s_cluster'] = {'children':['kube_control_plane', 'kube_node']}

    hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
    print(json.dumps(hosts, sort_keys=True, indent=2))

SearchEC2Tags()
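For context, both sides of this hunk run the same tag-filtered EC2 query and only disagree on group names and the region variable. A self-contained sketch of that query pattern follows; the group list, region fallback, and `__main__` wiring are illustrative assumptions, not taken from this diff:

```python
# Minimal sketch of the tag-filtered query the inventory script performs.
# Assumptions: boto3 credentials are configured and instances carry a
# "kubespray-role" tag; the default region below is purely illustrative.
import json
import os

import boto3


def hosts_for_group(group, region):
    ec2 = boto3.resource('ec2', region_name=region)
    filters = [
        {'Name': 'tag:kubespray-role', 'Values': ['*' + group + '*']},
        {'Name': 'instance-state-name', 'Values': ['running']},
    ]
    # Private DNS name of every running instance whose role tag matches
    return [i.private_dns_name for i in ec2.instances.filter(Filters=filters)]


if __name__ == '__main__':
    region = os.environ.get('REGION', 'eu-west-1')  # illustrative fallback
    groups = {g: hosts_for_group(g, region)
              for g in ('kube-master', 'kube-node', 'etcd')}
    print(json.dumps(groups, sort_keys=True, indent=2))
```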
@ -1 +0,0 @@
boto3 # Apache-2.0

2
contrib/azurerm/.gitignore
vendored
@ -1,2 +1,2 @@
.generated
/inventory
/inventory
@ -24,14 +24,14 @@ experience.

You can enable the use of a Bastion Host by changing **use_bastion** in group_vars/all to **true**. The generated
templates will then include an additional bastion VM which can then be used to connect to the masters and nodes. The option
also removes all public IPs from all other VMs.
also removes all public IPs from all other VMs.

## Generating and applying

To generate and apply the templates, call:

```shell
./apply-rg.sh <resource_group_name>
$ ./apply-rg.sh <resource_group_name>
```

If you change something in the configuration (e.g. number of nodes) later, you can call this again and Azure will
@ -42,26 +42,25 @@ take care about creating/modifying whatever is needed.
If you need to delete all resources from a resource group, simply call:

```shell
./clear-rg.sh <resource_group_name>
$ ./clear-rg.sh <resource_group_name>
```

**WARNING** this really deletes everything from your resource group, including everything that was later created by you!

## Installing Ansible and the dependencies

Install Ansible according to [Ansible installation guide](/docs/ansible/ansible.md#installing-ansible)

## Generating an inventory for kubespray

After you have applied the templates, you can generate an inventory with this call:

```shell
./generate-inventory.sh <resource_group_name>
$ ./generate-inventory.sh <resource_group_name>
```

It will create the file ./inventory which can then be used with kubespray, e.g.:

```shell
cd kubespray-root-dir
ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
$ cd kubespray-root-dir
$ sudo pip3 install -r requirements.txt
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
```
@ -1,6 +1,5 @@
---
- name: Generate Azure inventory
  hosts: localhost
- hosts: localhost
  gather_facts: False
  roles:
    - generate-inventory

@ -1,6 +1,5 @@
---
- name: Generate Azure inventory
  hosts: localhost
- hosts: localhost
  gather_facts: False
  roles:
    - generate-inventory_2

@ -1,6 +1,5 @@
---
- name: Generate Azure templates
  hosts: localhost
- hosts: localhost
  gather_facts: False
  roles:
    - generate-templates
@ -1,6 +1,6 @@
---

- name: Query Azure VMs
- name: Query Azure VMs # noqa 301
  command: azure vm list-ip-address --json {{ azure_resource_group }}
  register: vm_list_cmd

@ -12,4 +12,3 @@
  template:
    src: inventory.j2
    dest: "{{ playbook_dir }}/inventory"
    mode: "0644"
@ -7,9 +7,9 @@
{% endif %}
{% endfor %}

[kube_control_plane]
[kube-master]
{% for vm in vm_list %}
{% if 'kube_control_plane' in vm.tags.roles %}
{% if 'kube-master' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
@ -21,13 +21,13 @@
{% endif %}
{% endfor %}

[kube_node]
[kube-node]
{% for vm in vm_list %}
{% if 'kube_node' in vm.tags.roles %}
{% if 'kube-node' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}

[k8s_cluster:children]
kube_node
kube_control_plane
[k8s-cluster:children]
kube-node
kube-master
@ -1,14 +1,14 @@
---

- name: Query Azure VMs IPs
- name: Query Azure VMs IPs # noqa 301
  command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
  register: vm_ip_list_cmd

- name: Query Azure VMs Roles
- name: Query Azure VMs Roles # noqa 301
  command: az vm list -o json --resource-group {{ azure_resource_group }}
  register: vm_list_cmd

- name: Query Azure Load Balancer Public IP
- name: Query Azure Load Balancer Public IP # noqa 301
  command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
  register: lb_pubip_cmd

@ -22,10 +22,8 @@
  template:
    src: inventory.j2
    dest: "{{ playbook_dir }}/inventory"
    mode: "0644"

- name: Generate Load Balancer variables
  template:
    src: loadbalancer_vars.j2
    dest: "{{ playbook_dir }}/loadbalancer_vars.yml"
    mode: "0644"
@ -7,9 +7,9 @@
{% endif %}
{% endfor %}

[kube_control_plane]
[kube-master]
{% for vm in vm_roles_list %}
{% if 'kube_control_plane' in vm.tags.roles %}
{% if 'kube-master' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
@ -21,13 +21,14 @@
{% endif %}
{% endfor %}

[kube_node]
[kube-node]
{% for vm in vm_roles_list %}
{% if 'kube_node' in vm.tags.roles %}
{% if 'kube-node' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}

[k8s_cluster:children]
kube_node
kube_control_plane
[k8s-cluster:children]
kube-node
kube-master
@ -24,14 +24,14 @@ bastionIPAddressName: bastion-pubip

disablePasswordAuthentication: true

sshKeyPath: "/home/{{ admin_username }}/.ssh/authorized_keys"
sshKeyPath: "/home/{{admin_username}}/.ssh/authorized_keys"

imageReference:
  publisher: "OpenLogic"
  offer: "CentOS"
  sku: "7.5"
  version: "latest"
imageReferenceJson: "{{ imageReference | to_json }}"
imageReferenceJson: "{{imageReference|to_json}}"

storageAccountName: "sa{{ nameSuffix | replace('-', '') }}"
storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"
@ -8,13 +8,11 @@
    path: "{{ base_dir }}"
    state: directory
    recurse: true
    mode: "0755"

- name: Store json files in base_dir
  template:
    src: "{{ item }}"
    dest: "{{ base_dir }}/{{ item }}"
    mode: "0644"
  with_items:
    - network.json
    - storage.json

@ -27,4 +27,4 @@
}
}
]
}
}
@ -103,4 +103,4 @@
}
{% endif %}
]
}
}

@ -5,4 +5,4 @@
"variables": {},
"resources": [],
"outputs": {}
}
}
@ -144,7 +144,7 @@
        "[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]"
      ],
      "tags": {
        "roles": "kube_control_plane,etcd"
        "roles": "kube-master,etcd"
      },
      "apiVersion": "{{apiVersion}}",
      "properties": {

@ -61,7 +61,7 @@
        "[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]"
      ],
      "tags": {
        "roles": "kube_node"
        "roles": "kube-node"
      },
      "apiVersion": "{{apiVersion}}",
      "properties": {
@ -112,4 +112,4 @@
    } {% if not loop.last %},{% endif %}
  {% endfor %}
]
}
}

@ -16,4 +16,4 @@
}
}
]
}
}
@ -6,7 +6,6 @@ to serve as Kubernetes "nodes", which in turn will run
called DIND (Docker-IN-Docker).

The playbook has two roles:

- dind-host: creates the "nodes" as containers in localhost, with
  appropriate settings for DIND (privileged, volume mapping for dind
  storage, etc).
@ -28,7 +27,7 @@ See below for a complete successful run:

1. Create the node containers

```shell
~~~~
# From the kubespray root dir
cd contrib/dind
pip install -r requirements.txt
@ -37,15 +36,15 @@ ansible-playbook -i hosts dind-cluster.yaml

# Back to kubespray root
cd ../..
```
~~~~

NOTE: if the playbook run fails with something like below error
message, you may need to specifically set `ansible_python_interpreter`,
see `./hosts` file for an example expanded localhost entry.

```shell
~~~
failed: [localhost] (item=kube-node1) => {"changed": false, "item": "kube-node1", "msg": "Failed to import docker or docker-py - No module named requests.exceptions. Try `pip install docker` or `pip install docker-py` (Python 2.6)"}
```
~~~

2. Customize kubespray-dind.yaml

@ -53,33 +52,33 @@ Note that there's coupling between above created node containers
and `kubespray-dind.yaml` settings, in particular regarding selected `node_distro`
(as set in `group_vars/all/all.yaml`), and docker settings.

```shell
~~~
$EDITOR contrib/dind/kubespray-dind.yaml
```
~~~

3. Prepare the inventory and run the playbook

```shell
~~~
INVENTORY_DIR=inventory/local-dind
mkdir -p ${INVENTORY_DIR}
rm -f ${INVENTORY_DIR}/hosts.ini
CONFIG_FILE=${INVENTORY_DIR}/hosts.ini /tmp/kubespray.dind.inventory_builder.sh

ansible-playbook --become -e ansible_ssh_user=debian -i ${INVENTORY_DIR}/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml
```
~~~

NOTE: You could also test other distros without editing files by
passing `--extra-vars` as per below commandline,
replacing `DISTRO` by either `debian`, `ubuntu`, `centos`, `fedora`:

```shell
~~~
cd contrib/dind
ansible-playbook -i hosts dind-cluster.yaml --extra-vars node_distro=DISTRO

cd ../..
CONFIG_FILE=inventory/local-dind/hosts.ini /tmp/kubespray.dind.inventory_builder.sh
ansible-playbook --become -e ansible_ssh_user=DISTRO -i inventory/local-dind/hosts.ini cluster.yml --extra-vars @contrib/dind/kubespray-dind.yaml --extra-vars bootstrap_os=DISTRO
```
~~~

## Resulting deployment

@ -90,7 +89,7 @@ from the host where you ran kubespray playbooks.

Running from an Ubuntu Xenial host:

```shell
~~~
$ uname -a
Linux ip-xx-xx-xx-xx 4.4.0-1069-aws #79-Ubuntu SMP Mon Sep 24
15:01:41 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
@ -150,14 +149,14 @@ kube-system weave-net-xr46t 2/2 Running 0

$ docker exec kube-node1 curl -s http://localhost:31081/api/v1/connectivity_check
{"Message":"All 10 pods successfully reported back to the server","Absent":null,"Outdated":null}
```
~~~

## Using ./run-test-distros.sh

You can use `./run-test-distros.sh` to run a set of tests via DIND,
and excerpt from this script, to get an idea:

```shell
~~~
# The SPEC file(s) must have two arrays as e.g.
# DISTROS=(debian centos)
# EXTRAS=(
@ -170,7 +169,7 @@ and excerpt from this script, to get an idea:
#
# Each $EXTRAS element will be whitespace split, and passed as --extra-vars
# to main kubespray ansible-playbook run.
```
~~~

See e.g. `test-some_distros-most_CNIs.env` and
`test-some_distros-kube_router_combo.env` in particular for a richer
@ -1,11 +1,9 @@
---
- name: Create nodes as docker containers
  hosts: localhost
- hosts: localhost
  gather_facts: False
  roles:
    - { role: dind-host }

- name: Customize each node containers
  hosts: containers
- hosts: containers
  roles:
    - { role: dind-cluster }
@ -1,9 +1,9 @@
---
- name: Set_fact distro_setup
- name: set_fact distro_setup
  set_fact:
    distro_setup: "{{ distro_settings[node_distro] }}"

- name: Set_fact other distro settings
- name: set_fact other distro settings
  set_fact:
    distro_user: "{{ distro_setup['user'] }}"
    distro_ssh_service: "{{ distro_setup['ssh_service'] }}"
@ -35,7 +35,6 @@
      path-exclude=/usr/share/doc/*
      path-include=/usr/share/doc/*/copyright
    dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
    mode: "0644"
  when:
    - ansible_os_family == 'Debian'

@ -43,7 +42,7 @@
  package:
    name: "{{ item }}"
    state: present
  with_items: "{{ distro_extra_packages + ['rsyslog', 'openssh-server'] }}"
  with_items: "{{ distro_extra_packages }} + [ 'rsyslog', 'openssh-server' ]"

- name: Start needed services
  service:
@ -64,10 +63,9 @@
  copy:
    content: "{{ distro_user }} ALL=(ALL) NOPASSWD:ALL"
    dest: "/etc/sudoers.d/{{ distro_user }}"
    mode: "0640"

- name: "Add my pubkey to {{ distro_user }} user authorized keys"
  ansible.posix.authorized_key:
- name: Add my pubkey to "{{ distro_user }}" user authorized keys
  authorized_key:
    user: "{{ distro_user }}"
    state: present
    key: "{{ lookup('file', lookup('env', 'HOME') + '/.ssh/id_rsa.pub') }}"
    key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}"
@ -1,9 +1,9 @@
---
- name: Set_fact distro_setup
- name: set_fact distro_setup
  set_fact:
    distro_setup: "{{ distro_settings[node_distro] }}"

- name: Set_fact other distro settings
- name: set_fact other distro settings
  set_fact:
    distro_image: "{{ distro_setup['image'] }}"
    distro_init: "{{ distro_setup['init'] }}"
@ -13,7 +13,7 @@
    distro_agetty_svc: "{{ distro_setup['agetty_svc'] }}"

- name: Create dind node containers from "containers" inventory section
  community.docker.docker_container:
  docker_container:
    image: "{{ distro_image }}"
    name: "{{ item }}"
    state: started
@ -42,7 +42,7 @@
  template:
    src: inventory_builder.sh.j2
    dest: /tmp/kubespray.dind.inventory_builder.sh
    mode: "0755"
    mode: 0755
  tags:
    - addresses

@ -53,7 +53,7 @@
    {{ distro_raw_setup_done }} && echo SKIPPED && exit 0
    until [ "$(readlink /proc/1/exe)" = "{{ distro_pid1_exe }}" ] ; do sleep 1; done
    {{ distro_raw_setup }}
  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
  delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
  with_items: "{{ containers.results }}"
  register: result
  changed_when: result.stdout.find("SKIPPED") < 0
@ -63,25 +63,26 @@
    until test -S /var/run/dbus/system_bus_socket; do sleep 1; done
    systemctl disable {{ distro_agetty_svc }}
    systemctl stop {{ distro_agetty_svc }}
  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
  delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
  with_items: "{{ containers.results }}"
  changed_when: false

# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
# handle manually
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301
  raw: |
    echo {{ item | hash('sha1') }} > /etc/machine-id.new
    mv -b /etc/machine-id.new /etc/machine-id
    cmp /etc/machine-id /etc/machine-id~ || true
    systemctl daemon-reload
  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
  delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
  with_items: "{{ containers.results }}"

- name: Early hack image install to adapt for DIND
  # noqa 302 - this task uses the raw module intentionally
  raw: |
    rm -fv /usr/bin/udevadm /usr/sbin/udevadm
  delegate_to: "{{ item._ansible_item_label | default(item.item) }}"
  delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
  with_items: "{{ containers.results }}"
  register: result
  changed_when: result.stdout.find("removed") >= 0
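The machine-id task above derives a stable, unique id for each DIND node by hashing the inventory item with Jinja's `hash('sha1')` filter. A rough Python equivalent of that seeding, with a made-up container name:

```python
# Rough Python equivalent of {{ item | hash('sha1') }} used above to seed
# /etc/machine-id; the container name is a hypothetical example.
import hashlib


def machine_id_for(container_name):
    # The sha1 hex digest is deterministic per name, so each node container
    # gets a distinct but reproducible machine-id
    return hashlib.sha1(container_name.encode('utf-8')).hexdigest()


print(machine_id_for('kube-node1'))
```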
@ -17,7 +17,7 @@ pass_or_fail() {
test_distro() {
    local distro=${1:?};shift
    local extra="${*:-}"
    local prefix="${distro[${extra}]}"
    local prefix="$distro[${extra}]}"
    ansible-playbook -i hosts dind-cluster.yaml -e node_distro=$distro
    pass_or_fail "$prefix: dind-nodes" || return 1
    (cd ../..
@ -46,7 +46,7 @@ test_distro() {
    pass_or_fail "$prefix: netcheck" || return 1
}

NODES=($(egrep ^kube_node hosts))
NODES=($(egrep ^kube-node hosts))
NETCHECKER_HOST=localhost

: ${OUTPUT_DIR:=./out}
@ -71,15 +71,15 @@ for spec in ${SPECS}; do
    echo "Loading file=${spec} ..."
    . ${spec} || continue
    : ${DISTROS:?} || continue
    echo "DISTROS:" "${DISTROS[@]}"
    echo "DISTROS=${DISTROS[@]}"
    echo "EXTRAS->"
    printf "  %s\n" "${EXTRAS[@]}"
    let n=1
    for distro in "${DISTROS[@]}"; do
    for distro in ${DISTROS[@]}; do
        for extra in "${EXTRAS[@]:-NULL}"; do
            # Magic value to let this for run once:
            [[ ${extra} == NULL ]] && unset extra
            docker rm -f "${NODES[@]}"
            docker rm -f ${NODES[@]}
            printf -v file_out "%s/%s-%02d.out" ${OUTPUT_DIR} ${spec} $((n++))
            {
                info "${distro}[${extra}] START: file_out=${file_out}"
@ -44,11 +44,11 @@ import re
import subprocess
import sys

ROLES = ['all', 'kube_control_plane', 'kube_node', 'etcd', 'k8s_cluster',
         'calico_rr']
ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster',
         'calico-rr']
PROTECTED_NAMES = ROLES
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
                      'load', 'add']
                      'load']
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
                   '0': False, 'no': False, 'false': False, 'off': False}
yaml = YAML()
@ -63,12 +63,10 @@ def get_var_as_bool(name, default):


CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.yaml")
# Remove the reference of KUBE_MASTERS after some deprecation cycles.
KUBE_CONTROL_HOSTS = int(os.environ.get("KUBE_CONTROL_HOSTS",
                                        os.environ.get("KUBE_MASTERS", 2)))
KUBE_MASTERS = int(os.environ.get("KUBE_MASTERS_MASTERS", 2))
# Reconfigures cluster distribution at scale
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("MASSIVE_SCALE_THRESHOLD", 200))
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 200))

DEBUG = get_var_as_bool("DEBUG", True)
HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
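The master-branch side of this hunk reads `KUBE_CONTROL_HOSTS` with a deprecated `KUBE_MASTERS` fallback. A quick demonstration of how that nested `os.environ.get` chain resolves (values below are made up):

```python
# How the nested fallback above resolves: KUBE_CONTROL_HOSTS wins if set,
# else the deprecated KUBE_MASTERS, else the literal default 2.
import os

os.environ.pop('KUBE_CONTROL_HOSTS', None)
os.environ['KUBE_MASTERS'] = '3'  # only the deprecated variable is set

hosts = int(os.environ.get('KUBE_CONTROL_HOSTS',
                           os.environ.get('KUBE_MASTERS', 2)))
print(hosts)  # -> 3
```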
@ -82,54 +80,32 @@ class KubesprayInventory(object):

    def __init__(self, changed_hosts=None, config_file=None):
        self.config_file = config_file
        self.yaml_config = {}
        loadPreviousConfig = False
        printHostnames = False
        # See whether there are any commands to process
        if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
            if changed_hosts[0] == "add":
                loadPreviousConfig = True
                changed_hosts = changed_hosts[1:]
            elif changed_hosts[0] == "print_hostnames":
                loadPreviousConfig = True
                printHostnames = True
            else:
                self.parse_command(changed_hosts[0], changed_hosts[1:])
                sys.exit(0)

        # If the user wants to remove a node, we need to load the config anyway
        if changed_hosts and changed_hosts[0][0] == "-":
            loadPreviousConfig = True

        if self.config_file and loadPreviousConfig:  # Load previous YAML file
        if self.config_file:
            try:
                self.hosts_file = open(config_file, 'r')
                self.yaml_config = yaml.load(self.hosts_file)
            except OSError as e:
                # I am assuming we are catching "cannot open file" exceptions
                print(e)
                sys.exit(1)
                self.yaml_config = yaml.load_all(self.hosts_file)
            except OSError:
                pass

        if printHostnames:
            self.print_hostnames()
        if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
            self.parse_command(changed_hosts[0], changed_hosts[1:])
            sys.exit(0)

        self.ensure_required_groups(ROLES)

        if changed_hosts:
            changed_hosts = self.range2ips(changed_hosts)
            self.hosts = self.build_hostnames(changed_hosts,
                                              loadPreviousConfig)
            self.hosts = self.build_hostnames(changed_hosts)
            self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES)
            self.set_all(self.hosts)
            self.set_k8s_cluster()
            etcd_hosts_count = 3 if len(self.hosts.keys()) >= 3 else 1
            self.set_etcd(list(self.hosts.keys())[:etcd_hosts_count])
            if len(self.hosts) >= SCALE_THRESHOLD:
                self.set_kube_control_plane(list(self.hosts.keys())[
                    etcd_hosts_count:(etcd_hosts_count + KUBE_CONTROL_HOSTS)])
                self.set_kube_master(list(self.hosts.keys())[
                    etcd_hosts_count:(etcd_hosts_count + KUBE_MASTERS)])
            else:
                self.set_kube_control_plane(
                    list(self.hosts.keys())[:KUBE_CONTROL_HOSTS])
                self.set_kube_master(list(self.hosts.keys())[:KUBE_MASTERS])
            self.set_kube_node(self.hosts.keys())
            if len(self.hosts) >= SCALE_THRESHOLD:
                self.set_calico_rr(list(self.hosts.keys())[:etcd_hosts_count])
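Condensing the control flow added above: the `add` and `print_hostnames` commands, and any `-host` removal argument, are the paths that force loading the previous inventory. A toy standalone version of that predicate, not the project's API, just an illustration:

```python
# Toy condensation of the dispatch above: which argument shapes require
# reading the previously generated hosts.yaml before rebuilding it.
def needs_previous_config(argv):
    if argv and argv[0] in ('add', 'print_hostnames'):
        return True
    return bool(argv) and argv[0].startswith('-')


print(needs_previous_config(['add', '10.10.1.6']))   # True
print(needs_previous_config(['print_hostnames']))    # True
print(needs_previous_config(['-node1']))             # True
print(needs_previous_config(['10.10.1.5']))          # False
```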
@ -179,29 +155,17 @@ class KubesprayInventory(object):
        except IndexError:
            raise ValueError("Host name must end in an integer")

    # Keeps already specified hosts,
    # and adds or removes the hosts provided as an argument
    def build_hostnames(self, changed_hosts, loadPreviousConfig=False):
    def build_hostnames(self, changed_hosts):
        existing_hosts = OrderedDict()
        highest_host_id = 0
        # Load already existing hosts from the YAML
        if loadPreviousConfig:
            try:
                for host in self.yaml_config['all']['hosts']:
                    # Read configuration of an existing host
                    hostConfig = self.yaml_config['all']['hosts'][host]
                    existing_hosts[host] = hostConfig
                    # If the existing host seems
                    # to have been created automatically, detect its ID
                    if host.startswith(HOST_PREFIX):
                        host_id = self.get_host_id(host)
                        if host_id > highest_host_id:
                            highest_host_id = host_id
            except Exception as e:
                # I am assuming we are catching automatically
                # created hosts without IDs
                print(e)
                sys.exit(1)
        try:
            for host in self.yaml_config['all']['hosts']:
                existing_hosts[host] = self.yaml_config['all']['hosts'][host]
                host_id = self.get_host_id(host)
                if host_id > highest_host_id:
                    highest_host_id = host_id
        except Exception:
            pass

        # FIXME(mattymo): Fix condition where delete then add reuses highest id
        next_host_id = highest_host_id + 1
@ -209,7 +173,6 @@ class KubesprayInventory(object):

        all_hosts = existing_hosts.copy()
        for host in changed_hosts:
            # Delete the host from config the hostname/IP has a "-" prefix
            if host[0] == "-":
                realhost = host[1:]
                if self.exists_hostname(all_hosts, realhost):
@ -218,8 +181,6 @@ class KubesprayInventory(object):
                elif self.exists_ip(all_hosts, realhost):
                    self.debug("Marked {0} for deletion.".format(realhost))
                    self.delete_host_by_ip(all_hosts, realhost)
            # Host/Argument starts with a digit,
            # then we assume its an IP address
            elif host[0].isdigit():
                if ',' in host:
                    ip, access_ip = host.split(',')
@ -239,15 +200,11 @@ class KubesprayInventory(object):
                    next_host = subprocess.check_output(cmd, shell=True)
                    next_host = next_host.strip().decode('ascii')
                else:
                    # Generates a hostname because we have only an IP address
                    next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
                    next_host_id += 1
                # Uses automatically generated node name
                # in case we dont provide it.
                all_hosts[next_host] = {'ansible_host': access_ip,
                                        'ip': ip,
                                        'access_ip': access_ip}
            # Host/Argument starts with a letter, then we assume its a hostname
            elif host[0].isalpha():
                if ',' in host:
                    try:
@ -266,7 +223,6 @@ class KubesprayInventory(object):
                                        'access_ip': access_ip}
        return all_hosts

    # Expand IP ranges into individual addresses
    def range2ips(self, hosts):
        reworked_hosts = []

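A minimal sketch of the expansion `range2ips` performs on `a.b.c.d-a.b.c.e` arguments, using the stdlib `ipaddress` module; the error handling of the real method is omitted:

```python
# Sketch of the IP-range expansion; the real range2ips also validates the
# range and raises on malformed input.
import ipaddress


def expand(host):
    if '-' not in host:
        return [host]
    start, end = (ipaddress.ip_address(h) for h in host.split('-'))
    return [str(ipaddress.ip_address(i))
            for i in range(int(start), int(end) + 1)]


print(expand('10.90.0.4-10.90.0.6'))  # ['10.90.0.4', '10.90.0.5', '10.90.0.6']
```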
@ -310,7 +266,7 @@ class KubesprayInventory(object):

    def purge_invalid_hosts(self, hostnames, protected_names=[]):
        for role in self.yaml_config['all']['children']:
            if role != 'k8s_cluster' and self.yaml_config['all']['children'][role]['hosts']:  # noqa
            if role != 'k8s-cluster' and self.yaml_config['all']['children'][role]['hosts']:  # noqa
                all_hosts = self.yaml_config['all']['children'][role]['hosts'].copy()  # noqa
                for host in all_hosts.keys():
                    if host not in hostnames and host not in protected_names:
@ -331,54 +287,52 @@ class KubesprayInventory(object):
            if self.yaml_config['all']['hosts'] is None:
                self.yaml_config['all']['hosts'] = {host: None}
            self.yaml_config['all']['hosts'][host] = opts
        elif group != 'k8s_cluster:children':
        elif group != 'k8s-cluster:children':
            if self.yaml_config['all']['children'][group]['hosts'] is None:
                self.yaml_config['all']['children'][group]['hosts'] = {
                    host: None}
            else:
                self.yaml_config['all']['children'][group]['hosts'][host] = None  # noqa

    def set_kube_control_plane(self, hosts):
    def set_kube_master(self, hosts):
        for host in hosts:
            self.add_host_to_group('kube_control_plane', host)
            self.add_host_to_group('kube-master', host)

    def set_all(self, hosts):
        for host, opts in hosts.items():
            self.add_host_to_group('all', host, opts)

    def set_k8s_cluster(self):
        k8s_cluster = {'children': {'kube_control_plane': None,
                                    'kube_node': None}}
        self.yaml_config['all']['children']['k8s_cluster'] = k8s_cluster
        k8s_cluster = {'children': {'kube-master': None, 'kube-node': None}}
        self.yaml_config['all']['children']['k8s-cluster'] = k8s_cluster

    def set_calico_rr(self, hosts):
        for host in hosts:
            if host in self.yaml_config['all']['children']['kube_control_plane']:  # noqa
                self.debug("Not adding {0} to calico_rr group because it "
                           "conflicts with kube_control_plane "
                           "group".format(host))
            if host in self.yaml_config['all']['children']['kube-master']:
                self.debug("Not adding {0} to calico-rr group because it "
                           "conflicts with kube-master group".format(host))
                continue
            if host in self.yaml_config['all']['children']['kube_node']:
                self.debug("Not adding {0} to calico_rr group because it "
                           "conflicts with kube_node group".format(host))
            if host in self.yaml_config['all']['children']['kube-node']:
                self.debug("Not adding {0} to calico-rr group because it "
                           "conflicts with kube-node group".format(host))
                continue
            self.add_host_to_group('calico_rr', host)
            self.add_host_to_group('calico-rr', host)

    def set_kube_node(self, hosts):
        for host in hosts:
            if len(self.yaml_config['all']['hosts']) >= SCALE_THRESHOLD:
                if host in self.yaml_config['all']['children']['etcd']['hosts']:  # noqa
                    self.debug("Not adding {0} to kube_node group because of "
                    self.debug("Not adding {0} to kube-node group because of "
                               "scale deployment and host is in etcd "
                               "group.".format(host))
                    continue
            if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD:  # noqa
                if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']:  # noqa
                    self.debug("Not adding {0} to kube_node group because of "
                               "scale deployment and host is in "
                               "kube_control_plane group.".format(host))
                if host in self.yaml_config['all']['children']['kube-master']['hosts']:  # noqa
                    self.debug("Not adding {0} to kube-node group because of "
                               "scale deployment and host is in kube-master "
                               "group.".format(host))
                    continue
            self.add_host_to_group('kube_node', host)
            self.add_host_to_group('kube-node', host)

    def set_etcd(self, hosts):
        for host in hosts:
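The `set_kube_node` logic above encodes two scale cut-offs. A toy restatement of the rule, with the default thresholds of 50 and 200 hard-coded purely for illustration:

```python
# Toy restatement of the scale rules in set_kube_node: at >= 50 hosts the
# etcd members stop doubling as workers, and at >= 200 the control plane
# does too. 50/200 mirror the SCALE_THRESHOLD / MASSIVE_SCALE_THRESHOLD
# defaults from earlier in the script.
def is_worker(host, n_hosts, etcd, control_plane):
    if n_hosts >= 50 and host in etcd:
        return False
    if n_hosts >= 200 and host in control_plane:
        return False
    return True


print(is_worker('node1', 500, etcd={'node1'}, control_plane={'node4'}))  # False
print(is_worker('node9', 500, etcd={'node1'}, control_plane={'node4'}))  # True
```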
@ -435,11 +389,9 @@ help - Display this message
print_cfg - Write inventory file to stdout
print_ips - Write a space-delimited list of IPs from "all" group
print_hostnames - Write a space-delimited list of Hostnames from "all" group
add - Adds specified hosts into an already existing inventory

Advanced usage:
Create new or overwrite old inventory file: inventory.py 10.10.1.5
Add another host after initial creation: inventory.py add 10.10.1.6
Add another host after initial creation: inventory.py 10.10.1.5
Add range of hosts: inventory.py 10.10.1.3-10.10.1.5
Add hosts with different ip and access ip: inventory.py 10.0.0.1,192.168.10.1 10.0.0.2,192.168.10.2 10.0.0.3,192.168.10.3
Add hosts with a specific hostname, ip, and optional access ip: first,10.0.0.1,192.168.10.1 second,10.0.0.2 last,10.0.0.3
@ -450,9 +402,8 @@ Configurable env vars:
DEBUG Enable debug printing. Default: True
CONFIG_FILE File to write config to Default: ./inventory/sample/hosts.yaml
HOST_PREFIX Host prefix for generated hosts. Default: node
KUBE_CONTROL_HOSTS Set the number of kube-control-planes. Default: 2
SCALE_THRESHOLD Separate ETCD role if # of nodes >= 50
MASSIVE_SCALE_THRESHOLD Separate K8s control-plane and ETCD if # of nodes >= 200
MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
'''  # noqa
    print(help_text)

@ -473,7 +424,6 @@ def main(argv=None):
    if not argv:
        argv = sys.argv[1:]
    KubesprayInventory(argv, CONFIG_FILE)
    return 0


if __name__ == "__main__":
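For completeness, the CLI entries in the help text above can also be driven programmatically; a hypothetical sketch (path and IPs invented, and note that `CONFIG_FILE` is evaluated at module import time):

```python
# Hypothetical programmatic equivalent of "CONFIG_FILE=... inventory.py ...".
# The env var must be set before the module is imported, because CONFIG_FILE
# is read at import time; the path and IPs below are made up.
import os

os.environ['CONFIG_FILE'] = './inventory/mycluster/hosts.yaml'

import inventory  # noqa: E402  (deliberately imported after env setup)

inventory.KubesprayInventory(['10.10.1.3', '10.10.1.4'],
                             os.environ['CONFIG_FILE'])
```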
@ -1,3 +1,3 @@
configparser>=3.3.0
ipaddress
ruamel.yaml>=0.15.88
ipaddress

@ -1,3 +1,3 @@
hacking>=0.10.2
mock>=1.3.0
pytest>=2.8.0
mock>=1.3.0
@ -13,9 +13,8 @@
# under the License.

import inventory
from io import StringIO
import mock
import unittest
from unittest import mock

from collections import OrderedDict
import sys
@ -27,28 +26,6 @@ if path not in sys.path:
import inventory  # noqa


class TestInventoryPrintHostnames(unittest.TestCase):

    @mock.patch('ruamel.yaml.YAML.load')
    def test_print_hostnames(self, load_mock):
        mock_io = mock.mock_open(read_data='')
        load_mock.return_value = OrderedDict({'all': {'hosts': {
            'node1': {'ansible_host': '10.90.0.2',
                      'ip': '10.90.0.2',
                      'access_ip': '10.90.0.2'},
            'node2': {'ansible_host': '10.90.0.3',
                      'ip': '10.90.0.3',
                      'access_ip': '10.90.0.3'}}}})
        with mock.patch('builtins.open', mock_io):
            with self.assertRaises(SystemExit) as cm:
                with mock.patch('sys.stdout', new_callable=StringIO) as stdout:
                    inventory.KubesprayInventory(
                        changed_hosts=["print_hostnames"],
                        config_file="file")
        self.assertEqual("node1 node2\n", stdout.getvalue())
        self.assertEqual(cm.exception.code, 0)


class TestInventory(unittest.TestCase):
    @mock.patch('inventory.sys')
    def setUp(self, sys_mock):
@ -90,14 +67,23 @@ class TestInventory(unittest.TestCase):
        self.assertRaisesRegex(ValueError, "Host name must end in an",
                               self.inv.get_host_id, hostname)

    def test_build_hostnames_add_one(self):
        changed_hosts = ['10.90.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '10.90.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '10.90.0.2'})])
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_duplicate(self):
        changed_hosts = ['10.90.0.2']
        expected = OrderedDict([('node3',
        expected = OrderedDict([('node1',
                                 {'ansible_host': '10.90.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '10.90.0.2'})])
        self.inv.yaml_config['all']['hosts'] = expected
        result = self.inv.build_hostnames(changed_hosts, True)
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_two(self):
@ -113,30 +99,6 @@ class TestInventory(unittest.TestCase):
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_three(self):
        changed_hosts = ['10.90.0.2', '10.90.0.3', '10.90.0.4']
        expected = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'}),
            ('node3', {'ansible_host': '10.90.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '10.90.0.4'})])
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_one(self):
        changed_hosts = ['10.90.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '10.90.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '10.90.0.2'})])
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_delete_first(self):
        changed_hosts = ['-10.90.0.2']
        existing_hosts = OrderedDict([
@ -151,24 +113,7 @@ class TestInventory(unittest.TestCase):
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    def test_build_hostnames_delete_by_hostname(self):
        changed_hosts = ['-node1']
        existing_hosts = OrderedDict([
            ('node1', {'ansible_host': '10.90.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '10.90.0.2'}),
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        self.inv.yaml_config['all']['hosts'] = existing_hosts
        expected = OrderedDict([
            ('node2', {'ansible_host': '10.90.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '10.90.0.3'})])
        result = self.inv.build_hostnames(changed_hosts, True)
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_exists_hostname_positive(self):
@ -277,11 +222,11 @@ class TestInventory(unittest.TestCase):
            self.inv.yaml_config['all']['children'][group]['hosts'].get(host),
            None)

    def test_set_kube_control_plane(self):
        group = 'kube_control_plane'
    def test_set_kube_master(self):
        group = 'kube-master'
        host = 'node1'

        self.inv.set_kube_control_plane([host])
        self.inv.set_kube_master([host])
        self.assertIn(
            host, self.inv.yaml_config['all']['children'][group]['hosts'])

@ -296,8 +241,8 @@ class TestInventory(unittest.TestCase):
            self.inv.yaml_config['all']['hosts'].get(host), opt)

    def test_set_k8s_cluster(self):
        group = 'k8s_cluster'
        expected_hosts = ['kube_node', 'kube_control_plane']
        group = 'k8s-cluster'
        expected_hosts = ['kube-node', 'kube-master']

        self.inv.set_k8s_cluster()
        for host in expected_hosts:
@ -306,7 +251,7 @@ class TestInventory(unittest.TestCase):
                self.inv.yaml_config['all']['children'][group]['children'])

    def test_set_kube_node(self):
        group = 'kube_node'
        group = 'kube-node'
        host = 'node1'

        self.inv.set_kube_node([host])
@ -330,12 +275,12 @@ class TestInventory(unittest.TestCase):

        self.inv.set_all(hosts)
        self.inv.set_etcd(list(hosts.keys())[0:3])
        self.inv.set_kube_control_plane(list(hosts.keys())[0:2])
        self.inv.set_kube_master(list(hosts.keys())[0:2])
        self.inv.set_kube_node(hosts.keys())
        for h in range(3):
            self.assertFalse(
                list(hosts.keys())[h] in
                self.inv.yaml_config['all']['children']['kube_node']['hosts'])
                self.inv.yaml_config['all']['children']['kube-node']['hosts'])

    def test_scale_scenario_two(self):
        num_nodes = 500
@ -346,12 +291,12 @@ class TestInventory(unittest.TestCase):

        self.inv.set_all(hosts)
        self.inv.set_etcd(list(hosts.keys())[0:3])
        self.inv.set_kube_control_plane(list(hosts.keys())[3:5])
        self.inv.set_kube_master(list(hosts.keys())[3:5])
        self.inv.set_kube_node(hosts.keys())
        for h in range(5):
            self.assertFalse(
                list(hosts.keys())[h] in
                self.inv.yaml_config['all']['children']['kube_node']['hosts'])
                self.inv.yaml_config['all']['children']['kube-node']['hosts'])

    def test_range2ips_range(self):
        changed_hosts = ['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8']
@ -368,7 +313,7 @@ class TestInventory(unittest.TestCase):
        self.assertRaisesRegex(Exception, "Range of ip_addresses isn't valid",
                               self.inv.range2ips, host_range)

    def test_build_hostnames_create_with_one_different_ips(self):
    def test_build_hostnames_different_ips_add_one(self):
        changed_hosts = ['10.90.0.2,192.168.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '192.168.0.2',
@ -377,7 +322,17 @@ class TestInventory(unittest.TestCase):
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_create_with_two_different_ips(self):
    def test_build_hostnames_different_ips_add_duplicate(self):
        changed_hosts = ['10.90.0.2,192.168.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '192.168.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '192.168.0.2'})])
        self.inv.yaml_config['all']['hosts'] = expected
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_different_ips_add_two(self):
        changed_hosts = ['10.90.0.2,192.168.0.2', '10.90.0.3,192.168.0.3']
        expected = OrderedDict([
            ('node1', {'ansible_host': '192.168.0.2',
@ -386,210 +341,6 @@ class TestInventory(unittest.TestCase):
            ('node2', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'})])
        self.inv.yaml_config['all']['hosts'] = OrderedDict()
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_create_with_three_different_ips(self):
        changed_hosts = ['10.90.0.2,192.168.0.2',
                         '10.90.0.3,192.168.0.3',
                         '10.90.0.4,192.168.0.4']
        expected = OrderedDict([
            ('node1', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node2', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node3', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_overwrite_one_with_different_ips(self):
        changed_hosts = ['10.90.0.2,192.168.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '192.168.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '192.168.0.2'})])
        existing = OrderedDict([('node5',
                                 {'ansible_host': '192.168.0.5',
                                  'ip': '10.90.0.5',
                                  'access_ip': '192.168.0.5'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_overwrite_three_with_different_ips(self):
        changed_hosts = ['10.90.0.2,192.168.0.2']
        expected = OrderedDict([('node1',
                                 {'ansible_host': '192.168.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '192.168.0.2'})])
        existing = OrderedDict([
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'}),
            ('node5', {'ansible_host': '192.168.0.5',
                       'ip': '10.90.0.5',
                       'access_ip': '192.168.0.5'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_different_ips_add_duplicate(self):
        changed_hosts = ['10.90.0.2,192.168.0.2']
        expected = OrderedDict([('node3',
                                 {'ansible_host': '192.168.0.2',
                                  'ip': '10.90.0.2',
                                  'access_ip': '192.168.0.2'})])
        existing = expected
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_two_different_ips_into_one_existing(self):
        changed_hosts = ['10.90.0.3,192.168.0.3', '10.90.0.4,192.168.0.4']
        expected = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])

        existing = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_two_different_ips_into_two_existing(self):
        changed_hosts = ['10.90.0.4,192.168.0.4', '10.90.0.5,192.168.0.5']
        expected = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'}),
            ('node5', {'ansible_host': '192.168.0.5',
                       'ip': '10.90.0.5',
                       'access_ip': '192.168.0.5'})])

        existing = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_two_different_ips_into_three_existing(self):
        changed_hosts = ['10.90.0.5,192.168.0.5', '10.90.0.6,192.168.0.6']
        expected = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'}),
            ('node5', {'ansible_host': '192.168.0.5',
                       'ip': '10.90.0.5',
                       'access_ip': '192.168.0.5'}),
            ('node6', {'ansible_host': '192.168.0.6',
                       'ip': '10.90.0.6',
                       'access_ip': '192.168.0.6'})])

        existing = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    # Add two IP addresses into a config that has
    # three already defined IP addresses. One of the IP addresses
    # is a duplicate.
    def test_build_hostnames_add_two_duplicate_one_overlap(self):
        changed_hosts = ['10.90.0.4,192.168.0.4', '10.90.0.5,192.168.0.5']
        expected = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'}),
            ('node5', {'ansible_host': '192.168.0.5',
                       'ip': '10.90.0.5',
                       'access_ip': '192.168.0.5'})])

        existing = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)

    # Add two duplicate IP addresses into a config that has
    # three already defined IP addresses
    def test_build_hostnames_add_two_duplicate_two_overlap(self):
        changed_hosts = ['10.90.0.3,192.168.0.3', '10.90.0.4,192.168.0.4']
        expected = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])

        existing = OrderedDict([
            ('node2', {'ansible_host': '192.168.0.2',
                       'ip': '10.90.0.2',
                       'access_ip': '192.168.0.2'}),
            ('node3', {'ansible_host': '192.168.0.3',
                       'ip': '10.90.0.3',
                       'access_ip': '192.168.0.3'}),
            ('node4', {'ansible_host': '192.168.0.4',
                       'ip': '10.90.0.4',
                       'access_ip': '192.168.0.4'})])
        self.inv.yaml_config['all']['hosts'] = existing
        result = self.inv.build_hostnames(changed_hosts, True)
        self.assertEqual(expected, result)
@ -1,27 +1,21 @@
[tox]
minversion = 1.6
skipsdist = True
envlist = pep8
envlist = pep8, py33

[testenv]
allowlist_externals = py.test
whitelist_externals = py.test
usedevelop = True
deps =
    -r{toxinidir}/requirements.txt
    -r{toxinidir}/test-requirements.txt
setenv = VIRTUAL_ENV={envdir}
passenv =
    http_proxy
    HTTP_PROXY
    https_proxy
    HTTPS_PROXY
    no_proxy
    NO_PROXY
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
commands = pytest -vv #{posargs:./tests}

[testenv:pep8]
usedevelop = False
allowlist_externals = bash
whitelist_externals = bash
commands =
    bash -c "find {toxinidir}/* -type f -name '*.py' -print0 | xargs -0 flake8"
@ -5,7 +5,7 @@ deployment on VMs.

This playbook does not create Virtual Machines, nor does it run Kubespray itself.

## User creation
### User creation

If you want to create a user for running Kubespray deployment, you should specify
both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`.

@ -1,2 +1,3 @@
#k8s_deployment_user: kubespray
#k8s_deployment_user_pkey_path: /tmp/ssh_rsa

@ -1,9 +1,8 @@
---
- name: Prepare Hypervisor to later install kubespray VMs
  hosts: localhost
- hosts: localhost
  gather_facts: False
  become: yes
  vars:
    bootstrap_os: none
  - bootstrap_os: none
  roles:
    - { role: kvm-setup }
    - kvm-setup

@ -1,7 +1,7 @@
---

- name: Install required packages
  package:
  yum:
    name: "{{ item }}"
    state: present
  with_items:
@ -22,9 +22,9 @@
    - ntp
  when: ansible_os_family == "Debian"

- name: Create deployment user if required
  include_tasks: user.yml
# Create deployment user if required
- include: user.yml
  when: k8s_deployment_user is defined

- name: Set proper sysctl values
  import_tasks: sysctl.yml
# Set proper sysctl values
- include: sysctl.yml
@ -1,6 +1,6 @@
---
- name: Load br_netfilter module
  community.general.modprobe:
  modprobe:
    name: br_netfilter
    state: present
  register: br_netfilter

@ -20,24 +20,24 @@
      br-netfilter
    owner: root
    group: root
    mode: "0644"
    mode: 0644
  when: br_netfilter is defined


- name: Enable net.ipv4.ip_forward in sysctl
  ansible.posix.sysctl:
  sysctl:
    name: net.ipv4.ip_forward
    value: 1
    sysctl_file: "{{ sysctl_file_path }}"
    sysctl_file: /etc/sysctl.d/ipv4-ip_forward.conf
    state: present
    reload: yes

- name: Set bridge-nf-call-{arptables,iptables} to 0
  ansible.posix.sysctl:
  sysctl:
    name: "{{ item }}"
    state: present
    value: 0
    sysctl_file: "{{ sysctl_file_path }}"
    sysctl_file: /etc/sysctl.d/bridge-nf-call.conf
    reload: yes
  with_items:
    - net.bridge.bridge-nf-call-arptables
@ -11,7 +11,6 @@
    state: directory
    owner: "{{ k8s_deployment_user }}"
    group: "{{ k8s_deployment_user }}"
    mode: "0700"

- name: Configure sudo for deployment user
  copy:
@ -20,13 +19,13 @@
    dest: "/etc/sudoers.d/55-k8s-deployment"
    owner: root
    group: root
    mode: "0644"
    mode: 0644

- name: Write private SSH key
  copy:
    src: "{{ k8s_deployment_user_pkey_path }}"
    dest: "/home/{{ k8s_deployment_user }}/.ssh/id_rsa"
    mode: "0400"
    mode: 0400
    owner: "{{ k8s_deployment_user }}"
    group: "{{ k8s_deployment_user }}"
  when: k8s_deployment_user_pkey_path is defined
@ -41,7 +40,7 @@
- name: Fix ssh-pub-key permissions
  file:
    path: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
    mode: "0600"
    mode: 0600
    owner: "{{ k8s_deployment_user }}"
    group: "{{ k8s_deployment_user }}"
  when: k8s_deployment_user_pkey_path is defined
@ -1,51 +0,0 @@
---
- name: Check ansible version
  import_playbook: kubernetes_sigs.kubespray.ansible_version

- name: Install mitogen
  hosts: localhost
  strategy: linear
  vars:
    mitogen_version: 0.3.2
    mitogen_url: https://github.com/mitogen-hq/mitogen/archive/refs/tags/v{{ mitogen_version }}.tar.gz
    ansible_connection: local
  tasks:
    - name: Create mitogen plugin dir
      file:
        path: "{{ item }}"
        state: directory
        mode: "0755"
      become: false
      loop:
        - "{{ playbook_dir }}/plugins/mitogen"
        - "{{ playbook_dir }}/dist"

    - name: Download mitogen release
      get_url:
        url: "{{ mitogen_url }}"
        dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
        validate_certs: true
        mode: "0644"

    - name: Extract archive
      unarchive:
        src: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.tar.gz"
        dest: "{{ playbook_dir }}/dist/"

    - name: Copy plugin
      ansible.posix.synchronize:
        src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
        dest: "{{ playbook_dir }}/plugins/mitogen"

    - name: Add strategy to ansible.cfg
      community.general.ini_file:
        path: ansible.cfg
        mode: "0644"
        section: "{{ item.section | d('defaults') }}"
        option: "{{ item.option }}"
        value: "{{ item.value }}"
      with_items:
        - option: strategy
          value: mitogen_linear
        - option: strategy_plugins
          value: plugins/mitogen/ansible_mitogen/plugins/strategy
@ -8,19 +8,19 @@ In the same directory of this ReadMe file you should find a file named `inventor

Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/sample/k8s_gfs_inventory`. Make sure that the settings on `inventory/sample/group_vars/all.yml` make sense with your deployment. Then execute change to the kubespray root folder, and execute (supposing that the machines are all using ubuntu):

```shell
```
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./cluster.yml
```

This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, from the same directory execute:

```shell
```
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
```

If your machines are not using Ubuntu, you need to change the `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines are using one OS and your GlusterFS a different one, you can instead specify the `ansible_ssh_user=<correct-user>` variable in the inventory file that you just created, for each machine/VM:

```shell
```
k8s-master-1 ansible_ssh_host=192.168.0.147 ip=192.168.0.147 ansible_ssh_user=core
k8s-master-node-1 ansible_ssh_host=192.168.0.148 ip=192.168.0.148 ansible_ssh_user=core
k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_user=core
@ -30,7 +30,7 @@ k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_us

First step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:

```ini
```
cluster_name = "cluster1"
number_of_k8s_masters = "1"
number_of_k8s_masters_no_floating_ip = "2"
@ -39,7 +39,7 @@ number_of_k8s_nodes = "0"
public_key_path = "~/.ssh/my-desired-key.pub"
image = "Ubuntu 16.04"
ssh_user = "ubuntu"
flavor_k8s_node = "node-flavor-id-in-your-openstack"
flavor_k8s_node = "node-flavor-id-in-your-openstack"
flavor_k8s_master = "master-flavor-id-in-your-openstack"
network_name = "k8s-network"
floatingip_pool = "net_external"
@ -54,7 +54,7 @@ ssh_user_gfs = "ubuntu"

As explained in the general terraform/openstack guide, you need to source your OpenStack credentials file, add your ssh-key to the ssh-agent and setup environment variables for terraform:

```shell
```
$ source ~/.stackrc
$ eval $(ssh-agent -s)
$ ssh-add ~/.ssh/my-desired-key
@ -67,7 +67,7 @@ $ echo Setting up Terraform creds && \

Then, standing on the kubespray directory (root base of the Git checkout), issue the following terraform command to create the VMs for the cluster:

```shell
```
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
```

@ -75,18 +75,18 @@ This will create both your Kubernetes and Gluster VMs. Make sure that the ansibl

Then, provision your Kubernetes (kubespray) cluster with the following ansible call:

```shell
```
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
```

Finally, provision the glusterfs nodes and add the Persistent Volume setup for GlusterFS in Kubernetes through the following ansible call:

```shell
```
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
```

If you need to destroy the cluster, you can run:

```shell
```
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
```
@@ -1,29 +1,24 @@
---
- name: Bootstrap hosts
  hosts: gfs-cluster
- hosts: gfs-cluster
  gather_facts: false
  vars:
    ansible_ssh_pipelining: false
  roles:
    - { role: bootstrap-os, tags: bootstrap-os }

- name: Gather facts
  hosts: all
- hosts: all
  gather_facts: true

- name: Install glusterfs server
  hosts: gfs-cluster
- hosts: gfs-cluster
  vars:
    ansible_ssh_pipelining: true
  roles:
    - { role: glusterfs/server }

- name: Install glusterfs client
  hosts: k8s_cluster
- hosts: k8s-cluster
  roles:
    - { role: glusterfs/client }

- name: Configure Kubernetes to use glusterfs
  hosts: kube_control_plane[0]
- hosts: kube-master[0]
  roles:
    - { role: kubernetes-pv }
@@ -11,10 +11,10 @@
# ## Set disk_volume_device_1 to desired device for gluster brick, if different to /dev/vdb (default).
# ## As in the previous case, you can set ip to give direct communication on internal IPs
# gfs_node1 ansible_ssh_host=95.54.0.18 # disk_volume_device_1=/dev/vdc ip=10.3.0.7
# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9

# [kube_control_plane]
# [kube-master]
# node1
# node2

@@ -23,16 +23,16 @@
# node2
# node3

# [kube_node]
# [kube-node]
# node2
# node3
# node4
# node5
# node6

# [k8s_cluster:children]
# kube_node
# kube_control_plane
# [k8s-cluster:children]
# kube-node
# kube-master

# [gfs-cluster]
# gfs_node1

@@ -41,3 +41,4 @@

# [network-storage:children]
# gfs-cluster
@@ -8,22 +8,18 @@ Installs and configures GlusterFS on Linux.

For GlusterFS to communicate between servers, TCP ports `24007`, `24008`, and `24009`/`49152`+ (one brick port per additional server in the cluster; `49152`+ applies to GlusterFS 3.4+), plus TCP/UDP port `111`, must be open. You can open these using whatever firewall you wish (this can easily be configured using the `geerlingguy.firewall` role).
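If you manage the firewall with Ansible too, that rule set can be expressed directly as role variables. A minimal sketch using the `geerlingguy.firewall` role mentioned above (the variable names follow that role's documented defaults; the brick-port range assumes a three-server cluster on GlusterFS 3.4+):

```yaml
# Sketch: open the GlusterFS ports with the geerlingguy.firewall role.
# Adjust the 49152+ range to one port per brick/server in your cluster.
- hosts: gfs-cluster
  become: true
  vars:
    firewall_allowed_tcp_ports:
      - "111"     # portmapper
      - "24007"   # glusterd
      - "24008"   # management
      - "49152"   # brick ports, one per server
      - "49153"
      - "49154"
    firewall_allowed_udp_ports:
      - "111"
  roles:
    - geerlingguy.firewall
```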

This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/ansible/latest/collections/gluster/gluster/gluster_volume_module.html) module to ease the management of Gluster volumes.
This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/gluster_volume_module.html) module to ease the management of Gluster volumes.
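For instance, once this role has run on your servers, a follow-up play can create and start a replicated volume with that module; a minimal sketch (the volume name, brick path, and use of inventory hostnames in `cluster` are illustrative):

```yaml
# Sketch: create a replicated Gluster volume from your own play,
# after this role has installed and started glusterd everywhere.
- hosts: gfs-cluster
  become: true
  tasks:
    - name: Create gluster volume across all gfs-cluster hosts
      gluster_volume:
        state: present                      # create the volume if missing
        name: examplevol                    # illustrative volume name
        brick: /srv/gluster/examplevol      # illustrative brick path
        replicas: "{{ groups['gfs-cluster'] | length }}"
        cluster: "{{ groups['gfs-cluster'] | join(',') }}"
        host: "{{ inventory_hostname }}"
        force: yes
      run_once: true
```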

## Role Variables

Available variables are listed below, along with default values (see `defaults/main.yml`):

```yaml
glusterfs_default_release: ""
```
glusterfs_default_release: ""

You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).
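For example, to pull the GlusterFS packages from backports on Debian Wheezy, the override could look like this (an illustrative snippet, e.g. in your `group_vars`):

```yaml
# Illustrative override: install GlusterFS from wheezy-backports.
glusterfs_default_release: "wheezy-backports"
```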

```yaml
glusterfs_ppa_use: yes
glusterfs_ppa_version: "3.5"
```
glusterfs_ppa_use: yes
glusterfs_ppa_version: "3.5"

For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/) for more info.
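Internally, enabling the PPA presumably amounts to an `apt_repository` call along these lines (a sketch only; the exact PPA id the role uses is an assumption):

```yaml
# Sketch: what glusterfs_ppa_use effectively enables on Ubuntu.
# The ppa:gluster/glusterfs-<version> naming is an assumption.
- hosts: gfs-cluster
  become: true
  vars:
    glusterfs_ppa_use: yes
    glusterfs_ppa_version: "3.5"
  tasks:
    - name: Add GlusterFS PPA
      apt_repository:
        repo: "ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}"
        state: present
      when: glusterfs_ppa_use | bool
```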

@@ -33,11 +29,9 @@ None.

## Example Playbook

```yaml
- hosts: server
  roles:
    - geerlingguy.glusterfs
```

For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/).
@@ -6,12 +6,12 @@ galaxy_info:
  description: GlusterFS installation for Linux.
  company: "Midwestern Mac, LLC"
  license: "license (BSD, MIT)"
  min_ansible_version: "2.0"
  min_ansible_version: 2.0
  platforms:
    - name: EL
      versions:
        - "6"
        - "7"
        - 6
        - 7
    - name: Ubuntu
      versions:
        - precise
@@ -3,19 +3,14 @@
# hyperkube and needs to be installed as part of the system.

# Setup/install tasks.
- name: Setup RedHat distros for glusterfs
  include_tasks: setup-RedHat.yml
- include: setup-RedHat.yml
  when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined

- name: Setup Debian distros for glusterfs
  include_tasks: setup-Debian.yml
- include: setup-Debian.yml
  when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined

- name: Ensure Gluster mount directories exist.
  file:
    path: "{{ item }}"
    state: directory
    mode: "0775"
  file: "path={{ item }} state=directory mode=0775"
  with_items:
    - "{{ gluster_mount_dir }}"
  when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined
@@ -7,7 +7,7 @@
  register: glusterfs_ppa_added
  when: glusterfs_ppa_use

- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa no-handler
- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa 503
  apt:
    name: "{{ item }}"
    state: absent
@@ -1,14 +1,10 @@
---
- name: Install Prerequisites
  package:
    name: "{{ item }}"
    state: present
  yum: name={{ item }} state=present
  with_items:
    - "centos-release-gluster{{ glusterfs_default_release }}"

- name: Install Packages
  package:
    name: "{{ item }}"
    state: present
  yum: name={{ item }} state=present
  with_items:
    - glusterfs-client
@@ -6,12 +6,12 @@ galaxy_info:
  description: GlusterFS installation for Linux.
  company: "Midwestern Mac, LLC"
  license: "license (BSD, MIT)"
  min_ansible_version: "2.0"
  min_ansible_version: 2.0
  platforms:
    - name: EL
      versions:
        - "6"
        - "7"
        - 6
        - 7
    - name: Ubuntu
      versions:
        - precise
@@ -4,110 +4,90 @@
  include_vars: "{{ ansible_os_family }}.yml"

# Install xfs package
- name: Install xfs Debian
  apt:
    name: xfsprogs
    state: present
- name: install xfs Debian
  apt: name=xfsprogs state=present
  when: ansible_os_family == "Debian"

- name: Install xfs RedHat
  package:
    name: xfsprogs
    state: present
- name: install xfs RedHat
  yum: name=xfsprogs state=present
  when: ansible_os_family == "RedHat"

# Format external volumes in xfs
- name: Format volumes in xfs
  community.general.filesystem:
    fstype: xfs
    dev: "{{ disk_volume_device_1 }}"
  filesystem: "fstype=xfs dev={{ disk_volume_device_1 }}"

# Mount external volumes
- name: Mounting new xfs filesystem
  ansible.posix.mount:
    name: "{{ gluster_volume_node_mount_dir }}"
    src: "{{ disk_volume_device_1 }}"
    fstype: xfs
    state: mounted
- name: mounting new xfs filesystem
  mount: "name={{ gluster_volume_node_mount_dir }} src={{ disk_volume_device_1 }} fstype=xfs state=mounted"

# Setup/install tasks.
- name: Setup RedHat distros for glusterfs
  include_tasks: setup-RedHat.yml
- include: setup-RedHat.yml
  when: ansible_os_family == 'RedHat'

- name: Setup Debian distros for glusterfs
  include_tasks: setup-Debian.yml
- include: setup-Debian.yml
  when: ansible_os_family == 'Debian'

- name: Ensure GlusterFS is started and enabled at boot.
  service:
    name: "{{ glusterfs_daemon }}"
    state: started
    enabled: yes
  service: "name={{ glusterfs_daemon }} state=started enabled=yes"

- name: Ensure Gluster brick and mount directories exist.
  file:
    path: "{{ item }}"
    state: directory
    mode: "0775"
  file: "path={{ item }} state=directory mode=0775"
  with_items:
    - "{{ gluster_brick_dir }}"
    - "{{ gluster_mount_dir }}"

- name: Configure Gluster volume with replicas
  gluster.gluster.gluster_volume:
  gluster_volume:
    state: present
    name: "{{ gluster_brick_name }}"
    brick: "{{ gluster_brick_dir }}"
    replicas: "{{ groups['gfs-cluster'] | length }}"
    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
    host: "{{ inventory_hostname }}"
    force: yes
  run_once: true
  when: groups['gfs-cluster'] | length > 1
  when: groups['gfs-cluster']|length > 1

- name: Configure Gluster volume without replicas
  gluster.gluster.gluster_volume:
  gluster_volume:
    state: present
    name: "{{ gluster_brick_name }}"
    brick: "{{ gluster_brick_dir }}"
    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
    host: "{{ inventory_hostname }}"
    force: yes
  run_once: true
  when: groups['gfs-cluster'] | length <= 1
  when: groups['gfs-cluster']|length <= 1

- name: Mount glusterfs to retrieve disk size
  ansible.posix.mount:
  mount:
    name: "{{ gluster_mount_dir }}"
    src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
    src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
    fstype: glusterfs
    opts: "defaults,_netdev"
    state: mounted
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Get Gluster disk size
  setup:
    filter: ansible_mounts
  setup: filter=ansible_mounts
  register: mounts_data
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Set Gluster disk size to variable
  set_fact:
    gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024 * 1024 * 1024)) | int }}"
    gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024*1024*1024)) | int }}"
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Create file on GlusterFS
  template:
    dest: "{{ gluster_mount_dir }}/.test-file.txt"
    src: test-file.txt
    mode: "0644"
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Unmount glusterfs
  ansible.posix.mount:
  mount:
    name: "{{ gluster_mount_dir }}"
    fstype: glusterfs
    src: "{{ ip | default(ansible_default_ipv4['address']) }}:/gluster"
    src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
    state: unmounted
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
@@ -7,7 +7,7 @@
  register: glusterfs_ppa_added
  when: glusterfs_ppa_use

- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa no-handler
- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa 503
  apt:
    name: "{{ item }}"
    state: absent
@@ -1,15 +1,11 @@
---
- name: Install Prerequisites
  package:
    name: "{{ item }}"
    state: present
  yum: name={{ item }} state=present
  with_items:
    - "centos-release-gluster{{ glusterfs_default_release }}"

- name: Install Packages
  package:
    name: "{{ item }}"
    state: present
  yum: name={{ item }} state=present
  with_items:
    - glusterfs-server
    - glusterfs-client
@@ -0,0 +1,5 @@
---
- hosts: all

  roles:
    - role_under_test
@@ -3,13 +3,12 @@
  template:
    src: "{{ item.file }}"
    dest: "{{ kube_config_dir }}/{{ item.dest }}"
    mode: "0644"
  with_items:
    - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json }
    - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml }
    - { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json }
  register: gluster_pv
  when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
  when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined

- name: Kubernetes Apps | Set GlusterFS endpoint and PV
  kube:

@@ -18,6 +17,6 @@
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
    state: "{{ item.changed | ternary('latest', 'present') }}"
    state: "{{ item.changed | ternary('latest','present') }}"
  with_items: "{{ gluster_pv.results }}"
  when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
  when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined
@@ -21,3 +21,4 @@
{% endfor %}
  ]
}
@@ -1,26 +1,17 @@
# Deploy Heketi/Glusterfs into Kubespray/Kubernetes

This playbook aims to automate [this](https://github.com/heketi/heketi/blob/master/docs/admin/install-kubernetes.md) tutorial. It deploys Heketi/GlusterFS into Kubernetes and sets up a StorageClass.
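Once deployed, workloads can request Gluster-backed storage through that StorageClass with an ordinary PersistentVolumeClaim; a minimal sketch (the class name `gluster` is an assumption, check `kubectl get storageclass` after the install):

```yaml
# Sketch: claim a Gluster-backed volume via the heketi-provisioned StorageClass.
# The storageClassName is an assumption; use the name the playbook actually created.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gluster-pvc
spec:
  storageClassName: gluster
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
```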

## Important notice

> Due to resource limits on the current project maintainers and general lack of contributions we are considering placing Heketi into a [near-maintenance mode](https://github.com/heketi/heketi#important-notice)

## Client Setup

Heketi provides a CLI that gives users a means to administer the deployment and configuration of GlusterFS in Kubernetes. [Download and install the heketi-cli](https://github.com/heketi/heketi/releases) on your client machine.
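After the install playbook below has run, you can verify connectivity from that client machine; a sketch wrapped as a local Ansible task (the server URL is a placeholder for your heketi Service endpoint, and the secret must match `heketi_admin_key` from your inventory):

```yaml
# Sketch: confirm heketi-cli can reach the deployed heketi service.
# http://heketi.example.local:8080 is an assumed placeholder endpoint.
- hosts: localhost
  tasks:
    - name: List heketi clusters
      command: >-
        heketi-cli --server http://heketi.example.local:8080
        --user admin --secret {{ heketi_admin_key }}
        cluster list
      changed_when: false
```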

## Install

Copy `inventory.yml.sample` over to `inventory/sample/k8s_heketi_inventory.yml` and change it according to your setup.

```shell
```
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi.yml
```

## Tear down

```shell
```
ansible-playbook --ask-become -i inventory/sample/k8s_heketi_inventory.yml contrib/network-storage/heketi/heketi-tear-down.yml
```
@@ -1,11 +1,9 @@
---
- name: Tear down heketi
  hosts: kube_control_plane[0]
- hosts: kube-master[0]
  roles:
    - { role: tear-down }

- name: Teardown disks in heketi
  hosts: heketi-node
- hosts: heketi-node
  become: yes
  roles:
    - { role: tear-down-disks }
@@ -1,11 +1,9 @@
---
- name: Prepare heketi install
  hosts: heketi-node
- hosts: heketi-node
  roles:
    - { role: prepare }

- name: Provision heketi
  hosts: kube_control_plane[0]
- hosts: kube-master[0]
  tags:
    - "provision"
  roles:
@@ -2,25 +2,18 @@ all:
  vars:
    heketi_admin_key: "11elfeinhundertundelf"
    heketi_user_key: "!!einseinseins"
    glusterfs_daemonset:
      readiness_probe:
        timeout_seconds: 3
        initial_delay_seconds: 3
      liveness_probe:
        timeout_seconds: 3
        initial_delay_seconds: 10
  children:
    k8s_cluster:
    k8s-cluster:
      vars:
        kubelet_fail_swap_on: false
      children:
        kube_control_plane:
        kube-master:
          hosts:
            node1:
        etcd:
          hosts:
            node2:
        kube_node:
        kube-node:
          hosts: &kube_nodes
            node1:
            node2:
@@ -5,13 +5,13 @@
    - "dm_snapshot"
    - "dm_mirror"
    - "dm_thin_pool"
  community.general.modprobe:
  modprobe:
    name: "{{ item }}"
    state: "present"

- name: "Install glusterfs mount utils (RedHat)"
  become: true
  package:
  yum:
    name: "glusterfs-fuse"
    state: "present"
  when: "ansible_os_family == 'RedHat'"
@@ -1,3 +1,3 @@
---
- name: "Stop port forwarding"
- name: "stop port forwarding"
  command: "killall "
@@ -7,9 +7,9 @@

- name: "Bootstrap heketi."
  when:
    - "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Service']\")) | length == 0"
    - "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Deployment']\")) | length == 0"
    - "(initial_heketi_state.stdout | from_json | json_query(\"items[?kind=='Pod']\")) | length == 0"
    - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Service']\"))|length == 0"
    - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Deployment']\"))|length == 0"
    - "(initial_heketi_state.stdout|from_json|json_query(\"items[?kind=='Pod']\"))|length == 0"
  include_tasks: "bootstrap/deploy.yml"

# Prepare heketi topology

@@ -20,11 +20,11 @@

- name: "Ensure heketi bootstrap pod is up."
  assert:
    that: "(initial_heketi_pod.stdout | from_json | json_query('items[*]')) | length == 1"
    that: "(initial_heketi_pod.stdout|from_json|json_query('items[*]'))|length == 1"

- name: Store the initial heketi pod name
  set_fact:
    initial_heketi_pod_name: "{{ initial_heketi_pod.stdout | from_json | json_query(\"items[*].metadata.name | [0]\") }}"
    initial_heketi_pod_name: "{{ initial_heketi_pod.stdout|from_json|json_query(\"items[*].metadata.name|[0]\") }}"

- name: "Test heketi topology."
  changed_when: false

@@ -32,7 +32,7 @@
  command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology info --json"

- name: "Load heketi topology."
  when: "heketi_topology.stdout | from_json | json_query(\"clusters[*].nodes[*]\") | flatten | length == 0"
  when: "heketi_topology.stdout|from_json|json_query(\"clusters[*].nodes[*]\")|flatten|length == 0"
  include_tasks: "bootstrap/topology.yml"

# Provision heketi database volume

@@ -58,7 +58,7 @@
      service_query: "items[?metadata.name=='heketi-storage-endpoints' && kind=='Service']"
      job_query: "items[?metadata.name=='heketi-storage-copy-job' && kind=='Job']"
  when:
    - "heketi_storage_state.stdout | from_json | json_query(secret_query) | length == 0"
    - "heketi_storage_state.stdout | from_json | json_query(endpoints_query) | length == 0"
    - "heketi_storage_state.stdout | from_json | json_query(service_query) | length == 0"
    - "heketi_storage_state.stdout | from_json | json_query(job_query) | length == 0"
    - "heketi_storage_state.stdout|from_json|json_query(secret_query)|length == 0"
    - "heketi_storage_state.stdout|from_json|json_query(endpoints_query)|length == 0"
    - "heketi_storage_state.stdout|from_json|json_query(service_query)|length == 0"
    - "heketi_storage_state.stdout|from_json|json_query(job_query)|length == 0"