Compare commits
317 Commits
.github/ISSUE_TEMPLATE/bug-report.md (vendored, 6 changes)
```
@@ -18,6 +18,8 @@ explain why.

- **Version of Ansible** (`ansible --version`):

- **Version of Python** (`python --version`):

**Kubespray version (commit) (`git rev-parse --short HEAD`):**

@@ -25,8 +27,8 @@ explain why.

**Network plugin used**:

**Copy of your inventory file:**

**Full inventory with variables (`ansible -i inventory/sample/inventory.ini all -m debug -a "var=hostvars[inventory_hostname]"`):**
<!-- We recommend using snippets services like https://gist.github.com/ etc. -->

**Command used to invoke ansible**:
```
.github/PULL_REQUEST_TEMPLATE.md (vendored, 6 changes)
```
@@ -1,9 +1,9 @@

<!-- Thanks for sending a pull request! Here are some tips for you:

1. If this is your first time, please read our contributor guidelines: https://git.k8s.io/community/contributors/guide#your-first-contribution and developer guide https://git.k8s.io/community/contributors/devel/development.md#development-guide
1. If this is your first time, please read our contributor guidelines: https://git.k8s.io/community/contributors/guide/first-contribution.md and developer guide https://git.k8s.io/community/contributors/devel/development.md
2. Please label this pull request according to what type of issue you are addressing, especially if this is a release targeted pull request. For reference on required PR/issue labels, read here:
https://git.k8s.io/community/contributors/devel/release.md#issue-kind-label
3. Ensure you have added or ran the appropriate tests for your PR: https://git.k8s.io/community/contributors/devel/testing.md
https://git.k8s.io/community/contributors/devel/sig-release/release.md#issuepr-kind-label
3. Ensure you have added or ran the appropriate tests for your PR: https://git.k8s.io/community/contributors/devel/sig-testing/testing.md
4. If you want *faster* PR reviews, read how: https://git.k8s.io/community/contributors/guide/pull-requests.md#best-practices-for-faster-reviews
5. Follow the instructions for writing a release note: https://git.k8s.io/community/contributors/guide/release-notes.md
6. If the PR is unfinished, see how to mark it: https://git.k8s.io/community/contributors/guide/pull-requests.md#marking-unfinished-pull-requests
```
.gitlab-ci.yml

```
@@ -4,13 +4,13 @@ stages:
- deploy-part1
- moderator
- deploy-part2
- deploy-gce
- deploy-part3
- deploy-special

variables:
KUBESPRAY_VERSION: v2.12.9
FAILFASTCI_NAMESPACE: 'kargo-ci'
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
# DOCKER_HOST: tcp://localhost:2375
ANSIBLE_FORCE_COLOR: "true"
MAGIC: "ci check this"
TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"

@@ -26,7 +26,10 @@ variables:
IDEMPOT_CHECK: "false"
RESET_CHECK: "false"
UPGRADE_TEST: "false"
LOG_LEVEL: "-vv"
MITOGEN_ENABLE: "false"
ANSIBLE_LOG_LEVEL: "-vv"
RECOVER_CONTROL_PLANE_TEST: "false"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"

before_script:
- ./tests/scripts/rebase.sh

@@ -37,14 +40,13 @@ before_script:
.job: &job
tags:
- packet
variables:
KUBESPRAY_VERSION: v2.11.0
image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION
artifacts:
paths:
- cluster-dump/

.testcases: &testcases
<<: *job
services:
- docker:dind
before_script:
- update-alternatives --install /usr/bin/python python /usr/bin/python3 1
- ./tests/scripts/rebase.sh

@@ -52,7 +54,7 @@ before_script:
script:
- ./tests/scripts/testcases_run.sh
after_script:
- ./tests/scripts/testcases_cleanup.sh
- chronic ./tests/scripts/testcases_cleanup.sh

# For failfast, at least 1 job must be defined in .gitlab-ci.yml
# Premoderated with manual actions

@@ -70,3 +72,4 @@ include:
- .gitlab-ci/shellcheck.yml
- .gitlab-ci/terraform.yml
- .gitlab-ci/packet.yml
- .gitlab-ci/vagrant.yml
```
@ -1,247 +0,0 @@
|
||||
---
|
||||
.gce_variables: &gce_variables
|
||||
GCE_USER: travis
|
||||
SSH_USER: $GCE_USER
|
||||
CLOUD_MACHINE_TYPE: "g1-small"
|
||||
CI_PLATFORM: "gce"
|
||||
PRIVATE_KEY: $GCE_PRIVATE_KEY
|
||||
|
||||
.cache: &cache
|
||||
cache:
|
||||
key: "$CI_BUILD_REF_NAME"
|
||||
paths:
|
||||
- downloads/
|
||||
- $HOME/.cache
|
||||
|
||||
.gce: &gce
|
||||
extends: .testcases
|
||||
<<: *cache
|
||||
variables:
|
||||
<<: *gce_variables
|
||||
tags:
|
||||
- gce
|
||||
except: ['triggers']
|
||||
only: [/^pr-.*$/]
|
||||
|
||||
.centos_weave_kubeadm_variables: ¢os_weave_kubeadm_variables
|
||||
# stage: deploy-part1
|
||||
UPGRADE_TEST: "graceful"
|
||||
|
||||
.centos7_multus_calico_variables: ¢os7_multus_calico_variables
|
||||
# stage: deploy-gce
|
||||
UPGRADE_TEST: "graceful"
|
||||
|
||||
# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
|
||||
### PR JOBS PART1
|
||||
|
||||
gce_ubuntu18-flannel-aio:
|
||||
stage: deploy-part1
|
||||
<<: *gce
|
||||
when: manual
|
||||
|
||||
### PR JOBS PART2
|
||||
|
||||
gce_coreos-calico-aio:
|
||||
stage: deploy-gce
|
||||
<<: *gce
|
||||
when: on_success
|
||||
|
||||
gce_centos7-flannel-addons:
|
||||
stage: deploy-gce
|
||||
<<: *gce
|
||||
when: manual
|
||||
|
||||
### MANUAL JOBS
|
||||
|
||||
gce_centos-weave-kubeadm-sep:
|
||||
stage: deploy-gce
|
||||
extends: .gce
|
||||
variables:
|
||||
<<: *centos_weave_kubeadm_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
except: []
|
||||
|
||||
gce_ubuntu-weave-sep:
|
||||
stage: deploy-gce
|
||||
<<: *gce
|
||||
when: manual
|
||||
only: ['triggers']
|
||||
except: []
|
||||
|
||||
gce_coreos-calico-sep-triggers:
|
||||
stage: deploy-gce
|
||||
<<: *gce
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
except: []
|
||||
|
||||
gce_ubuntu-canal-ha-triggers:
|
||||
stage: deploy-special
|
||||
<<: *gce
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
except: []
|
||||
|
||||
gce_centos7-flannel-addons-triggers:
|
||||
stage: deploy-gce
|
||||
<<: *gce
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
except: []
|
||||
|
||||
gce_ubuntu-weave-sep-triggers:
|
||||
stage: deploy-gce
|
||||
<<: *gce
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
except: []
|
||||
|
||||
# More builds for PRs/merges (manual) and triggers (auto)
|
||||
|
||||
|
||||
gce_ubuntu-canal-ha:
|
||||
stage: deploy-special
|
||||
<<: *gce
|
||||
when: manual
|
||||
|
||||
gce_ubuntu-canal-kubeadm:
|
||||
stage: deploy-gce
|
||||
<<: *gce
|
||||
when: manual
|
||||
|
||||
gce_ubuntu-canal-kubeadm-triggers:
|
||||
stage: deploy-gce
|
||||
<<: *gce
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
except: []
|
||||
|
||||
gce_ubuntu-flannel-ha:
|
||||
stage: deploy-gce
|
||||
<<: *gce
|
||||
when: manual
|
||||
|
||||
gce_centos-weave-kubeadm-triggers:
|
||||
stage: deploy-gce
|
||||
extends: .gce
|
||||
variables:
|
||||
<<: *centos_weave_kubeadm_variables
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
except: []
|
||||
|
||||
gce_ubuntu-contiv-sep:
|
||||
stage: deploy-special
|
||||
<<: *gce
|
||||
when: manual
|
||||
|
||||
gce_coreos-cilium:
|
||||
stage: deploy-special
|
||||
<<: *gce
|
||||
when: manual
|
||||
|
||||
gce_ubuntu18-cilium-sep:
|
||||
stage: deploy-special
|
||||
<<: *gce
|
||||
when: manual
|
||||
|
||||
gce_rhel7-weave:
|
||||
stage: deploy-gce
|
||||
<<: *gce
|
||||
when: manual
|
||||
|
||||
gce_rhel7-weave-triggers:
|
||||
stage: deploy-gce
|
||||
<<: *gce
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
except: []
|
||||
|
||||
gce_debian9-calico-upgrade:
|
||||
stage: deploy-gce
|
||||
<<: *gce
|
||||
when: manual
|
||||
|
||||
gce_debian9-calico-triggers:
|
||||
stage: deploy-gce
|
||||
<<: *gce
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
except: []
|
||||
|
||||
gce_coreos-canal:
|
||||
stage: deploy-gce
|
||||
<<: *gce
|
||||
when: manual
|
||||
|
||||
gce_coreos-canal-triggers:
|
||||
stage: deploy-gce
|
||||
<<: *gce
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
except: []
|
||||
|
||||
gce_rhel7-canal-sep:
|
||||
stage: deploy-special
|
||||
<<: *gce
|
||||
when: manual
|
||||
|
||||
gce_rhel7-canal-sep-triggers:
|
||||
stage: deploy-gce
|
||||
<<: *gce
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
except: []
|
||||
|
||||
gce_centos7-calico-ha:
|
||||
stage: deploy-special
|
||||
<<: *gce
|
||||
when: manual
|
||||
|
||||
gce_centos7-calico-ha-triggers:
|
||||
stage: deploy-gce
|
||||
<<: *gce
|
||||
when: on_success
|
||||
only: ['triggers']
|
||||
except: []
|
||||
|
||||
gce_centos7-kube-router:
|
||||
stage: deploy-special
|
||||
<<: *gce
|
||||
when: manual
|
||||
|
||||
gce_centos7-multus-calico:
|
||||
stage: deploy-gce
|
||||
extends: .gce
|
||||
variables:
|
||||
<<: *centos7_multus_calico_variables
|
||||
when: manual
|
||||
|
||||
gce_oracle-canal:
|
||||
stage: deploy-gce
|
||||
<<: *gce
|
||||
when: manual
|
||||
except: ['triggers']
|
||||
only: ['master', /^pr-.*$/]
|
||||
|
||||
gce_opensuse-canal:
|
||||
stage: deploy-gce
|
||||
<<: *gce
|
||||
when: manual
|
||||
|
||||
# no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613
|
||||
gce_coreos-alpha-weave-ha:
|
||||
stage: deploy-special
|
||||
<<: *gce
|
||||
when: manual
|
||||
|
||||
gce_coreos-kube-router:
|
||||
stage: deploy-special
|
||||
<<: *gce
|
||||
when: manual
|
||||
|
||||
gce_ubuntu-kube-router-sep:
|
||||
stage: deploy-special
|
||||
<<: *gce
|
||||
when: manual
|
```
@@ -2,6 +2,7 @@
yamllint:
extends: .job
stage: unit-tests
tags: [light]
variables:
LANG: C.UTF-8
script:

@@ -11,15 +12,17 @@ yamllint:
vagrant-validate:
extends: .job
stage: unit-tests
tags: [light]
variables:
VAGRANT_VERSION: 2.2.4
script:
- curl -sL https://releases.hashicorp.com/vagrant/2.2.4/vagrant_2.2.4_x86_64.deb -o /tmp/vagrant_2.2.4_x86_64.deb
- dpkg -i /tmp/vagrant_2.2.4_x86_64.deb
- vagrant validate --ignore-provider
- ./tests/scripts/vagrant-validate.sh
except: ['triggers', 'master']

ansible-lint:
extends: .job
stage: unit-tests
tags: [light]
# lint every yml/yaml file that looks like it contains Ansible plays
script: |-
grep -Rl '^- hosts: \|^ hosts: ' --include \*.yml --include \*.yaml . | xargs -P 4 -n 25 ansible-lint -v

@@ -28,6 +31,7 @@ ansible-lint:
syntax-check:
extends: .job
stage: unit-tests
tags: [light]
variables:
ANSIBLE_INVENTORY: inventory/local-tests.cfg
ANSIBLE_REMOTE_USER: root

@@ -43,6 +47,7 @@ syntax-check:

tox-inventory-builder:
stage: unit-tests
tags: [light]
extends: .job
before_script:
- ./tests/scripts/rebase.sh

@@ -56,8 +61,16 @@ tox-inventory-builder:

markdownlint:
stage: unit-tests
tags: [light]
image: node
before_script:
- npm install -g markdownlint-cli
script:
- markdownlint README.md docs --ignore docs/_sidebar.md

ci-matrix:
stage: unit-tests
tags: [light]
image: python:3
script:
- tests/scripts/md-table/test.sh
```
@ -14,87 +14,130 @@ packet_ubuntu18-calico-aio:
|
||||
extends: .packet
|
||||
when: on_success
|
||||
|
||||
# ### PR JOBS PART2
|
||||
|
||||
packet_centos7-flannel-addons:
|
||||
extends: .packet
|
||||
stage: deploy-part2
|
||||
when: on_success
|
||||
|
||||
# ### MANUAL JOBS
|
||||
|
||||
packet_centos-weave-kubeadm-sep:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: on_success
|
||||
variables:
|
||||
UPGRADE_TEST: basic
|
||||
|
||||
packet_ubuntu-weave-sep:
|
||||
stage: deploy-part2
|
||||
# Future AIO job
|
||||
packet_ubuntu20-calico-aio:
|
||||
stage: deploy-part1
|
||||
extends: .packet
|
||||
when: manual
|
||||
|
||||
# # More builds for PRs/merges (manual) and triggers (auto)
|
||||
# ### PR JOBS PART2
|
||||
|
||||
packet_ubuntu-canal-ha:
|
||||
packet_centos7-flannel-containerd-addons-ha:
|
||||
extends: .packet
|
||||
stage: deploy-part2
|
||||
when: on_success
|
||||
variables:
|
||||
MITOGEN_ENABLE: "true"
|
||||
|
||||
packet_centos7-crio:
|
||||
extends: .packet
|
||||
stage: deploy-part2
|
||||
when: on_success
|
||||
variables:
|
||||
MITOGEN_ENABLE: "true"
|
||||
|
||||
packet_ubuntu18-crio:
|
||||
extends: .packet
|
||||
stage: deploy-part2
|
||||
when: manual
|
||||
variables:
|
||||
MITOGEN_ENABLE: "true"
|
||||
|
||||
packet_ubuntu16-canal-kubeadm-ha:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: on_success
|
||||
|
||||
packet_ubuntu16-canal-sep:
|
||||
stage: deploy-special
|
||||
extends: .packet
|
||||
when: manual
|
||||
|
||||
packet_ubuntu-canal-kubeadm:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: on_success
|
||||
|
||||
packet_ubuntu-flannel-ha:
|
||||
packet_ubuntu16-flannel-ha:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: manual
|
||||
|
||||
packet_ubuntu16-kube-router-sep:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: manual
|
||||
|
||||
packet_debian10-containerd:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: on_success
|
||||
variables:
|
||||
MITOGEN_ENABLE: "true"
|
||||
|
||||
packet_centos7-calico-ha-once-localhost:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: on_success
|
||||
variables:
|
||||
# This will instruct Docker not to start over TLS.
|
||||
DOCKER_TLS_CERTDIR: ""
|
||||
services:
|
||||
- docker:19.03.9-dind
|
||||
|
||||
packet_centos8-kube-ovn:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: on_success
|
||||
|
||||
packet_centos8-calico:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: on_success
|
||||
|
||||
packet_fedora30-weave:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: on_success
|
||||
|
||||
packet_opensuse-canal:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: on_success
|
||||
|
||||
# Contiv does not work in k8s v1.16
|
||||
# packet_ubuntu-contiv-sep:
|
||||
# packet_ubuntu16-contiv-sep:
|
||||
# stage: deploy-part2
|
||||
# extends: .packet
|
||||
# when: on_success
|
||||
|
||||
# ### MANUAL JOBS
|
||||
|
||||
packet_ubuntu16-weave-sep:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: manual
|
||||
|
||||
packet_ubuntu18-cilium-sep:
|
||||
stage: deploy-special
|
||||
extends: .packet
|
||||
when: manual
|
||||
|
||||
packet_ubuntu18-flannel-containerd:
|
||||
packet_ubuntu18-flannel-containerd-ha:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: manual
|
||||
|
||||
packet_debian9-macvlan-sep:
|
||||
packet_ubuntu18-flannel-containerd-ha-once:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: manual
|
||||
|
||||
packet_debian9-calico-upgrade:
|
||||
packet_debian9-macvlan:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: on_success
|
||||
variables:
|
||||
UPGRADE_TEST: graceful
|
||||
|
||||
packet_debian10-containerd:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: on_success
|
||||
when: manual
|
||||
|
||||
packet_centos7-calico-ha:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: manual
|
||||
|
||||
packet_centos7-kube-ovn:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: on_success
|
||||
|
||||
packet_centos7-kube-router:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
@ -105,22 +148,62 @@ packet_centos7-multus-calico:
|
||||
extends: .packet
|
||||
when: manual
|
||||
|
||||
packet_opensuse-canal:
|
||||
packet_oracle7-canal-ha:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: manual
|
||||
|
||||
packet_oracle-7-canal:
|
||||
packet_fedora31-flannel:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: manual
|
||||
|
||||
packet_ubuntu-kube-router-sep:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: manual
|
||||
when: on_success
|
||||
variables:
|
||||
MITOGEN_ENABLE: "true"
|
||||
|
||||
packet_amazon-linux-2-aio:
|
||||
stage: deploy-part2
|
||||
extends: .packet
|
||||
when: manual
|
||||
|
||||
# ### PR JOBS PART3
|
||||
# Long jobs (45min+)
|
||||
|
||||
packet_centos7-weave-upgrade-ha:
|
||||
stage: deploy-part3
|
||||
extends: .packet
|
||||
when: manual
|
||||
variables:
|
||||
UPGRADE_TEST: basic
|
||||
MITOGEN_ENABLE: "false"
|
||||
|
||||
packet_debian9-calico-upgrade:
|
||||
stage: deploy-part3
|
||||
extends: .packet
|
||||
when: manual
|
||||
variables:
|
||||
UPGRADE_TEST: graceful
|
||||
MITOGEN_ENABLE: "false"
|
||||
|
||||
packet_debian9-calico-upgrade-once:
|
||||
stage: deploy-part3
|
||||
extends: .packet
|
||||
when: manual
|
||||
variables:
|
||||
UPGRADE_TEST: graceful
|
||||
MITOGEN_ENABLE: "false"
|
||||
|
||||
packet_ubuntu18-calico-ha-recover:
|
||||
stage: deploy-part3
|
||||
extends: .packet
|
||||
when: on_success
|
||||
variables:
|
||||
RECOVER_CONTROL_PLANE_TEST: "true"
|
||||
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"
|
||||
|
||||
packet_ubuntu18-calico-ha-recover-noquorum:
|
||||
stage: deploy-part3
|
||||
extends: .packet
|
||||
when: on_success
|
||||
variables:
|
||||
RECOVER_CONTROL_PLANE_TEST: "true"
|
||||
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube-master[1:]"
|
||||
|
.gitlab-ci/shellcheck.yml

```
@@ -2,11 +2,12 @@
shellcheck:
extends: .job
stage: unit-tests
tags: [light]
variables:
SHELLCHECK_VERSION: v0.6.0
before_script:
- ./tests/scripts/rebase.sh
- curl --silent "https://storage.googleapis.com/shellcheck/shellcheck-"${SHELLCHECK_VERSION}".linux.x86_64.tar.xz" | tar -xJv
- curl --silent --location "https://github.com/koalaman/shellcheck/releases/download/"${SHELLCHECK_VERSION}"/shellcheck-"${SHELLCHECK_VERSION}".linux.x86_64.tar.xz" | tar -xJv
- cp shellcheck-"${SHELLCHECK_VERSION}"/shellcheck /usr/bin/
- shellcheck --version
script:
```
@ -22,6 +22,7 @@
|
||||
.terraform_validate:
|
||||
extends: .terraform_install
|
||||
stage: unit-tests
|
||||
tags: [light]
|
||||
only: ['master', /^pr-.*$/]
|
||||
script:
|
||||
- terraform validate -var-file=cluster.tfvars contrib/terraform/$PROVIDER
|
||||
@ -29,9 +30,13 @@
|
||||
|
||||
.terraform_apply:
|
||||
extends: .terraform_install
|
||||
stage: deploy-part2
|
||||
tags: [light]
|
||||
stage: deploy-part3
|
||||
when: manual
|
||||
only: [/^pr-.*$/]
|
||||
artifacts:
|
||||
paths:
|
||||
- cluster-dump/
|
||||
variables:
|
||||
ANSIBLE_INVENTORY_UNPARSED_FAILED: "true"
|
||||
ANSIBLE_INVENTORY: hosts
|
||||
@ -42,33 +47,33 @@
|
||||
- tests/scripts/testcases_run.sh
|
||||
after_script:
|
||||
# Cleanup regardless of exit code
|
||||
- ./tests/scripts/testcases_cleanup.sh
|
||||
- chronic ./tests/scripts/testcases_cleanup.sh
|
||||
|
||||
tf-validate-openstack:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: 0.12.12
|
||||
TF_VERSION: 0.12.24
|
||||
PROVIDER: openstack
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
tf-validate-packet:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: 0.12.12
|
||||
TF_VERSION: 0.12.24
|
||||
PROVIDER: packet
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
tf-validate-aws:
|
||||
extends: .terraform_validate
|
||||
variables:
|
||||
TF_VERSION: 0.12.12
|
||||
TF_VERSION: 0.12.24
|
||||
PROVIDER: aws
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
|
||||
# tf-packet-ubuntu16-default:
|
||||
# extends: .terraform_apply
|
||||
# variables:
|
||||
# TF_VERSION: 0.12.12
|
||||
# TF_VERSION: 0.12.24
|
||||
# PROVIDER: packet
|
||||
# CLUSTER: $CI_COMMIT_REF_NAME
|
||||
# TF_VAR_number_of_k8s_masters: "1"
|
||||
@ -82,7 +87,7 @@ tf-validate-aws:
|
||||
# tf-packet-ubuntu18-default:
|
||||
# extends: .terraform_apply
|
||||
# variables:
|
||||
# TF_VERSION: 0.12.12
|
||||
# TF_VERSION: 0.12.24
|
||||
# PROVIDER: packet
|
||||
# CLUSTER: $CI_COMMIT_REF_NAME
|
||||
# TF_VAR_number_of_k8s_masters: "1"
|
||||
@ -93,69 +98,80 @@ tf-validate-aws:
|
||||
# TF_VAR_public_key_path: ""
|
||||
# TF_VAR_operating_system: ubuntu_18_04
|
||||
|
||||
.ovh_variables: &ovh_variables
|
||||
OS_AUTH_URL: https://auth.cloud.ovh.net/v3
|
||||
OS_PROJECT_ID: 8d3cd5d737d74227ace462dee0b903fe
|
||||
OS_PROJECT_NAME: "9361447987648822"
|
||||
OS_USER_DOMAIN_NAME: Default
|
||||
OS_PROJECT_DOMAIN_ID: default
|
||||
OS_USERNAME: 8XuhBMfkKVrk
|
||||
OS_REGION_NAME: UK1
|
||||
OS_INTERFACE: public
|
||||
OS_IDENTITY_API_VERSION: "3"
|
||||
# .ovh_variables: &ovh_variables
|
||||
# OS_AUTH_URL: https://auth.cloud.ovh.net/v3
|
||||
# OS_PROJECT_ID: 8d3cd5d737d74227ace462dee0b903fe
|
||||
# OS_PROJECT_NAME: "9361447987648822"
|
||||
# OS_USER_DOMAIN_NAME: Default
|
||||
# OS_PROJECT_DOMAIN_ID: default
|
||||
# OS_USERNAME: 8XuhBMfkKVrk
|
||||
# OS_REGION_NAME: UK1
|
||||
# OS_INTERFACE: public
|
||||
# OS_IDENTITY_API_VERSION: "3"
|
||||
|
||||
tf-ovh_ubuntu18-calico:
|
||||
extends: .terraform_apply
|
||||
when: on_success
|
||||
variables:
|
||||
<<: *ovh_variables
|
||||
TF_VERSION: 0.12.12
|
||||
PROVIDER: openstack
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
ANSIBLE_TIMEOUT: "60"
|
||||
SSH_USER: ubuntu
|
||||
TF_VAR_number_of_k8s_masters: "0"
|
||||
TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
|
||||
TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
|
||||
TF_VAR_number_of_etcd: "0"
|
||||
TF_VAR_number_of_k8s_nodes: "0"
|
||||
TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
|
||||
TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
|
||||
TF_VAR_number_of_bastions: "0"
|
||||
TF_VAR_number_of_k8s_masters_no_etcd: "0"
|
||||
TF_VAR_use_neutron: "0"
|
||||
TF_VAR_floatingip_pool: "Ext-Net"
|
||||
TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
|
||||
TF_VAR_network_name: "Ext-Net"
|
||||
TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
|
||||
TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
|
||||
TF_VAR_image: "Ubuntu 18.04"
|
||||
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
|
||||
# tf-ovh_cleanup:
|
||||
# stage: unit-tests
|
||||
# tags: [light]
|
||||
# image: python
|
||||
# variables:
|
||||
# <<: *ovh_variables
|
||||
# before_script:
|
||||
# - pip install -r scripts/openstack-cleanup/requirements.txt
|
||||
# script:
|
||||
# - ./scripts/openstack-cleanup/main.py
|
||||
|
||||
tf-ovh_coreos-calico:
|
||||
extends: .terraform_apply
|
||||
when: on_success
|
||||
variables:
|
||||
<<: *ovh_variables
|
||||
TF_VERSION: 0.12.12
|
||||
PROVIDER: openstack
|
||||
CLUSTER: $CI_COMMIT_REF_NAME
|
||||
ANSIBLE_TIMEOUT: "60"
|
||||
SSH_USER: core
|
||||
TF_VAR_number_of_k8s_masters: "0"
|
||||
TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
|
||||
TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
|
||||
TF_VAR_number_of_etcd: "0"
|
||||
TF_VAR_number_of_k8s_nodes: "0"
|
||||
TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
|
||||
TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
|
||||
TF_VAR_number_of_bastions: "0"
|
||||
TF_VAR_number_of_k8s_masters_no_etcd: "0"
|
||||
TF_VAR_use_neutron: "0"
|
||||
TF_VAR_floatingip_pool: "Ext-Net"
|
||||
TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
|
||||
TF_VAR_network_name: "Ext-Net"
|
||||
TF_VAR_flavor_k8s_master: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
|
||||
TF_VAR_flavor_k8s_node: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
|
||||
TF_VAR_image: "CoreOS Stable"
|
||||
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
|
||||
# tf-ovh_ubuntu18-calico:
|
||||
# extends: .terraform_apply
|
||||
# when: on_success
|
||||
# variables:
|
||||
# <<: *ovh_variables
|
||||
# TF_VERSION: 0.12.24
|
||||
# PROVIDER: openstack
|
||||
# CLUSTER: $CI_COMMIT_REF_NAME
|
||||
# ANSIBLE_TIMEOUT: "60"
|
||||
# SSH_USER: ubuntu
|
||||
# TF_VAR_number_of_k8s_masters: "0"
|
||||
# TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
|
||||
# TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
|
||||
# TF_VAR_number_of_etcd: "0"
|
||||
# TF_VAR_number_of_k8s_nodes: "0"
|
||||
# TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
|
||||
# TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
|
||||
# TF_VAR_number_of_bastions: "0"
|
||||
# TF_VAR_number_of_k8s_masters_no_etcd: "0"
|
||||
# TF_VAR_use_neutron: "0"
|
||||
# TF_VAR_floatingip_pool: "Ext-Net"
|
||||
# TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
|
||||
# TF_VAR_network_name: "Ext-Net"
|
||||
# TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
|
||||
# TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
|
||||
# TF_VAR_image: "Ubuntu 18.04"
|
||||
# TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
|
||||
|
||||
# tf-ovh_coreos-calico:
|
||||
# extends: .terraform_apply
|
||||
# when: on_success
|
||||
# variables:
|
||||
# <<: *ovh_variables
|
||||
# TF_VERSION: 0.12.24
|
||||
# PROVIDER: openstack
|
||||
# CLUSTER: $CI_COMMIT_REF_NAME
|
||||
# ANSIBLE_TIMEOUT: "60"
|
||||
# SSH_USER: core
|
||||
# TF_VAR_number_of_k8s_masters: "0"
|
||||
# TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
|
||||
# TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
|
||||
# TF_VAR_number_of_etcd: "0"
|
||||
# TF_VAR_number_of_k8s_nodes: "0"
|
||||
# TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
|
||||
# TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
|
||||
# TF_VAR_number_of_bastions: "0"
|
||||
# TF_VAR_number_of_k8s_masters_no_etcd: "0"
|
||||
# TF_VAR_use_neutron: "0"
|
||||
# TF_VAR_floatingip_pool: "Ext-Net"
|
||||
# TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
|
||||
# TF_VAR_network_name: "Ext-Net"
|
||||
# TF_VAR_flavor_k8s_master: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
|
||||
# TF_VAR_flavor_k8s_node: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
|
||||
# TF_VAR_image: "CoreOS Stable"
|
||||
# TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
|
||||
|
.gitlab-ci/vagrant.yml (new file, 49 lines)
```
@@ -0,0 +1,49 @@
---

molecule_tests:
tags: [c3.small.x86]
only: [/^pr-.*$/]
except: ['triggers']
image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
services: []
stage: deploy-part1
before_script:
- tests/scripts/rebase.sh
- apt-get update && apt-get install -y python3-pip
- update-alternatives --install /usr/bin/python python /usr/bin/python3 10
- python -m pip install -r tests/requirements.txt
- ./tests/scripts/vagrant_clean.sh
script:
- ./tests/scripts/molecule_run.sh

.vagrant:
extends: .testcases
variables:
CI_PLATFORM: "vagrant"
SSH_USER: "kubespray"
VAGRANT_DEFAULT_PROVIDER: "libvirt"
KUBESPRAY_VAGRANT_CONFIG: tests/files/${CI_JOB_NAME}.rb
tags: [c3.small.x86]
only: [/^pr-.*$/]
except: ['triggers']
image: quay.io/kubespray/vagrant:$KUBESPRAY_VERSION
services: []
before_script:
- apt-get update && apt-get install -y python3-pip
- update-alternatives --install /usr/bin/python python /usr/bin/python3 10
- python -m pip install -r tests/requirements.txt
- ./tests/scripts/vagrant_clean.sh
script:
- vagrant up
after_script:
- vagrant destroy --force

vagrant_ubuntu18-flannel:
stage: deploy-part2
extends: .vagrant
when: on_success

vagrant_ubuntu18-weave-medium:
stage: deploy-part2
extends: .vagrant
when: manual
```
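The `.vagrant` template above drives the Vagrant setup purely through environment variables (`KUBESPRAY_VAGRANT_CONFIG`, `VAGRANT_DEFAULT_PROVIDER`) before calling `vagrant up`. A minimal local sketch of the same pattern, assuming a scenario file named after one of the jobs above exists under `tests/files/`:

```sh
# Assumptions: libvirt is available and tests/files/vagrant_ubuntu18-flannel.rb exists;
# the file name only mirrors the CI convention KUBESPRAY_VAGRANT_CONFIG=tests/files/${CI_JOB_NAME}.rb.
export VAGRANT_DEFAULT_PROVIDER=libvirt
export KUBESPRAY_VAGRANT_CONFIG=tests/files/vagrant_ubuntu18-flannel.rb
vagrant up               # what the CI 'script' step runs
vagrant destroy --force  # what the CI 'after_script' step runs
```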
CONTRIBUTING.md

```
@@ -2,10 +2,30 @@

## How to become a contributor and submit your own code

### Environment setup

It is recommended to use filter to manage the GitHub email notification, see [examples for setting filters to Kubernetes Github notifications](https://github.com/kubernetes/community/blob/master/communication/best-practices.md#examples-for-setting-filters-to-kubernetes-github-notifications)

To install development dependencies you can use `pip install -r tests/requirements.txt`

#### Linting

Kubespray uses `yamllint` and `ansible-lint`. To run them locally use `yamllint .` and `./tests/scripts/ansible-lint.sh`

#### Molecule

[molecule](https://github.com/ansible-community/molecule) is designed to help the development and testing of Ansible roles. In Kubespray you can run it all for all roles with `./tests/scripts/molecule_run.sh` or for a specific role (that you are working with) with `molecule test` from the role directory (`cd roles/my-role`).

When developing or debugging a role it can be useful to run `molecule create` and `molecule converge` separately. Then you can use `molecule login` to SSH into the test environment.
```
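The per-role workflow described in that hunk can be split into its individual stages when iterating on a role; a minimal sketch, using the placeholder role name from the text:

```sh
cd roles/my-role      # placeholder role directory, as in the example above
molecule create       # provision the test instance(s) only
molecule converge     # apply the role to them
molecule login        # SSH into the test environment to inspect or debug
molecule test         # run the full scenario end to end
```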
```
#### Vagrant

Vagrant with VirtualBox or libvirt driver helps you to quickly spin test clusters to test things end to end. See [README.md#vagrant](README.md)

### Contributing A Patch

1. Submit an issue describing your proposed change to the repo in question.
2. The [repo owners](OWNERS) will respond to your issue promptly.
3. Fork the desired repo, develop and test your code changes.
4. Sign the CNCF CLA (https://git.k8s.io/community/CLA.md#the-contributor-license-agreement)
4. Sign the CNCF CLA (<https://git.k8s.io/community/CLA.md#the-contributor-license-agreement>)
5. Submit a pull request.
```
Dockerfile

```
@@ -4,7 +4,7 @@ RUN mkdir /kubespray
WORKDIR /kubespray
RUN apt update -y && \
apt install -y \
libssl-dev python3-dev sshpass apt-transport-https jq \
libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
ca-certificates curl gnupg2 software-properties-common python3-pip rsync
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
add-apt-repository \

@@ -14,5 +14,8 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - &&
&& apt update -y && apt-get install docker-ce -y
COPY . .
RUN /usr/bin/python3 -m pip install pip -U && /usr/bin/python3 -m pip install -r tests/requirements.txt && python3 -m pip install -r requirements.txt && update-alternatives --install /usr/bin/python python /usr/bin/python3 1
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.14.4/bin/linux/amd64/kubectl \
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.17.5/bin/linux/amd64/kubectl \
&& chmod a+x kubectl && cp kubectl /usr/local/bin/kubectl

# Some tools like yamllint need this
ENV LANG=C.UTF-8
```
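A quick way to sanity-check the image built from this Dockerfile is a local build followed by a client-only kubectl call; the tag name below is arbitrary, not something the repo defines:

```sh
# Hypothetical tag; the Dockerfile above installs the Ansible requirements, Docker CE and kubectl v1.17.5.
docker build -t kubespray-ci .
docker run --rm kubespray-ci kubectl version --client
```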
Makefile (2 changes)
```
@@ -1,5 +1,5 @@
mitogen:
ansible-playbook -c local mitogen.yaml -vv
ansible-playbook -c local mitogen.yml -vv
clean:
rm -rf dist/
rm *.retry
```
OWNERS_ALIASES

```
@@ -9,7 +9,11 @@ aliases:
- riverzhang
- verwilst
- woopstar
- luckysb
kubespray-reviewers:
- jjungnickel
- archifleks
- holmsten
- bozzo
- floryut
- eppo
```
README.md (53 changes)
````
@@ -2,7 +2,7 @@

![](https://s28.postimg.org/lf3q4ocpp/k8s.png)

If you have questions, check the [documentation](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
If you have questions, check the documentation at [kubespray.io](https://kubespray.io) and join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
You can get your invite [here](http://slack.k8s.io/)

- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere, Packet (bare metal), Oracle Cloud Infrastructure (Experimental), or Baremetal**

@@ -21,14 +21,14 @@ To deploy the cluster you can use :

```ShellSession
# Install dependencies from ``requirements.txt``
sudo pip install -r requirements.txt
sudo pip3 install -r requirements.txt

# Copy ``inventory/sample`` as ``inventory/mycluster``
cp -rfp inventory/sample inventory/mycluster

# Update Ansible inventory file with inventory builder
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
CONFIG_FILE=inventory/mycluster/inventory.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]}

# Review and change parameters under ``inventory/mycluster/group_vars``
cat inventory/mycluster/group_vars/all/all.yml

@@ -38,7 +38,7 @@ cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml
# The option `--become` is required, as for example writing SSL keys in /etc/,
# installing packages and interacting with various systemd daemons.
# Without --become the playbook will fail to run!
ansible-playbook -i inventory/mycluster/inventory.ini --become --become-user=root cluster.yml
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```

Note: When Ansible is already installed via system packages on the control machine, other python packages installed via `sudo pip install -r requirements.txt` will go to a different directory tree (e.g. `/usr/local/lib/python2.7/dist-packages` on Ubuntu) from Ansible's (e.g. `/usr/lib/python2.7/dist-packages/ansible` still on Ubuntu).

@@ -83,6 +83,7 @@ vagrant up
- [Network plugins](#network-plugins)
- [Vagrant install](docs/vagrant.md)
- [CoreOS bootstrap](docs/coreos.md)
- [Fedora CoreOS bootstrap](docs/fcos.md)
- [Debian Jessie setup](docs/debian.md)
- [openSUSE setup](docs/opensuse.md)
- [Downloaded artifacts](docs/downloads.md)

@@ -93,6 +94,7 @@ vagrant up
- [vSphere](docs/vsphere.md)
- [Packet Host](docs/packet.md)
- [Large deployments](docs/large-deployments.md)
- [Adding/replacing a node](docs/nodes.md)
- [Upgrades basics](docs/upgrades.md)
- [Roadmap](docs/roadmap.md)

@@ -101,9 +103,9 @@ vagrant up
- **Container Linux by CoreOS**
- **Debian** Buster, Jessie, Stretch, Wheezy
- **Ubuntu** 16.04, 18.04
- **CentOS/RHEL** 7
- **Fedora** 28
- **Fedora/CentOS** Atomic
- **CentOS/RHEL** 7, 8 (experimental: see [centos 8 notes](docs/centos8.md)
- **Fedora** 30, 31
- **Fedora CoreOS** (experimental: see [fcos Note](docs/fcos.md))
- **openSUSE** Leap 42.3/Tumbleweed
- **Oracle Linux** 7

@@ -112,34 +114,34 @@ Note: Upstart/SysV init based OS types are not supported.
## Supported Components

- Core
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.16.3
- [etcd](https://github.com/coreos/etcd) v3.3.10
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.17.9
- [etcd](https://github.com/coreos/etcd) v3.3.12
- [docker](https://www.docker.com/) v18.06 (see note)
- [cri-o](http://cri-o.io/) v1.14.0 (experimental: see [CRI-O Note](docs/cri-o.md). Only on centos based OS)
- [containerd](https://containerd.io/) v1.2.13
- [cri-o](http://cri-o.io/) v1.17 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS)
- Network Plugin
- [cni-plugins](https://github.com/containernetworking/plugins) v0.8.1
- [calico](https://github.com/projectcalico/calico) v3.7.3
- [cni-plugins](https://github.com/containernetworking/plugins) v0.8.6
- [calico](https://github.com/projectcalico/calico) v3.13.2
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
- [cilium](https://github.com/cilium/cilium) v1.5.5
- [cilium](https://github.com/cilium/cilium) v1.7.2
- [contiv](https://github.com/contiv/install) v1.2.1
- [flanneld](https://github.com/coreos/flannel) v0.11.0
- [kube-router](https://github.com/cloudnativelabs/kube-router) v0.2.5
- [multus](https://github.com/intel/multus-cni) v3.2.1
- [weave](https://github.com/weaveworks/weave) v2.5.2
- [flanneld](https://github.com/coreos/flannel) v0.12.0
- [kube-router](https://github.com/cloudnativelabs/kube-router) v0.4.0
- [multus](https://github.com/intel/multus-cni) v3.4.1
- [weave](https://github.com/weaveworks/weave) v2.6.2
- Application
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
- [rbd-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.1-k8s1.11
- [cert-manager](https://github.com/jetstack/cert-manager) v0.11.0
- [coredns](https://github.com/coredns/coredns) v1.6.0
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.26.1
- [cert-manager](https://github.com/jetstack/cert-manager) v0.11.1
- [coredns](https://github.com/coredns/coredns) v1.6.5
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.30.0

Note: The list of validated [docker versions](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.16.md) was updated to 1.13.1, 17.03, 17.06, 17.09, 18.06, 18.09. kubeadm now properly recognizes Docker 18.09.0 and newer, but still treats 18.06 as the default supported version. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).

## Requirements

- **Minimum required version of Kubernetes is v1.15**
- **Ansible v2.7.8 and python-netaddr is installed on the machine that will run Ansible commands**
- **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
- **Ansible v2.9+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands**
- The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/downloads.md#offline-environment))
- The target servers are configured to allow **IPv4 forwarding**.
- **Your ssh key must be copied** to all the servers part of your inventory.

@@ -163,7 +165,10 @@ You can choose between 10 network plugins. (default: `calico`, except Vagrant us

- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.

- [calico](docs/calico.md): bgp (layer 3) networking.
- [Calico](https://docs.projectcalico.org/latest/introduction/) is a networking and network policy provider. Calico supports a flexible set of networking options
designed to give you the most efficient networking across a range of situations, including non-overlay
and overlay networks, with or without BGP. Calico uses the same engine to enforce network policy for hosts,
pods, and (if using Istio and Envoy) applications at the service mesh layer.

- [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.

@@ -195,7 +200,7 @@ See also [Network checker](docs/netcheck.md).
- [kubernetes.io/docs/setup/production-environment/tools/kubespray/](https://kubernetes.io/docs/setup/production-environment/tools/kubespray/)
- [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
- [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
- [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=N9q51JgbWu8)
- [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=CJ5G4GpqDy0)

## Tools and projects on top of Kubespray
````
RELEASE.md (45 changes)
```
@@ -4,40 +4,45 @@ The Kubespray Project is released on an as-needed basis. The process is as follo

1. An issue is proposing a new release with a changelog since the last release
2. At least one of the [approvers](OWNERS_ALIASES) must approve this release
3. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
4. An approver creates a release branch in the form `release-vX.Y`
5. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) docker image is built and tagged
6. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
7. The release issue is closed
8. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
3. The `kube_version_min_required` variable is set to `n-1`
4. Remove hashes for [EOL versions](https://github.com/kubernetes/sig-release/blob/master/releases/patch-releases.md) of kubernetes from `*_checksums` variables.
5. An approver creates [new release in GitHub](https://github.com/kubernetes-sigs/kubespray/releases/new) using a version and tag name like `vX.Y.Z` and attaching the release notes
6. An approver creates a release branch in the form `release-X.Y`
7. The corresponding version of [quay.io/kubespray/kubespray:vX.Y.Z](https://quay.io/repository/kubespray/kubespray) and [quay.io/kubespray/vagrant:vX.Y.Z](https://quay.io/repository/kubespray/vagrant) docker images are built and tagged
8. The `KUBESPRAY_VERSION` variable is updated in `.gitlab-ci.yml`
9. The release issue is closed
10. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
11. The topic of the #kubespray channel is updated with `vX.Y.Z is released! | ...`

## Major/minor releases, merge freezes and milestones
## Major/minor releases and milestones

* Kubespray maintains one branch for major releases (vX.Y). Minor releases are available only as tags.
* For major releases (vX.Y) Kubespray maintains one branch (`release-X.Y`). Minor releases (vX.Y.Z) are available only as tags.

* Security patches and bugs might be backported.

* Fixes for major releases (vX.x.0) and minor releases (vX.Y.x) are delivered
* Fixes for major releases (vX.Y) and minor releases (vX.Y.Z) are delivered
via maintenance releases (vX.Y.Z) and assigned to the corresponding open
milestone (vX.Y). That milestone remains open for the major/minor releases
support lifetime, which ends once the milestone closed. Then only a next major
or minor release can be done.
[GitHub milestone](https://github.com/kubernetes-sigs/kubespray/milestones).
That milestone remains open for the major/minor releases support lifetime,
which ends once the milestone is closed. Then only a next major or minor release
can be done.

* Kubespray major and minor releases are bound to the given ``kube_version`` major/minor
* Kubespray major and minor releases are bound to the given `kube_version` major/minor
version numbers and other components' arbitrary versions, like etcd or network plugins.
Older or newer versions are not supported and not tested for the given release.
Older or newer component versions are not supported and not tested for the given
release (even if included in the checksum variables, like `kubeadm_checksums`).

* There is no unstable releases and no APIs, thus Kubespray doesn't follow
[semver](http://semver.org/). Every version describes only a stable release.
[semver](https://semver.org/). Every version describes only a stable release.
Breaking changes, if any introduced by changed defaults or non-contrib ansible roles'
playbooks, shall be described in the release notes. Other breaking changes, if any in
the contributed addons or bound versions of Kubernetes and other components, are
considered out of Kubespray scope and are up to the components' teams to deal with and
document.

* Minor releases can change components' versions, but not the major ``kube_version``.
Greater ``kube_version`` requires a new major or minor release. For example, if Kubespray v2.0.0
is bound to ``kube_version: 1.4.x``, ``calico_version: 0.22.0``, ``etcd_version: v3.0.6``,
then Kubespray v2.1.0 may be bound to only minor changes to ``kube_version``, like v1.5.1
* Minor releases can change components' versions, but not the major `kube_version`.
Greater `kube_version` requires a new major or minor release. For example, if Kubespray v2.0.0
is bound to `kube_version: 1.4.x`, `calico_version: 0.22.0`, `etcd_version: v3.0.6`,
then Kubespray v2.1.0 may be bound to only minor changes to `kube_version`, like v1.5.1
and *any* changes to other components, like etcd v4, or calico 1.2.3.
And Kubespray v3.x.x shall be bound to ``kube_version: 2.x.x`` respectively.
And Kubespray v3.x.x shall be bound to `kube_version: 2.x.x` respectively.
```
SECURITY_CONTACTS

```
@@ -1,13 +1,13 @@
# Defined below are the security contacts for this repo.
#
# They are the contact point for the Product Security Team to reach out
# They are the contact point for the Product Security Committee to reach out
# to for triaging and handling of incoming issues.
#
# The below names agree to abide by the
# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy)
# and will be removed and replaced if they violate that agreement.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/
atoms
mattymo
mattymo
```
Vagrantfile (vendored, 97 changes)
@ -7,63 +7,72 @@ require 'fileutils'
|
||||
|
||||
Vagrant.require_version ">= 2.0.0"
|
||||
|
||||
CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")
|
||||
CONFIG = File.join(File.dirname(__FILE__), ENV['KUBESPRAY_VAGRANT_CONFIG'] || 'vagrant/config.rb')
|
||||
|
||||
COREOS_URL_TEMPLATE = "https://storage.googleapis.com/%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json"
|
||||
FLATCAR_URL_TEMPLATE = "https://%s.release.flatcar-linux.net/amd64-usr/current/flatcar_production_vagrant.json"
|
||||
|
||||
# Uniq disk UUID for libvirt
|
||||
DISK_UUID = Time.now.utc.to_i
|
||||
|
||||
SUPPORTED_OS = {
|
||||
"coreos-stable" => {box: "coreos-stable", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
|
||||
"coreos-alpha" => {box: "coreos-alpha", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
|
||||
"coreos-beta" => {box: "coreos-beta", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
|
||||
"ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
|
||||
"ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
|
||||
"centos" => {box: "centos/7", user: "vagrant"},
|
||||
"centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
|
||||
"fedora" => {box: "fedora/28-cloud-base", user: "vagrant"},
|
||||
"opensuse" => {box: "opensuse/openSUSE-15.0-x86_64", user: "vagrant"},
|
||||
"opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", user: "vagrant"},
|
||||
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
|
||||
"coreos-stable" => {box: "coreos-stable", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
|
||||
"coreos-alpha" => {box: "coreos-alpha", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
|
||||
"coreos-beta" => {box: "coreos-beta", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
|
||||
"flatcar-stable" => {box: "flatcar-stable", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["stable"]},
|
||||
"flatcar-beta" => {box: "flatcar-beta", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["beta"]},
|
||||
"flatcar-alpha" => {box: "flatcar-alpha", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["alpha"]},
|
||||
"flatcar-edge" => {box: "flatcar-edge", user: "core", box_url: FLATCAR_URL_TEMPLATE % ["edge"]},
|
||||
"ubuntu1604" => {box: "generic/ubuntu1604", user: "vagrant"},
|
||||
"ubuntu1804" => {box: "generic/ubuntu1804", user: "vagrant"},
|
||||
"ubuntu2004" => {box: "geerlingguy/ubuntu2004", user: "vagrant"},
|
||||
"centos" => {box: "centos/7", user: "vagrant"},
|
||||
"centos-bento" => {box: "bento/centos-7.6", user: "vagrant"},
|
||||
"centos8" => {box: "centos/8", user: "vagrant"},
|
||||
"centos8-bento" => {box: "bento/centos-8", user: "vagrant"},
|
||||
"fedora30" => {box: "fedora/30-cloud-base", user: "vagrant"},
|
||||
"fedora31" => {box: "fedora/31-cloud-base", user: "vagrant"},
|
||||
"opensuse" => {box: "bento/opensuse-leap-15.1", user: "vagrant"},
|
||||
"opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
|
||||
"oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
|
||||
}
|
||||
|
||||
# Defaults for config options defined in CONFIG
$num_instances = 3
$instance_name_prefix = "k8s"
$vm_gui = false
$vm_memory = 2048
$vm_cpus = 1
$shared_folders = {}
$forwarded_ports = {}
$subnet = "172.17.8"
$os = "ubuntu1804"
$network_plugin = "flannel"
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
$multi_networking = false
# The first three nodes are etcd servers
$etcd_instances = $num_instances
# The first two nodes are kube masters
$kube_master_instances = $num_instances == 1 ? $num_instances : ($num_instances - 1)
# All nodes are kube nodes
$kube_node_instances = $num_instances
# The following only works when using the libvirt provider
$kube_node_instances_with_disks = false
$kube_node_instances_with_disks_size = "20G"
$kube_node_instances_with_disks_number = 2
$override_disk_size = false
$disk_size = "20GB"
$local_path_provisioner_enabled = false
$local_path_provisioner_claim_root = "/opt/local-path-provisioner/"

$playbook = "cluster.yml"

host_vars = {}

if File.exist?(CONFIG)
  require CONFIG
end

# Defaults for config options defined in CONFIG
$num_instances ||= 3
$instance_name_prefix ||= "k8s"
$vm_gui ||= false
$vm_memory ||= 2048
$vm_cpus ||= 1
$shared_folders ||= {}
$forwarded_ports ||= {}
$subnet ||= "172.18.8"
$os ||= "ubuntu1804"
$network_plugin ||= "flannel"
# Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
$multi_networking ||= false
# The first three nodes are etcd servers
$etcd_instances ||= $num_instances
# The first two nodes are kube masters
$kube_master_instances ||= $num_instances == 1 ? $num_instances : ($num_instances - 1)
# All nodes are kube nodes
$kube_node_instances ||= $num_instances
# The following only works when using the libvirt provider
$kube_node_instances_with_disks ||= false
$kube_node_instances_with_disks_size ||= "20G"
$kube_node_instances_with_disks_number ||= 2
$override_disk_size ||= false
$disk_size ||= "20GB"
$local_path_provisioner_enabled ||= false
$local_path_provisioner_claim_root ||= "/opt/local-path-provisioner/"

$playbook = "cluster.yml"

host_vars = {}

$box = SUPPORTED_OS[$os][:box]
# if $inventory is not set, try to use example
$inventory = "inventory/sample" if ! $inventory

@@ -11,7 +11,9 @@ host_key_checking=False
gathering = smart
fact_caching = jsonfile
fact_caching_connection = /tmp
stdout_callback = skippy
fact_caching_timeout = 7200
stdout_callback = default
display_skipped_hosts = no
library = ./library
callback_whitelist = profile_tasks
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles

ansible_version.yml (new file)
@@ -0,0 +1,15 @@
---
- hosts: localhost
  gather_facts: false
  become: no
  vars:
    minimal_ansible_version: 2.8.0
    ansible_connection: local
  tasks:
    - name: "Check ansible version >={{ minimal_ansible_version }}"
      assert:
        msg: "Ansible must be {{ minimal_ansible_version }} or higher"
        that:
          - ansible_version.string is version(minimal_ansible_version, ">=")
      tags:
        - check

cluster.yml
@@ -1,44 +1,53 @@
---
|
||||
- hosts: localhost
|
||||
- name: Check ansible version
|
||||
import_playbook: ansible_version.yml
|
||||
|
||||
- hosts: all
|
||||
gather_facts: false
|
||||
become: no
|
||||
tasks:
|
||||
- name: "Check ansible version >=2.7.8"
|
||||
assert:
|
||||
msg: "Ansible must be v2.7.8 or higher"
|
||||
that:
|
||||
- ansible_version.string is version("2.7.8", ">=")
|
||||
tags:
|
||||
- check
|
||||
vars:
|
||||
ansible_connection: local
|
||||
- name: "Set up proxy environment"
|
||||
set_fact:
|
||||
proxy_env:
|
||||
http_proxy: "{{ http_proxy | default ('') }}"
|
||||
HTTP_PROXY: "{{ http_proxy | default ('') }}"
|
||||
https_proxy: "{{ https_proxy | default ('') }}"
|
||||
HTTPS_PROXY: "{{ https_proxy | default ('') }}"
|
||||
no_proxy: "{{ no_proxy | default ('') }}"
|
||||
NO_PROXY: "{{ no_proxy | default ('') }}"
|
||||
no_log: true
|
||||
|
||||
- hosts: bastion[0]
|
||||
gather_facts: False
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
|
||||
- { role: kubespray-defaults }
|
||||
- { role: bastion-ssh-config, tags: ["localhost", "bastion"] }
|
||||
|
||||
- hosts: k8s-cluster:etcd
|
||||
strategy: linear
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
gather_facts: false
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubespray-defaults }
|
||||
- { role: bootstrap-os, tags: bootstrap-os}
|
||||
|
||||
- name: Gather facts
|
||||
import_playbook: facts.yml
|
||||
|
||||
- hosts: k8s-cluster:etcd
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubespray-defaults }
|
||||
- { role: kubernetes/preinstall, tags: preinstall }
|
||||
- { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) }
|
||||
- { role: download, tags: download, when: "not skip_downloads" }
|
||||
environment: "{{ proxy_env }}"
|
||||
|
||||
- hosts: etcd
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubespray-defaults }
|
||||
- role: etcd
|
||||
tags: etcd
|
||||
vars:
|
||||
@ -47,9 +56,10 @@
|
||||
when: not etcd_kubeadm_enabled| default(false)
|
||||
|
||||
- hosts: k8s-cluster
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubespray-defaults }
|
||||
- role: etcd
|
||||
tags: etcd
|
||||
vars:
|
||||
@ -58,59 +68,68 @@
|
||||
when: not etcd_kubeadm_enabled| default(false)
|
||||
|
||||
- hosts: k8s-cluster
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubespray-defaults }
|
||||
- { role: kubernetes/node, tags: node }
|
||||
environment: "{{ proxy_env }}"
|
||||
|
||||
- hosts: kube-master
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubespray-defaults }
|
||||
- { role: kubernetes/master, tags: master }
|
||||
- { role: kubernetes/client, tags: client }
|
||||
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
|
||||
|
||||
- hosts: k8s-cluster
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubespray-defaults }
|
||||
- { role: kubernetes/kubeadm, tags: kubeadm}
|
||||
- { role: network_plugin, tags: network }
|
||||
- { role: kubernetes/node-label }
|
||||
- { role: kubernetes/node-label, tags: node-label }
|
||||
|
||||
- hosts: calico-rr
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: network_plugin/calico/rr, tags: ['network', 'calico_rr']}
|
||||
- { role: kubespray-defaults }
|
||||
- { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }
|
||||
|
||||
- hosts: kube-master[0]
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubespray-defaults }
|
||||
- { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
|
||||
- { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"]}
|
||||
- { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
|
||||
|
||||
- hosts: kube-master
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubespray-defaults }
|
||||
- { role: kubernetes-apps/external_cloud_controller, tags: external-cloud-controller }
|
||||
- { role: kubernetes-apps/network_plugin, tags: network }
|
||||
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
|
||||
- { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
|
||||
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
|
||||
|
||||
- hosts: kube-master
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubespray-defaults }
|
||||
- { role: kubernetes-apps, tags: apps }
|
||||
environment: "{{ proxy_env }}"
|
||||
|
||||
- hosts: k8s-cluster
|
||||
gather_facts: False
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: kubespray-defaults }
|
||||
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf, dns_late: true }
|
||||
|
@@ -15,8 +15,9 @@ Resource Group. It will not install Kubernetes itself, this has to be done in a

## Configuration through group_vars/all

You have to modify at least one variable in group_vars/all, which is the **cluster_name** variable. It must be globally
unique due to some restrictions in Azure. Most other variables should be self explanatory if you have some basic Kubernetes
You have to modify at least two variables in group_vars/all. The first is the **cluster_name** variable; it must be globally
unique due to some restrictions in Azure. The second is the **ssh_public_keys** variable; it must be set to your SSH public
key so you can access your Azure virtual machines. Most other variables should be self-explanatory if you have some basic Kubernetes
experience.
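As a rough illustration, the two required settings could be placed in `group_vars/all` along these lines. The values, and the exact key format expected for `ssh_public_keys`, are illustrative assumptions rather than part of this change; check the sample file shipped with contrib/azurerm for the authoritative layout.

```yaml
# group_vars/all -- illustrative sketch only
cluster_name: my-azure-k8s                                            # must be globally unique in Azure
ssh_public_keys: "ssh-rsa AAAAB3Nza...rest-of-key user@example.com"   # public key used to reach the Azure VMs
```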

## Bastion host
@@ -59,6 +60,6 @@ It will create the file ./inventory which can then be used with kubespray, e.g.:

```shell
$ cd kubespray-root-dir
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all.yml" cluster.yml
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all/all.yml" cluster.yml
```

@@ -11,9 +11,9 @@ fi

ansible-playbook generate-templates.yml

az group deployment create --template-file ./.generated/network.json -g $AZURE_RESOURCE_GROUP
az group deployment create --template-file ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
az group deployment create --template-file ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
az group deployment create --template-file ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
az group deployment create --template-file ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
az group deployment create --template-file ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/network.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
az deployment group create --template-file ./.generated/minions.json -g $AZURE_RESOURCE_GROUP

@@ -81,7 +81,7 @@ class KubesprayInventory(object):
        if self.config_file:
            try:
                self.hosts_file = open(config_file, 'r')
                self.yaml_config = yaml.load(self.hosts_file)
                self.yaml_config = yaml.load_all(self.hosts_file)
            except OSError:
                pass

@@ -1,6 +1,12 @@
---
- hosts: bastion[0]
  gather_facts: False
  roles:
    - { role: kubespray-defaults}
    - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
- hosts: kube-master[0]
  tags:
    - "provision"
  roles:
    - { role: kubespray-defaults}
    - { role: provision }

@@ -1,10 +1,12 @@
---
metallb:
  ip_range: "10.5.0.50-10.5.0.99"
  ip_range:
    - "10.5.0.50-10.5.0.99"
  protocol: "layer2"
  # additional_address_pools:
  #   kube_service_pool:
  #     ip_range: "10.5.1.50-10.5.1.99"
  #     ip_range:
  #       - 10.5.1.50-10.5.1.99"
  #     protocol: "layer2"
  #     auto_assign: false
limits:
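Read together with the hunk above, `ip_range` becomes a list, and each entry under `additional_address_pools` uses the same list form. A populated inventory following the new layout could look roughly like the sketch below; the pool name and address ranges come from the commented sample, while the nesting is an illustrative reconstruction rather than a verbatim excerpt.

```yaml
metallb:
  ip_range:
    - "10.5.0.50-10.5.0.99"
  protocol: "layer2"
  additional_address_pools:
    kube_service_pool:
      ip_range:
        - "10.5.1.50-10.5.1.99"
      protocol: "layer2"
      auto_assign: false
```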
@ -4,6 +4,22 @@
|
||||
msg: "MetalLB require kube_proxy_strict_arp = true, see https://github.com/danderson/metallb/issues/153#issuecomment-518651132"
|
||||
when:
|
||||
- "kube_proxy_mode == 'ipvs' and not kube_proxy_strict_arp"
|
||||
|
||||
- name: Kubernetes Apps | Check AppArmor status
|
||||
command: which apparmor_parser
|
||||
register: apparmor_status
|
||||
when:
|
||||
- podsecuritypolicy_enabled
|
||||
- inventory_hostname == groups['kube-master'][0]
|
||||
failed_when: false
|
||||
|
||||
- name: Kubernetes Apps | Set apparmor_enabled
|
||||
set_fact:
|
||||
apparmor_enabled: "{{ apparmor_status.rc == 0 }}"
|
||||
when:
|
||||
- podsecuritypolicy_enabled
|
||||
- inventory_hostname == groups['kube-master'][0]
|
||||
|
||||
- name: "Kubernetes Apps | Lay Down MetalLB"
|
||||
become: true
|
||||
template: { src: "{{ item }}.j2", dest: "{{ kube_config_dir }}/{{ item }}" }
|
||||
@ -11,6 +27,7 @@
|
||||
register: "rendering"
|
||||
when:
|
||||
- "inventory_hostname == groups['kube-master'][0]"
|
||||
|
||||
- name: "Kubernetes Apps | Install and configure MetalLB"
|
||||
kube:
|
||||
name: "MetalLB"
|
||||
|
@ -10,12 +10,16 @@ data:
|
||||
- name: loadbalanced
|
||||
protocol: {{ metallb.protocol }}
|
||||
addresses:
|
||||
- {{ metallb.ip_range }}
|
||||
{% for ip_range in metallb.ip_range %}
|
||||
- {{ ip_range }}
|
||||
{% endfor %}
|
||||
{% if metallb.additional_address_pools is defined %}{% for pool in metallb.additional_address_pools %}
|
||||
- name: {{ pool }}
|
||||
protocol: {{ metallb.additional_address_pools[pool].protocol }}
|
||||
addresses:
|
||||
- {{ metallb.additional_address_pools[pool].ip_range }}
|
||||
{% for ip_range in metallb.additional_address_pools[pool].ip_range %}
|
||||
- {{ ip_range }}
|
||||
{% endfor %}
|
||||
auto-assign: {{ metallb.additional_address_pools[pool].auto_assign }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
|
@ -50,6 +50,48 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["services", "endpoints", "nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
{% if podsecuritypolicy_enabled %}
|
||||
- apiGroups: ["policy"]
|
||||
resourceNames: ["metallb"]
|
||||
resources: ["podsecuritypolicies"]
|
||||
verbs: ["use"]
|
||||
---
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: metallb
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
|
||||
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
|
||||
{% if apparmor_enabled %}
|
||||
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
|
||||
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
|
||||
{% endif %}
|
||||
labels:
|
||||
app: metallb
|
||||
spec:
|
||||
privileged: true
|
||||
allowPrivilegeEscalation: false
|
||||
allowedCapabilities:
|
||||
- net_raw
|
||||
volumes:
|
||||
- secret
|
||||
hostNetwork: true
|
||||
hostPorts:
|
||||
- min: {{ metallb.port }}
|
||||
max: {{ metallb.port }}
|
||||
hostIPC: false
|
||||
hostPID: false
|
||||
runAsUser:
|
||||
rule: 'RunAsAny'
|
||||
seLinux:
|
||||
rule: 'RunAsAny'
|
||||
supplementalGroups:
|
||||
rule: 'RunAsAny'
|
||||
fsGroup:
|
||||
rule: 'RunAsAny'
|
||||
readOnlyRootFilesystem: true
|
||||
{% endif %}
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
|
@ -12,6 +12,11 @@
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"selector": {
|
||||
"matchLabels": {
|
||||
"glusterfs-node": "daemonset"
|
||||
}
|
||||
},
|
||||
"template": {
|
||||
"metadata": {
|
||||
"name": "glusterfs",
|
||||
|
@ -42,6 +42,11 @@
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"selector": {
|
||||
"matchLabels": {
|
||||
"name": "deploy-heketi"
|
||||
}
|
||||
},
|
||||
"replicas": 1,
|
||||
"template": {
|
||||
"metadata": {
|
||||
|
@ -55,6 +55,11 @@
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"selector": {
|
||||
"matchLabels": {
|
||||
"name": "heketi"
|
||||
}
|
||||
},
|
||||
"replicas": 1,
|
||||
"template": {
|
||||
"metadata": {
|
||||
|
@ -150,12 +150,12 @@ data "template_file" "inventory" {
|
||||
|
||||
vars = {
|
||||
public_ip_address_bastion = "${join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip))}"
|
||||
connection_strings_master = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
|
||||
connection_strings_node = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.tags.Name, aws_instance.k8s-worker.*.private_ip))}"
|
||||
connection_strings_etcd = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}"
|
||||
list_master = "${join("\n", aws_instance.k8s-master.*.tags.Name)}"
|
||||
list_node = "${join("\n", aws_instance.k8s-worker.*.tags.Name)}"
|
||||
list_etcd = "${join("\n", aws_instance.k8s-etcd.*.tags.Name)}"
|
||||
connection_strings_master = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.private_dns, aws_instance.k8s-master.*.private_ip))}"
|
||||
connection_strings_node = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.private_dns, aws_instance.k8s-worker.*.private_ip))}"
|
||||
connection_strings_etcd = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip))}"
|
||||
list_master = "${join("\n", aws_instance.k8s-master.*.private_dns)}"
|
||||
list_node = "${join("\n", aws_instance.k8s-worker.*.private_dns)}"
|
||||
list_etcd = "${join("\n", aws_instance.k8s-etcd.*.private_dns)}"
|
||||
elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
|
||||
}
|
||||
}
|
||||
|
@ -3,8 +3,8 @@ resource "aws_security_group" "aws-elb" {
|
||||
vpc_id = "${var.aws_vpc_id}"
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
|
||||
))}"
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
|
||||
))}"
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "aws-allow-api-access" {
|
||||
|
@ -1,7 +1,7 @@
|
||||
output "kube-master-profile" {
|
||||
value = "${aws_iam_instance_profile.kube-master.name }"
|
||||
value = "${aws_iam_instance_profile.kube-master.name}"
|
||||
}
|
||||
|
||||
output "kube-worker-profile" {
|
||||
value = "${aws_iam_instance_profile.kube-worker.name }"
|
||||
value = "${aws_iam_instance_profile.kube-worker.name}"
|
||||
}
|
||||
|
@ -6,8 +6,8 @@ resource "aws_vpc" "cluster-vpc" {
|
||||
enable_dns_hostnames = true
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-vpc"
|
||||
))}"
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-vpc"
|
||||
))}"
|
||||
}
|
||||
|
||||
resource "aws_eip" "cluster-nat-eip" {
|
||||
@ -30,9 +30,9 @@ resource "aws_subnet" "cluster-vpc-subnets-public" {
|
||||
cidr_block = "${element(var.aws_cidr_subnets_public, count.index)}"
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public",
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}", "member"
|
||||
))}"
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public",
|
||||
"kubernetes.io/cluster/${var.aws_cluster_name}", "member"
|
||||
))}"
|
||||
}
|
||||
|
||||
resource "aws_nat_gateway" "cluster-nat-gateway" {
|
||||
@ -48,8 +48,8 @@ resource "aws_subnet" "cluster-vpc-subnets-private" {
|
||||
cidr_block = "${element(var.aws_cidr_subnets_private, count.index)}"
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
|
||||
))}"
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
|
||||
))}"
|
||||
}
|
||||
|
||||
#Routing in VPC
|
||||
@ -65,8 +65,8 @@ resource "aws_route_table" "kubernetes-public" {
|
||||
}
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-routetable-public"
|
||||
))}"
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-routetable-public"
|
||||
))}"
|
||||
}
|
||||
|
||||
resource "aws_route_table" "kubernetes-private" {
|
||||
@ -79,20 +79,20 @@ resource "aws_route_table" "kubernetes-private" {
|
||||
}
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
|
||||
))}"
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
|
||||
))}"
|
||||
}
|
||||
|
||||
resource "aws_route_table_association" "kubernetes-public" {
|
||||
count = "${length(var.aws_cidr_subnets_public)}"
|
||||
subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id,count.index)}"
|
||||
subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)}"
|
||||
route_table_id = "${aws_route_table.kubernetes-public.id}"
|
||||
}
|
||||
|
||||
resource "aws_route_table_association" "kubernetes-private" {
|
||||
count = "${length(var.aws_cidr_subnets_private)}"
|
||||
subnet_id = "${element(aws_subnet.cluster-vpc-subnets-private.*.id,count.index)}"
|
||||
route_table_id = "${element(aws_route_table.kubernetes-private.*.id,count.index)}"
|
||||
subnet_id = "${element(aws_subnet.cluster-vpc-subnets-private.*.id, count.index)}"
|
||||
route_table_id = "${element(aws_route_table.kubernetes-private.*.id, count.index)}"
|
||||
}
|
||||
|
||||
#Kubernetes Security Groups
|
||||
@ -102,8 +102,8 @@ resource "aws_security_group" "kubernetes" {
|
||||
vpc_id = "${aws_vpc.cluster-vpc.id}"
|
||||
|
||||
tags = "${merge(var.default_tags, map(
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-securitygroup"
|
||||
))}"
|
||||
"Name", "kubernetes-${var.aws_cluster_name}-securitygroup"
|
||||
))}"
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "allow-all-ingress" {
|
||||
|
@@ -38,6 +38,16 @@ hosts where that makes sense. You have the option of creating bastion hosts
inside the private subnet to access the nodes there. Alternatively, a node with
a floating IP can be used as a jump host to nodes without.

#### Using an existing router
It is possible to use an existing router instead of creating one. To use an
existing router, set the `router_id` variable to the UUID of the router you wish
to use.

For example:
```
router_id = "00c542e7-6f46-4535-ae95-984c7f0391a3"
```

### Kubernetes Nodes
You can create many different Kubernetes topologies by setting the number of
different classes of hosts. For each class there are options for allocating
@@ -254,6 +264,107 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|`etcd_root_volume_size_in_gb` | Size of the root volume for etcd nodes, 0 to use ephemeral storage |
|`bastion_root_volume_size_in_gb` | Size of the root volume for bastions, 0 to use ephemeral storage |
|`use_server_group` | Create and use openstack nova servergroups, default: false |
|`k8s_nodes` | Map containing worker node definition, see explanation below |

##### k8s_nodes
Allows a custom definition of worker nodes, giving the operator full control over individual node flavor and
availability zone placement. To enable the use of this mode, set the `number_of_k8s_nodes` and
`number_of_k8s_nodes_no_floating_ip` variables to 0. Then define your desired worker node configuration
using the `k8s_nodes` variable.

For example:
|
||||
```
|
||||
k8s_nodes = {
|
||||
"1" = {
|
||||
"az" = "sto1"
|
||||
"flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
|
||||
"floating_ip" = true
|
||||
},
|
||||
"2" = {
|
||||
"az" = "sto2"
|
||||
"flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
|
||||
"floating_ip" = true
|
||||
},
|
||||
"3" = {
|
||||
"az" = "sto3"
|
||||
"flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
|
||||
"floating_ip" = true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Would result in the same configuration as:
|
||||
```
|
||||
number_of_k8s_nodes = 3
|
||||
flavor_k8s_node = "83d8b44a-26a0-4f02-a981-079446926445"
|
||||
az_list = ["sto1", "sto2", "sto3"]
|
||||
```
|
||||
|
||||
And:
|
||||
```
|
||||
k8s_nodes = {
|
||||
"ing-1" = {
|
||||
"az" = "sto1"
|
||||
"flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
|
||||
"floating_ip" = true
|
||||
},
|
||||
"ing-2" = {
|
||||
"az" = "sto2"
|
||||
"flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
|
||||
"floating_ip" = true
|
||||
},
|
||||
"ing-3" = {
|
||||
"az" = "sto3"
|
||||
"flavor" = "83d8b44a-26a0-4f02-a981-079446926445"
|
||||
"floating_ip" = true
|
||||
},
|
||||
"big-1" = {
|
||||
"az" = "sto1"
|
||||
"flavor" = "3f73fc93-ec61-4808-88df-2580d94c1a9b"
|
||||
"floating_ip" = false
|
||||
},
|
||||
"big-2" = {
|
||||
"az" = "sto2"
|
||||
"flavor" = "3f73fc93-ec61-4808-88df-2580d94c1a9b"
|
||||
"floating_ip" = false
|
||||
},
|
||||
"big-3" = {
|
||||
"az" = "sto3"
|
||||
"flavor" = "3f73fc93-ec61-4808-88df-2580d94c1a9b"
|
||||
"floating_ip" = false
|
||||
},
|
||||
"small-1" = {
|
||||
"az" = "sto1"
|
||||
"flavor" = "7a6a998f-ac7f-4fb8-a534-2175b254f75e"
|
||||
"floating_ip" = false
|
||||
},
|
||||
"small-2" = {
|
||||
"az" = "sto2"
|
||||
"flavor" = "7a6a998f-ac7f-4fb8-a534-2175b254f75e"
|
||||
"floating_ip" = false
|
||||
},
|
||||
"small-3" = {
|
||||
"az" = "sto3"
|
||||
"flavor" = "7a6a998f-ac7f-4fb8-a534-2175b254f75e"
|
||||
"floating_ip" = false
|
||||
}
|
||||
}
|
||||
```

Would result in three nodes in each availability zone, each with its own separate naming,
flavor, and floating IP configuration.

The "schema":
```
k8s_nodes = {
|
||||
"key | node name suffix, must be unique" = {
|
||||
"az" = string
|
||||
"flavor" = string
|
||||
"floating_ip" = bool
|
||||
},
|
||||
}
|
||||
```
|
||||
All values are required.
|
||||
|
||||
#### Terraform state files
|
||||
|
||||
@ -494,3 +605,81 @@ $ ansible-playbook --become -i inventory/$CLUSTER/hosts ./contrib/network-storag
|
||||
## What's next
|
||||
|
||||
Try out your new Kubernetes cluster with the [Hello Kubernetes service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/).
|
||||
|
||||
## Appendix
|
||||
|
||||
### Migration from `number_of_k8s_nodes*` to `k8s_nodes`
If you currently have a cluster defined using the `number_of_k8s_nodes*` variables and wish
to migrate to the `k8s_nodes` style, you can do it like so:

```ShellSession
|
||||
$ terraform state list
|
||||
module.compute.data.openstack_images_image_v2.gfs_image
|
||||
module.compute.data.openstack_images_image_v2.vm_image
|
||||
module.compute.openstack_compute_floatingip_associate_v2.k8s_master[0]
|
||||
module.compute.openstack_compute_floatingip_associate_v2.k8s_node[0]
|
||||
module.compute.openstack_compute_floatingip_associate_v2.k8s_node[1]
|
||||
module.compute.openstack_compute_floatingip_associate_v2.k8s_node[2]
|
||||
module.compute.openstack_compute_instance_v2.k8s_master[0]
|
||||
module.compute.openstack_compute_instance_v2.k8s_node[0]
|
||||
module.compute.openstack_compute_instance_v2.k8s_node[1]
|
||||
module.compute.openstack_compute_instance_v2.k8s_node[2]
|
||||
module.compute.openstack_compute_keypair_v2.k8s
|
||||
module.compute.openstack_compute_servergroup_v2.k8s_etcd[0]
|
||||
module.compute.openstack_compute_servergroup_v2.k8s_master[0]
|
||||
module.compute.openstack_compute_servergroup_v2.k8s_node[0]
|
||||
module.compute.openstack_networking_secgroup_rule_v2.bastion[0]
|
||||
module.compute.openstack_networking_secgroup_rule_v2.egress[0]
|
||||
module.compute.openstack_networking_secgroup_rule_v2.k8s
|
||||
module.compute.openstack_networking_secgroup_rule_v2.k8s_allowed_remote_ips[0]
|
||||
module.compute.openstack_networking_secgroup_rule_v2.k8s_allowed_remote_ips[1]
|
||||
module.compute.openstack_networking_secgroup_rule_v2.k8s_allowed_remote_ips[2]
|
||||
module.compute.openstack_networking_secgroup_rule_v2.k8s_master[0]
|
||||
module.compute.openstack_networking_secgroup_rule_v2.worker[0]
|
||||
module.compute.openstack_networking_secgroup_rule_v2.worker[1]
|
||||
module.compute.openstack_networking_secgroup_rule_v2.worker[2]
|
||||
module.compute.openstack_networking_secgroup_rule_v2.worker[3]
|
||||
module.compute.openstack_networking_secgroup_rule_v2.worker[4]
|
||||
module.compute.openstack_networking_secgroup_v2.bastion[0]
|
||||
module.compute.openstack_networking_secgroup_v2.k8s
|
||||
module.compute.openstack_networking_secgroup_v2.k8s_master
|
||||
module.compute.openstack_networking_secgroup_v2.worker
|
||||
module.ips.null_resource.dummy_dependency
|
||||
module.ips.openstack_networking_floatingip_v2.k8s_master[0]
|
||||
module.ips.openstack_networking_floatingip_v2.k8s_node[0]
|
||||
module.ips.openstack_networking_floatingip_v2.k8s_node[1]
|
||||
module.ips.openstack_networking_floatingip_v2.k8s_node[2]
|
||||
module.network.openstack_networking_network_v2.k8s[0]
|
||||
module.network.openstack_networking_router_interface_v2.k8s[0]
|
||||
module.network.openstack_networking_router_v2.k8s[0]
|
||||
module.network.openstack_networking_subnet_v2.k8s[0]
|
||||
$ terraform state mv 'module.compute.openstack_compute_floatingip_associate_v2.k8s_node[0]' 'module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes["1"]'
|
||||
Move "module.compute.openstack_compute_floatingip_associate_v2.k8s_node[0]" to "module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes[\"1\"]"
|
||||
Successfully moved 1 object(s).
|
||||
$ terraform state mv 'module.compute.openstack_compute_floatingip_associate_v2.k8s_node[1]' 'module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes["2"]'
|
||||
Move "module.compute.openstack_compute_floatingip_associate_v2.k8s_node[1]" to "module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes[\"2\"]"
|
||||
Successfully moved 1 object(s).
|
||||
$ terraform state mv 'module.compute.openstack_compute_floatingip_associate_v2.k8s_node[2]' 'module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes["3"]'
|
||||
Move "module.compute.openstack_compute_floatingip_associate_v2.k8s_node[2]" to "module.compute.openstack_compute_floatingip_associate_v2.k8s_nodes[\"3\"]"
|
||||
Successfully moved 1 object(s).
|
||||
$ terraform state mv 'module.compute.openstack_compute_instance_v2.k8s_node[0]' 'module.compute.openstack_compute_instance_v2.k8s_node["1"]'
|
||||
Move "module.compute.openstack_compute_instance_v2.k8s_node[0]" to "module.compute.openstack_compute_instance_v2.k8s_node[\"1\"]"
|
||||
Successfully moved 1 object(s).
|
||||
$ terraform state mv 'module.compute.openstack_compute_instance_v2.k8s_node[1]' 'module.compute.openstack_compute_instance_v2.k8s_node["2"]'
|
||||
Move "module.compute.openstack_compute_instance_v2.k8s_node[1]" to "module.compute.openstack_compute_instance_v2.k8s_node[\"2\"]"
|
||||
Successfully moved 1 object(s).
|
||||
$ terraform state mv 'module.compute.openstack_compute_instance_v2.k8s_node[2]' 'module.compute.openstack_compute_instance_v2.k8s_node["3"]'
|
||||
Move "module.compute.openstack_compute_instance_v2.k8s_node[2]" to "module.compute.openstack_compute_instance_v2.k8s_node[\"3\"]"
|
||||
Successfully moved 1 object(s).
|
||||
$ terraform state mv 'module.ips.openstack_networking_floatingip_v2.k8s_node[0]' 'module.ips.openstack_networking_floatingip_v2.k8s_node["1"]'
|
||||
Move "module.ips.openstack_networking_floatingip_v2.k8s_node[0]" to "module.ips.openstack_networking_floatingip_v2.k8s_node[\"1\"]"
|
||||
Successfully moved 1 object(s).
|
||||
$ terraform state mv 'module.ips.openstack_networking_floatingip_v2.k8s_node[1]' 'module.ips.openstack_networking_floatingip_v2.k8s_node["2"]'
|
||||
Move "module.ips.openstack_networking_floatingip_v2.k8s_node[1]" to "module.ips.openstack_networking_floatingip_v2.k8s_node[\"2\"]"
|
||||
Successfully moved 1 object(s).
|
||||
$ terraform state mv 'module.ips.openstack_networking_floatingip_v2.k8s_node[2]' 'module.ips.openstack_networking_floatingip_v2.k8s_node["3"]'
|
||||
Move "module.ips.openstack_networking_floatingip_v2.k8s_node[2]" to "module.ips.openstack_networking_floatingip_v2.k8s_node[\"3\"]"
|
||||
Successfully moved 1 object(s).
|
||||
```
|
||||
|
||||
Of course, for nodes without floating IPs those steps can be omitted.
|
||||
|
@ -12,6 +12,7 @@ module "network" {
|
||||
dns_nameservers = "${var.dns_nameservers}"
|
||||
network_dns_domain = "${var.network_dns_domain}"
|
||||
use_neutron = "${var.use_neutron}"
|
||||
router_id = "${var.router_id}"
|
||||
}
|
||||
|
||||
module "ips" {
|
||||
@ -25,6 +26,7 @@ module "ips" {
|
||||
external_net = "${var.external_net}"
|
||||
network_name = "${var.network_name}"
|
||||
router_id = "${module.network.router_id}"
|
||||
k8s_nodes = "${var.k8s_nodes}"
|
||||
}
|
||||
|
||||
module "compute" {
|
||||
@ -32,6 +34,7 @@ module "compute" {
|
||||
|
||||
cluster_name = "${var.cluster_name}"
|
||||
az_list = "${var.az_list}"
|
||||
az_list_node = "${var.az_list_node}"
|
||||
number_of_k8s_masters = "${var.number_of_k8s_masters}"
|
||||
number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
|
||||
number_of_etcd = "${var.number_of_etcd}"
|
||||
@ -41,6 +44,7 @@ module "compute" {
|
||||
number_of_bastions = "${var.number_of_bastions}"
|
||||
number_of_k8s_nodes_no_floating_ip = "${var.number_of_k8s_nodes_no_floating_ip}"
|
||||
number_of_gfs_nodes_no_floating_ip = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
k8s_nodes = "${var.k8s_nodes}"
|
||||
bastion_root_volume_size_in_gb = "${var.bastion_root_volume_size_in_gb}"
|
||||
etcd_root_volume_size_in_gb = "${var.etcd_root_volume_size_in_gb}"
|
||||
master_root_volume_size_in_gb = "${var.master_root_volume_size_in_gb}"
|
||||
@ -61,6 +65,7 @@ module "compute" {
|
||||
k8s_master_fips = "${module.ips.k8s_master_fips}"
|
||||
k8s_master_no_etcd_fips = "${module.ips.k8s_master_no_etcd_fips}"
|
||||
k8s_node_fips = "${module.ips.k8s_node_fips}"
|
||||
k8s_nodes_fips = "${module.ips.k8s_nodes_fips}"
|
||||
bastion_fips = "${module.ips.bastion_fips}"
|
||||
bastion_allowed_remote_ips = "${var.bastion_allowed_remote_ips}"
|
||||
master_allowed_remote_ips = "${var.master_allowed_remote_ips}"
|
||||
@ -93,7 +98,7 @@ output "k8s_master_fips" {
|
||||
}
|
||||
|
||||
output "k8s_node_fips" {
|
||||
value = "${module.ips.k8s_node_fips}"
|
||||
value = "${var.number_of_k8s_nodes > 0 ? module.ips.k8s_node_fips : [for key, value in module.ips.k8s_nodes_fips : value.address]}"
|
||||
}
|
||||
|
||||
output "bastion_fips" {
|
||||
|
@ -43,7 +43,7 @@ resource "openstack_networking_secgroup_rule_v2" "bastion" {
|
||||
port_range_min = "22"
|
||||
port_range_max = "22"
|
||||
remote_ip_prefix = "${var.bastion_allowed_remote_ips[count.index]}"
|
||||
security_group_id = "${openstack_networking_secgroup_v2.bastion[count.index].id}"
|
||||
security_group_id = "${openstack_networking_secgroup_v2.bastion[0].id}"
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_v2" "k8s" {
|
||||
@ -96,64 +96,40 @@ resource "openstack_networking_secgroup_rule_v2" "worker" {
|
||||
}
|
||||
|
||||
resource "openstack_compute_servergroup_v2" "k8s_master" {
|
||||
count = "%{ if var.use_server_groups }1%{else}0%{endif}"
|
||||
name = "k8s-master-srvgrp"
|
||||
count = "%{if var.use_server_groups}1%{else}0%{endif}"
|
||||
name = "k8s-master-srvgrp"
|
||||
policies = ["anti-affinity"]
|
||||
}
|
||||
|
||||
resource "openstack_compute_servergroup_v2" "k8s_node" {
|
||||
count = "%{ if var.use_server_groups }1%{else}0%{endif}"
|
||||
name = "k8s-node-srvgrp"
|
||||
count = "%{if var.use_server_groups}1%{else}0%{endif}"
|
||||
name = "k8s-node-srvgrp"
|
||||
policies = ["anti-affinity"]
|
||||
}
|
||||
|
||||
resource "openstack_compute_servergroup_v2" "k8s_etcd" {
|
||||
count = "%{ if var.use_server_groups }1%{else}0%{endif}"
|
||||
name = "k8s-etcd-srvgrp"
|
||||
count = "%{if var.use_server_groups}1%{else}0%{endif}"
|
||||
name = "k8s-etcd-srvgrp"
|
||||
policies = ["anti-affinity"]
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "bastion" {
|
||||
name = "${var.cluster_name}-bastion-${count.index+1}"
|
||||
count = "${var.bastion_root_volume_size_in_gb == 0 ? var.number_of_bastions : 0}"
|
||||
name = "${var.cluster_name}-bastion-${count.index + 1}"
|
||||
count = "${var.number_of_bastions}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_bastion}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
|
||||
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
|
||||
"${element(openstack_networking_secgroup_v2.bastion.*.name, count.index)}",
|
||||
]
|
||||
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "bastion"
|
||||
depends_on = "${var.network_id}"
|
||||
use_access_ip = "${var.use_access_ip}"
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > group_vars/no-floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "bastion_custom_volume_size" {
|
||||
name = "${var.cluster_name}-bastion-${count.index+1}"
|
||||
count = "${var.bastion_root_volume_size_in_gb > 0 ? var.number_of_bastions : 0}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_bastion}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
block_device {
|
||||
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||
source_type = "image"
|
||||
volume_size = "${var.bastion_root_volume_size_in_gb}"
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
dynamic "block_device" {
|
||||
for_each = var.bastion_root_volume_size_in_gb > 0 ? [var.image] : []
|
||||
content {
|
||||
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||
source_type = "image"
|
||||
volume_size = "${var.bastion_root_volume_size_in_gb}"
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
}
|
||||
}
|
||||
|
||||
network {
|
||||
@ -177,13 +153,26 @@ resource "openstack_compute_instance_v2" "bastion_custom_volume_size" {
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master" {
|
||||
name = "${var.cluster_name}-k8s-master-${count.index+1}"
|
||||
count = "${var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters : 0}"
|
||||
name = "${var.cluster_name}-k8s-master-${count.index + 1}"
|
||||
count = "${var.number_of_k8s_masters}"
|
||||
availability_zone = "${element(var.az_list, count.index)}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
|
||||
content {
|
||||
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||
source_type = "image"
|
||||
volume_size = "${var.master_root_volume_size_in_gb}"
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
}
|
||||
}
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
@ -207,106 +196,31 @@ resource "openstack_compute_instance_v2" "k8s_master" {
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_custom_volume_size" {
|
||||
name = "${var.cluster_name}-k8s-master-${count.index+1}"
|
||||
count = "${var.master_root_volume_size_in_gb > 0 ? var.number_of_k8s_masters : 0}"
|
||||
availability_zone = "${element(var.az_list, count.index)}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
block_device {
|
||||
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||
source_type = "image"
|
||||
volume_size = "${var.master_root_volume_size_in_gb}"
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
}
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
|
||||
security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_networking_secgroup_v2.k8s.name}",
|
||||
]
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
content {
|
||||
group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
|
||||
}
|
||||
}
|
||||
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
|
||||
depends_on = "${var.network_id}"
|
||||
use_access_ip = "${var.use_access_ip}"
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
|
||||
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
||||
name = "${var.cluster_name}-k8s-master-ne-${count.index+1}"
|
||||
count = "${var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0}"
|
||||
name = "${var.cluster_name}-k8s-master-ne-${count.index + 1}"
|
||||
count = "${var.number_of_k8s_masters_no_etcd}"
|
||||
availability_zone = "${element(var.az_list, count.index)}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
|
||||
security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_networking_secgroup_v2.k8s.name}",
|
||||
]
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
dynamic "block_device" {
|
||||
for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
|
||||
content {
|
||||
group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
|
||||
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||
source_type = "image"
|
||||
volume_size = "${var.master_root_volume_size_in_gb}"
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
}
|
||||
}
|
||||
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
|
||||
depends_on = "${var.network_id}"
|
||||
use_access_ip = "${var.use_access_ip}"
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_etcd_custom_volume_size" {
|
||||
name = "${var.cluster_name}-k8s-master-ne-${count.index+1}"
|
||||
count = "${var.master_root_volume_size_in_gb > 0 ? var.number_of_k8s_masters_no_etcd : 0}"
|
||||
availability_zone = "${element(var.az_list, count.index)}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
block_device {
|
||||
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||
source_type = "image"
|
||||
volume_size = "${var.master_root_volume_size_in_gb}"
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
}
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
@ -330,62 +244,36 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd_custom_volume_size"
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
|
||||
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "etcd" {
|
||||
name = "${var.cluster_name}-etcd-${count.index+1}"
|
||||
count = "${var.etcd_root_volume_size_in_gb == 0 ? var.number_of_etcd : 0}"
|
||||
name = "${var.cluster_name}-etcd-${count.index + 1}"
|
||||
count = "${var.number_of_etcd}"
|
||||
availability_zone = "${element(var.az_list, count.index)}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_etcd}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
|
||||
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
|
||||
dynamic "block_device" {
|
||||
for_each = var.etcd_root_volume_size_in_gb > 0 ? [var.image] : []
|
||||
content {
|
||||
group = "${openstack_compute_servergroup_v2.k8s_etcd[0].id}"
|
||||
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||
source_type = "image"
|
||||
volume_size = "${var.etcd_root_volume_size_in_gb}"
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
}
|
||||
}
|
||||
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,vault,no-floating"
|
||||
depends_on = "${var.network_id}"
|
||||
use_access_ip = "${var.use_access_ip}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "etcd_custom_volume_size" {
|
||||
name = "${var.cluster_name}-etcd-${count.index+1}"
|
||||
count = "${var.etcd_root_volume_size_in_gb > 0 ? var.number_of_etcd : 0}"
|
||||
availability_zone = "${element(var.az_list, count.index)}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_etcd}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
block_device {
|
||||
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||
source_type = "image"
|
||||
volume_size = "${var.etcd_root_volume_size_in_gb}"
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
}
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
|
||||
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
|
||||
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
|
||||
content {
|
||||
@ -402,53 +290,25 @@ resource "openstack_compute_instance_v2" "etcd_custom_volume_size" {
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
|
||||
count = "${var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_floating_ip : 0}"
|
||||
name = "${var.cluster_name}-k8s-master-nf-${count.index + 1}"
|
||||
count = "${var.number_of_k8s_masters_no_floating_ip}"
|
||||
availability_zone = "${element(var.az_list, count.index)}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
|
||||
security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_networking_secgroup_v2.k8s.name}",
|
||||
]
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
dynamic "block_device" {
|
||||
for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
|
||||
content {
|
||||
group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
|
||||
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||
source_type = "image"
|
||||
volume_size = "${var.master_root_volume_size_in_gb}"
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
}
|
||||
}
|
||||
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
|
||||
depends_on = "${var.network_id}"
|
||||
use_access_ip = "${var.use_access_ip}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_custom_volume_size" {
|
||||
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
|
||||
count = "${var.master_root_volume_size_in_gb > 0 ? var.number_of_k8s_masters_no_floating_ip : 0}"
|
||||
availability_zone = "${element(var.az_list, count.index)}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
block_device {
|
||||
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||
source_type = "image"
|
||||
volume_size = "${var.master_root_volume_size_in_gb}"
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
}
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
@ -456,7 +316,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_custom_volum
|
||||
security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_networking_secgroup_v2.k8s.name}",
|
||||
]
|
||||
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
content {
|
||||
@ -473,53 +333,25 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_custom_volum
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
||||
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index+1}"
|
||||
count = "${var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_floating_ip_no_etcd : 0}"
|
||||
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}"
|
||||
count = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
|
||||
availability_zone = "${element(var.az_list, count.index)}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
|
||||
security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_networking_secgroup_v2.k8s.name}",
|
||||
]
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
dynamic "block_device" {
|
||||
for_each = var.master_root_volume_size_in_gb > 0 ? [var.image] : []
|
||||
content {
|
||||
group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
|
||||
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||
source_type = "image"
|
||||
volume_size = "${var.master_root_volume_size_in_gb}"
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
}
|
||||
}
|
||||
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
|
||||
depends_on = "${var.network_id}"
|
||||
use_access_ip = "${var.use_access_ip}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd_custom_volume_size" {
|
||||
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index+1}"
|
||||
count = "${var.master_root_volume_size_in_gb > 0 ? var.number_of_k8s_masters_no_floating_ip_no_etcd : 0}"
|
||||
availability_zone = "${element(var.az_list, count.index)}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_master}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
block_device {
|
||||
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||
source_type = "image"
|
||||
volume_size = "${var.master_root_volume_size_in_gb}"
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
}
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
@ -527,7 +359,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd_cust
|
||||
security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
|
||||
"${openstack_networking_secgroup_v2.k8s.name}",
|
||||
]
|
||||
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
|
||||
content {
|
||||
@ -544,13 +376,25 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd_cust
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
name = "${var.cluster_name}-k8s-node-${count.index+1}"
|
||||
count = "${var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes : 0}"
|
||||
availability_zone = "${element(var.az_list, count.index)}"
|
||||
name = "${var.cluster_name}-k8s-node-${count.index + 1}"
|
||||
count = "${var.number_of_k8s_nodes}"
|
||||
availability_zone = "${element(var.az_list_node, count.index)}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.node_root_volume_size_in_gb > 0 ? [var.image] : []
|
||||
content {
|
||||
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||
source_type = "image"
|
||||
volume_size = "${var.node_root_volume_size_in_gb}"
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
}
|
||||
}
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
@ -574,62 +418,30 @@ resource "openstack_compute_instance_v2" "k8s_node" {
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no-floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node_custom_volume_size" {
|
||||
name = "${var.cluster_name}-k8s-node-${count.index+1}"
|
||||
count = "${var.node_root_volume_size_in_gb > 0 ? var.number_of_k8s_nodes : 0}"
|
||||
availability_zone = "${element(var.az_list, count.index)}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
block_device {
|
||||
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||
source_type = "image"
|
||||
volume_size = "${var.node_root_volume_size_in_gb}"
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
}
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
|
||||
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
|
||||
"${openstack_networking_secgroup_v2.worker.name}",
|
||||
]
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
content {
|
||||
group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
|
||||
}
|
||||
}
|
||||
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
|
||||
depends_on = "${var.network_id}"
|
||||
use_access_ip = "${var.use_access_ip}"
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no-floating.yml"
|
||||
command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no-floating.yml"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
|
||||
count = "${var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes_no_floating_ip : 0}"
|
||||
availability_zone = "${element(var.az_list, count.index)}"
|
||||
name = "${var.cluster_name}-k8s-node-nf-${count.index + 1}"
|
||||
count = "${var.number_of_k8s_nodes_no_floating_ip}"
|
||||
availability_zone = "${element(var.az_list_node, count.index)}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.node_root_volume_size_in_gb > 0 ? [var.image] : []
|
||||
content {
|
||||
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||
source_type = "image"
|
||||
volume_size = "${var.node_root_volume_size_in_gb}"
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
}
|
||||
}
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
@ -637,7 +449,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
|
||||
"${openstack_networking_secgroup_v2.worker.name}",
|
||||
]
|
||||
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
content {
|
||||
@ -653,21 +465,24 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip_custom_volume_size" {
|
||||
name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
|
||||
count = "${var.node_root_volume_size_in_gb > 0 ? var.number_of_k8s_nodes_no_floating_ip : 0}"
|
||||
availability_zone = "${element(var.az_list, count.index)}"
|
||||
resource "openstack_compute_instance_v2" "k8s_nodes" {
|
||||
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
|
||||
name = "${var.cluster_name}-k8s-node-${each.key}"
|
||||
availability_zone = "${each.value.az}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_k8s_node}"
|
||||
flavor_id = "${each.value.flavor}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
block_device {
|
||||
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||
source_type = "image"
|
||||
volume_size = "${var.node_root_volume_size_in_gb}"
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
dynamic "block_device" {
|
||||
for_each = var.node_root_volume_size_in_gb > 0 ? [var.image] : []
|
||||
content {
|
||||
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||
source_type = "image"
|
||||
volume_size = "${var.node_root_volume_size_in_gb}"
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
}
|
||||
}
|
||||
|
||||
network {
|
||||
@ -677,7 +492,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip_custom_volume_
|
||||
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
|
||||
"${openstack_networking_secgroup_v2.worker.name}",
|
||||
]
|
||||
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
content {
|
||||
@ -687,155 +502,101 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip_custom_volume_
|
||||
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user}"
|
||||
kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
|
||||
kubespray_groups = "kube-node,k8s-cluster,%{if each.value.floating_ip == false}no-floating,%{endif}${var.supplementary_node_groups}"
|
||||
depends_on = "${var.network_id}"
|
||||
use_access_ip = "${var.use_access_ip}"
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ > group_vars/no-floating.yml%{else}true%{endif}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-gfs-node-nf-${count.index + 1}"
|
||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||
availability_zone = "${element(var.az_list, count.index)}"
|
||||
image_name = "${var.image_gfs}"
|
||||
flavor_id = "${var.flavor_gfs_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
dynamic "block_device" {
|
||||
for_each = var.gfs_root_volume_size_in_gb > 0 ? [var.image] : []
|
||||
content {
|
||||
uuid = "${data.openstack_images_image_v2.vm_image.id}"
|
||||
source_type = "image"
|
||||
volume_size = "${var.gfs_root_volume_size_in_gb}"
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
}
|
||||
}
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
|
||||
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
content {
|
||||
group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
|
||||
}
|
||||
}
|
||||
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user_gfs}"
|
||||
kubespray_groups = "gfs-cluster,network-storage,no-floating"
|
||||
depends_on = "${var.network_id}"
|
||||
use_access_ip = "${var.use_access_ip}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "bastion" {
|
||||
count = "${var.bastion_root_volume_size_in_gb == 0 ? var.number_of_bastions : 0}"
|
||||
count = "${var.number_of_bastions}"
|
||||
floating_ip = "${var.bastion_fips[count.index]}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.bastion.*.id, count.index)}"
|
||||
wait_until_associated = "${var.wait_for_floatingip}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "bastion_custom_volume_size" {
|
||||
count = "${var.bastion_root_volume_size_in_gb > 0 ? var.number_of_bastions : 0}"
|
||||
floating_ip = "${var.bastion_fips[count.index]}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.bastion_custom_volume_size.*.id, count.index)}"
|
||||
wait_until_associated = "${var.wait_for_floatingip}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_master" {
|
||||
count = "${var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters : 0}"
|
||||
count = "${var.number_of_k8s_masters}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.k8s_master.*.id, count.index)}"
|
||||
floating_ip = "${var.k8s_master_fips[count.index]}"
|
||||
wait_until_associated = "${var.wait_for_floatingip}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_master_custom_volume_size" {
|
||||
count = "${var.master_root_volume_size_in_gb > 0 ? var.number_of_k8s_masters : 0}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.k8s_master_custom_volume_size.*.id, count.index)}"
|
||||
floating_ip = "${var.k8s_master_fips[count.index]}"
|
||||
wait_until_associated = "${var.wait_for_floatingip}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_master_no_etcd" {
|
||||
count = "${var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.k8s_master_no_etcd.*.id, count.index)}"
|
||||
floating_ip = "${var.k8s_master_no_etcd_fips[count.index]}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_master_no_etcd_custom_volume_size" {
|
||||
count = "${var.master_root_volume_size_in_gb > 0 ? var.number_of_k8s_masters_no_etcd : 0}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.k8s_master_no_etcd_custom_volume_size.*.id, count.index)}"
|
||||
floating_ip = "${var.k8s_master_no_etcd_fips[count.index]}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_node" {
|
||||
count = "${var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes : 0}"
|
||||
floating_ip = "${var.k8s_node_fips[count.index]}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.k8s_node.*.id, count.index)}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.k8s_node[*].id, count.index)}"
|
||||
wait_until_associated = "${var.wait_for_floatingip}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_node_custom_volume_size" {
|
||||
count = "${var.node_root_volume_size_in_gb > 0 ? var.number_of_k8s_nodes : 0}"
|
||||
floating_ip = "${var.k8s_node_fips[count.index]}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.k8s_node_custom_volume_size.*.id, count.index)}"
|
||||
resource "openstack_compute_floatingip_associate_v2" "k8s_nodes" {
|
||||
for_each = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? { for key, value in var.k8s_nodes : key => value if value.floating_ip } : {}
|
||||
floating_ip = "${var.k8s_nodes_fips[each.key].address}"
|
||||
instance_id = "${openstack_compute_instance_v2.k8s_nodes[each.key].id}"
|
||||
wait_until_associated = "${var.wait_for_floatingip}"
|
||||
}
|
||||
|
||||
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
|
||||
name = "${var.cluster_name}-glusterfs_volume-${count.index+1}"
|
||||
name = "${var.cluster_name}-glusterfs_volume-${count.index + 1}"
|
||||
count = "${var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
|
||||
description = "Non-ephemeral volume for GlusterFS"
|
||||
size = "${var.gfs_volume_size_in_gb}"
|
||||
}
|
||||
|
||||
resource "openstack_blockstorage_volume_v2" "glusterfs_volume_custom_volume_size" {
|
||||
name = "${var.cluster_name}-glusterfs_volume-${count.index+1}"
|
||||
count = "${var.gfs_root_volume_size_in_gb > 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
|
||||
description = "Non-ephemeral volume for GlusterFS"
|
||||
size = "${var.gfs_volume_size_in_gb}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||
name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
|
||||
count = "${var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
|
||||
availability_zone = "${element(var.az_list, count.index)}"
|
||||
image_name = "${var.image_gfs}"
|
||||
flavor_id = "${var.flavor_gfs_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
|
||||
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
content {
|
||||
group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
|
||||
}
|
||||
}
|
||||
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user_gfs}"
|
||||
kubespray_groups = "gfs-cluster,network-storage,no-floating"
|
||||
depends_on = "${var.network_id}"
|
||||
use_access_ip = "${var.use_access_ip}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip_custom_volume_size" {
|
||||
name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
|
||||
count = "${var.gfs_root_volume_size_in_gb > 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
|
||||
availability_zone = "${element(var.az_list, count.index)}"
|
||||
image_name = "${var.image}"
|
||||
flavor_id = "${var.flavor_gfs_node}"
|
||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||
|
||||
block_device {
|
||||
uuid = "${data.openstack_images_image_v2.gfs_image.id}"
|
||||
source_type = "image"
|
||||
volume_size = "${var.gfs_root_volume_size_in_gb}"
|
||||
boot_index = 0
|
||||
destination_type = "volume"
|
||||
delete_on_termination = true
|
||||
}
|
||||
|
||||
network {
|
||||
name = "${var.network_name}"
|
||||
}
|
||||
|
||||
security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]
|
||||
|
||||
dynamic "scheduler_hints" {
|
||||
for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
|
||||
content {
|
||||
group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
|
||||
}
|
||||
}
|
||||
|
||||
metadata = {
|
||||
ssh_user = "${var.ssh_user_gfs}"
|
||||
kubespray_groups = "gfs-cluster,network-storage,no-floating"
|
||||
depends_on = "${var.network_id}"
|
||||
use_access_ip = "${var.use_access_ip}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_volume_attach_v2" "glusterfs_volume" {
|
||||
count = "${var.gfs_root_volume_size_in_gb == 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index)}"
|
||||
volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)}"
|
||||
}
|
||||
|
||||
resource "openstack_compute_volume_attach_v2" "glusterfs_volume_custom_root_volume_size" {
|
||||
count = "${var.gfs_root_volume_size_in_gb > 0 ? var.number_of_gfs_nodes_no_floating_ip : 0}"
|
||||
instance_id = "${element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip_custom_volume_size.*.id, count.index)}"
|
||||
volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume_custom_volume_size.*.id, count.index)}"
|
||||
}
|
||||
|
@ -1,7 +1,11 @@
|
||||
variable "cluster_name" {}
|
||||
|
||||
variable "az_list" {
|
||||
type = "list"
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "az_list_node" {
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "number_of_k8s_masters" {}
|
||||
@ -72,6 +76,10 @@ variable "k8s_node_fips" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "k8s_nodes_fips" {
|
||||
type = "map"
|
||||
}
|
||||
|
||||
variable "bastion_fips" {
|
||||
type = "list"
|
||||
}
|
||||
@ -92,6 +100,8 @@ variable "k8s_allowed_egress_ips" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "k8s_nodes" {}
|
||||
|
||||
variable "wait_for_floatingip" {}
|
||||
|
||||
variable "supplementary_master_groups" {
|
||||
@ -110,4 +120,4 @@ variable "use_access_ip" {}
|
||||
|
||||
variable "use_server_groups" {
|
||||
type = bool
|
||||
}
|
||||
}
|
||||
|
@ -27,3 +27,10 @@ resource "openstack_networking_floatingip_v2" "bastion" {
|
||||
pool = "${var.floatingip_pool}"
|
||||
depends_on = ["null_resource.dummy_dependency"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_floatingip_v2" "k8s_nodes" {
|
||||
for_each = var.number_of_k8s_nodes == 0 ? { for key, value in var.k8s_nodes : key => value if value.floating_ip } : {}
|
||||
pool = "${var.floatingip_pool}"
|
||||
depends_on = ["null_resource.dummy_dependency"]
|
||||
}
|
||||
|
||||
|
@ -10,6 +10,10 @@ output "k8s_node_fips" {
|
||||
value = "${openstack_networking_floatingip_v2.k8s_node[*].address}"
|
||||
}
|
||||
|
||||
output "k8s_nodes_fips" {
|
||||
value = "${openstack_networking_floatingip_v2.k8s_nodes}"
|
||||
}
|
||||
|
||||
output "bastion_fips" {
|
||||
value = "${openstack_networking_floatingip_v2.bastion[*].address}"
|
||||
}
|
||||
|
@ -14,4 +14,6 @@ variable "network_name" {}
|
||||
|
||||
variable "router_id" {
|
||||
default = ""
|
||||
}
|
||||
}
|
||||
|
||||
variable "k8s_nodes" {}
|
||||
|
@ -1,10 +1,15 @@
|
||||
resource "openstack_networking_router_v2" "k8s" {
|
||||
name = "${var.cluster_name}-router"
|
||||
count = "${var.use_neutron}"
|
||||
count = "${var.use_neutron}" == 1 && "${var.router_id}" == null ? 1 : 0
|
||||
admin_state_up = "true"
|
||||
external_network_id = "${var.external_net}"
|
||||
}
|
||||
|
||||
data "openstack_networking_router_v2" "k8s" {
|
||||
router_id = "${var.router_id}"
|
||||
count = "${var.use_neutron}" == 1 && "${var.router_id}" != null ? 1 : 0
|
||||
}
|
||||
|
||||
resource "openstack_networking_network_v2" "k8s" {
|
||||
name = "${var.network_name}"
|
||||
count = "${var.use_neutron}"
|
||||
@ -23,6 +28,6 @@ resource "openstack_networking_subnet_v2" "k8s" {
|
||||
|
||||
resource "openstack_networking_router_interface_v2" "k8s" {
|
||||
count = "${var.use_neutron}"
|
||||
router_id = "${openstack_networking_router_v2.k8s[count.index].id}"
|
||||
router_id = "%{if openstack_networking_router_v2.k8s != []}${openstack_networking_router_v2.k8s[count.index].id}%{else}${var.router_id}%{endif}"
|
||||
subnet_id = "${openstack_networking_subnet_v2.k8s[count.index].id}"
|
||||
}
|
||||
|
@ -1,11 +1,11 @@
|
||||
output "router_id" {
|
||||
value = "${element(concat(openstack_networking_router_v2.k8s.*.id, list("")), 0)}"
|
||||
value = "%{if var.use_neutron == 1} ${var.router_id == null ? element(concat(openstack_networking_router_v2.k8s.*.id, [""]), 0) : var.router_id} %{else} %{endif}"
|
||||
}
|
||||
|
||||
output "router_internal_port_id" {
|
||||
value = "${element(concat(openstack_networking_router_interface_v2.k8s.*.id, list("")), 0)}"
|
||||
value = "${element(concat(openstack_networking_router_interface_v2.k8s.*.id, [""]), 0)}"
|
||||
}
|
||||
|
||||
output "subnet_id" {
|
||||
value = "${element(concat(openstack_networking_subnet_v2.k8s.*.id, list("")), 0)}"
|
||||
value = "${element(concat(openstack_networking_subnet_v2.k8s.*.id, [""]), 0)}"
|
||||
}
|
||||
|
@ -13,3 +13,5 @@ variable "dns_nameservers" {
|
||||
variable "subnet_cidr" {}
|
||||
|
||||
variable "use_neutron" {}
|
||||
|
||||
variable "router_id" {}
|
||||
|
@ -3,8 +3,14 @@ variable "cluster_name" {
|
||||
}
|
||||
|
||||
variable "az_list" {
|
||||
description = "List of Availability Zones available in your OpenStack cluster"
|
||||
type = "list"
|
||||
description = "List of Availability Zones to use for masters in your OpenStack cluster"
|
||||
type = list(string)
|
||||
default = ["nova"]
|
||||
}
|
||||
|
||||
variable "az_list_node" {
|
||||
description = "List of Availability Zones to use for nodes in your OpenStack cluster"
|
||||
type = list(string)
|
||||
default = ["nova"]
|
||||
}
|
||||
|
||||
@ -213,4 +219,14 @@ variable "use_access_ip" {
|
||||
|
||||
variable "use_server_groups" {
|
||||
default = false
|
||||
}
|
||||
}
|
||||
|
||||
variable "router_id" {
|
||||
description = "uuid of an externally defined router to use"
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "k8s_nodes" {
|
||||
default = {}
|
||||
}
|
||||
|
||||
|
@ -357,7 +357,7 @@ def iter_host_ips(hosts, ips):
|
||||
'ansible_ssh_host': ip,
|
||||
})
|
||||
|
||||
if 'use_access_ip' in host[1]['metadata'] and ihost[1]['metadata']['use_access_ip'] == "0":
|
||||
if 'use_access_ip' in host[1]['metadata'] and host[1]['metadata']['use_access_ip'] == "0":
|
||||
host[1].pop('access_ip')
|
||||
|
||||
yield host
|
||||
|
@ -13,7 +13,7 @@
|
||||
/usr/local/share/ca-certificates/vault-ca.crt
|
||||
{%- elif ansible_os_family == "RedHat" -%}
|
||||
/etc/pki/ca-trust/source/anchors/vault-ca.crt
|
||||
{%- elif ansible_os_family in ["Coreos", "Container Linux by CoreOS"] -%}
|
||||
{%- elif ansible_os_family in ["Coreos", "Container Linux by CoreOS", "Flatcar", "Flatcar Container Linux by Kinvolk"] -%}
|
||||
/etc/ssl/certs/vault-ca.pem
|
||||
{%- endif %}
|
||||
|
||||
@ -25,7 +25,7 @@
|
||||
|
||||
- name: bootstrap/ca_trust | update ca-certificates (Debian/Ubuntu/CoreOS)
|
||||
command: update-ca-certificates
|
||||
when: vault_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Coreos", "Container Linux by CoreOS"]
|
||||
when: vault_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Coreos", "Container Linux by CoreOS", "Flatcar", "Flatcar Container Linux by Kinvolk"]
|
||||
|
||||
- name: bootstrap/ca_trust | update ca-certificates (RedHat)
|
||||
command: update-ca-trust extract
|
||||
|
@ -36,6 +36,7 @@
|
||||
{{ etcd_access_addresses.split(',') | first }}/v3alpha/kv/range
|
||||
register: vault_etcd_exists
|
||||
retries: 4
|
||||
until: vault_etcd_exists.status == 200
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
run_once: true
|
||||
when: not vault_is_running and vault_etcd_available
|
||||
|
@ -7,6 +7,7 @@
|
||||
* [Integration](docs/integration.md)
|
||||
* [Upgrades](/docs/upgrades.md)
|
||||
* [HA Mode](docs/ha-mode.md)
|
||||
* [Adding/replacing a node](docs/nodes.md)
|
||||
* [Large deployments](docs/large-deployments.md)
|
||||
* CNI
|
||||
* [Calico](docs/calico.md)
|
||||
@ -22,9 +23,9 @@
|
||||
* [Packet](/docs/packet.md)
|
||||
* [vSphere](/docs/vsphere.md)
|
||||
* Operating Systems
|
||||
* [Atomic](docs/atomic.md)
|
||||
* [Debian](docs/debian.md)
|
||||
* [Coreos](docs/coreos.md)
|
||||
* [Fedora CoreOS](docs/fcos.md)
|
||||
* [OpenSUSE](docs/opensuse.md)
|
||||
* Advanced
|
||||
* [Proxy](/docs/proxy.md)
|
||||
@ -36,4 +37,5 @@
|
||||
* Developers
|
||||
* [Test cases](docs/test_cases.md)
|
||||
* [Vagrant](docs/vagrant.md)
|
||||
* [CI Matrix](docs/ci.md)
|
||||
* [Roadmap](docs/roadmap.md)
|
||||
|
@ -68,7 +68,7 @@ Optional variables are located in the `inventory/sample/group_vars/all.yml`.
|
||||
Mandatory variables that are common for at least one role (or a node group) can be found in the
|
||||
`inventory/sample/group_vars/k8s-cluster.yml`.
|
||||
There are also role vars for docker, kubernetes preinstall and master roles.
|
||||
According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
|
||||
According to the [ansible docs](https://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
|
||||
those cannot be overridden from the group vars. In order to override, one should use
|
||||
the `-e` runtime flags (most simple way) or other layers described in the docs.
|
||||
|
||||
@ -137,6 +137,7 @@ The following tags are defined in playbooks:
|
||||
| upgrade | Upgrading, e.g. container images/binaries
|
||||
| upload | Distributing images/binaries across hosts
|
||||
| weave | Network plugin Weave
|
||||
| ingress_alb | AWS ALB Ingress Controller
|
||||
|
||||
Note: Use the ``bash scripts/gen_tags.sh`` command to generate a list of all
|
||||
tags found in the codebase. New tags will be listed with the empty "Used for"
|
||||
@ -181,4 +182,8 @@ bastion ansible_host=x.x.x.x
|
||||
```
|
||||
|
||||
For more information about Ansible and bastion hosts, read
|
||||
[Running Ansible Through an SSH Bastion Host](http://blog.scottlowe.org/2015/12/24/running-ansible-through-ssh-bastion-host/)
|
||||
[Running Ansible Through an SSH Bastion Host](https://blog.scottlowe.org/2015/12/24/running-ansible-through-ssh-bastion-host/)
|
||||
|
||||
## Mitogen
|
||||
|
||||
You can use [mitogen](mitogen.md) to speed up kubespray.
|
||||
|
@ -1,22 +0,0 @@
|
||||
# Atomic host bootstrap
|
||||
|
||||
Atomic host testing has been done with the network plugin flannel. Change the inventory var `kube_network_plugin: flannel`.
|
||||
|
||||
Note: Flannel is the only plugin that has currently been tested with atomic
|
||||
|
||||
## Vagrant
|
||||
|
||||
* For bootstrapping with Vagrant, use box centos/atomic-host or fedora/atomic-host
|
||||
* Update VagrantFile variable `local_release_dir` to `/var/vagrant/temp`.
|
||||
* Update `vm_memory = 2048` and `vm_cpus = 2`
|
||||
* Networking on vagrant hosts has to be brought up manually once they are booted.
|
||||
|
||||
```ShellSession
|
||||
vagrant ssh
|
||||
sudo /sbin/ifup enp0s8
|
||||
```
|
||||
|
||||
* For users of vagrant-libvirt download centos/atomic-host qcow2 format from <https://wiki.centos.org/SpecialInterestGroup/Atomic/Download/>
|
||||
* For users of vagrant-libvirt download fedora/atomic-host qcow2 format from <https://dl.fedoraproject.org/pub/alt/atomic/stable/>
|
||||
|
||||
Then you can proceed to [cluster deployment](#run-deployment)
|
87
docs/aws-ebs-csi.md
Normal file
@ -0,0 +1,87 @@
|
||||
# AWS EBS CSI Driver
|
||||
|
||||
The AWS EBS CSI driver allows you to provision EBS volumes for pods running on EC2 instances. The old in-tree AWS cloud provider is deprecated and will be removed in future versions of Kubernetes, so transitioning to the CSI driver is advised.
|
||||
|
||||
To enable AWS EBS CSI driver, uncomment the `aws_ebs_csi_enabled` option in `group_vars/all/aws.yml` and set it to `true`.
|
||||
|
||||
To set the number of replicas for the AWS CSI controller, you can change `aws_ebs_csi_controller_replicas` option in `group_vars/all/aws.yml`.
|
||||
|
||||
Make sure to add a role, for your EC2 instances hosting Kubernetes, that allows it to do the actions necessary to request a volume and attach it: [AWS CSI Policy](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/example-iam-policy.json)
|
||||
|
||||
If you want to deploy the AWS EBS storage class used with the CSI Driver, you should set `persistent_volumes_enabled` in `group_vars/k8s-cluster/k8s-cluster.yml` to `true`.
|
||||
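Taken together, a minimal sketch of these settings could look like the following (the replica count is only an illustrative value):

```yaml
# group_vars/all/aws.yml
aws_ebs_csi_enabled: true
aws_ebs_csi_controller_replicas: 1  # illustrative value

# group_vars/k8s-cluster/k8s-cluster.yml
persistent_volumes_enabled: true
```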
|
||||
You can now run the kubespray playbook (cluster.yml) to deploy Kubernetes over AWS EC2 with EBS CSI Driver enabled.
|
||||
|
||||
## Usage example
|
||||
|
||||
To check if AWS EBS CSI Driver is deployed properly, check that the ebs-csi pods are running:
|
||||
|
||||
```ShellSession
|
||||
$ kubectl -n kube-system get pods | grep ebs
|
||||
ebs-csi-controller-85d86bccc5-8gtq5 4/4 Running 4 40s
|
||||
ebs-csi-node-n4b99 3/3 Running 3 40s
|
||||
```
|
||||
|
||||
Check the associated storage class (if you enabled persistent_volumes):
|
||||
|
||||
```ShellSession
|
||||
$ kubectl get storageclass
|
||||
NAME PROVISIONER AGE
|
||||
ebs-sc ebs.csi.aws.com 45s
|
||||
```
|
||||
|
||||
You can run a PVC and an example Pod using this file `ebs-pod.yml`:
|
||||
|
||||
```yml
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: ebs-claim
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
storageClassName: ebs-sc
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: app
|
||||
spec:
|
||||
containers:
|
||||
- name: app
|
||||
image: centos
|
||||
command: ["/bin/sh"]
|
||||
args: ["-c", "while true; do echo $(date -u) >> /data/out.txt; sleep 5; done"]
|
||||
volumeMounts:
|
||||
- name: persistent-storage
|
||||
mountPath: /data
|
||||
volumes:
|
||||
- name: persistent-storage
|
||||
persistentVolumeClaim:
|
||||
claimName: ebs-claim
|
||||
```
|
||||
|
||||
Apply this conf to your cluster: ```kubectl apply -f ebs-pod.yml```
|
||||
|
||||
You should see the PVC provisioned and bound:
|
||||
|
||||
```ShellSession
|
||||
$ kubectl get pvc
|
||||
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
|
||||
ebs-claim Bound pvc-0034cb9e-1ddd-4b3f-bb9e-0b5edbf5194c 1Gi RWO ebs-sc 50s
|
||||
```
|
||||
|
||||
And the volume mounted to the example Pod (wait until the Pod is Running):
|
||||
|
||||
```ShellSession
|
||||
$ kubectl exec -it app -- df -h | grep data
|
||||
/dev/nvme1n1 1014M 34M 981M 4% /data
|
||||
```
|
||||
|
||||
## More info
|
||||
|
||||
For further information about the AWS EBS CSI Driver, you can refer to this page: [AWS EBS Driver](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/).
|
119
docs/azure-csi.md
Normal file
@ -0,0 +1,119 @@
|
||||
# Azure Disk CSI Driver
|
||||
|
||||
The Azure Disk CSI driver allows you to provision volumes for pods with a Kubernetes deployment over Azure Cloud. The CSI driver replaces the volume provisioning done by the in-tree Azure cloud provider, which is deprecated.
|
||||
|
||||
This documentation is an updated version of the in-tree Azure cloud provider documentation (azure.md).
|
||||
|
||||
To deploy Azure Disk CSI driver, uncomment the `azure_csi_enabled` option in `group_vars/all/azure.yml` and set it to `true`.
|
||||
|
||||
## Azure Disk CSI Storage Class
|
||||
|
||||
If you want to deploy the Azure Disk storage class to provision volumes dynamically, you should set `persistent_volumes_enabled` in `group_vars/k8s-cluster/k8s-cluster.yml` to `true`.
|
||||
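As a minimal sketch, the two settings mentioned so far would look like:

```yaml
# group_vars/all/azure.yml
azure_csi_enabled: true

# group_vars/k8s-cluster/k8s-cluster.yml
persistent_volumes_enabled: true
```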
|
||||
## Parameters
|
||||
|
||||
Before creating the instances you must first set the `azure_csi_` variables in the `group_vars/all.yml` file.
|
||||
|
||||
All of the values can be retrieved using the azure cli tool which can be downloaded here: <https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest>
|
||||
|
||||
After installation you have to run `az login` to get access to your account.
|
||||
|
||||
### azure\_csi\_tenant\_id + azure\_csi\_subscription\_id
|
||||
|
||||
Run `az account show` to retrieve your subscription id and tenant id:
|
||||
`azure_csi_tenant_id` -> tenantId field
|
||||
`azure_csi_subscription_id` -> id field
|
||||
|
||||
### azure\_csi\_location
|
||||
|
||||
The region your instances are located in; it can be something like `francecentral` or `norwayeast`. A full list of region names can be retrieved via `az account list-locations`.
|
||||
|
||||
### azure\_csi\_resource\_group
|
||||
|
||||
The name of the resource group your instances are in; a list of your resource groups can be retrieved via `az group list`.
|
||||
|
||||
Or you can do `az vm list | grep resourceGroup` and get the resource group corresponding to the VMs of your cluster.
|
||||
|
||||
The resource group name is not case sensitive.
|
||||
|
||||
### azure\_csi\_vnet\_name
|
||||
|
||||
The name of the virtual network your instances are in, can be retrieved via `az network vnet list`
|
||||
|
||||
### azure\_csi\_vnet\_resource\_group
|
||||
|
||||
The name of the resource group your vnet is in. It can be found by running `az network vnet list | grep resourceGroup` and picking the resource group corresponding to the vnet of your cluster.
|
||||
|
||||
### azure\_csi\_subnet\_name
|
||||
|
||||
The name of the subnet your instances are in, can be retrieved via `az network vnet subnet list --resource-group RESOURCE_GROUP --vnet-name VNET_NAME`
|
||||
|
||||
### azure\_csi\_security\_group\_name
|
||||
|
||||
The name of the network security group your instances are in, can be retrieved via `az network nsg list`
|
||||
|
||||
### azure\_csi\_aad\_client\_id + azure\_csi\_aad\_client\_secret
|
||||
|
||||
These will have to be generated first:
|
||||
|
||||
- Create an Azure AD Application with:
|
||||
`az ad app create --display-name kubespray --identifier-uris http://kubespray --homepage http://kubespray.com --password CLIENT_SECRET`
|
||||
|
||||
Display name, identifier-uri, homepage and the password can be chosen
|
||||
|
||||
Note the AppId in the output.
|
||||
|
||||
- Create Service principal for the application with:
|
||||
`az ad sp create --id AppId`
|
||||
|
||||
This is the AppId from the last command
|
||||
|
||||
- Create the role assignment with:
|
||||
`az role assignment create --role "Owner" --assignee http://kubespray --subscription SUBSCRIPTION_ID`
|
||||
|
||||
azure\_csi\_aad\_client\_id must be set to the AppId, azure\_csi\_aad\_client\_secret is your chosen secret.
|
||||
|
||||
### azure\_csi\_use\_instance\_metadata
|
||||
|
||||
Use instance metadata service where possible. Boolean value.
|
||||
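Pulling the parameters above together, an illustrative `group_vars/all/azure.yml` fragment might look like the sketch below; every value is a placeholder to replace with the output of the `az` commands described above:

```yaml
# group_vars/all/azure.yml -- placeholder values only
azure_csi_tenant_id: "00000000-0000-0000-0000-000000000000"
azure_csi_subscription_id: "00000000-0000-0000-0000-000000000000"
azure_csi_location: "francecentral"
azure_csi_resource_group: "my-resource-group"
azure_csi_vnet_name: "my-vnet"
azure_csi_vnet_resource_group: "my-vnet-resource-group"
azure_csi_subnet_name: "my-subnet"
azure_csi_security_group_name: "my-nsg"
azure_csi_aad_client_id: "AppId-from-az-ad-app-create"
azure_csi_aad_client_secret: "CLIENT_SECRET"
azure_csi_use_instance_metadata: true
```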
|
||||
## Test the Azure Disk CSI driver
|
||||
|
||||
To test the dynamic provisioning using Azure CSI driver, make sure to have the storage class deployed (through persistent volumes), and apply the following manifest:
|
||||
|
||||
```yml
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: pvc-azuredisk
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
storageClassName: disk.csi.azure.com
|
||||
---
|
||||
kind: Pod
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: nginx-azuredisk
|
||||
spec:
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
containers:
|
||||
- image: nginx
|
||||
name: nginx-azuredisk
|
||||
command:
|
||||
- "/bin/sh"
|
||||
- "-c"
|
||||
- while true; do echo $(date) >> /mnt/azuredisk/outfile; sleep 1; done
|
||||
volumeMounts:
|
||||
- name: azuredisk
|
||||
mountPath: "/mnt/azuredisk"
|
||||
volumes:
|
||||
- name: azuredisk
|
||||
persistentVolumeClaim:
|
||||
claimName: pvc-azuredisk
|
||||
```
|
@ -1,57 +1,61 @@
|
||||
# Azure
|
||||
|
||||
To deploy Kubernetes on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'azure'`.
|
||||
To deploy Kubernetes on [Azure](https://azure.microsoft.com) uncomment the `cloud_provider` option in `group_vars/all/all.yml` and set it to `'azure'`.
|
||||
|
||||
All your instances are required to run in a resource group and a routing table has to be attached to the subnet your instances are in.
|
||||
|
||||
Not all features are supported yet though, for a list of the current status have a look [here](https://github.com/colemickens/azure-kubernetes-status)
|
||||
Not all features are supported yet though, for a list of the current status have a look [here](https://github.com/Azure/AKS)
|
||||
|
||||
## Parameters
|
||||
|
||||
Before creating the instances you must first set the `azure_` variables in the `group_vars/all.yml` file.
|
||||
Before creating the instances you must first set the `azure_` variables in the `group_vars/all/all.yml` file.
|
||||
|
||||
All of the values can be retrieved using the azure cli tool which can be downloaded here: <https://docs.microsoft.com/en-gb/azure/xplat-cli-install>
|
||||
After installation you have to run `azure login` to get access to your account.
|
||||
After installation you have to run `az login` to get access to your account.
|
||||
|
||||
### azure\_tenant\_id + azure\_subscription\_id
|
||||
|
||||
run `azure account show` to retrieve your subscription id and tenant id:
|
||||
run `az account show` to retrieve your subscription id and tenant id:
|
||||
`azure_tenant_id` -> Tenant ID field
|
||||
`azure_subscription_id` -> ID field
|
||||
|
||||
### azure\_location
|
||||
|
||||
The region your instances are located, can be something like `westeurope` or `westcentralus`. A full list of region names can be retrieved via `azure location list`
|
||||
The region your instances are located in; it can be something like `westeurope` or `westcentralus`. A full list of region names can be retrieved via `az account list-locations`.
|
||||
|
||||
### azure\_resource\_group
|
||||
|
||||
The name of the resource group your instances are in, can be retrieved via `azure group list`
|
||||
The name of the resource group your instances are in, can be retrieved via `az group list`
|
||||
|
||||
### azure\_vmtype
|
||||
|
||||
The type of the VM. Supported values are `standard` or `vmss`. If the VM is of type `Virtual Machines`, the value is `standard`. If the VM is part of `Virtual Machine Scale Sets`, the value is `vmss`.
|
||||
|
||||
### azure\_vnet\_name
|
||||
|
||||
The name of the virtual network your instances are in, can be retrieved via `azure network vnet list`
|
||||
The name of the virtual network your instances are in, can be retrieved via `az network vnet list`
|
||||
|
||||
### azure\_subnet\_name
|
||||
|
||||
The name of the subnet your instances are in, can be retrieved via `azure network vnet subnet list --resource-group RESOURCE_GROUP --vnet-name VNET_NAME`
|
||||
The name of the subnet your instances are in, can be retrieved via `az network vnet subnet list --resource-group RESOURCE_GROUP --vnet-name VNET_NAME`
|
||||
|
||||
### azure\_security\_group\_name
|
||||
|
||||
The name of the network security group your instances are in, can be retrieved via `azure network nsg list`
|
||||
The name of the network security group your instances are in, can be retrieved via `az network nsg list`
|
||||
|
||||
### azure\_aad\_client\_id + azure\_aad\_client\_secret
|
||||
|
||||
These will have to be generated first:
|
||||
|
||||
- Create an Azure AD Application with:
|
||||
`azure ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET`
|
||||
`az ad app create --display-name kubernetes --identifier-uris http://kubernetes --homepage http://example.com --password CLIENT_SECRET`
|
||||
display name, identifier-uri, homepage and the password can be chosen
|
||||
Note the AppId in the output.
|
||||
- Create Service principal for the application with:
|
||||
`azure ad sp create --id AppId`
|
||||
`az ad sp create --id AppId`
|
||||
This is the AppId from the last command
|
||||
- Create the role assignment with:
|
||||
`azure role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID`
|
||||
`az role assignment create --role "Owner" --assignee http://kubernetes --subscription SUBSCRIPTION_ID`
|
||||
|
||||
azure\_aad\_client\_id must be set to the AppId, azure\_aad\_client\_secret is your chosen secret.
|
||||
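Putting the parameters above together, an illustrative `group_vars/all/all.yml` fragment could look like the sketch below; every value is a placeholder to replace with your own output from the `az` commands:

```yaml
# group_vars/all/all.yml -- placeholder values only
cloud_provider: 'azure'
azure_tenant_id: "00000000-0000-0000-0000-000000000000"
azure_subscription_id: "00000000-0000-0000-0000-000000000000"
azure_location: "westeurope"
azure_resource_group: "my-resource-group"
azure_vmtype: "standard"
azure_vnet_name: "my-vnet"
azure_subnet_name: "my-subnet"
azure_security_group_name: "my-nsg"
azure_aad_client_id: "AppId-from-az-ad-app-create"
azure_aad_client_secret: "CLIENT_SECRET"
```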
|
||||
|
@ -12,55 +12,55 @@ Check if the calico-node container is running
|
||||
docker ps | grep calico
|
||||
```
|
||||
|
||||
The **calicoctl** command allows to check the status of the network workloads.
|
||||
The **calicoctl.sh** wrapper script is pre-configured with access credentials for the calicoctl command and allows you to check the status of the network workloads.
|
||||
|
||||
* Check the status of Calico nodes
|
||||
|
||||
```ShellSession
|
||||
calicoctl node status
|
||||
calicoctl.sh node status
|
||||
```
|
||||
|
||||
or for versions prior to *v1.0.0*:
|
||||
|
||||
```ShellSession
|
||||
calicoctl status
|
||||
calicoctl.sh status
|
||||
```
|
||||
|
||||
* Show the configured network subnet for containers
|
||||
|
||||
```ShellSession
|
||||
calicoctl get ippool -o wide
|
||||
calicoctl.sh get ippool -o wide
|
||||
```
|
||||
|
||||
or for versions prior to *v1.0.0*:
|
||||
|
||||
```ShellSession
|
||||
calicoctl pool show
|
||||
calicoctl.sh pool show
|
||||
```
|
||||
|
||||
* Show the workloads (IP addresses of containers and where they are located)
|
||||
|
||||
```ShellSession
|
||||
calicoctl get workloadEndpoint -o wide
|
||||
calicoctl.sh get workloadEndpoint -o wide
|
||||
```
|
||||
|
||||
and
|
||||
|
||||
```ShellSession
|
||||
calicoctl get hostEndpoint -o wide
|
||||
calicoctl.sh get hostEndpoint -o wide
|
||||
```
|
||||
|
||||
or for versions prior to *v1.0.0*:
|
||||
|
||||
```ShellSession
|
||||
calicoctl endpoint show --detail
|
||||
calicoctl.sh endpoint show --detail
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Optional : Define network backend
|
||||
|
||||
In some cases you may want to define Calico network backend. Allowed values are 'bird', 'gobgp' or 'none'. Bird is a default value.
|
||||
In some cases you may want to define the Calico network backend. Allowed values are `bird`, `vxlan` or `none`. `bird` is the default value.
|
||||
|
||||
To re-define you need to edit the inventory and add a group variable `calico_network_backend`
|
||||
|
||||
@ -199,9 +199,29 @@ To re-define health host please set the following variable in your inventory:
|
||||
calico_healthhost: "0.0.0.0"
|
||||
```
|
||||
|
||||
## Config encapsulation for cross server traffic
|
||||
|
||||
Calico supports two types of encapsulation: [VXLAN and IP in IP](https://docs.projectcalico.org/v3.11/networking/vxlan-ipip). VXLAN is supported in some environments where IP in IP is not (for example, Azure).
|
||||
|
||||
*IP in IP* and *VXLAN* are mutually exclusive modes.
|
||||
|
||||
Configure IP in IP mode. Possible values are `Always`, `CrossSubnet`, `Never`.
|
||||
|
||||
```yml
|
||||
calico_ipip_mode: 'Always'
|
||||
```
|
||||
|
||||
Configure VXLAN mode. Possible values are `Always`, `CrossSubnet`, `Never`.
|
||||
|
||||
```yml
|
||||
calico_vxlan_mode: 'Never'
|
||||
```
|
||||
|
||||
If you use VXLAN mode, BGP networking is not required. You can disable BGP to reduce the moving parts in your cluster by setting `calico_network_backend: vxlan`.
|
||||
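For example, a minimal sketch of group vars for a VXLAN-only setup without BGP (values chosen purely for illustration) could be:

```yaml
calico_ipip_mode: 'Never'
calico_vxlan_mode: 'Always'
calico_network_backend: vxlan
```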
|
||||
## Cloud providers configuration
|
||||
|
||||
Please refer to the official documentation, for example [GCE configuration](http://docs.projectcalico.org/v1.5/getting-started/docker/installation/gce) requires a security rule for calico ip-ip tunnels. Note, calico is always configured with ``ipip: true`` if the cloud provider was defined.
|
||||
Please refer to the official documentation, for example [GCE configuration](http://docs.projectcalico.org/v1.5/getting-started/docker/installation/gce) requires a security rule for calico ip-ip tunnels. Note, calico is always configured with ``calico_ipip_mode: Always`` if the cloud provider was defined.
|
||||
|
||||
### Optional : Ignore kernel's RPF check setting
|
||||
|
||||
|
9
docs/centos8.md
Normal file
@ -0,0 +1,9 @@
|
||||
# RHEL / CentOS 8
|
||||
|
||||
RHEL / CentOS 8 ships only with iptables-nft (i.e. without iptables-legacy).
|
||||
The only tested configuration for now is using Calico CNI
|
||||
You need to use K8S 1.17+ and add `calico_iptables_backend: "NFT"` to your configuration.
|
||||
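For example, a minimal sketch in your group vars:

```yaml
calico_iptables_backend: "NFT"
```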
|
||||
If you have containers that are using iptables in the host network namespace (`hostNetwork=true`),
|
||||
you need to ensure they are using iptables-nft.
|
||||
An example of how k8s does the autodetection can be found [in this PR](https://github.com/kubernetes/kubernetes/pull/82966)
|
54
docs/ci.md
Normal file
@ -0,0 +1,54 @@
|
||||
# CI test coverage
|
||||
|
||||
To generate this Matrix run `./tests/scripts/md-table/main.py`
|
||||
|
||||
## docker
|
||||
|
||||
| OS / CNI | calico | canal | cilium | contiv | flannel | kube-ovn | kube-router | macvlan | weave |
|
||||
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- |
|
||||
amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :white_check_mark: |
|
||||
centos8 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
|
||||
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
debian9 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
|
||||
fedora30 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
|
||||
fedora31 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
|
||||
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
oracle7 | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: |
|
||||
ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
|
||||
## crio
|
||||
|
||||
| OS / CNI | calico | canal | cilium | contiv | flannel | kube-ovn | kube-router | macvlan | weave |
|
||||
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- |
|
||||
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora30 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora31 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
|
||||
## containerd
|
||||
|
||||
| OS / CNI | calico | canal | cilium | contiv | flannel | kube-ovn | kube-router | macvlan | weave |
|
||||
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- |
|
||||
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
centos7 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
|
||||
centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora30 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
fedora31 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
||||
ubuntu18 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
|
||||
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
|
@ -1,4 +1,4 @@
|
||||
# Comparaison
|
||||
# Comparison
|
||||
|
||||
## Kubespray vs [Kops](https://github.com/kubernetes/kops)
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Contiv
|
||||
|
||||
Here is the [Contiv documentation](http://contiv.github.io/documents/).
|
||||
Here is the [Contiv documentation](https://contiv.github.io/documents/).
|
||||
|
||||
## Administrate Contiv
|
||||
|
||||
|
@ -19,9 +19,14 @@ skip_downloads: false
|
||||
## k8s-cluster.yml
|
||||
|
||||
```yaml
|
||||
etcd_deployment_type: host
|
||||
kubelet_deployment_type: host
|
||||
container_manager: crio
|
||||
```
|
||||
|
||||
## etcd.yml
|
||||
|
||||
```yaml
|
||||
etcd_deployment_type: host
|
||||
```
|
||||
|
||||
[CRI-O]: https://cri-o.io/
|
||||
|
@ -1,7 +1,7 @@
|
||||
# K8s DNS stack by Kubespray
|
||||
|
||||
For K8s cluster nodes, Kubespray configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
|
||||
[cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md)
|
||||
For K8s cluster nodes, Kubespray configures a [Kubernetes DNS](https://kubernetes.io/docs/admin/dns/)
|
||||
[cluster add-on](https://releases.k8s.io/master/cluster/addons/README.md)
|
||||
to serve as an authoritative DNS server for a given ``dns_domain`` and its
|
||||
``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).
|
||||
|
||||
@ -42,6 +42,36 @@ DNS servers in early cluster deployment when no cluster DNS is available yet.
|
||||
|
||||
## DNS modes supported by Kubespray
|
||||
|
||||
### coredns_external_zones
|
||||
|
||||
Array of optional external zones to coredns forward queries to. It's injected into
|
||||
`coredns`' config file before default kubernetes zone. Use it as an optimization for well-known zones and/or internal-only
|
||||
domains, i.e. VPN for internal networks (default is unset)
|
||||
|
||||
Example:
|
||||
|
||||
```yaml
|
||||
coredns_external_zones:
|
||||
- zones:
|
||||
- example.com
|
||||
- example.io:1053
|
||||
nameservers:
|
||||
- 1.1.1.1
|
||||
- 2.2.2.2
|
||||
cache: 5
|
||||
- zones:
|
||||
- https://mycompany.local:4453
|
||||
nameservers:
|
||||
- 192.168.0.53
|
||||
cache: 0
|
||||
```
|
||||
|
||||
or as INI
|
||||
|
||||
```ini
|
||||
coredns_external_zones=[{"cache": 30,"zones":["example.com","example.io:453"],"nameservers":["1.1.1.1","2.2.2.2"]}]
|
||||
```
|
||||
|
||||
You can modify how Kubespray sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
|
||||
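For example, a sketch using what are (at the time of writing) the default values; see the sections below for the full list of supported options:

```yaml
dns_mode: coredns
resolvconf_mode: docker_dns
```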
|
||||
### dns_mode
|
||||
@ -133,6 +163,25 @@ More information on the rationale behind this implementation can be found [here]
|
||||
|
||||
**As per the 2.10 release, Nodelocal DNS cache is enabled by default.**
|
||||
|
||||
### External zones
|
||||
|
||||
It's possible to extend the `nodelocaldns` configuration by adding an array of external zones. For example:
|
||||
|
||||
```yaml
|
||||
nodelocaldns_external_zones:
|
||||
- zones:
|
||||
- example.com
|
||||
- example.io:1053
|
||||
nameservers:
|
||||
- 1.1.1.1
|
||||
- 2.2.2.2
|
||||
cache: 5
|
||||
- zones:
|
||||
- https://mycompany.local:4453
|
||||
nameservers:
|
||||
- 192.168.0.53
|
||||
```
|
||||
|
||||
## Limitations
|
||||
|
||||
* Kubespray has yet ways to configure Kubedns addon to forward requests SkyDns can
|
||||
|
@ -13,11 +13,13 @@ There is also a "pull once, push many" mode as well:
|
||||
|
||||
NOTE: When `download_run_once` is true and `download_localhost` is false, all downloads will be done on the delegate node, including downloads for container images that are not required on that node. As a consequence, the storage required on that node will probably be more than if download_run_once was false, because all images will be loaded into the docker instance on that node, instead of just the images required for that node.
|
||||
|
||||
:warning: [`download_run_once: true` is only supported with `container_manager: docker`](https://github.com/containerd/containerd/issues/4075) :warning:
|
||||
|
||||
On caching:
|
||||
|
||||
* When `download_run_once` is `True`, all downloaded files will be cached locally in `download_cache_dir`, which defaults to `/tmp/kubespray_cache`. On subsequent provisioning runs, this local cache will be used to provision the nodes, minimizing bandwidth usage and improving provisioning time. Expect about 800MB of disk space to be used on the ansible node for the cache. Disk space required for the image cache on the kubernetes nodes is as much as is needed for the largest image, which is currently slightly less than 150MB.
|
||||
* By default, if `download_run_once` is false, kubespray will not retrieve the downloaded images and files from the remote node to the local cache, or use that cache to pre-provision those nodes. To force the use of the cache, set `download_force_cache` to `True`.
|
||||
* By default, cached images that are used to pre-provision the remote nodes will be deleted from the remote nodes after use, to save disk space. Setting download_keep_remote_cache will prevent the files from being deleted. This can be useful while developing kubespray, as it can decrease provisioning times. As a consequence, the required storage for images on the remote nodes will increase from 150MB to about 550MB, which is currently the combined size of all required container images.
|
||||
* By default, if `download_run_once` is false, kubespray will not retrieve the downloaded images and files from the download delegate node to the local cache, or use that cache to pre-provision those nodes. If you have a full cache with container images and files and you don't need to download anything, but still want to use the cache, set `download_force_cache` to `True` (see the combined example below).
|
||||
* By default, cached images that are used to pre-provision the remote nodes will be deleted from the remote nodes after use, to save disk space. Setting `download_keep_remote_cache` will prevent the files from being deleted. This can be useful while developing kubespray, as it can decrease provisioning times. As a consequence, the required storage for images on the remote nodes will increase from 150MB to about 550MB, which is currently the combined size of all required container images.
|
||||
|
||||
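A minimal sketch of how these cache-related settings might be combined in your group vars (variable names as described above; values are illustrative):

```yaml
# pull artifacts once and cache them on the Ansible node
download_run_once: true
download_cache_dir: /tmp/kubespray_cache  # default location
# force use of an already populated local cache even when download_run_once is false
download_force_cache: true
# keep pre-provisioned images on the remote nodes (useful while developing kubespray)
download_keep_remote_cache: true
```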
Container images and binary files are described by the vars like ``foo_version``,
|
||||
``foo_download_url``, ``foo_checksum`` for binaries and ``foo_image_repo``,
|
||||
|
90
docs/fcos.md
Normal file
@ -0,0 +1,90 @@
|
||||
# Fedora CoreOS
|
||||
|
||||
Tested with stable version 31.20200223.3.0.
|
||||
|
||||
Because package installation with `rpm-ostree` requires a reboot, the playbook may fail during bootstrap.
|
||||
If that happens, simply run the playbook again.
|
||||
|
||||
## Containers
|
||||
|
||||
Tested with
|
||||
|
||||
- docker
|
||||
- crio
|
||||
|
||||
### docker
|
||||
|
||||
The OS base packages contain docker.
|
||||
|
||||
### cri-o
|
||||
|
||||
To use `cri-o`, disable the docker service with ignition:
|
||||
|
||||
```yaml
|
||||
#workaround, see https://github.com/coreos/fedora-coreos-tracker/issues/229
|
||||
systemd:
|
||||
units:
|
||||
- name: docker.service
|
||||
enabled: false
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=disable docker
|
||||
|
||||
[Service]
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
|
||||
## Network
|
||||
|
||||
### calico
|
||||
|
||||
To use calico, create a sysctl file with ignition:
|
||||
|
||||
```yaml
|
||||
files:
|
||||
- path: /etc/sysctl.d/reverse-path-filter.conf
|
||||
contents:
|
||||
inline: |
|
||||
net.ipv4.conf.all.rp_filter=1
|
||||
```
|
||||
|
||||
## libvirt setup
|
||||
|
||||
### Prepare
|
||||
|
||||
Prepare an ignition file and serve it via HTTP (e.g. `python -m http.server`).
|
||||
|
||||
```json
|
||||
{
|
||||
"ignition": {
|
||||
"version": "3.0.0"
|
||||
},
|
||||
|
||||
"passwd": {
|
||||
"users": [
|
||||
{
|
||||
"name": "ansibleUser",
|
||||
"sshAuthorizedKeys": [
|
||||
"ssh-rsa ..publickey.."
|
||||
],
|
||||
"groups": [ "wheel" ]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### create guest
|
||||
|
||||
```shell script
|
||||
fcos_version=31.20200223.3.0
|
||||
kernel=https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/${fcos_version}/x86_64/fedora-coreos-${fcos_version}-live-kernel-x86_64
|
||||
initrd=https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/${fcos_version}/x86_64/fedora-coreos-${fcos_version}-live-initramfs.x86_64.img
|
||||
ignition_url=http://mywebserver/fcos.ign
|
||||
kernel_args="ip=dhcp rd.neednet=1 console=tty0 coreos.liveiso=/ console=ttyS0 coreos.inst.install_dev=/dev/sda coreos.inst.stream=stable coreos.inst.ignition_url=${ignition_url}"
|
||||
sudo virt-install --name ${machine_name} --ram 4048 --graphics=none --vcpus 2 --disk size=20 \
|
||||
--network bridge=virbr0 \
|
||||
--install kernel=${kernel},initrd=${initrd},kernel_args_overwrite=yes,kernel_args="${kernel_args}"
|
||||
```
|
@ -1,5 +1,11 @@
|
||||
# Flannel
|
||||
|
||||
Flannel is a network fabric for containers, designed for Kubernetes
|
||||
|
||||
**Warning:** You may encounter this [bug](https://github.com/coreos/flannel/pull/1282) with the `VXLAN` backend. While waiting for a newer Flannel version, the current workaround (`ethtool --offload flannel.1 rx off tx off`) is showcased in the kubespray [networking test](tests/testcases/040_check-network-adv.yml:31).
|
||||
|
||||
## Verifying flannel install
|
||||
|
||||
* Flannel configuration file should have been created there
|
||||
|
||||
```ShellSession
|
||||
|
77
docs/gcp-pd-csi.md
Normal file
@ -0,0 +1,77 @@
|
||||
# GCP Persistent Disk CSI Driver
|
||||
|
||||
The GCP Persistent Disk CSI driver allows you to provision volumes for pods with a Kubernetes deployment over Google Cloud Platform. The CSI driver replaces the volume provisioning done by the in-tree cloud provider, which is deprecated.
|
||||
|
||||
To deploy GCP Persistent Disk CSI driver, uncomment the `gcp_pd_csi_enabled` option in `group_vars/all/gcp.yml` and set it to `true`.
|
||||
|
||||
## GCP Persistent Disk Storage Class
|
||||
|
||||
If you want to deploy the GCP Persistent Disk storage class to provision volumes dynamically, you should set `persistent_volumes_enabled` in `group_vars/k8s-cluster/k8s-cluster.yml` to `true`.
|
||||
|
||||
## GCP credentials
|
||||
|
||||
In order for the CSI driver to provision disks, you need to create for it a service account on GCP with the appropriate permissions.
|
||||
|
||||
Follow these steps to configure it:
|
||||
|
||||
```ShellSession
|
||||
# This will open a web page for you to authenticate
|
||||
gcloud auth login
|
||||
export PROJECT=nameofmyproject
|
||||
gcloud config set project $PROJECT
|
||||
|
||||
git clone https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver $GOPATH/src/sigs.k8s.io/gcp-compute-persistent-disk-csi-driver
|
||||
|
||||
export GCE_PD_SA_NAME=my-gce-pd-csi-sa
|
||||
export GCE_PD_SA_DIR=/my/safe/credentials/directory
|
||||
|
||||
./deploy/setup-project.sh
|
||||
```
|
||||
|
||||
The above will create a file named `cloud-sa.json` in the specified `GCE_PD_SA_DIR`. This file contains the service account with the appropriate credentials for the CSI driver to perform actions on GCP to request disks for pods.
|
||||
|
||||
You need to provide this file's path through the variable `gcp_pd_csi_sa_cred_file` in `inventory/mycluster/group_vars/all/gcp.yml`
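For example, reusing the credentials directory from the steps above:

```yml
gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json"
```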
|
||||
|
||||
You can now deploy Kubernetes with Kubespray over GCP.
|
||||
|
||||
## GCP PD CSI Driver test
|
||||
|
||||
To test the dynamic provisioning using GCP PD CSI driver, make sure to have the storage class deployed (through persistent volumes), and apply the following manifest:
|
||||
|
||||
```yml
|
||||
---
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: podpvc
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
storageClassName: csi-gce-pd
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: web-server
|
||||
spec:
|
||||
containers:
|
||||
- name: web-server
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- mountPath: /var/lib/www/html
|
||||
name: mypvc
|
||||
volumes:
|
||||
- name: mypvc
|
||||
persistentVolumeClaim:
|
||||
claimName: podpvc
|
||||
readOnly: false
|
||||
```
|
||||
|
||||
## GCP PD documentation
|
||||
|
||||
You can find the official GCP Persistent Disk CSI driver installation documentation here: [GCP PD CSI Driver](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver/blob/master/docs/kubernetes/user-guides/driver-install.md)
|
@ -36,7 +36,7 @@ ansible-playbook -i inventory/mycluster/hosts.yml cluster.yml -b -v \
|
||||
--private-key=~/.ssh/private_key
|
||||
```
|
||||
|
||||
See more details in the [ansible guide](ansible.md).
|
||||
See more details in the [ansible guide](docs/ansible.md).
|
||||
|
||||
### Adding nodes
|
||||
|
||||
@ -81,37 +81,41 @@ kube-apiserver via port 8080. A kubeconfig file is not necessary in this case,
|
||||
because kubectl will use <http://localhost:8080> to connect. The kubeconfig files
|
||||
generated will point to localhost (on kube-masters) and kube-node hosts will
|
||||
connect either to a localhost nginx proxy or to a loadbalancer if configured.
|
||||
More details on this process are in the [HA guide](ha-mode.md).
|
||||
More details on this process are in the [HA guide](docs/ha-mode.md).
|
||||
|
||||
Kubespray permits connecting to the cluster remotely on any IP of any
|
||||
kube-master host on port 6443 by default. However, this requires
|
||||
authentication. One could generate a kubeconfig based on one installed
|
||||
kube-master hosts (needs improvement) or connect with a username and password.
|
||||
By default, a user with admin rights is created, named `kube`.
|
||||
The password can be viewed after deployment by looking at the file
|
||||
`{{ credentials_dir }}/kube_user.creds` (`credentials_dir` is set to `{{ inventory_dir }}/credentials` by default). This contains a randomly generated
|
||||
password. If you wish to set your own password, just precreate/modify this
|
||||
file yourself.
|
||||
authentication. One can get a kubeconfig from kube-master hosts
|
||||
(see [below](#accessing-kubernetes-api)) or connect with a [username and password](vars.md#user-accounts).
|
||||
|
||||
For more information on kubeconfig and accessing a Kubernetes cluster, refer to
|
||||
the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/).
|
||||
|
||||
## Accessing Kubernetes Dashboard
|
||||
|
||||
As of kubernetes-dashboard v1.7.x:
|
||||
Supported version is kubernetes-dashboard v2.0.x:
|
||||
|
||||
- New login options that use apiserver auth proxying of token/basic/kubeconfig by default
|
||||
- Requires RBAC in authorization\_modes
|
||||
- Login options are: token/kubeconfig by default; basic can be enabled with the `kube_basic_auth: true` inventory variable - not recommended because it requires an ABAC api-server, which is not tested by the kubespray team
|
||||
- Deployed by default in the "kube-system" namespace; this can be overridden with `dashboard_namespace: kubernetes-dashboard` in the inventory
|
||||
- Only serves over https
|
||||
- No longer available at <https://first_master:6443/ui> until apiserver is updated with the https proxy URL
|
||||
|
||||
If the variable `dashboard_enabled` is set (default is true), then you can access the Kubernetes Dashboard at the following URL; you will be prompted for credentials:
|
||||
<https://first_master:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login>
|
||||
Access is described in the [dashboard docs](https://github.com/kubernetes/dashboard/blob/master/docs/user/accessing-dashboard/1.7.x-and-above.md). With kubespray's default deployment in the kube-system namespace, instead of kubernetes-dashboard:
|
||||
|
||||
Or you can run 'kubectl proxy' from your local machine to access dashboard in your browser from:
|
||||
<http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login>
|
||||
- Proxy URL is <http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#/login>
|
||||
- kubectl commands must be run with "-n kube-system"
|
||||
|
||||
It is recommended to access dashboard from behind a gateway (like Ingress Controller) that enforces an authentication token. Details and other access options here: <https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above>
|
||||
Accessing through Ingress is highly recommended. For proxy access, please note that the proxy must listen on [localhost](https://github.com/kubernetes/dashboard/issues/692#issuecomment-220492484) (`proxy --address="x.x.x.x"` will not work)
|
||||
|
||||
For token authentication, a guide to creating a Service Account is provided in the [dashboard sample user](https://github.com/kubernetes/dashboard/blob/master/docs/user/access-control/creating-sample-user.md) doc. Still, take care of the default namespace.
|
||||
|
||||
Access can also be achieved via an ssh tunnel on a master:
|
||||
|
||||
```bash
|
||||
# localhost:8001 will be forwarded to master-1's own localhost:8001
|
||||
ssh -L8001:localhost:8001 user@master-1
|
||||
sudo -i
|
||||
kubectl proxy
|
||||
```
|
||||
|
||||
## Accessing Kubernetes API
|
||||
|
||||
@ -121,6 +125,7 @@ host and can optionally be configured on your ansible host by setting
|
||||
|
||||
- If `kubectl_localhost` is enabled, `kubectl` will be downloaded onto `/usr/local/bin/` and set up with bash completion. A helper script `inventory/mycluster/artifacts/kubectl.sh` is also created for use with the `admin.conf` below.
|
||||
- If `kubeconfig_localhost` is enabled, `admin.conf` will appear in the `inventory/mycluster/artifacts/` directory after deployment.
|
||||
- The location where these files are downloaded to can be configured via the `artifacts_dir` variable.
|
||||
|
||||
You can see a list of nodes by running the following commands:
|
||||
|
||||
|
@ -53,12 +53,12 @@ an example configuration for a HAProxy service acting as an external LB:
|
||||
```raw
|
||||
listen kubernetes-apiserver-https
|
||||
bind <VIP>:8383
|
||||
option ssl-hello-chk
|
||||
mode tcp
|
||||
option log-health-checks
|
||||
timeout client 3h
|
||||
timeout server 3h
|
||||
server master1 <IP1>:6443
|
||||
server master2 <IP2>:6443
|
||||
server master1 <IP1>:6443 check check-ssl verify none inter 10000
|
||||
server master2 <IP2>:6443 check check-ssl verify none inter 10000
|
||||
balance roundrobin
|
||||
```
|
||||
|
||||
|
@ -7,7 +7,7 @@
|
||||
|
||||
2. Add the **forked repo** as a submodule to the desired folder in your existing ansible repo (for example `3d/kubespray`):
|
||||
```git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray```
|
||||
Git will create _.gitmodules_ file in your existent ansible repo:
|
||||
Git will create a `.gitmodules` file in your existing ansible repo:
|
||||
|
||||
```ini
|
||||
[submodule "3d/kubespray"]
|
||||
@ -115,7 +115,7 @@ Branch name should be self explaining to you, adding date and/or index will help
|
||||
git cherry-pick <COMMIT_HASH>
|
||||
```
|
||||
|
||||
7. If your have several temporary-stage commits - squash them using [```git rebase -i```](http://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit)
|
||||
7. If you have several temporary-stage commits - squash them using [```git rebase -i```](https://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit)
|
||||
You could also use interactive rebase (```git rebase -i HEAD~10```) to delete commits which you don't want to contribute to the original repo.
|
||||
|
||||
8. When your changes are in place, you need to check the upstream repo one more time because it could have changed during your work.
|
||||
|
@ -4,6 +4,12 @@ Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It off
|
||||
|
||||
For more information please check [Kube-OVN documentation](https://github.com/alauda/kube-ovn)
|
||||
|
||||
**Warning:** The kernel version (`cat /proc/version`) needs to be different from `3.10.0-862`, or kube-ovn won't start and will print this message:
|
||||
|
||||
```bash
|
||||
kernel version 3.10.0-862 has a nat related bug that will affect ovs function, please update to a version greater than 3.10.0-898
|
||||
```
|
||||
|
||||
## How to use it
|
||||
|
||||
Enable kube-ovn in `group_vars/k8s-cluster/k8s-cluster.yml`
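A minimal sketch of that change, assuming the CNI is selected through the usual `kube_network_plugin` variable:

```yml
kube_network_plugin: kube-ovn
```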
|
||||
|
@ -12,14 +12,14 @@ By default the normal behavior looks like:
|
||||
1. Kubelet updates its status to the apiserver periodically, as specified by
|
||||
`--node-status-update-frequency`. The default value is **10s**.
|
||||
|
||||
2. Kubernetes controller manager checks the statuses of Kubelets every
|
||||
2. Kubernetes controller manager checks the statuses of Kubelet every
|
||||
`--node-monitor-period`. The default value is **5s**.
|
||||
|
||||
3. In case the status is updated within `--node-monitor-grace-period` of time,
the Kubernetes controller manager considers the Kubelet healthy. The
default value is **40s**.
|
||||
|
||||
> Kubernetes controller manager and Kubelets work asynchronously. It means that
|
||||
> Kubernetes controller manager and Kubelet work asynchronously. It means that
|
||||
> the delay may include any network latency, API Server latency, etcd latency,
|
||||
> latency caused by load on one's master nodes and so on. So if
|
||||
> `--node-status-update-frequency` is set to 5s in reality it may appear in
|
||||
|
@ -3,7 +3,7 @@ Large deployments of K8s
|
||||
|
||||
For large scale deployments, consider the following configuration changes:
|
||||
|
||||
* Tune [ansible settings](http://docs.ansible.com/ansible/intro_configuration.html)
|
||||
* Tune [ansible settings](https://docs.ansible.com/ansible/intro_configuration.html)
|
||||
for `forks` and `timeout` vars to fit large numbers of nodes being deployed.
|
||||
|
||||
* Override containers' `foo_image_repo` vars to point to an intranet registry.
|
||||
|
13 docs/mitogen.md Normal file
@ -0,0 +1,13 @@
|
||||
# Mitogen
|
||||
|
||||
[Mitogen for Ansible](https://mitogen.networkgenomics.com/ansible_detailed.html) allows a 1.25x - 7x speedup and a CPU usage reduction of at least 2x, depending on network conditions, modules executed, and time already spent by targets on useful work. Mitogen cannot improve a module once it is executing; it can only ensure the module executes as quickly as possible.
|
||||
|
||||
## Install
|
||||
|
||||
```ShellSession
|
||||
ansible-playbook mitogen.yml
|
||||
```
|
||||
|
||||
## Limitation
|
||||
|
||||
If you are experiencing problems, please see the [documentation](https://mitogen.networkgenomics.com/ansible_detailed.html#noteworthy-differences).
|
131 docs/nodes.md Normal file
@ -0,0 +1,131 @@
|
||||
# Adding/replacing a node
|
||||
|
||||
Modified from [comments in #3471](https://github.com/kubernetes-sigs/kubespray/issues/3471#issuecomment-530036084)
|
||||
|
||||
## Adding/replacing a worker node
|
||||
|
||||
This should be the easiest.
|
||||
|
||||
### 1) Add new node to the inventory
|
||||
|
||||
### 2) Run `scale.yml`
|
||||
|
||||
You can use `--limit=node1` to limit Kubespray to avoid disturbing other nodes in the cluster.
|
||||
|
||||
Before using `--limit` run playbook `facts.yml` without the limit to refresh facts cache for all nodes.
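A typical invocation (the inventory path and `-b` flag are assumptions based on the other playbook examples in these docs) could be:

```sh
# refresh the facts cache for all nodes first
ansible-playbook -i inventory/mycluster/hosts.yml -b facts.yml

# then scale, limited to the new node
ansible-playbook -i inventory/mycluster/hosts.yml -b scale.yml --limit=node1
```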
|
||||
|
||||
### 3) Drain the node that will be removed
|
||||
|
||||
```sh
|
||||
kubectl drain NODE_NAME
|
||||
```
|
||||
|
||||
### 4) Run the remove-node.yml playbook
|
||||
|
||||
With the old node still in the inventory, run `remove-node.yml`. You need to pass `-e node=NODE_NAME` to the playbook to limit the execution to the node being removed.
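For example (the inventory path is an assumption):

```sh
ansible-playbook -i inventory/mycluster/hosts.yml -b remove-node.yml -e node=NODE_NAME
```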
|
||||
|
||||
### 5) Remove the node from the inventory
|
||||
|
||||
That's it.
|
||||
|
||||
## Adding/replacing a master node
|
||||
|
||||
### 1) Recreate apiserver certs manually to include the new master node in the cert SAN field
|
||||
|
||||
For some reason, Kubespray will not update the apiserver certificate.
|
||||
|
||||
Edit `/etc/kubernetes/kubeadm-config.yaml` and include the new host in the `certSANs` list.
|
||||
|
||||
Use kubeadm to recreate the certs.
|
||||
|
||||
```sh
|
||||
cd /etc/kubernetes/ssl
|
||||
mv apiserver.crt apiserver.crt.old
|
||||
mv apiserver.key apiserver.key.old
|
||||
|
||||
cd /etc/kubernetes
|
||||
kubeadm init phase certs apiserver --config kubeadm-config.yaml
|
||||
```
|
||||
|
||||
Check the certificate; the new host needs to be there.
|
||||
|
||||
```sh
|
||||
openssl x509 -text -noout -in /etc/kubernetes/ssl/apiserver.crt
|
||||
```
|
||||
|
||||
### 2) Run `cluster.yml`
|
||||
|
||||
Add the new host to the inventory and run cluster.yml.
|
||||
|
||||
### 3) Restart kube-system/nginx-proxy
|
||||
|
||||
On all hosts, restart the nginx-proxy pod. This pod is a local proxy for the apiserver. Kubespray will update its static config, but it needs to be restarted in order to reload it.
|
||||
|
||||
```sh
|
||||
# run in every host
|
||||
docker ps | grep k8s_nginx-proxy_nginx-proxy | awk '{print $1}' | xargs docker restart
|
||||
```
|
||||
|
||||
### 4) Remove old master nodes
|
||||
|
||||
If you are replacing a node, remove the old one from the inventory, and remove it from the cluster runtime.
|
||||
|
||||
```sh
|
||||
kubectl drain NODE_NAME
|
||||
kubectl delete node NODE_NAME
|
||||
```
|
||||
|
||||
After that, the old node can be safely shut down. Also, make sure to restart nginx-proxy on all remaining nodes (step 3)
|
||||
|
||||
From any active master that remains in the cluster, re-upload `kubeadm-config.yaml`
|
||||
|
||||
```sh
|
||||
kubeadm config upload from-file --config /etc/kubernetes/kubeadm-config.yaml
|
||||
```
|
||||
|
||||
## Adding/Replacing an etcd node
|
||||
|
||||
You need to make sure there is always an odd number of etcd nodes in the cluster. That way, this is always a replace or scale-up operation: either add two new nodes or remove an old one.
|
||||
|
||||
### 1) Add the new node running cluster.yml
|
||||
|
||||
Update the inventory and run `cluster.yml` passing `--limit=etcd,kube-master -e ignore_assert_errors=yes`.
|
||||
|
||||
Run `upgrade-cluster.yml` also passing `--limit=etcd,kube-master -e ignore_assert_errors=yes`. This is necessary to update all etcd configuration in the cluster.
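Concretely, that could look like this (the inventory path is an assumption):

```sh
ansible-playbook -i inventory/mycluster/hosts.yml -b cluster.yml --limit=etcd,kube-master -e ignore_assert_errors=yes
ansible-playbook -i inventory/mycluster/hosts.yml -b upgrade-cluster.yml --limit=etcd,kube-master -e ignore_assert_errors=yes
```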
|
||||
|
||||
At this point, you will have an even number of nodes. Everything should still be working, and you should only have problems if the cluster decides to elect a new etcd leader before you remove a node. Even so, running applications should continue to be available.
|
||||
|
||||
### 2) Remove an old etcd node
|
||||
|
||||
With the node still in the inventory, run `remove-node.yml` passing `-e node=NODE_NAME` as the name of the node that should be removed.
|
||||
|
||||
### 3) Make sure the remaining etcd members have their config updated
|
||||
|
||||
In each etcd host that remains in the cluster:
|
||||
|
||||
```sh
|
||||
cat /etc/etcd.env | grep ETCD_INITIAL_CLUSTER
|
||||
```
|
||||
|
||||
Only active etcd members should be in that list.
|
||||
|
||||
### 4) Remove old etcd members from the cluster runtime
|
||||
|
||||
Acquire a shell prompt into one of the etcd containers and use etcdctl to remove the old member.
|
||||
|
||||
```sh
|
||||
# list all members
|
||||
etcdctl member list
|
||||
|
||||
# remove old member
|
||||
etcdctl member remove MEMBER_ID
|
||||
# careful!!! if you remove the wrong member you will be in trouble
|
||||
|
||||
# note: these command lines are actually much bigger, since you need to pass all certificates to etcdctl.
|
||||
```
|
||||
|
||||
### 5) Make sure the apiserver config is correctly updated
|
||||
|
||||
In every master node, edit `/etc/kubernetes/manifests/kube-apiserver.yaml`. Make sure only active etcd nodes are still present in the apiserver command line parameter `--etcd-servers=...`.
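The flag should end up looking something like this (the addresses are placeholders for your remaining etcd members; 2379 is the standard etcd client port):

```sh
--etcd-servers=https://10.0.0.1:2379,https://10.0.0.2:2379,https://10.0.0.3:2379
```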
|
||||
|
||||
### 6) Shutdown the old instance
|
@ -23,30 +23,78 @@ In order to make L3 CNIs work on OpenStack you will need to tell OpenStack to al
|
||||
|
||||
First you will need the ids of your OpenStack instances that will run kubernetes:
|
||||
|
||||
openstack server list --project YOUR_PROJECT
|
||||
+--------------------------------------+--------+----------------------------------+--------+-------------+
|
||||
| ID | Name | Tenant ID | Status | Power State |
|
||||
+--------------------------------------+--------+----------------------------------+--------+-------------+
|
||||
| e1f48aad-df96-4bce-bf61-62ae12bf3f95 | k8s-1 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running |
|
||||
| 725cd548-6ea3-426b-baaa-e7306d3c8052 | k8s-2 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running |
|
||||
```bash
|
||||
openstack server list --project YOUR_PROJECT
|
||||
+--------------------------------------+--------+----------------------------------+--------+-------------+
|
||||
| ID | Name | Tenant ID | Status | Power State |
|
||||
+--------------------------------------+--------+----------------------------------+--------+-------------+
|
||||
| e1f48aad-df96-4bce-bf61-62ae12bf3f95 | k8s-1 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running |
|
||||
| 725cd548-6ea3-426b-baaa-e7306d3c8052 | k8s-2 | fba478440cb2444a9e5cf03717eb5d6f | ACTIVE | Running |
|
||||
```
|
||||
|
||||
Then you can use the instance ids to find the connected [neutron](https://wiki.openstack.org/wiki/Neutron) ports (though they are now configured through OpenStack):
|
||||
|
||||
openstack port list -c id -c device_id --project YOUR_PROJECT
|
||||
+--------------------------------------+--------------------------------------+
|
||||
| id | device_id |
|
||||
+--------------------------------------+--------------------------------------+
|
||||
| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
|
||||
| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
|
||||
```bash
|
||||
openstack port list -c id -c device_id --project YOUR_PROJECT
|
||||
+--------------------------------------+--------------------------------------+
|
||||
| id | device_id |
|
||||
+--------------------------------------+--------------------------------------+
|
||||
| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
|
||||
| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
|
||||
```
|
||||
|
||||
Given the port ids on the left, you can set the two `allowed-address`(es) in OpenStack. Note that you have to allow both `kube_service_addresses` (default `10.233.0.0/18`) and `kube_pods_subnet` (default `10.233.64.0/18`).
|
||||
|
||||
# allow kube_service_addresses and kube_pods_subnet network
|
||||
openstack port set 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
|
||||
openstack port set e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
|
||||
```bash
|
||||
# allow kube_service_addresses and kube_pods_subnet network
|
||||
openstack port set 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
|
||||
openstack port set e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
|
||||
```
|
||||
|
||||
If all the VMs in the tenant correspond to the kubespray deployment, you can "sweep run" the above with:
|
||||
|
||||
openstack port list --device-owner=compute:nova -c ID -f value | xargs -tI@ openstack port set @ --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
|
||||
```bash
|
||||
openstack port list --device-owner=compute:nova -c ID -f value | xargs -tI@ openstack port set @ --allowed-address ip-address=10.233.0.0/18 --allowed-address ip-address=10.233.64.0/18
|
||||
```
|
||||
|
||||
Now you can finally run the playbook.
|
||||
|
||||
Upgrade from the in-tree to the external cloud provider
|
||||
---------------
|
||||
|
||||
The in-tree cloud provider is deprecated and will be removed in a future version of Kubernetes. The target release for removing all remaining in-tree cloud providers is set to 1.21.
|
||||
|
||||
The new cloud provider is configured to have Octavia by default in Kubespray.
|
||||
|
||||
- Change cloud provider from `cloud_provider: openstack` to the new external Cloud provider:
|
||||
|
||||
```yaml
|
||||
cloud_provider: external
|
||||
external_cloud_provider: openstack
|
||||
```
|
||||
|
||||
- Enable Cinder CSI:
|
||||
|
||||
```yaml
|
||||
cinder_csi_enabled: true
|
||||
```
|
||||
|
||||
- Enable topology support (optional). If your OpenStack provider has custom zone names, you can override the default "nova" zone by setting the variable `cinder_topology_zones`.
|
||||
|
||||
```yaml
|
||||
cinder_topology: true
|
||||
```
|
||||
|
||||
- If you are using OpenStack loadbalancer(s), replace `openstack_lbaas_subnet_id` with the new `external_openstack_lbaas_subnet_id`. **Note**: the new cloud provider uses Octavia instead of Neutron LBaaS by default!
|
||||
- Enable 3 feature gates to allow migration of all volumes and storage classes (if you have any feature gates already set just add the 3 listed below):
|
||||
|
||||
```yaml
|
||||
kube_feature_gates:
|
||||
- CSIMigration=true
|
||||
- CSIMigrationOpenStack=true
|
||||
- ExpandCSIVolumes=true
|
||||
```
|
||||
|
||||
- Run the `upgrade-cluster.yml` playbook
|
||||
- Run the cleanup playbook located at `extra_playbooks/migrate_openstack_provider.yml` (this will clean up all resources used by the old cloud provider)
|
||||
- You can remove the feature gates for volume migration. If you want to keep the possibility to expand CSI volumes, you could leave the `ExpandCSIVolumes=true` feature gate in place.
|
||||
|
@ -38,9 +38,9 @@ Terraform is required to deploy the bare metal infrastructure. The steps below a
|
||||
Grab the latest version of Terraform and install it.
|
||||
|
||||
```bash
|
||||
echo "https://releases.hashicorp.com/terraform/$(curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version')/terraform_$(curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version')_darwin_amd64.zip"
|
||||
echo "https://releases.hashicorp.com/terraform/$(curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version')/terraform_$(curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version')_linux_amd64.zip"
|
||||
sudo yum install unzip
|
||||
sudo unzip terraform_0.12.12_linux_amd64.zip -d /usr/local/bin/
|
||||
sudo unzip terraform_0.12.24_linux_amd64.zip -d /usr/local/bin/
|
||||
```
|
||||
|
||||
## Download Kubespray
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Setting up Environment Proxy
|
||||
|
||||
If you set http and https proxy, all nodes and loadbalancer will be excluded from proxy with generating no_proxy variable in `roles/kubespray-defaults/defaults/main.yml`, if you have additional resources for exclude add them to `additional_no_proxy` variable. If you want fully override your `no_proxy` setting, then fill in just `no_proxy` and no nodes or loadbalancer addresses will be added to no_proxy.
|
||||
If you set an http and https proxy, all nodes and the loadbalancer will be excluded from the proxy by generating the no_proxy variable in `roles/kubespray-defaults/tasks/no_proxy.yml`. If you have additional resources to exclude, add them to the `additional_no_proxy` variable. If you want to fully override your `no_proxy` setting, fill in just `no_proxy` and no nodes or loadbalancer addresses will be added to it.
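As a sketch, the two approaches described above look like this in the inventory group vars (host names are placeholders):

```yaml
# add extra entries on top of the generated no_proxy list
additional_no_proxy: "internal-registry.example.com,.corp.example.com"

# OR fully override the generated value
# no_proxy: "localhost,127.0.0.1,.corp.example.com"
```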
|
||||
|
||||
## Set proxy for http and https
|
||||
|
||||
|
@ -17,37 +17,23 @@ Examples of what broken means in this context:
|
||||
|
||||
__Note that you need at least one functional node to be able to recover using this method.__
|
||||
|
||||
## If etcd quorum is intact
|
||||
## Runbook
|
||||
|
||||
* Set the etcd member names of the broken node(s) in the variable "old\_etcd\_members", this variable is used to remove the broken nodes from the etcd cluster.
|
||||
```old_etcd_members=etcd2,etcd3```
|
||||
* If you reuse identities for your etcd nodes add the inventory names for those nodes to the variable "old\_etcds". This will remove any previously generated certificates for those nodes.
|
||||
```old_etcds=etcd2.example.com,etcd3.example.com```
|
||||
* If you would like to remove the broken node objects from the kubernetes cluster add their inventory names to the variable "old\_kube\_masters"
|
||||
```old_kube_masters=master2.example.com,master3.example.com```
|
||||
* Move any broken etcd nodes into the "broken\_etcd" group, make sure the "etcd\_member\_name" variable is set.
|
||||
* Move any broken master nodes into the "broken\_kube-master" group.
|
||||
|
||||
Then run the playbook with ```--limit etcd,kube-master```
|
||||
Then run the playbook with ```--limit etcd,kube-master``` and increase the number of ETCD retries by setting ```-e etcd_retries=10``` or something even larger. The number of retries required is difficult to predict.
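For example, assuming the recovery playbook is `recover-control-plane.yml` and the usual inventory layout:

```ShellSession
ansible-playbook -i inventory/mycluster/hosts.yml -b recover-control-plane.yml --limit etcd,kube-master -e etcd_retries=10
```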
|
||||
|
||||
When finished you should have a fully working and highly available control plane again.
|
||||
When finished you should have a fully working control plane again.
|
||||
|
||||
## If etcd quorum is lost
|
||||
## Recover from lost quorum
|
||||
|
||||
* If you reuse identities for your etcd nodes add the inventory names for those nodes to the variable "old\_etcds". This will remove any previously generated certificates for those nodes.
|
||||
```old_etcds=etcd2.example.com,etcd3.example.com```
|
||||
* If you would like to remove the broken node objects from the kubernetes cluster add their inventory names to the variable "old\_kube\_masters"
|
||||
```old_kube_masters=master2.example.com,master3.example.com```
|
||||
The playbook attempts to figure out if the etcd quorum is intact. If quorum is lost, it will attempt to take a snapshot from the first node in the "etcd" group and restore from that. If you would like to restore from an alternate snapshot, set the path to that snapshot in the "etcd\_snapshot" variable.
|
||||
|
||||
Then run the playbook with ```--limit etcd,kube-master```
|
||||
|
||||
When finished you should have a fully working and highly available control plane again.
|
||||
|
||||
The playbook will attempt to take a snapshot from the first node in the "etcd" group and restore from that. If you would like to restore from an alternate snapshot set the path to that snapshot in the "etcd\_snapshot" variable.
|
||||
|
||||
```etcd_snapshot=/tmp/etcd_snapshot```
|
||||
```-e etcd_snapshot=/tmp/etcd_snapshot```
|
||||
|
||||
## Caveats
|
||||
|
||||
* The playbook has only been tested on control planes where the etcd and kube-master nodes are the same; it will warn if run on a cluster with separate etcd and kube-master nodes.
|
||||
* The playbook has only been tested with fairly small etcd databases.
|
||||
* If your new control plane nodes have new ip addresses you may have to change settings in various places.
|
||||
* There may be disruptions while running the playbook.
|
||||
|
@ -25,15 +25,17 @@ If you wanted to upgrade just kube_version from v1.4.3 to v1.4.6, you could
|
||||
deploy the following way:
|
||||
|
||||
```ShellSession
|
||||
ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.4.3
|
||||
ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.4.3 -e upgrade_cluster_setup=true
|
||||
```
|
||||
|
||||
And then repeat with v1.4.6 as kube_version:
|
||||
|
||||
```ShellSession
|
||||
ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.4.6
|
||||
ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.4.6 -e upgrade_cluster_setup=true
|
||||
```
|
||||
|
||||
The var ```-e upgrade_cluster_setup=true``` needs to be set in order to migrate the deploys of e.g. kube-apiserver inside the cluster immediately, which is usually only done in the graceful upgrade. (Refer to [#4139](https://github.com/kubernetes-sigs/kubespray/issues/4139) and [#4736](https://github.com/kubernetes-sigs/kubespray/issues/4736))
|
||||
|
||||
## Graceful upgrade
|
||||
|
||||
Kubespray also supports cordon, drain and uncordoning of nodes when performing
|
||||
@ -63,7 +65,7 @@ For instance, if you're on v2.6.0, then check out v2.7.0, run the upgrade, check
|
||||
Assuming you don't explicitly define a kubernetes version in your k8s-cluster.yml, you simply check out the next tag and run the upgrade-cluster.yml playbook
|
||||
|
||||
* If you do define kubernetes version in your inventory (e.g. group_vars/k8s-cluster.yml) then either make sure to update it before running upgrade-cluster, or specify the new version you're upgrading to: `ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml -e kube_version=v1.11.3`
|
||||
|
||||
|
||||
Otherwise, the upgrade will leave your cluster at the same k8s version defined in your inventory vars.
|
||||
|
||||
The below example shows taking a cluster that was set up for v2.6.0 up to v2.10.0
|
||||
|
47 docs/vars.md
@ -3,7 +3,7 @@
|
||||
## Generic Ansible variables
|
||||
|
||||
You can view facts gathered by Ansible automatically
|
||||
[here](http://docs.ansible.com/ansible/playbooks_variables.html#information-discovered-from-systems-facts).
|
||||
[here](https://docs.ansible.com/ansible/playbooks_variables.html#information-discovered-from-systems-facts).
|
||||
|
||||
Some variables of note include:
|
||||
|
||||
@ -50,6 +50,7 @@ Kubernetes needs some parameters in order to get deployed. These are the
|
||||
following default cluster parameters:
|
||||
|
||||
* *cluster_name* - Name of cluster (default is cluster.local)
|
||||
* *container_manager* - Container Runtime to install in the nodes (default is docker)
|
||||
* *dns_domain* - Name of cluster DNS domain (default is cluster.local)
|
||||
* *kube_network_plugin* - Plugin to use for container networking
|
||||
* *kube_service_addresses* - Subnet for cluster IPs (default is
|
||||
@ -57,7 +58,10 @@ following default cluster parameters:
|
||||
* *kube_pods_subnet* - Subnet for Pod IPs (default is 10.233.64.0/18). Must not
|
||||
overlap with kube_service_addresses.
|
||||
* *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. Remaining
|
||||
bits in kube_pods_subnet dictates how many kube-nodes can be in cluster.
|
||||
bits in kube_pods_subnet dictate how many kube-nodes can be in the cluster. Setting this > 25 will
raise an assertion in playbooks if the `kubelet_max_pods` var also isn't adjusted accordingly
(assertion not applicable to calico, which doesn't use this as a hard limit; see
[Calico IP block sizes](https://docs.projectcalico.org/reference/resources/ippool#block-sizes)).
|
||||
* *skydns_server* - Cluster IP for DNS (default is 10.233.0.3)
|
||||
* *skydns_server_secondary* - Secondary Cluster IP for CoreDNS used with coredns_dual deployment (default is 10.233.0.4)
|
||||
* *enable_coredns_k8s_external* - If enabled, it configures the [k8s_external plugin](https://coredns.io/plugins/k8s_external/)
|
||||
@ -104,6 +108,8 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
|
||||
* *docker_options* - Commonly used to set
|
||||
``--insecure-registry=myregistry.mydomain:5000``
|
||||
* *docker_plugins* - This list can be used to define [Docker plugins](https://docs.docker.com/engine/extend/) to install.
|
||||
* *containerd_config* - Controls some parameters in containerd configuration file (usually /etc/containerd/config.toml).
|
||||
[Default config](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/container-engine/containerd/defaults/main.yml) can be overridden in inventory vars.
|
||||
* *http_proxy/https_proxy/no_proxy* - Proxy variables for deploying behind a
|
||||
proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames
|
||||
that correspond to each node.
|
||||
@ -156,7 +162,32 @@ node_taints:
|
||||
|
||||
### Custom flags for Kube Components
|
||||
|
||||
For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags. The `kubelet_node_custom_flags` apply kubelet settings only to nodes and not masters. Example:
|
||||
For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments.
|
||||
|
||||
Extra flags for the kubelet can be specified using these variables,
|
||||
in the form of dicts of key-value pairs of configuration parameters that will be inserted into the kubelet YAML config file. The `kubelet_node_config_extra_args` apply kubelet settings only to nodes and not masters. Example:
|
||||
|
||||
```yml
|
||||
kubelet_config_extra_args:
|
||||
EvictionHard:
|
||||
memory.available: "<100Mi"
|
||||
EvictionSoftGracePeriod:
|
||||
memory.available: "30s"
|
||||
EvictionSoft:
|
||||
memory.available: "<300Mi"
|
||||
```
|
||||
|
||||
The possible vars are:
|
||||
|
||||
* *kubelet_config_extra_args*
|
||||
* *kubelet_node_config_extra_args*
|
||||
|
||||
Previously, the same parameters could be passed as flags to the kubelet binary with the following vars:
|
||||
|
||||
* *kubelet_custom_flags*
|
||||
* *kubelet_node_custom_flags*
|
||||
|
||||
The `kubelet_node_custom_flags` apply kubelet settings only to nodes and not masters. Example:
|
||||
|
||||
```yml
|
||||
kubelet_custom_flags:
|
||||
@ -165,10 +196,7 @@ kubelet_custom_flags:
|
||||
- "--eviction-soft=memory.available<300Mi"
|
||||
```
|
||||
|
||||
The possible vars are:
|
||||
|
||||
* *kubelet_custom_flags*
|
||||
* *kubelet_node_custom_flags*
|
||||
This alternative is deprecated and will remain until the flags are completely removed from kubelet
|
||||
|
||||
Extra flags for the API server, controller, and scheduler components can be specified using these variables,
|
||||
in the form of dicts of key-value pairs of configuration parameters that will be inserted into the kubeadm YAML config file:
|
||||
@ -179,11 +207,12 @@ in the form of dicts of key-value pairs of configuration parameters that will be
|
||||
|
||||
## App variables
|
||||
|
||||
* *helm_version* - Defaults to v2.x, set to a v3 version (e.g. `v3.0.1` ) to install Helm 3.x (no more Tiller!). When changing this to 3 in an existing cluster, Tiller will be left alone and has to be removed manually.
|
||||
* *helm_version* - Defaults to v3.x, set to a v2 version (e.g. `v2.16.1` ) to install Helm 2.x (will install Tiller!).
|
||||
Picking v3 for an existing cluster running Tiller will leave it alone. In that case you will have to remove Tiller manually afterwards.
|
||||
|
||||
## User accounts
|
||||
|
||||
By default, a user with admin rights is created, named `kube`.
|
||||
The variable `kube_basic_auth` is false by default, but if set to true, a user with admin rights is created, named `kube`.
|
||||
The password can be viewed after deployment by looking at the file
|
||||
`{{ credentials_dir }}/kube_user.creds` (`credentials_dir` is set to `{{ inventory_dir }}/credentials` by default). This contains a randomly generated
|
||||
password. If you wish to set your own password, just precreate/modify this
|
||||
|
90 docs/vsphere-csi.md Normal file
@ -0,0 +1,90 @@
|
||||
# vSphere CSI Driver
|
||||
|
||||
The vSphere CSI driver allows you to provision volumes over a vSphere deployment. The historic Kubernetes in-tree cloud provider is deprecated and will be removed in future versions.
|
||||
|
||||
To enable vSphere CSI driver, uncomment the `vsphere_csi_enabled` option in `group_vars/all/vsphere.yml` and set it to `true`.
|
||||
|
||||
To set the number of replicas for the vSphere CSI controller, you can change `vsphere_csi_controller_replicas` option in `group_vars/all/vsphere.yml`.
|
||||
|
||||
You need to source the vSphere credentials you use to deploy your machines that will host Kubernetes.
|
||||
|
||||
| Variable | Required | Type | Choices | Default | Comment |
|
||||
|---------------------------------------------|----------|---------|----------------------------|---------------------------|----------------------------------------------------------------|
|
||||
| external_vsphere_vcenter_ip | TRUE | string | | | IP/URL of the vCenter |
|
||||
| external_vsphere_vcenter_port | TRUE | string | | "443" | Port of the vCenter API |
|
||||
| external_vsphere_insecure | TRUE | string | "true", "false" | "true" | set to "true" if the host above uses a self-signed cert |
|
||||
| external_vsphere_user | TRUE | string | | | User name for vCenter with required privileges |
|
||||
| external_vsphere_password | TRUE | string | | | Password for vCenter |
|
||||
| external_vsphere_datacenter | TRUE | string | | | Datacenter name to use |
|
||||
| external_vsphere_kubernetes_cluster_id | TRUE | string | | "kubernetes-cluster-id" | Kubernetes cluster ID to use |
|
||||
| vsphere_cloud_controller_image_tag          | TRUE     | string  |                            | "latest"                  | Cloud controller image tag to use                               |
|
||||
| vsphere_syncer_image_tag | TRUE | string | | "v1.0.2" | Syncer image tag to use |
|
||||
| vsphere_csi_attacher_image_tag | TRUE | string | | "v1.1.1" | CSI attacher image tag to use |
|
||||
| vsphere_csi_controller | TRUE | string | | "v1.0.2" | CSI controller image tag to use |
|
||||
| vsphere_csi_controller_replicas | TRUE | integer | | 1 | Number of pods Kubernetes should deploy for the CSI controller |
|
||||
| vsphere_csi_liveness_probe_image_tag | TRUE | string | | "v1.1.0" | CSI liveness probe image tag to use |
|
||||
| vsphere_csi_provisioner_image_tag | TRUE | string | | "v1.2.2" | CSI provisioner image tag to use |
|
||||
| vsphere_csi_node_driver_registrar_image_tag | TRUE     | string  |                            | "v1.1.0"                  | CSI node driver registrar image tag to use                      |
|
||||
| vsphere_csi_driver_image_tag | TRUE | string | | "v1.0.2" | CSI driver image tag to use |
|
||||
|
||||
## Usage example
|
||||
|
||||
To test the dynamic provisioning using vSphere CSI driver, make sure to create a [storage policy](https://github.com/kubernetes/cloud-provider-vsphere/blob/master/docs/book/tutorials/kubernetes-on-vsphere-with-kubeadm.md#create-a-storage-policy) and [storage class](https://github.com/kubernetes/cloud-provider-vsphere/blob/master/docs/book/tutorials/kubernetes-on-vsphere-with-kubeadm.md#create-a-storageclass), then apply the following manifest:
|
||||
|
||||
```yml
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: csi-pvc-vsphere
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
storageClassName: Space-Efficient
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx
|
||||
spec:
|
||||
containers:
|
||||
- image: nginx
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: nginx
|
||||
ports:
|
||||
- containerPort: 80
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- mountPath: /var/lib/www/html
|
||||
name: csi-data-vsphere
|
||||
volumes:
|
||||
- name: csi-data-vsphere
|
||||
persistentVolumeClaim:
|
||||
claimName: csi-pvc-vsphere
|
||||
readOnly: false
|
||||
```
|
||||
|
||||
Apply this conf to your cluster: ```kubectl apply -f nginx.yml```
|
||||
|
||||
You should see the PVC provisioned and bound:
|
||||
|
||||
```ShellSession
|
||||
$ kubectl get pvc
|
||||
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
|
||||
csi-pvc-vsphere Bound pvc-dc7b1d21-ee41-45e1-98d9-e877cc1533ac 1Gi RWO Space-Efficient 10s
|
||||
```
|
||||
|
||||
And the volume mounted to the Nginx Pod (wait until the Pod is Running):
|
||||
|
||||
```ShellSession
|
||||
kubectl exec -it nginx -- df -h | grep /var/lib/www/html
|
||||
/dev/sdb 976M 2.6M 907M 1% /var/lib/www/html
|
||||
```
|
||||
|
||||
## More info
|
||||
|
||||
For further information about the vSphere CSI Driver, you can refer to the official [vSphere Cloud Provider documentation](https://cloud-provider-vsphere.sigs.k8s.io/container_storage_interface.html).
|
@ -1,13 +1,75 @@
|
||||
# vSphere cloud provider
|
||||
# vSphere
|
||||
|
||||
Kubespray can be deployed with vSphere as Cloud provider. This feature supports
|
||||
Kubespray can be deployed with vSphere as Cloud provider. This feature supports:
|
||||
|
||||
- Volumes
|
||||
- Persistent Volumes
|
||||
- Storage Classes and provisioning of volumes.
|
||||
- vSphere Storage Policy Based Management for Containers orchestrated by Kubernetes.
|
||||
- Storage Classes and provisioning of volumes
|
||||
- vSphere Storage Policy Based Management for Containers orchestrated by Kubernetes
|
||||
|
||||
## Prerequisites
|
||||
## Out-of-tree vSphere cloud provider
|
||||
|
||||
### Prerequisites
|
||||
|
||||
You need at first to configure your vSphere environment by following the [official documentation](https://github.com/kubernetes/cloud-provider-vsphere/blob/master/docs/book/tutorials/kubernetes-on-vsphere-with-kubeadm.md#prerequisites).
|
||||
|
||||
After this step you should have:
|
||||
|
||||
- vSphere upgraded to 6.7 U3 or later
|
||||
- VM hardware upgraded to version 15 or higher
|
||||
- UUID activated for each VM where Kubernetes will be deployed
|
||||
|
||||
### Kubespray configuration
|
||||
|
||||
First, in `inventory/sample/group_vars/all.yml`, you must set `cloud_provider` to `external` and `external_cloud_provider` to `vsphere`.
|
||||
|
||||
```yml
|
||||
cloud_provider: "external"
|
||||
external_cloud_provider: "vsphere"
|
||||
```
|
||||
|
||||
Then, in `inventory/sample/group_vars/vsphere.yml`, you need to declare your vCenter credentials and enable the vSphere CSI following the description below.
|
||||
|
||||
| Variable | Required | Type | Choices | Default | Comment |
|
||||
|----------------------------------------|----------|---------|----------------------------|---------|---------------------------------------------------------------------------|
|
||||
| external_vsphere_vcenter_ip | TRUE | string | | | IP/URL of the vCenter |
|
||||
| external_vsphere_vcenter_port | TRUE | string | | "443" | Port of the vCenter API |
|
||||
| external_vsphere_insecure | TRUE | string | "true", "false" | "true" | set to "true" if the host above uses a self-signed cert |
|
||||
| external_vsphere_user | TRUE | string | | | User name for vCenter with required privileges |
|
||||
| external_vsphere_password | TRUE | string | | | Password for vCenter |
|
||||
| external_vsphere_datacenter | TRUE | string | | | Datacenter name to use |
|
||||
| external_vsphere_kubernetes_cluster_id | TRUE | string | | "kubernetes-cluster-id" | Kubernetes cluster ID to use |
|
||||
| vsphere_csi_enabled | TRUE | boolean | | false | Enable vSphere CSI |
|
||||
|
||||
Example configuration:
|
||||
|
||||
```yml
|
||||
external_vsphere_vcenter_ip: "myvcenter.domain.com"
|
||||
external_vsphere_vcenter_port: "443"
|
||||
external_vsphere_insecure: "true"
|
||||
external_vsphere_user: "administrator@vsphere.local"
|
||||
external_vsphere_password: "K8s_admin"
|
||||
external_vsphere_datacenter: "DATACENTER_name"
|
||||
external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id"
|
||||
vsphere_csi_enabled: true
|
||||
```
|
||||
|
||||
For a more fine-grained CSI setup, refer to the [vsphere-csi](vsphere-csi.md) documentation.
|
||||
|
||||
### Deployment
|
||||
|
||||
Once the configuration is set, you can execute the playbook again to apply the new configuration:
|
||||
|
||||
```ShellSession
|
||||
cd kubespray
|
||||
ansible-playbook -i inventory/sample/hosts.ini -b -v cluster.yml
|
||||
```
|
||||
|
||||
You'll find some useful examples [here](https://github.com/kubernetes/cloud-provider-vsphere/blob/master/docs/book/tutorials/kubernetes-on-vsphere-with-kubeadm.md#sample-manifests-to-test-csi-driver-functionality) to test your configuration.
|
||||
|
||||
## In-tree vSphere cloud provider ([deprecated](https://cloud-provider-vsphere.sigs.k8s.io/concepts/in_tree_vs_out_of_tree.html))
|
||||
|
||||
### Prerequisites (deprecated)
|
||||
|
||||
You need at first to configure your vSphere environment by following the [official documentation](https://kubernetes.io/docs/getting-started-guides/vsphere/#vsphere-cloud-provider).
|
||||
|
||||
@ -18,7 +80,7 @@ After this step you should have:
|
||||
|
||||
If you intend to leverage the [zone and region node labeling](https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#failure-domain-beta-kubernetes-io-region), create a tag category for both the zone and region in vCenter. The tags can then be applied at the host, cluster, datacenter, or folder level, and the cloud provider will walk the hierarchy to extract and apply the labels to the Kubernetes nodes.
|
||||
|
||||
## Kubespray configuration
|
||||
### Kubespray configuration (deprecated)
|
||||
|
||||
First you must define the cloud provider in `inventory/sample/group_vars/all.yml` and set it to `vsphere`.
|
||||
|
||||
@ -26,7 +88,7 @@ First you must define the cloud provider in `inventory/sample/group_vars/all.yml
|
||||
cloud_provider: vsphere
|
||||
```
|
||||
|
||||
Then, in the same file, you need to declare your vCenter credential following the description below.
|
||||
Then, in the same file, you need to declare your vCenter credentials following the description below.
|
||||
|
||||
| Variable | Required | Type | Choices | Default | Comment |
|
||||
|------------------------------|----------|---------|----------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
@ -45,7 +107,7 @@ Then, in the same file, you need to declare your vCenter credential following th
|
||||
| vsphere_zone_category | FALSE | string | | | Name of the tag category used to set the `failure-domain.beta.kubernetes.io/zone` label on nodes (Optional, only used for Kubernetes >= 1.12.0) |
|
||||
| vsphere_region_category | FALSE | string | | | Name of the tag category used to set the `failure-domain.beta.kubernetes.io/region` label on nodes (Optional, only used for Kubernetes >= 1.12.0) |
|
||||
|
||||
Example configuration
|
||||
Example configuration:
|
||||
|
||||
```yml
|
||||
vsphere_vcenter_ip: "myvcenter.domain.com"
|
||||
@ -60,9 +122,9 @@ vsphere_scsi_controller_type: "pvscsi"
|
||||
vsphere_resource_pool: "K8s-Pool"
|
||||
```
|
||||
|
||||
## Deployment
|
||||
### Deployment (deprecated)
|
||||
|
||||
Once the configuration is set, you can execute the playbook again to apply the new configuration
|
||||
Once the configuration is set, you can execute the playbook again to apply the new configuration:
|
||||
|
||||
```ShellSession
|
||||
cd kubespray
|
||||
|
@ -74,25 +74,6 @@ This mode is best to use on dynamic size cluster
|
||||
|
||||
The seed mode also allows multi-clouds and hybrid on-premise/cloud clusters deployment.
|
||||
|
||||
* Switch from consensus mode to seed mode
|
||||
* Switch from consensus mode to seed/Observation mode
|
||||
|
||||
```ShellSession
|
||||
# In file ./inventory/sample/group_vars/k8s-cluster.yml
|
||||
weave_mode_seed: true
|
||||
```
|
||||
|
||||
These two variables are only used when `weave_mode_seed` is set to `true` (**/!\ do not manually change these values**)
|
||||
|
||||
```ShellSession
|
||||
# In file ./inventory/sample/group_vars/k8s-cluster.yml
|
||||
weave_seed: uninitialized
|
||||
weave_peers: uninitialized
|
||||
```
|
||||
|
||||
The first variable, `weave_seed`, contains the initial nodes of the weave network
|
||||
|
||||
The second variable, `weave_peers`, saves the IPs of all nodes joined to the weave network
|
||||
|
||||
These two variables are used to connect a new node to the weave network. The new node needs to know the firsts nodes (seed) and the list of IPs of all nodes.
|
||||
|
||||
To reset these variables and reset the weave network set them to `uninitialized`
|
||||
See [weave ipam documentation](https://www.weave.works/docs/net/latest/tasks/ipam/ipam/) and use `weave_extra_args` to enable.
|
||||
|
@ -50,6 +50,8 @@
|
||||
docker_image:
|
||||
name: quay.io/kubespray/cephfs-provisioner:06fddbe2
|
||||
push: yes
|
||||
register: docker_image
|
||||
retries: 10
|
||||
until: docker_image is succeeded
|
||||
|
||||
when: check_image_result.rc != 0
|
||||
|
2 extra_playbooks/files/get_cinder_pvs.sh Normal file
@ -0,0 +1,2 @@
|
||||
#!/bin/sh
|
||||
kubectl get pv -o go-template --template='{{ range .items }}{{ $metadata := .metadata }}{{ with $value := index .metadata.annotations "pv.kubernetes.io/provisioned-by" }}{{ if eq $value "kubernetes.io/cinder" }}{{printf "%s\n" $metadata.name}}{{ end }}{{ end }}{{ end }}'
|
28 extra_playbooks/migrate_openstack_provider.yml Normal file
@ -0,0 +1,28 @@
|
||||
---
|
||||
- hosts: kube-node:kube-master
|
||||
tasks:
|
||||
- name: Remove old cloud provider config
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: absent
|
||||
with_items:
|
||||
- /etc/kubernetes/cloud_config
|
||||
- hosts: kube-master[0]
|
||||
tasks:
|
||||
- name: Include kubespray-default variables
|
||||
include_vars: ../roles/kubespray-defaults/defaults/main.yaml
|
||||
- name: Copy get_cinder_pvs.sh to master
|
||||
copy:
|
||||
src: get_cinder_pvs.sh
|
||||
dest: /tmp
|
||||
mode: u+rwx
|
||||
- name: Get PVs provisioned by in-tree cloud provider
|
||||
command: /tmp/get_cinder_pvs.sh
|
||||
register: pvs
|
||||
- name: Remove get_cinder_pvs.sh
|
||||
file:
|
||||
path: /tmp/get_cinder_pvs.sh
|
||||
state: absent
|
||||
- name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation
|
||||
command: "{{ bin_dir }}/kubectl annotate --overwrite pv {{ item }} pv.kubernetes.io/provisioned-by=cinder.csi.openstack.org"
|
||||
loop: "{{ pvs.stdout_lines | list }}"
|
19 facts.yml Normal file
@ -0,0 +1,19 @@
|
||||
---
|
||||
- name: Gather facts
|
||||
hosts: k8s-cluster:etcd:calico-rr
|
||||
gather_facts: False
|
||||
tasks:
|
||||
- name: Gather minimal facts
|
||||
setup:
|
||||
gather_subset: '!all'
|
||||
|
||||
- name: Gather necessary facts
|
||||
setup:
|
||||
gather_subset: '!all,!min,network,hardware'
|
||||
filter: "{{ item }}"
|
||||
loop:
|
||||
- ansible_distribution_major_version
|
||||
- ansible_default_ipv4
|
||||
- ansible_all_ipv4_addresses
|
||||
- ansible_memtotal_mb
|
||||
- ansible_swaptotal_mb
|
@ -51,10 +51,13 @@ loadbalancer_apiserver_healthcheck_port: 8081
|
||||
## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
|
||||
## When openstack is used make sure to source in the openstack credentials
|
||||
## like you would do when using openstack-client before starting the playbook.
|
||||
## Note: The 'external' cloud provider is not supported.
|
||||
## TODO(riverzhang): https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager
|
||||
# cloud_provider:
|
||||
|
||||
## When cloud_provider is set to 'external', you can set the cloud controller to deploy
|
||||
## Supported cloud controllers are: 'openstack' and 'vsphere'
|
||||
## When openstack or vsphere are used make sure to source in the required fields
|
||||
# external_cloud_provider:
|
||||
|
||||
## Set these proxy values in order to update package manager and docker daemon to use proxies
|
||||
# http_proxy: ""
|
||||
# https_proxy: ""
|
||||
|
8 inventory/sample/group_vars/all/aws.yml Normal file
@ -0,0 +1,8 @@
|
||||
## To use AWS EBS CSI Driver to provision volumes, uncomment the first value
|
||||
## and configure the parameters below
|
||||
# aws_ebs_csi_enabled: true
|
||||
# aws_ebs_csi_enable_volume_scheduling: true
|
||||
# aws_ebs_csi_enable_volume_snapshot: false
|
||||
# aws_ebs_csi_enable_volume_resizing: false
|
||||
# aws_ebs_csi_controller_replicas: 1
|
||||
# aws_ebs_csi_plugin_image_tag: latest
|
@ -12,3 +12,25 @@
|
||||
# azure_vnet_name:
|
||||
# azure_vnet_resource_group:
|
||||
# azure_route_table_name:
|
||||
# supported values are 'standard' or 'vmss'
|
||||
# azure_vmtype: standard
|
||||
|
||||
## Azure Disk CSI credentials and parameters
|
||||
## see docs/azure-csi.md for details on how to get these values
|
||||
|
||||
# azure_csi_tenant_id:
|
||||
# azure_csi_subscription_id:
|
||||
# azure_csi_aad_client_id:
|
||||
# azure_csi_aad_client_secret:
|
||||
# azure_csi_location:
|
||||
# azure_csi_resource_group:
|
||||
# azure_csi_vnet_name:
|
||||
# azure_csi_vnet_resource_group:
|
||||
# azure_csi_subnet_name:
|
||||
# azure_csi_security_group_name:
|
||||
# azure_csi_use_instance_metadata:
|
||||
|
||||
## To enable Azure Disk CSI, uncomment below
|
||||
# azure_csi_enabled: true
|
||||
# azure_csi_controller_replicas: 1
|
||||
# azure_csi_plugin_image_tag: latest
|
||||
|
15 inventory/sample/group_vars/all/containerd.yml Normal file
@ -0,0 +1,15 @@
|
||||
---
|
||||
# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options
|
||||
|
||||
# containerd_config:
|
||||
# grpc:
|
||||
# max_recv_message_size: 16777216
|
||||
# max_send_message_size: 16777216
|
||||
# debug:
|
||||
# level: ""
|
||||
# registries:
|
||||
# "docker.io": "https://registry-1.docker.io"
|
||||
# max_container_log_line_size: -1
|
||||
# metrics:
|
||||
# address: ""
|
||||
# grpc_histogram: false
|
@ -51,4 +51,4 @@ docker_rpm_keepcache: 0
|
||||
|
||||
## A string of extra options to pass to the docker daemon.
|
||||
## This string should be exactly as you wish it to appear.
|
||||
docker_options: >-
|
||||
# docker_options: ""
|
||||
|
10 inventory/sample/group_vars/all/gcp.yml Normal file
@ -0,0 +1,10 @@
|
||||
## GCP compute Persistent Disk CSI Driver credentials and parameters
|
||||
## See docs/gcp-pd-csi.md for information about the implementation
|
||||
|
||||
## Specify the path to the file containing the service account credentials
|
||||
# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json"
|
||||
|
||||
## To enable GCP Persistent Disk CSI driver, uncomment below
|
||||
# gcp_pd_csi_enabled: true
|
||||
# gcp_pd_csi_controller_replicas: 1
|
||||
# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0"
|
@ -8,7 +8,7 @@
|
||||
# oci_vnc_id:
|
||||
# oci_subnet1_id:
|
||||
# oci_subnet2_id:
|
||||
## Overrideeeeeeee these default/optional behaviors if you wish
|
||||
## Override these default/optional behaviors if you wish
|
||||
# oci_security_list_management: All
|
||||
## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples.
|
||||
# oci_security_lists:
|
||||
|
@@ -15,6 +15,23 @@
# openstack_lbaas_monitor_timeout: "30s"
# openstack_lbaas_monitor_max_retries: "3"

## Values for the external OpenStack Cloud Controller
# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP"
# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP"
# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from"
# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from"
# external_openstack_lbaas_use_octavia: true
# external_openstack_lbaas_method: "ROUND_ROBIN"
# external_openstack_lbaas_create_monitor: false
# external_openstack_lbaas_monitor_delay: "1m"
# external_openstack_lbaas_monitor_timeout: "30s"
# external_openstack_lbaas_monitor_max_retries: "3"
# external_openstack_lbaas_manage_security_groups: false
# external_openstack_lbaas_internal_lb: false

## The tag of the external OpenStack Cloud Controller image
# external_openstack_cloud_controller_image_tag: "latest"

## To use Cinder CSI plugin to provision volumes set this value to true
## Make sure to source in the openstack credentials
# cinder_csi_enabled: true
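Hedged illustration (not part of the change above) wiring the external OpenStack cloud controller and Cinder CSI together; the Neutron IDs are placeholders:

## Sketch: external OpenStack cloud controller + Cinder CSI (placeholder IDs)
external_openstack_lbaas_network_id: "00000000-0000-0000-0000-000000000000"  # placeholder
external_openstack_lbaas_subnet_id: "00000000-0000-0000-0000-000000000000"   # placeholder
external_openstack_lbaas_use_octavia: true
external_openstack_lbaas_method: "ROUND_ROBIN"
external_openstack_cloud_controller_image_tag: "latest"
cinder_csi_enabled: true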
inventory/sample/group_vars/all/vsphere.yml (new file, 20 lines)
@@ -0,0 +1,20 @@
## Values for the external vSphere Cloud Provider
# external_vsphere_vcenter_ip: "myvcenter.domain.com"
# external_vsphere_vcenter_port: "443"
# external_vsphere_insecure: "true"
# external_vsphere_user: "administrator@vsphere.local"
# external_vsphere_password: "K8s_admin"
# external_vsphere_datacenter: "DATACENTER_name"
# external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id"

## Tags for the external vSphere Cloud Provider images
# external_vsphere_cloud_controller_image_tag: "latest"
# vsphere_syncer_image_tag: "v1.0.2"
# vsphere_csi_attacher_image_tag: "v1.1.1"
# vsphere_csi_controller: "v1.0.2"
# vsphere_csi_liveness_probe_image_tag: "v1.1.0"
# vsphere_csi_provisioner_image_tag: "v1.2.2"

## To use vSphere CSI plugin to provision volumes set this value to true
# vsphere_csi_enabled: true
# vsphere_csi_controller_replicas: 1
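Illustrative sketch (not part of the change above); the vCenter endpoint, credentials and datacenter are placeholders:

## Sketch: vsphere.yml with the CSI plugin enabled (placeholder values)
external_vsphere_vcenter_ip: "vcenter.example.local"   # placeholder
external_vsphere_vcenter_port: "443"
external_vsphere_insecure: "true"
external_vsphere_user: "administrator@vsphere.local"   # placeholder
external_vsphere_password: "changeme"                  # placeholder
external_vsphere_datacenter: "DC1"                     # placeholder
vsphere_csi_enabled: true
vsphere_csi_controller_replicas: 1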
@@ -1,3 +1,4 @@
---
## Etcd auto compaction retention for mvcc key value store in hour
# etcd_compaction_retention: 0

@@ -16,3 +17,6 @@
### ETCD: disable peer client cert authentication.
# This affects ETCD_PEER_CLIENT_CERT_AUTH variable
# etcd_peer_client_auth: true

## Settings for etcd deployment type
etcd_deployment_type: docker
@@ -27,6 +27,8 @@ local_path_provisioner_enabled: false
# local_path_provisioner_debug: false
# local_path_provisioner_image_repo: "rancher/local-path-provisioner"
# local_path_provisioner_image_tag: "v0.0.2"
# local_path_provisioner_helper_image_repo: "busybox"
# local_path_provisioner_helper_image_tag: "latest"

# Local volume provisioner deployment
local_volume_provisioner_enabled: false
@@ -82,7 +84,7 @@ ingress_nginx_enabled: false
# ingress_nginx_host_network: false
ingress_publish_status_address: ""
# ingress_nginx_nodeselector:
-#   beta.kubernetes.io/os: "linux"
+#   kubernetes.io/os: "linux"
# ingress_nginx_tolerations:
#   - key: "node-role.kubernetes.io/master"
#     operator: "Equal"
@@ -101,6 +103,14 @@ ingress_publish_status_address: ""
# ingress_nginx_extra_args:
#   - --default-ssl-certificate=default/foo-tls

# ALB ingress controller deployment
ingress_alb_enabled: false
# alb_ingress_aws_region: "us-east-1"
# alb_ingress_restrict_scheme: "false"
# Enables logging on all outbound requests sent to the AWS API.
# If logging is desired, set to true.
# alb_ingress_aws_debug: "false"

# Cert manager deployment
cert_manager_enabled: false
# cert_manager_namespace: "cert-manager"
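Hedged sketch (not part of the change above) with both add-ons switched on; the region and namespace are illustrative:

## Sketch: ALB ingress controller and cert-manager enabled
ingress_alb_enabled: true
alb_ingress_aws_region: "us-east-1"    # placeholder region
alb_ingress_aws_debug: "false"
cert_manager_enabled: true
cert_manager_namespace: "cert-manager"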
@@ -20,10 +20,10 @@ kube_users_dir: "{{ kube_config_dir }}/users"
kube_api_anonymous_auth: true

## Change this to use another Kubernetes version, e.g. a current beta release
-kube_version: v1.16.3
+kube_version: v1.17.9

# kubernetes image repo define
-kube_image_repo: "{{ gcr_image_repo }}/google-containers"
+kube_image_repo: "k8s.gcr.io"

# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
@@ -139,6 +139,19 @@ dns_mode: coredns
enable_nodelocaldns: true
nodelocaldns_ip: 169.254.25.10
nodelocaldns_health_port: 9254
# nodelocaldns_external_zones:
# - zones:
#   - example.com
#   - example.io:1053
#   nameservers:
#   - 1.1.1.1
#   - 2.2.2.2
#   cache: 5
# - zones:
#   - https://mycompany.local:4453
#   nameservers:
#   - 192.168.0.53
#   cache: 0
# Enable k8s_external plugin for CoreDNS
enable_coredns_k8s_external: false
coredns_k8s_external_zone: k8s_external.local
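Illustrative sketch (not part of the change above): the external-zones block uncommented for a single internal zone; the domain and resolver address are placeholders:

## Sketch: forward one internal zone to a corporate resolver via nodelocaldns
nodelocaldns_external_zones:
- zones:
  - corp.example.internal   # placeholder zone
  nameservers:
  - 10.0.0.53               # placeholder resolver
  cache: 5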
@@ -158,8 +171,33 @@ dns_domain: "{{ cluster_name }}"
## docker for docker, crio for cri-o and containerd for containerd.
container_manager: docker

-## Settings for containerized control plane (etcd/kubelet/secrets)
-etcd_deployment_type: docker
## Settings for containerd runtimes (only used when container_manager is set to containerd)
#
# Settings for default containerd runtime
# containerd_default_runtime:
#   type: io.containerd.runtime.v1.linux
#   engine: ''
#   root: ''
#
# Settings for additional runtimes for containerd configuration
# containerd_runtimes:
#   - name: ""
#     type: ""
#     engine: ""
#     root: ""
# Example for Kata Containers as additional runtime:
# containerd_runtimes:
#   - name: kata
#     type: io.containerd.kata.v2
#     engine: ""
#     root: ""
#
# Settings for untrusted containerd runtime
# containerd_untrusted_runtime_type: ''
# containerd_untrusted_runtime_engine: ''
# containerd_untrusted_runtime_root: ''

## Settings for containerized control plane (kubelet/secrets)
kubelet_deployment_type: host
helm_deployment_type: host
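Hedged example (not part of the change above) following the commented Kata sample; it assumes container_manager is switched to containerd and Kata Containers is already installed on the nodes:

## Sketch: Kata Containers registered as an additional containerd runtime
container_manager: containerd
containerd_runtimes:
  - name: kata
    type: io.containerd.kata.v2
    engine: ""
    root: ""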
@@ -184,15 +222,17 @@ dynamic_kubelet_configuration_dir: "{{ kubelet_config_dir | default(default_kube
# pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
podsecuritypolicy_enabled: false

# Custom PodSecurityPolicySpec for restricted policy
# podsecuritypolicy_restricted_spec: {}

# Custom PodSecurityPolicySpec for privileged policy
# podsecuritypolicy_privileged_spec: {}

# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
# kubeconfig_localhost: false
# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
# kubectl_localhost: false

# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
# kubelet_cgroups_per_qos: true

# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
# kubelet_enforce_node_allocatable: pods
@@ -217,7 +257,8 @@ podsecuritypolicy_enabled: false
## See https://github.com/kubernetes-sigs/kubespray/issues/2141
## Set this variable to true to get rid of this issue
volume_cross_zone_attachment: false
-# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
+## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI,
+## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI)
persistent_volumes_enabled: false

## Container Engine Acceleration
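For completeness, a hedged sketch (not part of the change above) pairing the storage-class toggle with a CSI driver enabled in its own group_vars file; which driver applies depends on the cloud provider:

## Sketch: provision a default storage class once a CSI driver (e.g. Cinder CSI) is enabled
persistent_volumes_enabled: true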
Some files were not shown because too many files have changed in this diff.