Compare commits

...

86 Commits
1.3.0 ... 1.4.0

Author SHA1 Message Date
cf472a6b4c Merge pull request #107 from ansibl8s/race_condition_api_master
Slowdown apimaster restart
2016-01-26 18:00:47 +01:00
fd6ac61afc Use local etcd/etcdproxy for calico 2016-01-26 17:28:30 +01:00
16a1926f94 Drop ansible 1.9 support 2016-01-26 15:31:11 +01:00
839974bad0 Merge pull request #108 from ansibl8s/set_perms_on_unarchive
Set perms on unarchive
2016-01-26 15:25:28 +01:00
4566d60e6f Slowdown apimaster restart 2016-01-26 15:23:16 +01:00
49a7278563 Set perms on unarchive 2016-01-26 12:17:33 +01:00
8676f8761f Merge pull request #99 from ansibl8s/symlink-dnsmasq-conf
Symlink dnsmasq conf
2016-01-26 00:44:13 +01:00
b9781fa7c2 Symlink dnsmasq conf 2016-01-26 00:30:29 +01:00
08052f60da Merge pull request #105 from ansibl8s/fix_handlers
fix some handlers
2016-01-25 23:00:07 +01:00
44230a4e86 Merge pull request #101 from galthaus/patch-1
Fix download reference in cluster.yml
2016-01-25 22:56:48 +01:00
90ffb8489a fix some handlers 2016-01-25 22:49:24 +01:00
238f6e8a0b Remove apt-get update 2016-01-25 22:46:20 +01:00
ef7cf3bf11 Merge pull request #103 from rackn/perm-fix
Force owner and permissions for get_url retrieved files.
2016-01-25 22:01:57 +01:00
e7d5b7af67 Force owner and permissions for get_url retrieved
files.  get_url doesn't honor owner and mode is spotty.
2016-01-25 13:30:48 -06:00
359e55f6e4 Merge pull request #100 from rackn/cache-update-centos
Need to use separate stanzas for each repo because the
2016-01-25 19:00:57 +01:00
dd29c8064f Fix download reference in cluster.yml 2016-01-25 11:23:55 -06:00
c7bd2a2a1e Need to use separate stanzas for each repo because the
args are different.  Sigh.
2016-01-25 11:16:56 -06:00
87fa167efa Merge pull request #95 from Smana/fix_ha_apiserver
workaround_ha_apiserver
2016-01-25 13:01:03 +01:00
baaa6efc2b workaround_ha_apiserver 2016-01-25 12:07:32 +01:00
cece179bd4 Merge pull request #97 from ansibl8s/systemd_reload
Fix systemd reload and calico unit
2016-01-25 11:01:11 +01:00
56b92812fa Fix systemd reload and calico unit 2016-01-25 10:54:07 +01:00
2cbbcee351 Merge pull request #98 from ansibl8s/setup_cache
Use update_cache when possible
2016-01-25 02:12:19 +01:00
f5508b1794 Use update_cache when possible 2016-01-25 02:06:34 +01:00
8f7d552401 Merge pull request #93 from ansibl8s/flannel-info
Add flannel vars to enable vagrant and amazon environments
2016-01-24 16:46:04 +01:00
bcd6ecb7fb Add flannel vars to enable vagrant and amazon environments 2016-01-24 16:18:35 +01:00
65666fc28a Merge pull request #90 from ansibl8s/add_users_role
AddUser Role
2016-01-24 16:17:24 +01:00
b4734c280a Merge branch 'master' into add_users_role 2016-01-24 15:58:10 +01:00
dd61f685b8 AddUser Role 2016-01-24 11:54:34 +01:00
641ce3358a Merge pull request #91 from Smana/rsync_instead_of_copy
use rsync instead of cp
2016-01-23 23:38:42 +01:00
4984b57aa2 use rsync instead of command 2016-01-23 18:26:07 +01:00
87d8d87c6e Merge pull request #87 from Smana/apiserver_on_host
Apiserver on host
2016-01-23 17:46:23 +01:00
283c4169ac run apiserver as a service
reorder master handlers

typo for sysvinit
2016-01-23 14:21:04 +01:00
d5f11b2442 Merge pull request #88 from Smana/complete_remove_downloader
missing commits for the PR #86
2016-01-22 17:25:12 +01:00
5edc81c627 moving kube-cert group into group_vars 2016-01-22 17:18:45 +01:00
391413f7e7 missing commits for the PR #86 2016-01-22 17:10:31 +01:00
c05c60a5d2 Merge pull request #86 from ansibl8s/remove_downloader
Remove downloader host
2016-01-22 17:04:54 +01:00
87b42e34e0 create kube-cert group task 2016-01-22 16:51:54 +01:00
be0bec9eab add kube-cert group 2016-01-22 16:46:06 +01:00
cb59559835 use command instead of synchronize 2016-01-22 16:37:07 +01:00
078b67c50f Remove downloader host 2016-01-22 09:59:39 +01:00
e95c4739f5 Merge pull request #82 from rackn/etcd-sync
Fix etcd synchronize to other nodes from the downloader
2016-01-21 20:39:52 +01:00
32877bdc7b Merge branch 'master' into etcd-sync 2016-01-21 13:13:58 -06:00
5e3af86c26 Merge pull request #84 from rackn/init-system-fix
Test for a systemd service that should be up.
2016-01-21 20:07:47 +01:00
ec1073def8 Test for a systemd service that should be up. 2016-01-21 11:35:15 -06:00
28e530e005 Fix etcd synchronize to other nodes from the downloader 2016-01-21 11:21:25 -06:00
9e9aba4e3a Merge pull request #79 from Smana/gitinfo
script which gives info about the deployment state
2016-01-21 13:49:11 +01:00
de038530ef don't run gitinfos by default 2016-01-21 13:41:01 +01:00
337977e868 script which gives info about the deployment state
fix script location
2016-01-21 13:41:01 +01:00
1c2bdbacb1 Merge pull request #72 from Smana/etcd_on_host
etcd directly in host
2016-01-21 13:20:05 +01:00
9715962356 etcd directly in host
fix etcd configuration for nodes

fix wrong calico checksums

using a var name etcd_bin_dir

fix etcd handlers for sysvinit

using a var name etcd_bin_dir

sysvinit script

review etcd configuration
2016-01-21 11:36:11 +01:00
5afbe181ce Merge pull request #78 from Smana/conf_etc-hosts_preinstall
move /etc/hosts configuration in 'preinstall' role
2016-01-20 19:02:03 +01:00
a5094f2a6a move /etc/hosts configuration in 'preinstall' role 2016-01-20 17:37:23 +01:00
9156d1ecfd Merge pull request #76 from rackn/dns-ip
Use IP if specified, otherwise use the ansible discovered address.
2016-01-20 15:46:27 +01:00
fe5ec398bf Use IP if specified, otherwise use the ansible discovered address.
This fixes cases for use in Vagrant environments.
2016-01-20 08:34:39 -06:00
babf42f03a Merge pull request #71 from ansibl8s/add_set_remote_user
Add set remote user
2016-01-19 22:20:31 +01:00
859f6322a0 Merge branch 'master' into add_set_remote_user 2016-01-19 21:08:52 +01:00
815c5fa43c Merge pull request #74 from rackn/master
run_once only works if master[0] is first in inventory list of all nodes
2016-01-19 20:48:42 +01:00
10b2466d82 run_once only works if master[0] is first in inventory list
of all nodes.
2016-01-19 13:10:54 -06:00
f68d8f3757 Add set_remote_user in synchronize 2016-01-19 14:20:05 +01:00
9b083b62cf Rename tasks 2016-01-19 14:20:05 +01:00
59614fc60d Merge pull request #70 from Smana/localhost_dnsmasq
Localhost dnsmasq
2016-01-19 14:01:05 +01:00
b54af6b42f reduce dns timeout 2016-01-19 13:49:33 +01:00
7cab7e5fef restarting kubelet is sometimes required after docker restart 2016-01-19 13:47:07 +01:00
4c5735cef8 configure dnsmasq to listen on localhost only 2016-01-19 13:34:30 +01:00
58e1db6aae update kubedns submodule 2016-01-19 13:32:53 +01:00
63ae6ba5b5 dnsmasq runs on all nodes 2016-01-19 10:31:47 +01:00
f58b4d3dd6 dnsmasq listens on localhost 2016-01-19 10:29:33 +01:00
d3a8584212 add timeout options to resolv.conf 2016-01-19 10:18:53 +01:00
51f1ae1e9e Merge pull request #67 from ansibl8s/v1.1.4
Change hyperkube repo
2016-01-18 17:32:05 +01:00
4271126bae Change hyperkube repo 2016-01-18 17:17:08 +01:00
049f5015c1 upgrade hyperkube image version 2016-01-18 16:55:57 +01:00
6ab671c88b update memcached submodule 2016-01-18 16:25:01 +01:00
d73ac90acf update k8s-pgbouncer submodule 2016-01-18 11:58:12 +01:00
adf6e2f7b1 update postgres submodule 2016-01-18 11:44:33 +01:00
fb0803cf4c README : update versions 2016-01-17 21:31:38 +01:00
806834a6e9 upgrade kubernetes to 1.1.4 and calico to 0.14.0 2016-01-17 21:30:11 +01:00
8415634016 use google hyperkube image 2016-01-16 22:55:49 +01:00
319f687ced Merge pull request #62 from ansibl8s/flannel
Flannel running as pod
2016-01-15 13:13:56 +01:00
8127e8f8e8 Flannel running as pod 2016-01-15 13:03:27 +01:00
dd46cc64a4 README : Networking title 2016-01-15 12:18:26 +01:00
2d5862a94d README : typo 2016-01-15 12:18:21 +01:00
3d45a81006 README: ansible basics docs link 2016-01-15 12:18:13 +01:00
51a0996087 fix regexp for resolv.conf 2016-01-15 12:18:03 +01:00
80ac2ec6fc update README 2016-01-15 12:17:28 +01:00
5d61b5e813 Fix namespace 2016-01-14 16:22:37 +01:00
b769636435 Ansible 2.0 2016-01-13 16:40:24 +01:00
78 changed files with 1632 additions and 578 deletions

62
.gitmodules vendored

@ -1,43 +1,43 @@
[submodule "roles/apps/k8s-kube-ui"]
path = roles/apps/k8s-kube-ui
url = https://github.com/ansibl8s/k8s-kube-ui.git
branch = v1.0
path = roles/apps/k8s-kube-ui
url = https://github.com/ansibl8s/k8s-kube-ui.git
branch = v1.0
[submodule "roles/apps/k8s-kubedns"]
path = roles/apps/k8s-kubedns
url = https://github.com/ansibl8s/k8s-kubedns.git
branch = v1.0
path = roles/apps/k8s-kubedns
url = https://github.com/ansibl8s/k8s-kubedns.git
branch = v1.0
[submodule "roles/apps/k8s-common"]
path = roles/apps/k8s-common
url = https://github.com/ansibl8s/k8s-common.git
branch = v1.0
path = roles/apps/k8s-common
url = https://github.com/ansibl8s/k8s-common.git
branch = v1.0
[submodule "roles/apps/k8s-redis"]
path = roles/apps/k8s-redis
url = https://github.com/ansibl8s/k8s-redis.git
branch = v1.0
path = roles/apps/k8s-redis
url = https://github.com/ansibl8s/k8s-redis.git
branch = v1.0
[submodule "roles/apps/k8s-elasticsearch"]
path = roles/apps/k8s-elasticsearch
url = https://github.com/ansibl8s/k8s-elasticsearch.git
path = roles/apps/k8s-elasticsearch
url = https://github.com/ansibl8s/k8s-elasticsearch.git
[submodule "roles/apps/k8s-fabric8"]
path = roles/apps/k8s-fabric8
url = https://github.com/ansibl8s/k8s-fabric8.git
branch = v1.0
path = roles/apps/k8s-fabric8
url = https://github.com/ansibl8s/k8s-fabric8.git
branch = v1.0
[submodule "roles/apps/k8s-memcached"]
path = roles/apps/k8s-memcached
url = https://github.com/ansibl8s/k8s-memcached.git
branch = v1.0
path = roles/apps/k8s-memcached
url = https://github.com/ansibl8s/k8s-memcached.git
branch = v1.0
[submodule "roles/apps/k8s-postgres"]
path = roles/apps/k8s-postgres
url = https://github.com/ansibl8s/k8s-postgres.git
branch = v1.0
path = roles/apps/k8s-postgres
url = https://github.com/ansibl8s/k8s-postgres.git
branch = v1.0
[submodule "roles/apps/k8s-kubedash"]
path = roles/apps/k8s-kubedash
url = https://github.com/ansibl8s/k8s-kubedash.git
path = roles/apps/k8s-kubedash
url = https://github.com/ansibl8s/k8s-kubedash.git
[submodule "roles/apps/k8s-heapster"]
path = roles/apps/k8s-heapster
url = https://github.com/ansibl8s/k8s-heapster.git
path = roles/apps/k8s-heapster
url = https://github.com/ansibl8s/k8s-heapster.git
[submodule "roles/apps/k8s-influxdb"]
path = roles/apps/k8s-influxdb
url = https://github.com/ansibl8s/k8s-influxdb.git
path = roles/apps/k8s-influxdb
url = https://github.com/ansibl8s/k8s-influxdb.git
[submodule "roles/apps/k8s-kube-logstash"]
path = roles/apps/k8s-kube-logstash
url = https://github.com/ansibl8s/k8s-kube-logstash.git
@ -47,3 +47,7 @@
[submodule "roles/apps/k8s-rabbitmq"]
path = roles/apps/k8s-rabbitmq
url = https://github.com/ansibl8s/k8s-rabbitmq.git
[submodule "roles/apps/k8s-pgbouncer"]
path = roles/apps/k8s-pgbouncer
url = https://github.com/ansibl8s/k8s-pgbouncer.git
branch = v1.0


@ -8,14 +8,11 @@ addons:
- node1
env:
- SITE=cluster.yml
before_install:
- sudo apt-get update -qq
- SITE=cluster.yml ANSIBLE_VERSION=2.0.0
install:
# Install Ansible.
- sudo -H pip install ansible
- sudo -H pip install ansible==${ANSIBLE_VERSION}
- sudo -H pip install netaddr
cache:

118
README.md

@ -2,19 +2,30 @@
kubernetes-ansible
========
Install and configure a Multi-Master/HA kubernetes cluster including network plugin.
This project allows you to
- Install and configure a **Multi-Master/HA kubernetes** cluster.
- Choose the **network plugin** to be used within the cluster.
- Use a **set of roles** to install applications over the k8s cluster.
- Follow a **flexible method** to create new roles for apps.
Linux distributions tested:
* **Debian** Wheezy, Jessie
* **Ubuntu** 14.10, 15.04, 15.10
* **Fedora** 23
* **CentOS** 7 (Currently with flannel only)
### Requirements
Tested on **Debian Wheezy/Jessie** and **Ubuntu** (14.10, 15.04, 15.10).
Should work on **RedHat/Fedora/Centos** platforms (to be tested)
* The target servers must have access to the Internet in order to pull docker images.
* The target servers must have **access to the Internet** in order to pull docker images.
* The firewalls are not managed; you'll need to implement your own rules as you usually do.
* Ansible v1.9.x and python-netaddr
In order to avoid any issues during deployment you should **disable your firewall**.
* **Copy your ssh keys** to all the servers part of your inventory.
* **Ansible v2.x and python-netaddr**
* Basic knowledge of Ansible. Please refer to [Ansible documentation](http://www.ansible.com/how-ansible-works)
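For example, preparing the machine that runs Ansible could look like this (a sketch; the pinned Ansible version and the target hosts are assumptions taken from the examples on this page):
```
# Minimal control-machine prep (versions/hosts are illustrative):
sudo -H pip install ansible==2.0.0 netaddr
# copy your ssh key to every server of the inventory
ssh-copy-id root@10.115.99.31
ssh-copy-id root@10.115.99.32
```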
### Components
* [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.1.3
* [etcd](https://github.com/coreos/etcd/releases) v2.2.2
* [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.13.0
* [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.1.4
* [etcd](https://github.com/coreos/etcd/releases) v2.2.4
* [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.14.0
* [flanneld](https://github.com/coreos/flannel/releases) v0.5.5
* [docker](https://www.docker.com/) v1.9.1
@ -25,9 +36,6 @@ These defaults are good for testing purposes.
Edit the inventory according to the number of servers
```
[downloader]
localhost ansible_connection=local ansible_python_interpreter=python2
[kube-master]
10.115.99.31
@ -66,9 +74,6 @@ In node-mesh mode the nodes peer with all the other nodes in order to exchange routes
```
[downloader]
localhost ansible_connection=local ansible_python_interpreter=python2
[kube-master]
node1 ansible_ssh_host=10.99.0.26
node2 ansible_ssh_host=10.99.0.27
@ -103,13 +108,10 @@ kube-master
### Playbook
```
---
- hosts: downloader
sudo: no
roles:
- { role: download, tags: download }
- hosts: k8s-cluster
roles:
- { role: download, tags: download }
- { role: kubernetes/preinstall, tags: preinstall }
- { role: docker, tags: docker }
- { role: kubernetes/node, tags: node }
@ -133,7 +135,7 @@ ansible-playbook -i inventory/dev/inventory.cfg cluster.yml -u root
Kubernetes
-------------------------
### Multi master notes
* You can choose where to install the master components. If you want your master node to act both as master (api,scheduler,controller) and node (e.g. accept workloads, create pods ...),
* You can choose where to install the master components. If you want your master node to act both as master (api,scheduler,controller) and node (e.g. accept workloads, create pods ...),
the server address has to be present on both groups 'kube-master' and 'kube-node'.
* Almost all kubernetes components run in pods, except *kubelet*. These pods are managed by kubelet, which ensures they're always running
@ -141,8 +143,8 @@ the server address has to be present on both groups 'kube-master' and 'kube-node
* For safety reasons, you should have at least two master nodes and three etcd servers
* Kube-proxy doesn't support multiple apiservers on startup ([Issue 18174](https://github.com/kubernetes/kubernetes/issues/18174)). An external loadbalancer needs to be configured.
In order to do so, some variables have to be used '**loadbalancer_apiserver**' and '**apiserver_loadbalancer_domain_name**'
In order to do so, two variables have to be set: '**loadbalancer_apiserver**' and '**apiserver_loadbalancer_domain_name**'
### Network Overlay
You can choose between two network plugins. Only one must be chosen.
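As a sketch, the plugin is selected through the `kube_network_plugin` variable used by the roles in this release; the vars-file path below is an assumption:
```
# Pick exactly one plugin (the path and the 'flannel' value are assumptions):
echo 'kube_network_plugin: calico' >> inventory/group_vars/all.yml
# or: echo 'kube_network_plugin: flannel' >> inventory/group_vars/all.yml
```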
@ -201,7 +203,7 @@ You'll need to edit the file '*requirements.yml*' in order to choose the needed apps.
The list of available apps can be found [there](https://github.com/ansibl8s)
For instance, it is **strongly recommended** to install a DNS server which resolves kubernetes service names.
In order to use this role you'll need the following entries in the file '*requirements.yml*'
In order to use this role you'll need the following entries in the file '*requirements.yml*'
Please refer to the [k8s-kubedns readme](https://github.com/ansibl8s/k8s-kubedns) for additional info.
```
- src: https://github.com/ansibl8s/k8s-common.git
@ -224,21 +226,6 @@ Then download the roles with ansible-galaxy
ansible-galaxy install -r requirements.yml
```
#### Git submodules
Alternatively the roles can be installed as git submodules.
That way is easier if you want to do some changes and commit them.
You can list available submodules with the following command:
```
grep path .gitmodules | sed 's/.*= //'
```
In order to install the dns addon you'll need to follow these steps
```
git submodule init roles/apps/k8s-common roles/apps/k8s-kubedns
git submodule update
```
Finally update the playbook ```apps.yml``` with the chosen roles, and run it
```
...
@ -252,8 +239,14 @@ Finally update the playbook ```apps.yml``` with the chosen roles, and run it
ansible-playbook -i inventory/inventory.cfg apps.yml -u root
```
#### Git submodules
Alternatively the roles can be installed as git submodules.
That way it is easier if you want to make changes and commit them.
#### Calico networking
### Networking
#### Calico
Check if the calico-node container is running
```
docker ps | grep calico
@ -274,6 +267,53 @@ calicoctl pool show
```
calicoctl endpoint show --detail
```
#### Flannel networking
#### Flannel
* The Flannel configuration file should have been created here:
```
cat /run/flannel/subnet.env
FLANNEL_NETWORK=10.233.0.0/18
FLANNEL_SUBNET=10.233.16.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=false
```
* Check if the network interface has been created
```
ip a show dev flannel.1
4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
link/ether e2:f3:a7:0f:bf:cb brd ff:ff:ff:ff:ff:ff
inet 10.233.16.0/18 scope global flannel.1
valid_lft forever preferred_lft forever
inet6 fe80::e0f3:a7ff:fe0f:bfcb/64 scope link
valid_lft forever preferred_lft forever
```
* Docker must be configured with a bridge IP in the flannel subnet.
```
ps aux | grep docker
root 20196 1.7 2.7 1260616 56840 ? Ssl 10:18 0:07 /usr/bin/docker daemon --bip=10.233.16.1/24 --mtu=1450
```
* Try to run a container and check its IP address
```
kubectl run test --image=busybox --command -- tail -f /dev/null
replicationcontroller "test" created
kubectl describe po test-34ozs | grep ^IP
IP: 10.233.16.2
```
```
kubectl exec test-34ozs -- ip a show dev eth0
8: eth0@if9: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1450 qdisc noqueue
link/ether 02:42:0a:e9:2b:03 brd ff:ff:ff:ff:ff:ff
inet 10.233.16.2/24 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::42:aff:fee9:2b03/64 scope link tentative flags 08
valid_lft forever preferred_lft forever
```
Congrats! Now you can walk through [kubernetes basics](http://kubernetes.io/v1.1/basicstutorials.html)


@ -9,6 +9,7 @@
- { role: apps/k8s-elasticsearch, tags: 'elasticsearch' }
- { role: apps/k8s-memcached, tags: 'memcached' }
- { role: apps/k8s-redis, tags: 'redis' }
- { role: apps/k8s-mongodb-simple, tags: 'mongodb-simple' }
# Msg Broker
- { role: apps/k8s-rabbitmq, tags: 'rabbitmq' }
@ -27,3 +28,6 @@
# ETCD
- { role: apps/k8s-etcd, tags: 'etcd'}
# Chat Apps
- { role: apps/k8s-rocketchat, tags: 'rocketchat'}


@ -1,11 +1,8 @@
---
- hosts: downloader
sudo: no
roles:
- { role: download, tags: download }
- hosts: k8s-cluster
roles:
- { role: adduser, tags: adduser }
- { role: download, tags: download }
- { role: kubernetes/preinstall, tags: preinstall }
- { role: docker, tags: docker }
- { role: kubernetes/node, tags: node }


@ -5,6 +5,10 @@ bin_dir: /usr/local/bin
# Note: ensure that you've enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert
# Cluster Loglevel configuration
kube_log_level: 2


@ -1,6 +1,3 @@
[downloader]
localhost ansible_connection=local ansible_python_interpreter=python2
[kube-master]
node1 ansible_ssh_host=10.99.0.26
node2 ansible_ssh_host=10.99.0.27


@ -1,8 +1,5 @@
node1 ansible_connection=local local_release_dir={{ansible_env.HOME}}/releases
[downloader]
node1
[kube-master]
node1


@ -31,6 +31,10 @@
# path: roles/apps
# version: v1.0
#
#- src: https://github.com/ansibl8s/k8s-pgbouncer.git
# path: roles/apps
# version: v1.0
#
#- src: https://github.com/ansibl8s/k8s-heapster.git
# path: roles/apps
#


@ -0,0 +1,15 @@
---
addusers:
- name: etcd
comment: "Etcd user"
createhome: yes
home: "/var/lib/etcd"
system: yes
shell: /bin/nologin
- name: kube
comment: "Kubernetes user"
shell: /sbin/nologin
system: yes
group: "{{ kube_cert_group }}"
createhome: no


@ -0,0 +1,13 @@
- name: User | Create User Group
group: name={{item.group|default(item.name)}} system={{item.system|default(omit)}}
with_items: addusers
- name: User | Create User
user:
comment: "{{item.comment|default(omit)}}"
createhome: "{{item.createhome|default(omit)}}"
group: "{{item.group|default(item.name)}}"
home: "{{item.home|default(omit)}}"
name: "{{item.name}}"
system: "{{item.system|default(omit)}}"
with_items: addusers
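The two tasks above create the `etcd` and `kube` users and their groups from the `addusers` defaults. A quick, hypothetical sanity check on a target host:
```
# Verify the users and the cert group created by the adduser role:
getent passwd etcd kube    # both users should exist
getent group kube-cert     # group referenced by kube_cert_group
```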


@ -1,57 +1,43 @@
---
- name: populate inventory into hosts file
lineinfile:
dest: /etc/hosts
regexp: "^{{ hostvars[item].ansible_default_ipv4.address }} {{ item }}$"
line: "{{ hostvars[item].ansible_default_ipv4.address }} {{ item }}"
state: present
backup: yes
when: hostvars[item].ansible_default_ipv4.address is defined
with_items: groups['all']
- name: populate kubernetes loadbalancer address into hosts file
lineinfile:
dest: /etc/hosts
regexp: ".*{{ apiserver_loadbalancer_domain_name }}$"
line: "{{ loadbalancer_apiserver.address }} lb-apiserver.kubernetes.local"
state: present
backup: yes
when: loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined
- name: clean hosts file
lineinfile:
dest: /etc/hosts
regexp: "{{ item }}"
state: absent
backup: yes
with_items:
- '^127\.0\.0\.1(\s+){{ inventory_hostname }}.*'
- '^::1(\s+){{ inventory_hostname }}.*'
- name: ensure dnsmasq.d directory exists
file:
path: /etc/dnsmasq.d
state: directory
when: inventory_hostname in groups['kube-master']
- name: configure dnsmasq
- name: ensure dnsmasq.d-available directory exists
file:
path: /etc/dnsmasq.d-available
state: directory
- name: Write dnsmasq configuration
template:
src: 01-kube-dns.conf.j2
dest: /etc/dnsmasq.d/01-kube-dns.conf
mode: 755
dest: /etc/dnsmasq.d-available/01-kube-dns.conf
mode: 0755
backup: yes
when: inventory_hostname in groups['kube-master']
- name: create dnsmasq pod template
- name: Stat dnsmasq configuration
stat: path=/etc/dnsmasq.d/01-kube-dns.conf
register: sym
- name: Move previous configuration
command: mv /etc/dnsmasq.d/01-kube-dns.conf /etc/dnsmasq.d-available/01-kube-dns.conf.bak
changed_when: False
when: sym.stat.islnk is defined and sym.stat.islnk == False
- name: Enable dnsmasq configuration
file:
src: /etc/dnsmasq.d-available/01-kube-dns.conf
dest: /etc/dnsmasq.d/01-kube-dns.conf
state: link
- name: Create dnsmasq pod manifest
template: src=dnsmasq-pod.yml dest=/etc/kubernetes/manifests/dnsmasq-pod.manifest
when: inventory_hostname in groups['kube-master']
- name: Check for dnsmasq port
- name: Check for dnsmasq port (pulling image and running container)
wait_for:
port: 53
delay: 5
timeout: 100
when: inventory_hostname in groups['kube-master']
- name: check resolvconf
stat: path=/etc/resolvconf/resolv.conf.d/head
@ -59,34 +45,42 @@
- name: target resolv.conf file
set_fact:
resolvconffile: >
{%- if resolvconf.stat.exists == True -%}
/etc/resolvconf/resolv.conf.d/head
{%- else -%}
/etc/resolv.conf
{%- endif -%}
resolvconffile: >-
{%- if resolvconf.stat.exists == True -%}/etc/resolvconf/resolv.conf.d/head{%- else -%}/etc/resolv.conf{%- endif -%}
- name: Add search resolv.conf
lineinfile:
line: search {{ [ 'default.svc.' + dns_domain, 'svc.' + dns_domain, dns_domain ] | join(' ') }}
line: "search {{ [ 'default.svc.' + dns_domain, 'svc.' + dns_domain, dns_domain ] | join(' ') }}"
dest: "{{resolvconffile}}"
state: present
insertafter: EOF
insertbefore: BOF
backup: yes
follow: yes
- name: Add all masters as nameserver
- name: Add local dnsmasq to resolv.conf
lineinfile:
line: nameserver {{ hostvars[item]['ansible_default_ipv4']['address'] }}
line: "nameserver 127.0.0.1"
dest: "{{resolvconffile}}"
state: present
insertafter: "^search.*$"
backup: yes
follow: yes
- name: Add options to resolv.conf
lineinfile:
line: options {{ item }}
dest: "{{resolvconffile}}"
state: present
regexp: "^options.*{{ item }}$"
insertafter: EOF
backup: yes
follow: yes
with_items: groups['kube-master']
with_items:
- timeout:2
- attempts:2
- name: disable resolv.conf modification by dhclient
copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient-enter-hooks.d/nodnsupdate mode=u+x backup=yes
copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient-enter-hooks.d/nodnsupdate mode=0755 backup=yes
when: ansible_os_family == "Debian"
- name: disable resolv.conf modification by dhclient


@ -1,5 +1,6 @@
#Listen on all interfaces
interface=*
#Listen on localhost
bind-interfaces
listen-address=127.0.0.1
addn-hosts=/etc/hosts
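With dnsmasq now bound to 127.0.0.1 and fed by `addn-hosts=/etc/hosts`, local resolution can be spot-checked; the `cluster.local` domain below is an assumption standing in for `dns_domain`:
```
# Hypothetical resolution check on a node (assumes dns_domain=cluster.local):
dig +short @127.0.0.1 kubernetes.default.svc.cluster.local
```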


@ -20,30 +20,35 @@
{{ ansible_distribution }}-{{ ansible_distribution_version }}
when: ansible_kernel|version_compare(docker_kernel_min_version, "<")
- name: ensure docker requirements packages are installed
action: "{{ docker_package_info.pkg_mgr }}"
args: docker_package_info.args
with_items: docker_package_info.pre_pkgs
when: docker_package_info.pre_pkgs|length > 0
- name: ensure docker repository public key is installed
action: "{{ docker_repo_key_info.pkg_key }}"
args: docker_repo_key_info.args
args:
id: "{{item}}"
keyserver: "{{docker_repo_key_info.keyserver}}"
state: present
with_items: docker_repo_key_info.repo_keys
when: docker_repo_key_info.repo_keys|length > 0
- name: ensure docker repository is enabled
action: "{{ docker_repo_info.pkg_repo }}"
args: docker_repo_info.args
args:
repo: "{{item}}"
update_cache: yes
state: present
with_items: docker_repo_info.repos
when: docker_repo_info.repos|length > 0
- name: ensure docker packages are installed
action: "{{ docker_package_info.pkg_mgr }}"
args: docker_package_info.args
args:
pkg: "{{item}}"
update_cache: yes
state: latest
with_items: docker_package_info.pkgs
when: docker_package_info.pkgs|length > 0
- meta: flush_handlers
- name: ensure docker service is started and enabled
service:
name: "{{ item }}"


@ -2,23 +2,13 @@ docker_kernel_min_version: '2.6.32-431'
docker_package_info:
pkg_mgr: yum
args:
name: "{{ item }}"
state: latest
update_cache: yes
pre_pkgs:
- epel-release
- curl
- device-mapper-libs
pkgs:
- docker-io
docker_repo_key_info:
pkg_key: ''
args: {}
repo_keys: []
docker_repo_info:
pkg_repo: ''
args: {}
repos: []


@ -2,35 +2,19 @@ docker_kernel_min_version: '3.2'
docker_package_info:
pkg_mgr: apt
args:
pkg: "{{ item }}"
update_cache: yes
cache_valid_time: 600
state: latest
pre_pkgs:
- apt-transport-https
- curl
- software-properties-common
pkgs:
- docker-engine
docker_repo_key_info:
pkg_key: apt_key
args:
id: "{{ item }}"
keyserver: hkp://p80.pool.sks-keyservers.net:80
state: present
keyserver: hkp://p80.pool.sks-keyservers.net:80
repo_keys:
- 58118E89F3A912897C070ADBF76221572C52609D
- 58118E89F3A912897C070ADBF76221572C52609D
docker_repo_info:
pkg_repo: apt_repository
args:
repo: "{{ item }}"
update_cache: yes
state: present
repos:
- >
deb https://apt.dockerproject.org/repo
deb https://apt.dockerproject.org/repo
{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
main


@ -2,21 +2,13 @@ docker_kernel_min_version: '0'
docker_package_info:
pkg_mgr: yum
args:
name: "{{ item }}"
state: latest
update_cache: yes
pre_pkgs:
- curl
pkgs:
- docker-io
docker_repo_key_info:
pkg_key: ''
args: {}
repo_keys: []
docker_repo_info:
pkg_repo: ''
args: {}
repos: []


@ -0,0 +1,14 @@
docker_kernel_min_version: '0'
docker_package_info:
pkg_mgr: dnf
pkgs:
- docker-io
docker_repo_key_info:
pkg_key: ''
repo_keys: []
docker_repo_info:
pkg_repo: ''
repos: []


@ -2,21 +2,13 @@ docker_kernel_min_version: '0'
docker_package_info:
pkg_mgr: yum
args:
name: "{{ item }}"
state: latest
update_cache: yes
pre_pkgs:
- curl
pkgs:
- docker
docker_repo_key_info:
pkg_key: ''
args: {}
repo_keys: []
docker_repo_info:
pkg_repo: ''
args: {}
repos: []


@ -1,42 +1,66 @@
---
local_release_dir: /tmp
flannel_version: 0.5.5
calico_version: v0.13.0
# Versions
kube_version: v1.1.4
etcd_version: v2.2.4
calico_version: v0.14.0
calico_plugin_version: v0.7.0
kube_version: v1.1.3
kubectl_checksum: "01b9bea18061a27b1cf30e34fd8ab45cfc096c9a9d57d0ed21072abb40dd3d1d"
kubelet_checksum: "62191c66f2d670dd52ddf1d88ef81048977abf1ffaa95ee6333299447eb6a482"
# Download URL's
kube_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64"
flannel_download_url: "https://github.com/coreos/flannel/releases/download/v{{ flannel_version }}/flannel-{{ flannel_version }}-linux-amd64.tar.gz"
etcd_download_url: "https://github.com/coreos/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
calico_download_url: "https://github.com/Metaswitch/calico-docker/releases/download/{{calico_version}}/calicoctl"
calico_plugin_download_url: "https://github.com/projectcalico/calico-kubernetes/releases/download/{{calico_plugin_version}}/calico_kubernetes"
# Checksums
calico_checksum: "f251d7a8583233906aa6d059447c1e4fb32bf1369a51fdf96a68d50466d6a69c"
calico_plugin_checksum: "032f582f5eeec6fb26191d2fbcbf8bca4da3b14abb579db7baa7b3504d4dffec"
etcd_checksum: "6c4e5cdeaaac1a70b8f06b5dd6b82c37ff19993c9bca81248975610e555c4b9b"
kubectl_checksum: "873ba19926d17a3287dc8639ea1434fe3cd0cb4e61d82101ba754922cfc7a633"
kubelet_checksum: "f2d1eae3fa6e304f6cbc9b2621e4b86fc3bcb4e74a15d35f58bf00e45c706e0a"
kube_apiserver_checksum: "bb3814c4df65f1587a3650140437392ce3fb4b64f51d459457456691c99f1202"
downloads:
- name: calico
dest: calico/bin/calicoctl
url: "{{calico_download_url}}"
sha256: "{{ calico_checksum }}"
url: "{{ calico_download_url }}"
owner: "root"
mode: "0755"
- name: calico-plugin
dest: calico/bin/calico
url: "{{calico_plugin_download_url}}"
sha256: "{{ calico_plugin_checksum }}"
url: "{{ calico_plugin_download_url }}"
owner: "root"
mode: "0755"
- name: flannel
dest: flannel/flannel-{{ flannel_version }}-linux-amd64.tar.gz
url: "{{flannel_download_url}}"
unarchive: yes
- name: etcd
dest: "etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
sha256: "{{ etcd_checksum }}"
url: "{{ etcd_download_url }}"
unarchive: true
owner: "etcd"
mode: "0755"
- name: kubernetes-kubelet
dest: kubernetes/bin/kubelet
sha256: "{{kubelet_checksum}}"
url: "{{ kube_download_url }}/kubelet"
owner: "kube"
mode: "0755"
- name: kubernetes-kubectl
dest: kubernetes/bin/kubectl
sha256: "{{kubectl_checksum}}"
url: "{{ kube_download_url }}/kubectl"
owner: "kube"
mode: "0755"
- name: kubernetes-apiserver
dest: kubernetes/bin/kube-apiserver
sha256: "{{kube_apiserver_checksum}}"
url: "{{ kube_download_url }}/kube-apiserver"
owner: "kube"
mode: "0755"


@ -8,12 +8,25 @@
url: "{{item.url}}"
dest: "{{local_release_dir}}/{{item.dest}}"
sha256sum: "{{item.sha256 | default(omit)}}"
owner: "{{ item.owner|default(omit) }}"
mode: "{{ item.mode|default(omit) }}"
with_items: downloads
- name: Extract archives
unarchive:
src: "{{ local_release_dir }}/{{item.dest}}"
dest: "{{ local_release_dir }}/{{item.dest|dirname}}"
copy: no
src: "{{ local_release_dir }}/{{item.dest}}"
dest: "{{ local_release_dir }}/{{item.dest|dirname}}"
owner: "{{ item.owner|default(omit) }}"
mode: "{{ item.mode|default(omit) }}"
copy: no
when: "{{item.unarchive is defined and item.unarchive == True}}"
with_items: downloads
- name: Fix permissions
file:
state: file
path: "{{local_release_dir}}/{{item.dest}}"
owner: "{{ item.owner|default(omit) }}"
mode: "{{ item.mode|default(omit) }}"
when: "{{item.unarchive is not defined or item.unarchive == False}}"
with_items: downloads


@ -0,0 +1,3 @@
---
etcd_version: v2.2.4
etcd_bin_dir: "{{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64/"


@ -0,0 +1,15 @@
---
- name: restart etcd
command: /bin/true
notify:
- reload systemd
- reload etcd
- name: reload systemd
command: systemctl daemon-reload
when: init_system == "systemd"
- name: reload etcd
service:
name: etcd
state: restarted


@ -0,0 +1,23 @@
---
- name: Configure | Copy etcd.service systemd file
template:
src: etcd.service.j2
dest: /lib/systemd/system/etcd.service
backup: yes
when: init_system == "systemd"
notify: restart etcd
- name: Configure | Write etcd initd script
template:
src: deb-etcd.initd.j2
dest: /etc/init.d/etcd
owner: root
mode: 0755
when: init_system == "sysvinit" and ansible_os_family == "Debian"
notify: restart etcd
- name: Configure | Create etcd config file
template:
src: etcd.j2
dest: /etc/etcd.env
notify: restart etcd


@ -0,0 +1,9 @@
---
- name: Install | Copy etcd binary
command: rsync -piu "{{ etcd_bin_dir }}/etcd" "{{ bin_dir }}/etcd"
register: etcd_copy
changed_when: false
- name: Install | Copy etcdctl binary
command: rsync -piu "{{ etcd_bin_dir }}/etcdctl" "{{ bin_dir }}/etcdctl"
changed_when: false


@ -1,13 +1,18 @@
---
- name: ETCD2 | Stop etcd2 service
service: name=etcd state=stopped
ignore_errors: yes
- include: install.yml
- include: configure.yml
- name: ETCD2 | create etcd pod template
template: src=etcd-pod.yml dest=/etc/kubernetes/manifests/etcd-pod.manifest
- name: Restart etcd if binary changed
command: /bin/true
notify: restart etcd
when: etcd_copy.stdout_lines
- name: ETCD2 | Check for etcd2 port
wait_for:
port: 2379
delay: 5
timeout: 100
# reload systemd before starting service
- meta: flush_handlers
- name: Ensure etcd is running
service:
name: etcd
state: started
enabled: yes
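Once the handlers have flushed and etcd answers on 2379, the cluster can be checked with the etcd v2 tooling (a sketch run on any member):
```
# Verify the cluster after the role has run:
etcdctl cluster-health
curl -s http://127.0.0.1:2379/health
```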


@ -0,0 +1,113 @@
#!/bin/sh
set -a
### BEGIN INIT INFO
# Provides: etcd
# Required-Start: $local_fs $network $syslog
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: etcd distributed k/v store
# Description:
# etcd is a distributed, consistent key-value store for shared configuration and service discovery
### END INIT INFO
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="etcd k/v store"
NAME=etcd
DAEMON={{ bin_dir }}/etcd
{% if inventory_hostname in groups['etcd'] %}
DAEMON_ARGS=""
{% else %}
DAEMON_ARGS="-proxy on"
{% endif %}
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=etcd
STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}"
PID=/var/run/etcd.pid
# Exit if the binary is not present
[ -x "$DAEMON" ] || exit 0
# Read configuration variable file if it is present
[ -f /etc/etcd.env ] && . /etc/etcd.env
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions
do_status()
{
status_of_proc -p $PID "$DAEMON" "$NAME" && exit 0 || exit $?
}
# Function that starts the daemon/service
#
do_start()
{
start-stop-daemon --background --start --quiet --make-pidfile --pidfile $PID --user $DAEMON_USER --exec $DAEMON -- \
$DAEMON_ARGS \
|| return 2
}
#
# Function that stops the daemon/service
#
do_stop()
{
start-stop-daemon --stop --quiet --retry=$STOP_SCHEDULE --pidfile $PID --name $NAME
RETVAL="$?"
sleep 1
return "$RETVAL"
}
case "$1" in
start)
log_daemon_msg "Starting $DESC" "$NAME"
do_start
case "$?" in
0|1) log_end_msg 0 || exit 0 ;;
2) log_end_msg 1 || exit 1 ;;
esac
;;
stop)
log_daemon_msg "Stopping $DESC" "$NAME"
if do_stop; then
log_end_msg 0
else
log_failure_msg "Can't stop etcd"
log_end_msg 1
fi
;;
status)
if do_status; then
log_end_msg 0
else
log_failure_msg "etcd is not running"
log_end_msg 1
fi
;;
restart|force-reload)
log_daemon_msg "Restarting $DESC" "$NAME"
if do_stop; then
if do_start; then
log_end_msg 0
exit 0
else
rc="$?"
fi
else
rc="$?"
fi
log_failure_msg "Can't restart etcd"
log_end_msg ${rc}
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
exit 3
;;
esac
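On sysvinit (Debian) hosts this script is managed like any LSB service; a hypothetical manual round-trip:
```
# Enable at boot and query status via the LSB functions used above:
update-rc.d etcd defaults
service etcd status
```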


@ -1,54 +0,0 @@
---
apiVersion: v1
kind: Pod
metadata:
name: etcd
namespace: kube-system
spec:
hostNetwork: true
containers:
- name: etcd
image: quay.io/coreos/etcd:v2.2.2
resources:
limits:
cpu: 100m
memory: 256M
args:
{% if inventory_hostname in groups['etcd'] %}
- --name
- etcd-{{inventory_hostname}}-master
- --advertise-client-urls
- "http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2379"
- --listen-peer-urls
- http://0.0.0.0:2380
- --initial-advertise-peer-urls
- http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2380
- --data-dir
- /var/etcd/data
- --initial-cluster-state
- new
{% else %}
- --proxy
- 'on'
{% endif %}
- --listen-client-urls
- "http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2379,http://127.0.0.1:2379"
- --initial-cluster
- "{% for host in groups['etcd'] %}etcd-{{host}}-master=http://{{ hostvars[host]['ip'] | default( hostvars[host]['ansible_default_ipv4']['address']) }}:2380{% if not loop.last %},{% endif %}{% endfor %}"
- --initial-cluster-token
- etcd-k8s-cluster
ports:
- name: etcd-client
containerPort: 2379
hostPort: 2379
- name: etcd-peer
containerPort: 2380
hostPort: 2380
volumeMounts:
- name: varetcd
mountPath: /var/etcd
readOnly: false
volumes:
- name: varetcd
hostPath:
path: /containers/pods/etcd-{{inventory_hostname}}/rootfs/var/etcd


@ -0,0 +1,17 @@
ETCD_DATA_DIR="/var/lib/etcd"
{% if inventory_hostname in groups['etcd'] %}
{% set etcd = {} %}
{% for host in groups['etcd'] %}
{% if inventory_hostname == host %}
{% set _dummy = etcd.update({'name':"etcd"+loop.index|string}) %}
{% endif %}
{% endfor %}
ETCD_ADVERTISE_CLIENT_URLS="http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s_etcd"
ETCD_LISTEN_PEER_URLS="http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2380"
ETCD_NAME="{{ etcd.name }}"
{% endif %}
ETCD_INITIAL_CLUSTER="{% for host in groups['etcd'] %}etcd{{ loop.index|string }}=http://{{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}:2380{% if not loop.last %},{% endif %}{% endfor %}"
ETCD_LISTEN_CLIENT_URLS="http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2379,http://127.0.0.1:2379"
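For illustration, on the first of two etcd members (reusing 10.99.0.26/10.99.0.27 from the sample inventory; a sketch, not actual output) the template would render roughly:
```
# Hypothetical /etc/etcd.env for member "etcd1" of a two-node cluster:
ETCD_DATA_DIR="/var/lib/etcd"
ETCD_ADVERTISE_CLIENT_URLS="http://10.99.0.26:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.99.0.26:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s_etcd"
ETCD_LISTEN_PEER_URLS="http://10.99.0.26:2380"
ETCD_NAME="etcd1"
ETCD_INITIAL_CLUSTER="etcd1=http://10.99.0.26:2380,etcd2=http://10.99.0.27:2380"
ETCD_LISTEN_CLIENT_URLS="http://10.99.0.26:2379,http://127.0.0.1:2379"
```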


@ -0,0 +1,18 @@
[Unit]
Description=etcd
[Service]
User=etcd
EnvironmentFile=/etc/etcd.env
{% if inventory_hostname in groups['etcd'] %}
ExecStart={{ bin_dir }}/etcd
{% else %}
ExecStart={{ bin_dir }}/etcd -proxy on
{% endif %}
Restart=always
RestartSec=10s
LimitNOFILE=40000
[Install]
WantedBy=multi-user.target


@ -265,6 +265,7 @@ _kubectl_get()
flags_completion=()
flags+=("--all-namespaces")
flags+=("--export")
flags+=("--filename=")
flags_with_completion+=("--filename")
flags_completion+=("__handle_filename_extension_flag json|yaml|yml")
@ -401,10 +402,204 @@ _kubectl_describe()
must_have_one_noun+=("serviceaccount")
}
_kubectl_create_namespace()
{
last_command="kubectl_create_namespace"
commands=()
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--dry-run")
flags+=("--generator=")
flags+=("--output=")
two_word_flags+=("-o")
flags+=("--output-version=")
flags+=("--save-config")
flags+=("--schema-cache-dir=")
flags+=("--validate")
flags+=("--alsologtostderr")
flags+=("--api-version=")
flags+=("--certificate-authority=")
flags+=("--client-certificate=")
flags+=("--client-key=")
flags+=("--cluster=")
flags+=("--context=")
flags+=("--insecure-skip-tls-verify")
flags+=("--kubeconfig=")
flags+=("--log-backtrace-at=")
flags+=("--log-dir=")
flags+=("--log-flush-frequency=")
flags+=("--logtostderr")
flags+=("--match-server-version")
flags+=("--namespace=")
flags+=("--password=")
flags+=("--server=")
two_word_flags+=("-s")
flags+=("--stderrthreshold=")
flags+=("--token=")
flags+=("--user=")
flags+=("--username=")
flags+=("--v=")
flags+=("--vmodule=")
must_have_one_flag=()
must_have_one_noun=()
}
_kubectl_create_secret_docker-registry()
{
last_command="kubectl_create_secret_docker-registry"
commands=()
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--docker-email=")
flags+=("--docker-password=")
flags+=("--docker-server=")
flags+=("--docker-username=")
flags+=("--dry-run")
flags+=("--generator=")
flags+=("--output=")
two_word_flags+=("-o")
flags+=("--output-version=")
flags+=("--save-config")
flags+=("--schema-cache-dir=")
flags+=("--validate")
flags+=("--alsologtostderr")
flags+=("--api-version=")
flags+=("--certificate-authority=")
flags+=("--client-certificate=")
flags+=("--client-key=")
flags+=("--cluster=")
flags+=("--context=")
flags+=("--insecure-skip-tls-verify")
flags+=("--kubeconfig=")
flags+=("--log-backtrace-at=")
flags+=("--log-dir=")
flags+=("--log-flush-frequency=")
flags+=("--logtostderr")
flags+=("--match-server-version")
flags+=("--namespace=")
flags+=("--password=")
flags+=("--server=")
two_word_flags+=("-s")
flags+=("--stderrthreshold=")
flags+=("--token=")
flags+=("--user=")
flags+=("--username=")
flags+=("--v=")
flags+=("--vmodule=")
must_have_one_flag=()
must_have_one_flag+=("--docker-email=")
must_have_one_flag+=("--docker-password=")
must_have_one_flag+=("--docker-username=")
must_have_one_noun=()
}
_kubectl_create_secret_generic()
{
last_command="kubectl_create_secret_generic"
commands=()
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--dry-run")
flags+=("--from-file=")
flags+=("--from-literal=")
flags+=("--generator=")
flags+=("--output=")
two_word_flags+=("-o")
flags+=("--output-version=")
flags+=("--save-config")
flags+=("--schema-cache-dir=")
flags+=("--type=")
flags+=("--validate")
flags+=("--alsologtostderr")
flags+=("--api-version=")
flags+=("--certificate-authority=")
flags+=("--client-certificate=")
flags+=("--client-key=")
flags+=("--cluster=")
flags+=("--context=")
flags+=("--insecure-skip-tls-verify")
flags+=("--kubeconfig=")
flags+=("--log-backtrace-at=")
flags+=("--log-dir=")
flags+=("--log-flush-frequency=")
flags+=("--logtostderr")
flags+=("--match-server-version")
flags+=("--namespace=")
flags+=("--password=")
flags+=("--server=")
two_word_flags+=("-s")
flags+=("--stderrthreshold=")
flags+=("--token=")
flags+=("--user=")
flags+=("--username=")
flags+=("--v=")
flags+=("--vmodule=")
must_have_one_flag=()
must_have_one_noun=()
}
_kubectl_create_secret()
{
last_command="kubectl_create_secret"
commands=()
commands+=("docker-registry")
commands+=("generic")
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--alsologtostderr")
flags+=("--api-version=")
flags+=("--certificate-authority=")
flags+=("--client-certificate=")
flags+=("--client-key=")
flags+=("--cluster=")
flags+=("--context=")
flags+=("--insecure-skip-tls-verify")
flags+=("--kubeconfig=")
flags+=("--log-backtrace-at=")
flags+=("--log-dir=")
flags+=("--log-flush-frequency=")
flags+=("--logtostderr")
flags+=("--match-server-version")
flags+=("--namespace=")
flags+=("--password=")
flags+=("--server=")
two_word_flags+=("-s")
flags+=("--stderrthreshold=")
flags+=("--token=")
flags+=("--user=")
flags+=("--username=")
flags+=("--v=")
flags+=("--vmodule=")
must_have_one_flag=()
must_have_one_noun=()
}
_kubectl_create()
{
last_command="kubectl_create"
commands=()
commands+=("namespace")
commands+=("secret")
flags=()
two_word_flags=()
@ -945,6 +1140,125 @@ _kubectl_scale()
must_have_one_noun=()
}
_kubectl_cordon()
{
last_command="kubectl_cordon"
commands=()
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--alsologtostderr")
flags+=("--api-version=")
flags+=("--certificate-authority=")
flags+=("--client-certificate=")
flags+=("--client-key=")
flags+=("--cluster=")
flags+=("--context=")
flags+=("--insecure-skip-tls-verify")
flags+=("--kubeconfig=")
flags+=("--log-backtrace-at=")
flags+=("--log-dir=")
flags+=("--log-flush-frequency=")
flags+=("--logtostderr")
flags+=("--match-server-version")
flags+=("--namespace=")
flags+=("--password=")
flags+=("--server=")
two_word_flags+=("-s")
flags+=("--stderrthreshold=")
flags+=("--token=")
flags+=("--user=")
flags+=("--username=")
flags+=("--v=")
flags+=("--vmodule=")
must_have_one_flag=()
must_have_one_noun=()
}
_kubectl_drain()
{
last_command="kubectl_drain"
commands=()
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--force")
flags+=("--grace-period=")
flags+=("--alsologtostderr")
flags+=("--api-version=")
flags+=("--certificate-authority=")
flags+=("--client-certificate=")
flags+=("--client-key=")
flags+=("--cluster=")
flags+=("--context=")
flags+=("--insecure-skip-tls-verify")
flags+=("--kubeconfig=")
flags+=("--log-backtrace-at=")
flags+=("--log-dir=")
flags+=("--log-flush-frequency=")
flags+=("--logtostderr")
flags+=("--match-server-version")
flags+=("--namespace=")
flags+=("--password=")
flags+=("--server=")
two_word_flags+=("-s")
flags+=("--stderrthreshold=")
flags+=("--token=")
flags+=("--user=")
flags+=("--username=")
flags+=("--v=")
flags+=("--vmodule=")
must_have_one_flag=()
must_have_one_noun=()
}
_kubectl_uncordon()
{
last_command="kubectl_uncordon"
commands=()
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--alsologtostderr")
flags+=("--api-version=")
flags+=("--certificate-authority=")
flags+=("--client-certificate=")
flags+=("--client-key=")
flags+=("--cluster=")
flags+=("--context=")
flags+=("--insecure-skip-tls-verify")
flags+=("--kubeconfig=")
flags+=("--log-backtrace-at=")
flags+=("--log-dir=")
flags+=("--log-flush-frequency=")
flags+=("--logtostderr")
flags+=("--match-server-version")
flags+=("--namespace=")
flags+=("--password=")
flags+=("--server=")
two_word_flags+=("-s")
flags+=("--stderrthreshold=")
flags+=("--token=")
flags+=("--user=")
flags+=("--username=")
flags+=("--v=")
flags+=("--vmodule=")
must_have_one_flag=()
must_have_one_noun=()
}
_kubectl_attach()
{
last_command="kubectl_attach"
@ -1164,6 +1478,7 @@ _kubectl_run()
two_word_flags+=("-r")
flags+=("--requests=")
flags+=("--restart=")
flags+=("--rm")
flags+=("--save-config")
flags+=("--service-generator=")
flags+=("--service-overrides=")
@ -2045,6 +2360,9 @@ _kubectl()
commands+=("logs")
commands+=("rolling-update")
commands+=("scale")
commands+=("cordon")
commands+=("drain")
commands+=("uncordon")
commands+=("attach")
commands+=("exec")
commands+=("port-forward")


@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: kube-system


@ -1,14 +1,4 @@
---
- name: reload systemd
command: systemctl daemon-reload
- name: restart systemd-kubelet
command: /bin/true
notify:
- reload systemd
- restart kubelet
- name: restart kubelet
service:
name: kubelet
state: restarted
- name: restart kube-apiserver
set_fact:
restart_apimaster: True


@ -0,0 +1,24 @@
---
- name: tokens | generate tokens for master components
command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
environment:
TOKEN_DIR: "{{ kube_token_dir }}"
with_nested:
- [ "system:kubectl" ]
- "{{ groups['kube-master'] }}"
register: gentoken_master
changed_when: "'Added' in gentoken_master.stdout"
when: inventory_hostname == groups['kube-master'][0]
notify: restart kube-apiserver
- name: tokens | generate tokens for node components
command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
environment:
TOKEN_DIR: "{{ kube_token_dir }}"
with_nested:
- [ 'system:kubelet' ]
- "{{ groups['kube-node'] }}"
register: gentoken_node
changed_when: "'Added' in gentoken_node.stdout"
when: inventory_hostname == groups['kube-master'][0]
notify: restart kube-apiserver


@ -1,20 +1,20 @@
---
- include: gen_kube_tokens.yml
tags: tokens
- name: Copy kubectl bash completion
copy:
src: kubectl_bash_completion.sh
dest: /etc/bash_completion.d/kubectl.sh
- name: Install kubectl binary
synchronize:
src: "{{ local_release_dir }}/kubernetes/bin/kubectl"
dest: "{{ bin_dir }}/kubectl"
archive: no
checksum: yes
times: yes
delegate_to: "{{ groups['downloader'][0] }}"
- name: Copy kube-apiserver binary
command: rsync -piu "{{ local_release_dir }}/kubernetes/bin/kube-apiserver" "{{ bin_dir }}/kube-apiserver"
register: kube_apiserver_copy
changed_when: false
- name: Perms kubectl binary
file: path={{ bin_dir }}/kubectl owner=kube mode=0755 state=file
- name: Copy kubectl binary
command: rsync -piu "{{ local_release_dir }}/kubernetes/bin/kubectl" "{{ bin_dir }}/kubectl"
changed_when: false
- name: populate users for basic auth in API
lineinfile:
@ -23,6 +23,7 @@
line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}'
backup: yes
with_dict: "{{ kube_users }}"
notify: restart kube-apiserver
# Sync masters
- name: synchronize auth directories for masters
@ -40,32 +41,69 @@
delegate_to: "{{ groups['kube-master'][0] }}"
when: inventory_hostname != "{{ groups['kube-master'][0] }}"
# Write manifests
- name: Write kube-apiserver manifest
- name: install | Write kube-apiserver systemd init file
template:
src: manifests/kube-apiserver.manifest.j2
dest: "{{ kube_manifest_dir }}/kube-apisever.manifest"
notify:
- restart kubelet
src: "kube-apiserver.service.j2"
dest: "/etc/systemd/system/kube-apiserver.service"
backup: yes
when: init_system == "systemd"
notify: restart kube-apiserver
- name: install | Write kube-apiserver initd script
template:
src: "deb-kube-apiserver.initd.j2"
dest: "/etc/init.d/kube-apiserver"
owner: root
mode: 0755
backup: yes
when: init_system == "sysvinit" and ansible_os_family == "Debian"
- name: Write kube-apiserver config file
template:
src: "kube-apiserver.j2"
dest: "{{ kube_config_dir }}/kube-apiserver.env"
backup: yes
notify: restart kube-apiserver
- name: Allow apiserver to bind on both secure and insecure ports
shell: setcap cap_net_bind_service+ep {{ bin_dir }}/kube-apiserver
changed_when: false
- name: Restart apiserver
command: "/bin/true"
notify: restart kube-apiserver
when: is_gentoken_calico|default(false)
- meta: flush_handlers
- name: wait for the apiserver to be running (pulling image and running container)
wait_for:
port: "{{kube_apiserver_insecure_port}}"
delay: 10
timeout: 60
- include: start.yml
with_items: groups['kube-master']
when: "{{ hostvars[item].inventory_hostname == inventory_hostname }}"
- name: Create 'kube-system' namespace
uri:
url: http://127.0.0.1:{{ kube_apiserver_insecure_port }}/api/v1/namespaces
method: POST
body: '{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"kube-system"}}'
status_code: 201,409
body_format: json
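The `uri` task above amounts to the following curl call (a sketch; the insecure port value 8080 is an assumption, and 201/409 mean created/already exists):
```
# Hypothetical curl equivalent of the namespace-creation task:
curl -s -o /dev/null -w '%{http_code}\n' -X POST \
  -H 'Content-Type: application/json' \
  -d '{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"kube-system"}}' \
  http://127.0.0.1:8080/api/v1/namespaces
```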
# Create kube-system namespace
- name: copy 'kube-system' namespace manifest
copy: src=namespace.yml dest=/etc/kubernetes/kube-system-ns.yml
run_once: yes
when: inventory_hostname == groups['kube-master'][0]
- name: Check if kube-system exists
command: kubectl get ns kube-system
register: 'kubesystem'
changed_when: False
ignore_errors: yes
run_once: yes
- name: wait for the apiserver to be running
wait_for:
port: "{{kube_apiserver_insecure_port}}"
timeout: 60
- name: Create 'kube-system' namespace
command: kubectl create -f /etc/kubernetes/kube-system-ns.yml
changed_when: False
when: kubesystem|failed and inventory_hostname == groups['kube-master'][0]
# Write manifests
- name: Write kube-controller-manager manifest
template:
src: manifests/kube-controller-manager.manifest.j2
@ -80,3 +118,9 @@
template:
src: manifests/kube-podmaster.manifest.j2
dest: "{{ kube_manifest_dir }}/kube-podmaster.manifest"
- name: restart kubelet
service:
name: kubelet
state: restarted
changed_when: false


@ -0,0 +1,21 @@
---
- name: Pause
pause: seconds=10
- name: reload systemd
command: systemctl daemon-reload
when: init_system == "systemd" and restart_apimaster is defined and restart_apimaster == True
- name: reload kube-apiserver
service:
name: kube-apiserver
state: restarted
enabled: yes
when: restart_apimaster is defined and restart_apimaster == True
- name: Enable apiserver
service:
name: kube-apiserver
enabled: yes
state: started
when: restart_apimaster is not defined or restart_apimaster == False


@ -0,0 +1,118 @@
#!/bin/bash
#
### BEGIN INIT INFO
# Provides: kube-apiserver
# Required-Start: $local_fs $network $syslog
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: The Kubernetes apiserver
# Description:
# The Kubernetes apiserver.
### END INIT INFO
# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="The Kubernetes apiserver"
NAME=kube-apiserver
DAEMON={{ bin_dir }}/kube-apiserver
DAEMON_LOG_FILE=/var/log/$NAME.log
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=root
# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0
# Read configuration variable file if it is present
[ -r /etc/kubernetes/$NAME.env ] && . /etc/kubernetes/$NAME.env
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions
#
# Function that starts the daemon/service
#
do_start()
{
# Return
# 0 if daemon has been started
# 1 if daemon was already running
# 2 if daemon could not be started
start-stop-daemon --start --quiet --background --no-close \
--make-pidfile --pidfile $PIDFILE \
--exec $DAEMON -c $DAEMON_USER --test > /dev/null \
|| return 1
start-stop-daemon --start --quiet --background --no-close \
--make-pidfile --pidfile $PIDFILE \
--exec $DAEMON -c $DAEMON_USER -- \
$DAEMON_ARGS >> $DAEMON_LOG_FILE 2>&1 \
|| return 2
}
#
# Function that stops the daemon/service
#
do_stop()
{
# Return
# 0 if daemon has been stopped
# 1 if daemon was already stopped
# 2 if daemon could not be stopped
# other if a failure occurred
start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
RETVAL="$?"
[ "$RETVAL" = 2 ] && return 2
# Many daemons don't delete their pidfiles when they exit.
rm -f $PIDFILE
return "$RETVAL"
}
case "$1" in
start)
log_daemon_msg "Starting $DESC" "$NAME"
do_start
case "$?" in
0|1) log_end_msg 0 || exit 0 ;;
2) log_end_msg 1 || exit 1 ;;
esac
;;
stop)
log_daemon_msg "Stopping $DESC" "$NAME"
do_stop
case "$?" in
0|1) log_end_msg 0 ;;
2) exit 1 ;;
esac
;;
status)
status_of_proc -p $PIDFILE "$DAEMON" "$NAME" && exit 0 || exit $?
;;
restart|force-reload)
log_daemon_msg "Restarting $DESC" "$NAME"
do_stop
case "$?" in
0|1)
do_start
case "$?" in
0) log_end_msg 0 ;;
1) log_end_msg 1 ;; # Old process is still running
*) log_end_msg 1 ;; # Failed to start
esac
;;
*)
# Failed to stop
log_end_msg 1
;;
esac
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
exit 3
;;
esac


@ -0,0 +1,44 @@
###
# kubernetes system config
#
# The following values are used to configure the kube-apiserver
{% if init_system == "sysvinit" %}
# Logging directory
KUBE_LOGGING="--log-dir={{ kube_log_dir }} --logtostderr=true"
{% else %}
# logging to stderr means we get it in the systemd journal
KUBE_LOGGING="--logtostderr=true"
{% endif %}
# Apiserver Log level, 0 is debug
KUBE_LOG_LEVEL="--v={{ kube_log_level | default(2) }}"
# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow_privileged=true"
# The port on the local server to listen on.
KUBE_API_PORT="--insecure-port={{kube_apiserver_insecure_port}} --secure-port={{ kube_apiserver_port }}"
# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range={{ kube_service_addresses }}"
# Location of the etcd cluster
KUBE_ETCD_SERVERS="--etcd_servers={% for host in groups['etcd'] %}http://{{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}:2379{% if not loop.last %},{% endif %}{% endfor %}"
# default admission control policies
KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
# RUNTIME API CONFIGURATION (e.g. enable extensions)
KUBE_RUNTIME_CONFIG="{% if kube_api_runtime_config is defined %}{% for conf in kube_api_runtime_config %}--runtime-config={{ conf }} {% endfor %}{% endif %}"
# TLS CONFIGURATION
KUBE_TLS_CONFIG="--tls_cert_file={{ kube_cert_dir }}/apiserver.pem --tls_private_key_file={{ kube_cert_dir }}/apiserver-key.pem --client_ca_file={{ kube_cert_dir }}/ca.pem"
# Add your own!
KUBE_API_ARGS="--token_auth_file={{ kube_token_dir }}/known_tokens.csv --basic-auth-file={{ kube_users_dir }}/known_users.csv --service_account_key_file={{ kube_cert_dir }}/apiserver-key.pem"
{% if init_system == "sysvinit" %}
DAEMON_ARGS="$KUBE_LOGGING $KUBE_LOG_LEVEL $KUBE_ALLOW_PRIV $KUBE_API_PORT $KUBE_SERVICE_ADDRESSES \
$KUBE_ETCD_SERVERS $KUBE_ADMISSION_CONTROL $KUBE_RUNTIME_CONFIG $KUBE_TLS_CONFIG $KUBE_API_ARGS"
{% endif %}
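A hedged smoke test once the apiserver service is up (assumes the default insecure port 8080):
```
# The apiserver should answer on the insecure port:
curl -s http://127.0.0.1:8080/healthz   # prints "ok"
```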


@ -0,0 +1,28 @@
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
Requires=etcd.service
After=etcd.service
[Service]
EnvironmentFile=/etc/kubernetes/kube-apiserver.env
User=kube
ExecStart={{ bin_dir }}/kube-apiserver \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_ETCD_SERVERS \
$KUBE_API_ADDRESS \
$KUBE_API_PORT \
$KUBELET_PORT \
$KUBE_ALLOW_PRIV \
$KUBE_SERVICE_ADDRESSES \
$KUBE_ADMISSION_CONTROL \
$KUBE_RUNTIME_CONFIG \
$KUBE_TLS_CONFIG \
$KUBE_API_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,6 @@
---
namespace_kubesystem:
apiVersion: v1
kind: Namespace
metadata:
name: kube-system

View File

@@ -24,9 +24,8 @@ kube_users_dir: "{{ kube_config_dir }}/users"
# pods on startup
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert
# Logging directory (sysvinit systems)
kube_log_dir: "/var/log/kubernetes"
dns_domain: "{{ cluster_name }}"
@@ -34,8 +33,8 @@ kube_proxy_mode: userspace
# Temporary image, waiting for official google release
# hyperkube_image_repo: gcr.io/google_containers/hyperkube
hyperkube_image_repo: quay.io/smana/hyperkube
hyperkube_image_tag: v1.1.3
hyperkube_image_repo: quay.io/ant31/kubernetes-hyperkube
hyperkube_image_tag: v1.1.4
# IP address of the DNS server.
# Kubernetes will create a pod with several containers, serving as the DNS

View File

@@ -1,14 +1,19 @@
---
- name: reload systemd
command: systemctl daemon-reload
when: init_system == "systemd"
- name: restart systemd-kubelet
- name: restart kubelet
command: /bin/true
notify:
- reload systemd
- restart kubelet
- reload kubelet
- name: restart kubelet
- name: set is_gentoken_calico fact
set_fact:
is_gentoken_calico: true
- name: reload kubelet
service:
name: kubelet
state: restarted
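
The restart kubelet handler is a /bin/true no-op whose only job is to notify reload systemd and reload kubelet in that order, so a daemon-reload always lands before the actual service restart on systemd hosts. Tasks therefore notify only the top-level handler, as the install tasks later in this diff do; a minimal sketch:

- name: install | Write kubelet systemd init file
  template:
    src: kubelet.service.j2
    dest: /etc/systemd/system/kubelet.service
  notify: restart kubelet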

View File

@@ -0,0 +1,27 @@
---
- name: tokens | copy the token gen script
copy:
src=kube-gen-token.sh
dest={{ kube_script_dir }}
mode=u+x
when: inventory_hostname == groups['kube-master'][0]
- name: tokens | generate tokens for calico
command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
environment:
TOKEN_DIR: "{{ kube_token_dir }}"
with_nested:
- [ "system:calico" ]
- "{{ groups['k8s-cluster'] }}"
register: gentoken_calico
changed_when: "'Added' in gentoken_calico.stdout"
when: kube_network_plugin == "calico"
delegate_to: "{{ groups['kube-master'][0] }}"
notify: set is_gentoken_calico fact
- name: tokens | get the calico token values
slurp:
src: "{{ kube_token_dir }}/system:calico-{{ inventory_hostname }}.token"
register: calico_token
when: kube_network_plugin == "calico"
delegate_to: "{{ groups['kube-master'][0] }}"
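
slurp returns the file contents base64-encoded, so anything consuming calico_token must decode it first. A hedged sketch of a follow-up task (the destination path is illustrative, not part of this diff):

- name: tokens | write decoded calico token
  copy:
    content: "{{ calico_token.content | b64decode }}"
    dest: "{{ kube_config_dir }}/calico.token"  # illustrative destination
  when: kube_network_plugin == "calico"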

View File

@@ -1,48 +0,0 @@
---
- name: tokens | copy the token gen script
copy:
src=kube-gen-token.sh
dest={{ kube_script_dir }}
mode=u+x
when: inventory_hostname == groups['kube-master'][0]
- name: tokens | generate tokens for master components
command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
environment:
TOKEN_DIR: "{{ kube_token_dir }}"
with_nested:
- [ "system:kubectl" ]
- "{{ groups['kube-master'] }}"
register: gentoken
changed_when: "'Added' in gentoken.stdout"
when: inventory_hostname == groups['kube-master'][0]
- name: tokens | generate tokens for node components
command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
environment:
TOKEN_DIR: "{{ kube_token_dir }}"
with_nested:
- [ 'system:kubelet' ]
- "{{ groups['kube-node'] }}"
register: gentoken
changed_when: "'Added' in gentoken.stdout"
when: inventory_hostname == groups['kube-master'][0]
- name: tokens | generate tokens for calico
command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
environment:
TOKEN_DIR: "{{ kube_token_dir }}"
with_nested:
- [ "system:calico" ]
- "{{ groups['k8s-cluster'] }}"
register: gentoken
changed_when: "'Added' in gentoken.stdout"
when: kube_network_plugin == "calico"
delegate_to: "{{ groups['kube-master'][0] }}"
- name: tokens | get the calico token values
slurp:
src: "{{ kube_token_dir }}/system:calico-{{ inventory_hostname }}.token"
register: calico_token
when: kube_network_plugin == "calico"
delegate_to: "{{ groups['kube-master'][0] }}"

View File

@@ -1,48 +1,29 @@
---
- debug: msg="{{init_system == "systemd"}}"
- debug: msg="{{init_system}}"
- name: install | Write kubelet systemd init file
template: src=kubelet.service.j2 dest=/etc/systemd/system/kubelet.service backup=yes
when: init_system == "systemd"
notify: restart systemd-kubelet
notify: restart kubelet
- name: install | Write kubelet initd script
template: src=deb-kubelet.initd.j2 dest=/etc/init.d/kubelet owner=root mode=755 backup=yes
template: src=deb-kubelet.initd.j2 dest=/etc/init.d/kubelet owner=root mode=0755 backup=yes
when: init_system == "sysvinit" and ansible_os_family == "Debian"
notify: restart kubelet
- name: install | Write kubelet initd script
template: src=rh-kubelet.initd.j2 dest=/etc/init.d/kubelet owner=root mode=755 backup=yes
template: src=rh-kubelet.initd.j2 dest=/etc/init.d/kubelet owner=root mode=0755 backup=yes
when: init_system == "sysvinit" and ansible_os_family == "RedHat"
notify: restart kubelet
- name: install | Install kubelet binary
synchronize:
src: "{{ local_release_dir }}/kubernetes/bin/kubelet"
dest: "{{ bin_dir }}/kubelet"
times: yes
archive: no
delegate_to: "{{ groups['downloader'][0] }}"
notify:
- restart kubelet
- name: install | Perms kubelet binary
file: path={{ bin_dir }}/kubelet owner=kube mode=0755 state=file
command: rsync -piu "{{ local_release_dir }}/kubernetes/bin/kubelet" "{{ bin_dir }}/kubelet"
register: kubelet_copy
changed_when: false
- name: install | Calico-plugin | Directory
file: path=/usr/libexec/kubernetes/kubelet-plugins/net/exec/calico/ state=directory
when: kube_network_plugin == "calico"
- name: install | Calico-plugin | Binary
synchronize:
src: "{{ local_release_dir }}/calico/bin/calico"
dest: "/usr/libexec/kubernetes/kubelet-plugins/net/exec/calico/calico"
times: yes
archive: no
delegate_to: "{{ groups['downloader'][0] }}"
command: rsync -piu "{{ local_release_dir }}/calico/bin/calico" "/usr/libexec/kubernetes/kubelet-plugins/net/exec/calico/calico"
when: kube_network_plugin == "calico"
notify: restart kubelet
- name: install | Perms calico plugin binary
file: path=/usr/libexec/kubernetes/kubelet-plugins/net/exec/calico/calico owner=kube mode=0755 state=file
changed_when: false
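
rsync -piu replaces the synchronize-from-downloader pattern: -u only transfers when the source is newer, and -i itemizes whatever was actually copied, so kubelet_copy.stdout_lines is non-empty exactly when the binary changed. main.yml below keys the restart on that, along these lines:

- name: Restart kubelet if binary changed
  command: /bin/true
  notify: restart kubelet
  when: kubelet_copy.stdout_lines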

View File

@@ -1,25 +1,28 @@
---
- name: create kubernetes config directory
file: path={{ kube_config_dir }} state=directory
- name: Create kubernetes config directory
file:
path: "{{ kube_config_dir }}"
state: directory
owner: kube
- name: create kubernetes script directory
file: path={{ kube_script_dir }} state=directory
- name: Create kubernetes script directory
file:
path: "{{ kube_script_dir }}"
state: directory
owner: kube
- name: Make sure manifest directory exists
file: path={{ kube_manifest_dir }} state=directory
- name: Create kubernetes manifests directory
file:
path: "{{ kube_manifest_dir }}"
state: directory
owner: kube
- name: certs | create system kube-cert groups
group: name={{ kube_cert_group }} state=present system=yes
- name: create system kube user
user:
name=kube
comment="Kubernetes user"
shell=/sbin/nologin
state=present
system=yes
groups={{ kube_cert_group }}
- name: Create kubernetes logs directory
file:
path: "{{ kube_log_dir }}"
state: directory
owner: kube
when: init_system == "sysvinit"
- include: secrets.yml
tags:
@@ -28,7 +31,7 @@
- include: install.yml
- name: Write kubelet config file
template: src=kubelet.j2 dest={{ kube_config_dir }}/kubelet backup=yes
template: src=kubelet.j2 dest={{ kube_config_dir }}/kubelet.env backup=yes
notify:
- restart kubelet
@@ -38,10 +41,18 @@
- restart kubelet
- name: Write proxy manifest
template:
src: manifests/kube-proxy.manifest.j2
dest: "{{ kube_manifest_dir }}/kube-proxy.manifest"
- name: Restart kubelet if binary changed
command: /bin/true
notify: restart kubelet
when: kubelet_copy.stdout_lines
# reload-systemd
- meta: flush_handlers
- name: Enable kubelet
service:
name: kubelet

View File

@@ -1,12 +1,12 @@
---
- name: certs | make sure the certificate directory exists
- name: Secrets | certs | make sure the certificate directory exists
file:
path={{ kube_cert_dir }}
state=directory
mode=o-rwx
group={{ kube_cert_group }}
- name: tokens | make sure the tokens directory exists
- name: Secrets | tokens | make sure the tokens directory exists
file:
path={{ kube_token_dir }}
state=directory
@@ -14,29 +14,29 @@
group={{ kube_cert_group }}
- include: gen_certs.yml
run_once: true
when: inventory_hostname == groups['kube-master'][0]
- include: gen_tokens.yml
- include: gen_calico_tokens.yml
# Sync certs between nodes
- user:
- name: Secrets | create user
user:
name: '{{ansible_user_id}}'
generate_ssh_key: yes
delegate_to: "{{ groups['kube-master'][0] }}"
run_once: yes
- name: 'get ssh keypair'
- name: Secrets | 'get ssh keypair'
slurp: path=~/.ssh/id_rsa.pub
register: public_key
delegate_to: "{{ groups['kube-master'][0] }}"
- name: 'setup keypair on nodes'
- name: Secrets | 'setup keypair on nodes'
authorized_key:
user: '{{ansible_user_id}}'
key: "{{public_key.content|b64decode }}"
- name: synchronize certificates for nodes
- name: Secrets | synchronize certificates for nodes
synchronize:
src: "{{ item }}"
dest: "{{ kube_cert_dir }}"

View File

@@ -27,7 +27,7 @@ DAEMON_USER=root
[ -x "$DAEMON" ] || exit 0
# Read configuration variable file if it is present
[ -r /etc/kubernetes/$NAME ] && . /etc/kubernetes/$NAME
[ -r /etc/kubernetes/$NAME.env ] && . /etc/kubernetes/$NAME.env
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present

View File

@@ -1,4 +1,10 @@
KUBE_LOGTOSTDERR="--logtostderr=true"
{% if init_system == "sysvinit" %}
# Logging directory
KUBE_LOGGING="--log-dir={{ kube_log_dir }} --logtostderr=true"
{% else %}
# logging to stderr means we get it in the systemd journal
KUBE_LOGGING="--logtostderr=true"
{% endif %}
KUBE_LOG_LEVEL="--v={{ kube_log_level | default('2') }}"
KUBE_ALLOW_PRIV="--allow_privileged=true"
KUBELET_API_SERVER="--api_servers={% for host in groups['kube-master'] %}https://{{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}:{{ kube_apiserver_port }}{% if not loop.last %},{% endif %}{% endfor %}"
@@ -23,6 +29,6 @@ KUBELET_NETWORK_PLUGIN="--network_plugin={{ kube_network_plugin }}"
# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow_privileged=true"
{% if init_system == "sysvinit" %}
DAEMON_ARGS="$KUBE_LOGTOSTDERR $KUBE_LOG_LEVEL $KUBE_ALLOW_PRIV $KUBELET_API_SERVER $KUBELET_ADDRESS \
DAEMON_ARGS="$KUBE_LOGGING $KUBE_LOG_LEVEL $KUBE_ALLOW_PRIV $KUBELET_API_SERVER $KUBELET_ADDRESS \
$KUBELET_HOSTNAME $KUBELET_REGISTER_NODE $KUBELET_ARGS $KUBELET_NETWORK_PLUGIN"
{% endif %}
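
Both env templates now expect kube_log_level to be a bare verbosity number that the template prefixes with --v=; a one-line group_vars sketch (the value is illustrative):

kube_log_level: 2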

View File

@@ -8,7 +8,7 @@ After=docker.service
{% endif %}
[Service]
EnvironmentFile=/etc/kubernetes/kubelet
EnvironmentFile=/etc/kubernetes/kubelet.env
ExecStart={{ bin_dir }}/kubelet \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \

View File

@@ -27,7 +27,7 @@ pidfile="/var/run/$prog.pid"
lockfile="/var/lock/subsys/$prog"
logfile="/var/log/$prog"
[ -e /etc/kubernetes/$prog ] && . /etc/kubernetes/$prog
[ -e /etc/kubernetes/$prog.env ] && . /etc/kubernetes/$prog.env
start() {
if [ ! -x $exec ]; then

View File

@@ -1,15 +1,11 @@
---
run_gitinfos: false
common_required_pkgs:
- python-httplib2
- openssl
- curl
debian_required_pkgs:
- python-apt
- python-pip
rh_required_pkgs:
- libselinux-python
- rsync
pypy_version: 2.4.0
python_pypy_url: "https://bitbucket.org/pypy/pypy/downloads/pypy-{{ pypy_version }}.tar.bz2"

View File

@@ -0,0 +1,73 @@
#!/bin/sh
set -e
# Text color variables
txtbld=$(tput bold) # Bold
bldred=${txtbld}$(tput setaf 1) # red
bldgre=${txtbld}$(tput setaf 2) # green
bldylw=${txtbld}$(tput setaf 3) # yellow
txtrst=$(tput sgr0) # Reset
err=${bldred}ERROR${txtrst}
info=${bldgre}INFO${txtrst}
warn=${bldylw}WARNING${txtrst}
usage()
{
cat << EOF
Generates a file containing useful git information
Usage: $(basename $0) [global|diff]
Examples:
Generate git information
$(basename $0) global
Generate diff from latest tag
$(basename $0) diff
EOF
}
if [ $# != 1 ]; then
printf "\n$err: Needs 1 argument\n"
usage
exit 2
fi;
current_commit=$(git rev-parse HEAD)
latest_tag=$(git describe --abbrev=0 --tags)
latest_tag_commit=$(git show-ref -s ${latest_tag})
tags_list=$(git tag --points-at "${latest_tag}")
case ${1} in
"global")
cat<<EOF
deployment date="$(date '+%d-%m-%Y %Hh%M')"
deployment_timestamp=$(date '+%s')
user="$USER"
current commit (HEAD)="${current_commit}"
current_commit_timestamp=$(git log -1 --pretty=format:%ct)
latest tag(s) (current branch)="${tags_list}"
latest tag commit="${latest_tag_commit}"
current branch="$(git rev-parse --abbrev-ref HEAD)"
branches list="$(git describe --contains --all HEAD)"
git root directory="$(git rev-parse --show-toplevel)"
EOF
if ! git diff-index --quiet HEAD --; then
printf "unstaged changes=\"/etc/.git-ansible.diff\""
fi
if [ "${current_commit}" = "${latest_tag_commit}" ]; then
printf "\ncurrent_commit_tag=\"${latest_tag}\""
else
printf "\nlast tag was "$(git describe --tags | awk -F- '{print $2}')" commits ago =\""
printf "$(git log --pretty=format:" %h - %s" ${latest_tag}..HEAD)\""
fi
;;
"diff")
git diff
;;
*)
usage
printf "$err: Unknown argument ${1}\n"
exit 1;
;;
esac

View File

@@ -0,0 +1,35 @@
---
- name: Hosts | populate inventory into hosts file
lineinfile:
dest: /etc/hosts
regexp: "^{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4.address) }} {{ item }}$"
line: "{{ hostvars[item]['ip'] | default(hostvars[item].ansible_default_ipv4.address) }} {{ item }}"
state: present
backup: yes
when: hostvars[item].ansible_default_ipv4.address is defined
with_items: groups['all']
- name: Hosts | populate kubernetes loadbalancer address into hosts file
lineinfile:
dest: /etc/hosts
regexp: ".*{{ apiserver_loadbalancer_domain_name }}$"
line: "{{ loadbalancer_apiserver.address }} {{ apiserver_loadbalancer_domain_name| default('lb-apiserver.kubernetes.local') }}"
state: present
backup: yes
when: loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined
- name: Hosts | localhost ipv4 in hosts file
lineinfile:
dest: /etc/hosts
line: "127.0.0.1 localhost localhost.localdomain"
regexp: '^127.0.0.1.*$'
state: present
backup: yes
- name: Hosts | localhost ipv6 in hosts file
lineinfile:
dest: /etc/hosts
line: "::1 localhost6 localhost6.localdomain"
regexp: '^::1.*$'
state: present
backup: yes
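
Each lineinfile pairs a regexp with a line, so reruns rewrite a stale entry instead of appending duplicates, and the ip hostvar wins over the detected ansible_default_ipv4 address. Pinning a node's /etc/hosts entry is therefore just a matter of setting that variable in inventory; a hedged sketch (hostname and address are illustrative):

# host_vars/node1.yml (illustrative)
ip: 10.99.0.11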

View File

@@ -0,0 +1,25 @@
---
# Deploy git infos
# ----------------
- name: 'GIT | generate git information'
local_action: command {{ role_path }}/gen-gitinfos.sh global
register: gitinfo
always_run: yes
- name: 'GIT | copy ansible information'
template:
src: ansible_git.j2
dest: /etc/.ansible.ini
backup: yes
- name: 'GIT | generate diff file'
local_action: command {{ role_path }}/gen-gitinfos.sh diff
register: gitdiff
always_run: yes
- name: 'GIT | copy git diff file'
copy:
content: "{{ gitdiff.stdout }}"
dest: /etc/.git-ansible.diff
backup: yes

View File

@@ -1,7 +1,23 @@
---
- include: gitinfos.yml
when: run_gitinfos
- name: gather os specific variables
include_vars: "{{ item }}"
with_first_found:
- files:
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}.yml"
- "{{ ansible_os_family|lower }}.yml"
- defaults.yml
paths:
- ../vars
- name: "Identify init system"
shell: >
$(pgrep systemd > /dev/null && systemctl status > /dev/null);
$(pgrep systemd > /dev/null && systemctl status network.target > /dev/null);
if [ $? -eq 0 ] ; then
echo systemd;
else
@@ -10,31 +26,46 @@
always_run: True
register: init_system_output
changed_when: False
tags: always
- set_fact:
init_system: "{{ init_system_output.stdout }}"
always_run: True
tags: always
- name: Update package management cache (APT)
apt: update_cache=yes
when: ansible_pkg_mgr == 'apt'
- name: Update package management cache (YUM)
yum: update_cache=yes name='*'
when: ansible_pkg_mgr == 'yum'
- name: Install python-apt for Debian distribs
shell: apt-get install -y python-apt
when: ansible_os_family == "Debian"
changed_when: False
- name: Install python-dnf for latest RedHat versions
shell: dnf install -y python-dnf yum
when: ansible_distribution == "Fedora" and
ansible_distribution_major_version > 21
changed_when: False
- name: Install packages requirements
action:
module: "{{ ansible_pkg_mgr }}"
name: "{{ item }}"
state: latest
with_items: common_required_pkgs
with_items: "{{required_pkgs | union(common_required_pkgs)}}"
- name: Install debian packages requirements
apt:
name: "{{ item }}"
state: latest
when: ansible_os_family == "Debian"
with_items: debian_required_pkgs
- name: Install redhat packages requirements
action:
module: "{{ ansible_pkg_mgr }}"
name: "{{ item }}"
state: latest
# Todo : selinux configuration
- name: Set selinux policy to permissive
selinux: policy=targeted state=permissive
when: ansible_os_family == "RedHat"
with_items: rh_required_pkgs
changed_when: False
- include: etchosts.yml
- include: python-bootstrap.yml
when: ansible_os_family not in [ "Debian", "RedHat" ]
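
with_first_found (earlier in this file) loads the first matching file from ../vars, falling back from the most specific distribution/version name down to the os-family file, so each platform only has to define required_pkgs (see the small vars files below), which is unioned with common_required_pkgs at install time. A hedged sketch of the catch-all fallback (defaults.yml is referenced in the files list but not shown in this diff, so its content here is hypothetical):

# vars/defaults.yml (hypothetical fallback)
required_pkgs: []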

View File

@@ -0,0 +1,3 @@
; This file contains the information which identifies the deployment state relative to the git repo
[default]
{{ gitinfo.stdout }}

View File

@@ -0,0 +1,4 @@
required_pkgs:
- epel-release
- libselinux-python
- device-mapper-libs

View File

@@ -0,0 +1,4 @@
required_pkgs:
- python-apt
- apt-transport-https
- software-properties-common

View File

@@ -0,0 +1,3 @@
required_pkgs:
- libselinux-python
- device-mapper-libs

View File

@@ -0,0 +1,3 @@
required_pkgs:
- libselinux-python
- device-mapper-libs

View File

@@ -0,0 +1,6 @@
---
## defines the IP used to talk to the node
# flannel_public_ip:
## interface that should be used for flannel operations
# flannel_interface:

View File

@@ -1,36 +1,37 @@
---
- name: reload systemd
shell: systemctl daemon-reload
- name: restart systemd-calico-node
- name: restart calico-node
command: /bin/true
notify:
- reload systemd
- restart calico-node
- reload calico-node
- name: restart calico-node
- name: restart docker
command: /bin/true
notify:
- reload systemd
- reload docker
- reload kubelet
- name: delete default docker bridge
command: ip link delete docker0
ignore_errors: yes
notify: restart docker
- name: reload systemd
shell: systemctl daemon-reload
when: init_system == "systemd"
- name: reload calico-node
service:
name: calico-node
state: restarted
- name: restart docker
service: name=docker state=restarted
- name: reload docker
service:
name: docker
state: restarted
- name: restart flannel
service: name=flannel state=restarted
notify:
- reload systemd
- stop docker
- delete docker0
- start docker
when: inventory_hostname in groups['kube-node']
- name: stop docker
service: name=docker state=stopped
- name: delete docker0
command: ip link delete docker0
ignore_errors: yes
- name: start docker
service: name=docker state=started
- name: reload kubelet
service:
name: kubelet
state: restarted

View File

@@ -1,13 +1,8 @@
---
- name: Calico | Install calicoctl bin
synchronize:
src: "{{ local_release_dir }}/calico/bin/calicoctl"
dest: "{{ bin_dir }}/calicoctl"
archive: no
times: yes
delegate_to: "{{ groups['downloader'][0] }}"
notify: restart calico-node
command: rsync -piu "{{ local_release_dir }}/calico/bin/calicoctl" "{{ bin_dir }}/calicoctl"
register: calico_copy
changed_when: false
- name: Calico | install calicoctl
file: path={{ bin_dir }}/calicoctl mode=0755 state=file
@@ -18,6 +13,11 @@
dest: /usr/bin/calicoctl
state: link
- name: Calico | wait for etcd
wait_for:
port: 2379
when: inventory_hostname in groups['kube-master']
- name: Calico | Check if calico network pool has already been configured
uri:
url: "http://127.0.0.1:2379/v2/keys/calico/v1/ipam/v4/pool"
@@ -25,13 +25,11 @@
status_code: 200,404
register: calico_conf
run_once: true
delegate_to: "{{ groups['etcd'][0] }}"
- name: Calico | Configure calico network pool
shell: calicoctl pool add {{ kube_pods_subnet }}
run_once: true
when: calico_conf.status == 404
delegate_to: "{{ groups['etcd'][0] }}"
- name: Calico | Get calico configuration from etcd
uri:
@@ -39,7 +37,6 @@
return_content: yes
register: calico_pools
run_once: true
delegate_to: "{{ groups['etcd'][0] }}"
- name: Calico | Check if calico pool is properly configured
fail:
@@ -48,7 +45,6 @@
when: ( calico_pools.json['node']['nodes'] | length > 1 ) or
( not calico_pools.json['node']['nodes'][0]['key'] | search(".*{{ kube_pods_subnet | ipaddr('network') }}.*") )
run_once: true
delegate_to: "{{ groups['etcd'][0] }}"
- name: Calico | Write calico-node configuration
template: src=calico/calico.conf.j2 dest=/usr/libexec/kubernetes/kubelet-plugins/net/exec/calico/calico_kubernetes.ini
@@ -57,17 +53,38 @@
- name: Calico | Write calico-node systemd init file
template: src=calico/calico-node.service.j2 dest=/etc/systemd/system/calico-node.service
when: init_system == "systemd"
notify: restart systemd-calico-node
notify: restart calico-node
- name: Calico | Write calico-node initd script
template: src=calico/deb-calico.initd.j2 dest=/etc/init.d/calico-node owner=root mode=755
template: src=calico/deb-calico.initd.j2 dest=/etc/init.d/calico-node owner=root mode=0755
when: init_system == "sysvinit" and ansible_os_family == "Debian"
notify: restart calico-node
- name: Calico | Write calico-node initd script
template: src=calico/rh-calico.initd.j2 dest=/etc/init.d/calico-node owner=root mode=755
template: src=calico/rh-calico.initd.j2 dest=/etc/init.d/calico-node owner=root mode=0755
when: init_system == "sysvinit" and ansible_os_family == "RedHat"
notify: restart calico-node
- meta: flush_handlers
- name: Calico | Enable calico-node
service: name=calico-node enabled=yes state=started
- name: Calico | Restart calico if binary changed
service:
name: calico-node
state: restarted
when: calico_copy.stdout_lines
- name: Calico | Disable node mesh
shell: calicoctl bgp node-mesh off
environment:
ETCD_AUTHORITY: "127.0.0.1:2379"
when: peer_with_router|default(false) and inventory_hostname in groups['kube-node']
- name: Calico | Configure peering with router(s)
shell: calicoctl node bgp peer add {{ item.router_id }} as {{ item.as }}
environment:
ETCD_AUTHORITY: "127.0.0.1:2379"
with_items: peers
when: peer_with_router|default(false) and inventory_hostname in groups['kube-node']
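
Router peering is driven entirely by inventory: the last two tasks only run when peer_with_router is true, and each peers entry must carry the router_id and as keys the calicoctl command interpolates. A hedged group_vars sketch (addresses and AS numbers are illustrative):

peer_with_router: true
peers:
  - router_id: "10.99.0.1"  # illustrative router address
    as: "65000"             # illustrative AS number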

View File

@@ -1,57 +1,33 @@
---
- name: Create flannel user
user: name=flannel shell=/bin/nologin
- name: Install flannel binaries
synchronize:
src: "{{ local_release_dir }}/flannel/bin/flanneld"
dest: "{{ bin_dir }}/flanneld"
archive: no
times: yes
delegate_to: "{{ groups['downloader'][0] }}"
notify:
- restart flannel
- name: Perms flannel binary
file: path={{ bin_dir }}/flanneld owner=flannel mode=0755 state=file
- name: Write flannel.service systemd file
- name: Flannel | Write flannel configuration
template:
src: flannel/systemd-flannel.service.j2
dest: /etc/systemd/system/flannel.service
notify: restart flannel
src: flannel/network.json
dest: /etc/flannel-network.json
backup: yes
- name: Write docker.service systemd file
- name: Flannel | Create flannel pod manifest
template:
src: flannel/systemd-docker.service.j2
dest: /lib/systemd/system/docker.service
notify: restart docker
src: flannel/flannel-pod.yml
dest: /etc/kubernetes/manifests/flannel-pod.manifest
notify: delete default docker bridge
- name: Set fact for etcd command conf file location
set_fact:
conf_file: "/tmp/flannel-conf.json"
run_once: true
delegate_to: "{{ groups['kube-master'][0] }}"
- name: Flannel | Wait for flannel subnet.env file presence
wait_for:
path: /run/flannel/subnet.env
delay: 5
- name: Create flannel config file to go in etcd
template: src=flannel/flannel-conf.json.j2 dest={{ conf_file }}
run_once: true
delegate_to: "{{ groups['kube-master'][0] }}"
- name: Get flannel_subnet from subnet.env
shell: cat /run/flannel/subnet.env | awk -F'=' '$1 == "FLANNEL_SUBNET" {print $2}'
register: flannel_subnet_output
changed_when: false
- name: Flannel configuration into etcd
shell: "{{ bin_dir }}/etcdctl set /{{ cluster_name }}/network/config < {{ conf_file }}"
delegate_to: "{{ groups['kube-master'][0] }}"
notify: restart flannel
- set_fact:
flannel_subnet: "{{ flannel_subnet_output.stdout }}"
- name: Clean up the flannel config file
file: path=/tmp/flannel-conf.json state=absent
run_once: true
delegate_to: "{{ groups['kube-master'][0] }}"
- name: Get flannel_mtu from subnet.env
shell: cat /run/flannel/subnet.env | awk -F'=' '$1 == "FLANNEL_MTU" {print $2}'
register: flannel_mtu_output
changed_when: false
- name: Launch Flannel
service: name=flannel state=started enabled=yes
notify:
- restart flannel
- name: Enable Docker
service: name=docker enabled=yes state=started
- set_fact:
flannel_mtu: "{{ flannel_mtu_output.stdout }}"
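
The two awk tasks read /run/flannel/subnet.env, which the flanneld container writes once it has acquired a lease; FLANNEL_SUBNET and FLANNEL_MTU become the facts the docker configuration below consumes. Illustrative contents of the file being parsed:

# /run/flannel/subnet.env (illustrative values)
FLANNEL_NETWORK=10.233.64.0/18
FLANNEL_SUBNET=10.233.65.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=false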

View File

@@ -7,7 +7,24 @@
- include: flannel.yml
when: kube_network_plugin == "flannel"
- include: calico.yml
when: kube_network_plugin == "calico"
- name: Set docker daemon options
template:
src: docker
dest: "/etc/default/docker"
owner: root
group: root
mode: 0644
notify:
- restart docker
- name: Write docker.service systemd file
template:
src: systemd-docker.service
dest: /lib/systemd/system/docker.service
notify: restart docker
when: init_system == "systemd"
- meta: flush_handlers
- include: calico.yml
when: kube_network_plugin == "calico"

View File

@@ -2,7 +2,7 @@
Description=Calico per-node agent
Documentation=https://github.com/projectcalico/calico-docker
Requires=docker.service
After=docker.service etcd2.service
After=docker.service etcd.service
[Service]
User=root

View File

@@ -0,0 +1,6 @@
# Deployed by Ansible
{% if init_system == "sysvinit" and kube_network_plugin == "flannel" and ansible_os_family == "Debian" %}
DOCKER_OPTS="--bip={{ flannel_subnet }} --mtu={{ flannel_mtu }}"
{% elif kube_network_plugin == "flannel" %}
OPTIONS="--bip={{ flannel_subnet }} --mtu={{ flannel_mtu }}"
{% endif %}
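
Rendered with the illustrative subnet above, a Debian sysvinit host gets DOCKER_OPTS while other flannel hosts get OPTIONS, matching the $OPTIONS reference in the docker unit at the end of this diff:

# /etc/default/docker (rendered, illustrative)
DOCKER_OPTS="--bip=10.233.65.1/24 --mtu=1450"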

View File

@@ -0,0 +1,46 @@
---
kind: "Pod"
apiVersion: "v1"
metadata:
name: "flannel"
namespace: "kube-system"
labels:
app: "flannel"
version: "v0.1"
spec:
volumes:
- name: "subnetenv"
hostPath:
path: "/run/flannel"
- name: "networkconfig"
hostPath:
path: "/etc/flannel-network.json"
containers:
- name: "flannel-server-helper"
image: "gcr.io/google_containers/flannel-server-helper:0.1"
args:
- "--network-config=/etc/flannel-network.json"
- "--etcd-prefix=/{{ cluster_name }}/network"
- "--etcd-server=http://{{ groups['etcd'][0] }}:2379"
volumeMounts:
- name: "networkconfig"
mountPath: "/etc/flannel-network.json"
imagePullPolicy: "Always"
- name: "flannel-container"
image: "quay.io/coreos/flannel:0.5.5"
command:
- "/bin/sh"
- "-c"
- "/opt/bin/flanneld -etcd-endpoints {% for srv in groups['etcd'] %}http://{{ srv }}:2379{% if not loop.last %},{% endif %}{% endfor %} -etcd-prefix /{{ cluster_name }}/network {% if flannel_interface is defined %}-iface {{ flannel_interface }}{% endif %} {% if flannel_public_ip is defined %}-public-ip {{ flannel_public_ip }}{% endif %} 1>>/var/log/flannel_server.log 2>&1"
ports:
- hostPort: 10253
containerPort: 10253
resources:
limits:
cpu: "100m"
volumeMounts:
- name: "subnetenv"
mountPath: "/run/flannel"
securityContext:
privileged: true
hostNetwork: true

View File

@@ -1,17 +0,0 @@
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
After=network.target docker.socket flannel.service
Requires=docker.socket
[Service]
EnvironmentFile=/run/flannel/subnet.env
EnvironmentFile=-/etc/default/docker
ExecStart=/usr/bin/docker -d -H fd:// --bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU} $DOCKER_OPTS
MountFlags=slave
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
[Install]
WantedBy=multi-user.target

View File

@@ -1,12 +0,0 @@
[Unit]
Description=Flannel Network Overlay
Documentation=https://coreos.com/flannel/docs/latest
[Service]
EnvironmentFile=/etc/network-environment
ExecStart={{ bin_dir }}/flanneld \
$FLANNEL_ETCD_PREFIX
Restart=on-failure
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,28 @@
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
{% if ansible_os_family == "RedHat" %}
After=network.target
Wants=docker-storage-setup.service
{% elif ansible_os_family == "Debian" %}
After=network.target docker.socket
Requires=docker.socket
{% endif %}
[Service]
Type=notify
EnvironmentFile=-/etc/default/docker
Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/docker daemon \
$OPTIONS \
$DOCKER_STORAGE_OPTIONS \
$DOCKER_NETWORK_OPTIONS \
$INSECURE_REGISTRY
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
MountFlags=slave
TimeoutStartSec=1min
[Install]
WantedBy=multi-user.target