Compare commits
94 Commits
Author | SHA1 | Date | |
---|---|---|---|
ec77f046fb | |||
66a178c614 | |||
95a2bcdd9d | |||
7ab62170e0 | |||
8c37d0aa1f | |||
438a4bdeca | |||
da0a973dd4 | |||
d8bef773ee | |||
64608f06cf | |||
d450e1f06f | |||
569d0081d3 | |||
ba5466cacf | |||
6b094db607 | |||
d0dd69399e | |||
f33f447b3d | |||
efaf6328a2 | |||
a5a34c98a5 | |||
6a287973d9 | |||
d066f0c9e9 | |||
b7f3ff5ce9 | |||
48ec698314 | |||
737a83788f | |||
9471173f6a | |||
c4e3266031 | |||
8d3abdb489 | |||
e89f4ac7ee | |||
99db440287 | |||
e6358d825e | |||
8b3112d287 | |||
b4dfd8c973 | |||
a153ac231a | |||
5b4c365b8c | |||
a08fb131fb | |||
ba2c3f052f | |||
c8a488cfbe | |||
fad80d8595 | |||
21f1c82fb0 | |||
6ec957a255 | |||
76b49bfe30 | |||
7d14763cf0 | |||
4c300a57b5 | |||
e68d6575cd | |||
11b6e31c55 | |||
4d295d567b | |||
ca8ef29ae4 | |||
9be65f8c19 | |||
b70b8a7c39 | |||
687cc01151 | |||
25b986ede7 | |||
1e294b25c1 | |||
5c369d6d40 | |||
7b1e29f855 | |||
80ee1f2d9e | |||
87856513c6 | |||
f7f560de2e | |||
7a8ead07d8 | |||
46f99befee | |||
3563dbe9e8 | |||
fec601a238 | |||
aad4edaf47 | |||
bb3a57a719 | |||
6be93a3b87 | |||
333d9daea8 | |||
8b53ff8ef7 | |||
8334a9e1e4 | |||
f1e5bc81f8 | |||
26646b4a79 | |||
eddd1251eb | |||
ba710ade23 | |||
25d19720c0 | |||
17e3108b0c | |||
f304dd4cf3 | |||
7dcc7c31f6 | |||
4ca2931ae9 | |||
9744972f4a | |||
f770ae82e6 | |||
aa9578ba99 | |||
898e79a49e | |||
8d80265392 | |||
8acd4396d6 | |||
a47f9394bb | |||
5cc37db4bf | |||
3eb2ec101e | |||
84d85e41a9 | |||
96add56527 | |||
d7f9d4a590 | |||
26e61fc9be | |||
7546d75513 | |||
1fb6f36e9c | |||
df4fe074f0 | |||
0c9826c60f | |||
d7a11887f6 | |||
39dd4c1aaa | |||
9c5c0f2697 |
1
.gitignore
vendored
1
.gitignore
vendored
@ -1 +1,2 @@
|
|||||||
ssh
|
ssh
|
||||||
|
nodes
|
||||||
|
148
README.md
148
README.md
@ -4,30 +4,158 @@ Scripts to create libvirt lab with vagrant and prepare some stuff for `k8s` depl
|
|||||||
|
|
||||||
|
|
||||||
Requirements
|
Requirements
|
||||||
============
|
------------
|
||||||
|
|
||||||
* `libvirt`
|
* `libvirt`
|
||||||
* `vagrant`
|
* `vagrant`
|
||||||
* `vagrant-libvirt` plugin
|
* `vagrant-libvirt` plugin (`vagrant plugin install vagrant-libvirt`)
|
||||||
* `$USER` should be able to connect to libvirt (test with `virsh list --all`)
|
* `$USER` should be able to connect to libvirt (test with `virsh list --all`)
|
||||||
|
|
||||||
How-to
|
Vargant lab preparation
|
||||||
======
|
-----------------------
|
||||||
|
|
||||||
|
* Change default IP pool for vagrant networks if you want:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export VAGRANT_POOL="10.100.0.0/16"
|
||||||
|
```
|
||||||
|
|
||||||
|
* Clone this repo
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/adidenko/vagrant-k8s
|
||||||
|
cd vagrant-k8s
|
||||||
|
```
|
||||||
|
|
||||||
* Prepare the virtual lab:
|
* Prepare the virtual lab:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
export VAGRANT_POOL="10.100.0.0/16"
|
|
||||||
git clone https://github.com/adidenko/vagrant-k8s
|
|
||||||
cd vagrant-k8s
|
|
||||||
vagrant up
|
vagrant up
|
||||||
```
|
```
|
||||||
|
|
||||||
* Login to master node and deploy k8s with kargo:
|
Deployment on a lab
|
||||||
|
-------------------
|
||||||
|
|
||||||
|
* Login to master node and sudo to root:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
vagrant ssh $USER-k8s-01
|
vagrant ssh $USER-k8s-00
|
||||||
# Inside your master VM run this:
|
|
||||||
sudo su -
|
sudo su -
|
||||||
|
```
|
||||||
|
|
||||||
|
* Clone this repo
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/adidenko/vagrant-k8s ~/mcp
|
||||||
|
```
|
||||||
|
|
||||||
|
* Install required software and pull needed repos:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd ~/mcp
|
||||||
|
./bootstrap-master.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
* Check `nodes` list and make sure you have SSH access to them
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd ~/mcp
|
||||||
|
cat nodes
|
||||||
|
ansible all -m ping -i nodes_to_inv.py
|
||||||
|
```
|
||||||
|
|
||||||
|
* Deploy k8s using kargo playbooks
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd ~/mcp
|
||||||
./deploy-k8s.kargo.sh
|
./deploy-k8s.kargo.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
|
* Deploy OpenStack CCP:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd ~/mcp
|
||||||
|
# Build CCP images
|
||||||
|
ansible-playbook -i nodes_to_inv.py playbooks/ccp-build.yaml
|
||||||
|
# Deploy CCP
|
||||||
|
ansible-playbook -i nodes_to_inv.py playbooks/ccp-deploy.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
* Wait for CCP deployment to complete
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# On k8s master node
|
||||||
|
# Check CCP pods, all should become running
|
||||||
|
kubectl --namespace=openstack get pods -o wide
|
||||||
|
|
||||||
|
# Check CCP jobs status, wait until all complete
|
||||||
|
kubectl --namespace=openstack get jobs
|
||||||
|
```
|
||||||
|
|
||||||
|
* Check Horizon:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# On k8s master node check nodePort of Horizon service
|
||||||
|
HORIZON_PORT=$(kubectl --namespace=openstack get svc/horizon -o go-template='{{(index .spec.ports 0).nodePort}}')
|
||||||
|
echo $HORIZON_PORT
|
||||||
|
|
||||||
|
# Access Horizon via nodePort
|
||||||
|
curl -i -s $ANY_K8S_NODE_IP:$HORIZON_PORT
|
||||||
|
```
|
||||||
|
|
||||||
|
Working with kubernetes
|
||||||
|
-----------------------
|
||||||
|
|
||||||
|
* Login to one of your kube-master nodes and run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# List images in registry
|
||||||
|
curl -s 127.0.0.1:31500/v2/_catalog | python -mjson.tool
|
||||||
|
|
||||||
|
# Check CCP jobs status
|
||||||
|
kubectl --namespace=openstack get jobs
|
||||||
|
|
||||||
|
# Check CCP pods
|
||||||
|
kubectl --namespace=openstack get pods -o wide
|
||||||
|
```
|
||||||
|
|
||||||
|
* Troubleshooting
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Get logs from pod
|
||||||
|
kubectl --namespace=openstack logs $POD_NAME
|
||||||
|
|
||||||
|
# Exec command from pod
|
||||||
|
kubectl --namespace=openstack exec $POD_NAME -- cat /etc/resolv.conf
|
||||||
|
kubectl --namespace=openstack exec $POD_NAME -- curl http://etcd-client:2379/health
|
||||||
|
|
||||||
|
# Run a container
|
||||||
|
docker run -t -i 127.0.0.1:31500/mcp/neutron-dhcp-agent /bin/bash
|
||||||
|
```
|
||||||
|
|
||||||
|
* Network checker
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd ~/mcp
|
||||||
|
./deploy-netchecker.sh
|
||||||
|
# or in openstack namespace
|
||||||
|
./deploy-netchecker.sh openstack
|
||||||
|
```
|
||||||
|
|
||||||
|
* CCP
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run a bash in one of containers
|
||||||
|
docker run -t -i 127.0.0.1:31500/mcp/nova-base /bin/bash
|
||||||
|
|
||||||
|
# Inside container export credentials
|
||||||
|
export OS_USERNAME=admin
|
||||||
|
export OS_PASSWORD=password
|
||||||
|
export OS_TENANT_NAME=admin
|
||||||
|
export OS_REGION_NAME=RegionOne
|
||||||
|
export OS_AUTH_URL=http://keystone:35357
|
||||||
|
|
||||||
|
# Run CLI commands
|
||||||
|
openstack service list
|
||||||
|
neutron agent-list
|
||||||
|
```
|
||||||
|
52
Vagrantfile
vendored
52
Vagrantfile
vendored
@ -1,32 +1,37 @@
|
|||||||
# -*- mode: ruby -*-
|
# -*- mode: ruby -*-
|
||||||
# vi: set ft=ruby :
|
# vi: set ft=ruby :
|
||||||
|
|
||||||
|
pool = ENV["VAGRANT_POOL"] || "10.250.0.0/16"
|
||||||
|
|
||||||
ENV["VAGRANT_DEFAULT_PROVIDER"] = "libvirt"
|
ENV["VAGRANT_DEFAULT_PROVIDER"] = "libvirt"
|
||||||
pool = ENV["VAGRANT_POOL"] || "10.210.0.0/16"
|
|
||||||
prefix = pool.gsub(/\.\d+\.\d+\/16$/, "")
|
prefix = pool.gsub(/\.\d+\.\d+\/16$/, "")
|
||||||
|
|
||||||
$num_instances = 7
|
$num_instances = 4
|
||||||
$vm_memory = 2048
|
$vm_memory = 6144
|
||||||
$vm_cpus = 2
|
$vm_cpus = 2
|
||||||
|
$master_memory = 1024
|
||||||
|
$master_cpus = 1
|
||||||
|
|
||||||
$user = ENV["USER"]
|
$user = ENV["USER"]
|
||||||
$public_subnet = prefix.to_s + ".0"
|
$public_subnet = prefix.to_s + ".0"
|
||||||
$private_subnet = prefix.to_s + ".1"
|
$private_subnet = prefix.to_s + ".1"
|
||||||
$mgmt_cidr = prefix.to_s + ".2.0/24"
|
$mgmt_cidr = prefix.to_s + ".2.0/24"
|
||||||
|
$neutron_subnet = "172.30.250"
|
||||||
|
|
||||||
$instance_name_prefix = "#{$user}-k8s"
|
$instance_name_prefix = "#{$user}-k8s"
|
||||||
|
|
||||||
# Boxes with libvirt provider support:
|
# Boxes with libvirt provider support:
|
||||||
#$box = "yk0/ubuntu-xenial" #900M
|
#$box = "yk0/ubuntu-xenial" #900M
|
||||||
#$box = "centos/7"
|
#$box = "centos/7"
|
||||||
$box = "nrclark/xenial64-minimal-libvirt"
|
#$box = "nrclark/xenial64-minimal-libvirt"
|
||||||
|
$box = "peru/ubuntu-16.04-server-amd64"
|
||||||
|
|
||||||
# Create SSH keys for future lab
|
# Create SSH keys for future lab
|
||||||
system 'bash ssh-keygen.sh'
|
system 'bash vagrant-scripts/ssh-keygen.sh'
|
||||||
|
|
||||||
# Create nodes list for future kargo deployment
|
# Create nodes list for future kargo deployment
|
||||||
nodes=""
|
nodes=""
|
||||||
(2..$num_instances).each do |i|
|
(1..$num_instances-1).each do |i|
|
||||||
ip = "#{$private_subnet}.#{i+10}"
|
ip = "#{$private_subnet}.#{i+10}"
|
||||||
nodes = "#{nodes}#{ip}\n"
|
nodes = "#{nodes}#{ip}\n"
|
||||||
end
|
end
|
||||||
@ -34,13 +39,9 @@ File.open("nodes", 'w') { |file| file.write(nodes) }
|
|||||||
|
|
||||||
# Create the lab
|
# Create the lab
|
||||||
Vagrant.configure("2") do |config|
|
Vagrant.configure("2") do |config|
|
||||||
(1..$num_instances).each do |i|
|
(0..$num_instances-1).each do |i|
|
||||||
# First node would be master node
|
# First node would be master node
|
||||||
if i == 1
|
master = i == 0
|
||||||
master = true
|
|
||||||
else
|
|
||||||
master = false
|
|
||||||
end
|
|
||||||
|
|
||||||
config.ssh.insert_key = false
|
config.ssh.insert_key = false
|
||||||
vm_name = "%s-%02d" % [$instance_name_prefix, i]
|
vm_name = "%s-%02d" % [$instance_name_prefix, i]
|
||||||
@ -52,8 +53,13 @@ Vagrant.configure("2") do |config|
|
|||||||
# Libvirt provider settings
|
# Libvirt provider settings
|
||||||
test_vm.vm.provider :libvirt do |domain|
|
test_vm.vm.provider :libvirt do |domain|
|
||||||
domain.uri = "qemu+unix:///system"
|
domain.uri = "qemu+unix:///system"
|
||||||
domain.memory = $vm_memory
|
if master
|
||||||
domain.cpus = $vm_cpus
|
domain.memory = $master_memory
|
||||||
|
domain.cpus = $master_cpus
|
||||||
|
else
|
||||||
|
domain.memory = $vm_memory
|
||||||
|
domain.cpus = $vm_cpus
|
||||||
|
end
|
||||||
domain.driver = "kvm"
|
domain.driver = "kvm"
|
||||||
domain.host = "localhost"
|
domain.host = "localhost"
|
||||||
domain.connect_via_ssh = false
|
domain.connect_via_ssh = false
|
||||||
@ -66,6 +72,8 @@ Vagrant.configure("2") do |config|
|
|||||||
domain.cpu_mode = "host-passthrough"
|
domain.cpu_mode = "host-passthrough"
|
||||||
domain.volume_cache = "unsafe"
|
domain.volume_cache = "unsafe"
|
||||||
domain.disk_bus = "virtio"
|
domain.disk_bus = "virtio"
|
||||||
|
# DISABLED: switched to new box which has 100G / partition
|
||||||
|
#domain.storage :file, :type => 'qcow2', :bus => 'virtio', :size => '20G', :device => 'vdb'
|
||||||
end
|
end
|
||||||
|
|
||||||
# Networks and interfaces
|
# Networks and interfaces
|
||||||
@ -85,17 +93,21 @@ Vagrant.configure("2") do |config|
|
|||||||
:libvirt__network_name => "#{$instance_name_prefix}-private",
|
:libvirt__network_name => "#{$instance_name_prefix}-private",
|
||||||
:libvirt__dhcp_enabled => false,
|
:libvirt__dhcp_enabled => false,
|
||||||
:libvirt__forward_mode => "none"
|
:libvirt__forward_mode => "none"
|
||||||
|
# "neutron" isolated network
|
||||||
|
test_vm.vm.network :private_network,
|
||||||
|
:ip => "#{$neutron_subnet}.#{i+10}",
|
||||||
|
:model_type => "e1000",
|
||||||
|
:libvirt__network_name => "#{$instance_name_prefix}-neutron",
|
||||||
|
:libvirt__dhcp_enabled => false,
|
||||||
|
:libvirt__forward_mode => "none"
|
||||||
|
|
||||||
# Provisioning
|
# Provisioning
|
||||||
config.vm.provision "file", source: "ssh", destination: "~/ssh"
|
config.vm.provision "file", source: "ssh", destination: "~/ssh"
|
||||||
if master
|
if master
|
||||||
config.vm.provision "deploy-k8s", type: "file", source: "deploy-k8s.kargo.sh", destination: "~/deploy-k8s.kargo.sh"
|
config.vm.provision "nodes", type: "file", source: "nodes", destination: "/var/tmp/nodes"
|
||||||
config.vm.provision "custom.yaml", type: "file", source: "custom.yaml", destination: "~/custom.yaml"
|
config.vm.provision "bootstrap", type: "shell", path: "vagrant-scripts/provision-master.sh"
|
||||||
config.vm.provision "kubedns.yaml", type: "file", source: "kubedns.yaml", destination: "~/kubedns.yaml"
|
|
||||||
config.vm.provision "nodes", type: "file", source: "nodes", destination: "~/nodes"
|
|
||||||
config.vm.provision "bootstrap", type: "shell", path: "bootstrap-master.sh"
|
|
||||||
else
|
else
|
||||||
config.vm.provision "bootstrap", type: "shell", path: "bootstrap-node.sh"
|
config.vm.provision "bootstrap", type: "shell", path: "vagrant-scripts/provision-node.sh"
|
||||||
end
|
end
|
||||||
|
|
||||||
end
|
end
|
||||||
|
11
bak/deploy-ccp.sh
Executable file
11
bak/deploy-ccp.sh
Executable file
@ -0,0 +1,11 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
INVENTORY="nodes_to_inv.py"
|
||||||
|
|
||||||
|
echo "Createing repository and CCP images, it may take a while..."
|
||||||
|
ansible-playbook -i $INVENTORY playbooks/ccp-build.yaml
|
||||||
|
|
||||||
|
echo "Deploying up OpenStack CCP..."
|
||||||
|
ansible-playbook -i $INVENTORY playbooks/ccp-deploy.yaml
|
@ -1,31 +1,22 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
echo master > /var/tmp/role
|
|
||||||
|
|
||||||
# Packages
|
# Packages
|
||||||
sudo apt-get --yes update
|
apt-get --yes update
|
||||||
sudo apt-get --yes upgrade
|
apt-get --yes upgrade
|
||||||
sudo apt-get --yes install git screen vim telnet tcpdump python-setuptools gcc python-dev python-pip libssl-dev libffi-dev software-properties-common
|
apt-get --yes install git screen vim telnet tcpdump python-setuptools gcc python-dev python-pip libssl-dev libffi-dev software-properties-common curl python-netaddr
|
||||||
|
|
||||||
# Get ansible-2.1+, vanilla ubuntu-16.04 ansible (2.0.0.2) is broken due to https://github.com/ansible/ansible/issues/13876
|
# Get ansible-2.1+, vanilla ubuntu-16.04 ansible (2.0.0.2) is broken due to https://github.com/ansible/ansible/issues/13876
|
||||||
sudo sh -c 'apt-add-repository -y ppa:ansible/ansible;apt-get update;apt-get install -y ansible'
|
ansible --version || (
|
||||||
|
apt-add-repository -y ppa:ansible/ansible
|
||||||
|
apt-get update
|
||||||
|
apt-get install -y ansible
|
||||||
|
)
|
||||||
|
|
||||||
# Kargo-cli
|
# Copy/create nodes list
|
||||||
sudo git clone https://github.com/kubespray/kargo-cli.git /root/kargo-cli
|
test -f ./nodes || cp /var/tmp/nodes ./nodes
|
||||||
sudo sh -c 'cd /root/kargo-cli && python setup.py install'
|
|
||||||
|
|
||||||
# k8s deploy script and configs
|
# Either pull or copy microservices repos
|
||||||
sudo sh -c 'cp -a ~vagrant/deploy-k8s.kargo.sh /root/ && chmod 755 /root/deploy-k8s.kargo.sh'
|
cp -a /var/tmp/microservices* ./ccp/ || touch /var/tmp/ccp-download
|
||||||
sudo cp -a ~vagrant/custom.yaml /root/custom.yaml
|
|
||||||
sudo cp -a ~vagrant/kubedns.yaml /root/kubedns.yaml
|
|
||||||
|
|
||||||
# SSH keys and config
|
# Pull kargo
|
||||||
sudo rm -rf /root/.ssh
|
git clone https://github.com/kubespray/kargo ~/kargo
|
||||||
sudo mv ~vagrant/ssh /root/.ssh
|
|
||||||
sudo echo -e 'Host 10.*\n\tStrictHostKeyChecking no\n\tUserKnownHostsFile=/dev/null' >> /root/.ssh/config
|
|
||||||
sudo chown -R root: /root/.ssh
|
|
||||||
|
|
||||||
# Copy nodes list
|
|
||||||
sudo cp ~vagrant/nodes /root/nodes
|
|
||||||
|
|
||||||
# README
|
|
||||||
sudo echo 'cd /root/kargo ; ansible-playbook -vvv -i inv/inventory.cfg cluster.yml -u root -f 7' > /root/README
|
|
||||||
|
2
ccp/.gitignore
vendored
Normal file
2
ccp/.gitignore
vendored
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
microservices-repos
|
||||||
|
microservices
|
16
ccp/ccp.conf
Normal file
16
ccp/ccp.conf
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
[DEFAULT]
|
||||||
|
deploy_config = /root/ccp/deploy-config.yaml
|
||||||
|
|
||||||
|
[builder]
|
||||||
|
push = True
|
||||||
|
|
||||||
|
[registry]
|
||||||
|
address = "127.0.0.1:31500"
|
||||||
|
|
||||||
|
[kubernetes]
|
||||||
|
namespace = "openstack"
|
||||||
|
|
||||||
|
[repositories]
|
||||||
|
skip_empty = True
|
||||||
|
protocol = https
|
||||||
|
port = 443
|
6
ccp/deploy-config.yaml
Normal file
6
ccp/deploy-config.yaml
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
configs:
|
||||||
|
public_interface: "eth1"
|
||||||
|
private_interface: "eth2"
|
||||||
|
neutron_external_interface: "eth3"
|
||||||
|
neutron_logging_debug: "true"
|
||||||
|
neutron_plugin_agent: "openvswitch"
|
25
ccp/label-nodes.sh
Executable file
25
ccp/label-nodes.sh
Executable file
@ -0,0 +1,25 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# FIXME: hardcoded roles
|
||||||
|
declare -A nodes
|
||||||
|
nodes=( \
|
||||||
|
["node1"]="openstack-controller=true"
|
||||||
|
["node2"]="openstack-compute=true"
|
||||||
|
["node3"]="openstack-compute=true"
|
||||||
|
)
|
||||||
|
|
||||||
|
label_nodes() {
|
||||||
|
all_label='openstack-compute-controller=true'
|
||||||
|
for i in "${!nodes[@]}"
|
||||||
|
do
|
||||||
|
node=$i
|
||||||
|
label=${nodes[$i]}
|
||||||
|
kubectl get nodes $node --show-labels | grep -q "$label" || kubectl label nodes $node $label
|
||||||
|
kubectl get nodes $node --show-labels | grep -q "$all_label" || kubectl label nodes $node $all_label
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
label_nodes
|
||||||
|
|
16
ccp/registry_pod.yaml
Normal file
16
ccp/registry_pod.yaml
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
apiVersion: v1
|
||||||
|
kind: Pod
|
||||||
|
metadata:
|
||||||
|
name: registry
|
||||||
|
labels:
|
||||||
|
app: registry
|
||||||
|
spec:
|
||||||
|
containers:
|
||||||
|
- name: registry
|
||||||
|
image: registry:2
|
||||||
|
env:
|
||||||
|
imagePullPolicy: Always
|
||||||
|
ports:
|
||||||
|
- containerPort: 5000
|
||||||
|
hostPort: 5000
|
||||||
|
|
15
ccp/registry_svc.yaml
Normal file
15
ccp/registry_svc.yaml
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
kind: "Service"
|
||||||
|
apiVersion: "v1"
|
||||||
|
metadata:
|
||||||
|
name: "registry"
|
||||||
|
spec:
|
||||||
|
selector:
|
||||||
|
app: "registry"
|
||||||
|
ports:
|
||||||
|
-
|
||||||
|
protocol: "TCP"
|
||||||
|
port: 5000
|
||||||
|
targetPort: 5000
|
||||||
|
nodePort: 31500
|
||||||
|
type: "NodePort"
|
||||||
|
|
10
custom.yaml
10
custom.yaml
@ -1,3 +1,13 @@
|
|||||||
|
# Kubernetes version
|
||||||
|
kube_version: "v1.2.4"
|
||||||
|
# Switch network to calico
|
||||||
kube_network_plugin: "calico"
|
kube_network_plugin: "calico"
|
||||||
|
# Kube-proxy should be iptables for calico
|
||||||
kube_proxy_mode: "iptables"
|
kube_proxy_mode: "iptables"
|
||||||
|
# Use non-tmpfs tmp dir
|
||||||
local_release_dir: "/var/tmp/releases"
|
local_release_dir: "/var/tmp/releases"
|
||||||
|
# Upstream DNS servers with mirantis.net
|
||||||
|
upstream_dns_servers:
|
||||||
|
- 8.8.8.8
|
||||||
|
- 8.8.4.4
|
||||||
|
- /mirantis.net/172.18.32.6
|
||||||
|
@ -1,26 +1,19 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
INVENTORY="kargo/inventory/inventory.cfg"
|
INVENTORY="nodes_to_inv.py"
|
||||||
|
|
||||||
nodes=""
|
echo "Installing requirements on nodes..."
|
||||||
i=1
|
ansible-playbook -i $INVENTORY playbooks/bootstrap-nodes.yaml
|
||||||
for nodeip in `cat /root/nodes` ; do
|
|
||||||
i=$(( $i+1 ))
|
|
||||||
nodes+=" node${i}[ansible_ssh_host=${nodeip},ip=${nodeip}]"
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ -f "$INVENTORY" ] ; then
|
|
||||||
echo "$INVENTORY already exists, if you want to recreate, pls remove it and re-run this script"
|
|
||||||
else
|
|
||||||
echo "Preparing inventory..."
|
|
||||||
kargo prepare -y --nodes $nodes
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "Running deployment..."
|
echo "Running deployment..."
|
||||||
kargo deploy -y --ansible-opts="-e @custom.yaml"
|
ansible-playbook -i $INVENTORY /root/kargo/cluster.yml -e @custom.yaml
|
||||||
deploy_res=$?
|
deploy_res=$?
|
||||||
|
|
||||||
if [ "$deploy_res" -eq "0" ]; then
|
if [ "$deploy_res" -eq "0" ]; then
|
||||||
echo "Setting up kubedns..."
|
echo "Setting up kubedns..."
|
||||||
ansible-playbook -i $INVENTORY kubedns.yaml
|
ansible-playbook -i $INVENTORY playbooks/kubedns.yaml
|
||||||
|
echo "Setting up kubedashboard..."
|
||||||
|
ansible-playbook -i $INVENTORY playbooks/kubedashboard.yaml
|
||||||
|
echo "Setting up ip route work-around for DNS clusterIP availability..."
|
||||||
|
ansible-playbook -i $INVENTORY playbooks/ipro_for_cluster_ips.yaml
|
||||||
fi
|
fi
|
||||||
|
36
deploy-netchecker.sh
Executable file
36
deploy-netchecker.sh
Executable file
@ -0,0 +1,36 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
if [ -n "$1" ] ; then
|
||||||
|
NS="--namespace=$1"
|
||||||
|
fi
|
||||||
|
|
||||||
|
kubectl get nodes || exit 1
|
||||||
|
|
||||||
|
echo "Installing netchecker server"
|
||||||
|
git clone https://github.com/adidenko/netchecker-server
|
||||||
|
pushd netchecker-server
|
||||||
|
pushd docker
|
||||||
|
docker build -t 127.0.0.1:31500/netchecker/server:latest .
|
||||||
|
docker push 127.0.0.1:31500/netchecker/server:latest
|
||||||
|
popd
|
||||||
|
kubectl create -f netchecker-server_pod.yaml $NS
|
||||||
|
kubectl create -f netchecker-server_svc.yaml $NS
|
||||||
|
popd
|
||||||
|
|
||||||
|
echo "Installing netchecker agents"
|
||||||
|
git clone https://github.com/adidenko/netchecker-agent
|
||||||
|
pushd netchecker-agent
|
||||||
|
pushd docker
|
||||||
|
docker build -t 127.0.0.1:31500/netchecker/agent:latest .
|
||||||
|
docker push 127.0.0.1:31500/netchecker/agent:latest
|
||||||
|
popd
|
||||||
|
kubectl get nodes | grep Ready | awk '{print $1}' | xargs -I {} kubectl label nodes {} netchecker=agent
|
||||||
|
NUMNODES=`kubectl get nodes --show-labels | grep Ready | grep netchecker=agent | wc -l`
|
||||||
|
sed -e "s/replicas:.*/replicas: $NUMNODES/g" -i netchecker-agent_rc.yaml
|
||||||
|
kubectl create -f netchecker-agent_rc.yaml $NS
|
||||||
|
popd
|
||||||
|
|
||||||
|
echo "DONE"
|
||||||
|
echo
|
||||||
|
echo "use the following command to check agents:"
|
||||||
|
echo "curl -s -X GET 'http://localhost:31081/api/v1/agents/' | python -mjson.tool"
|
25
examples/kubernetes/ccp/README.md
Normal file
25
examples/kubernetes/ccp/README.md
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
CCP examples
|
||||||
|
============
|
||||||
|
Some examples for Openstack CCP.
|
||||||
|
|
||||||
|
Expose Horizon
|
||||||
|
==============
|
||||||
|
|
||||||
|
* Get nodePort of Horizon service:
|
||||||
|
```bash
|
||||||
|
echo $(kubectl --namespace=openstack get svc/horizon -o go-template='{{(index .spec.ports 0).nodePort}}')
|
||||||
|
```
|
||||||
|
|
||||||
|
* NAT on your router/jump-box to any k8s minion public IP and nodePort to provide external access:
|
||||||
|
```bash
|
||||||
|
iptables -t nat -I PREROUTING -p tcp --dport 8080 -j DNAT --to-destination 10.210.0.12:32643
|
||||||
|
iptables -t nat -I POSTROUTING -d 10.210.0.12 ! -s 10.210.0.0/24 -j MASQUERADE
|
||||||
|
iptables -I FORWARD -d 10.210.0.12 -j ACCEPT
|
||||||
|
```
|
||||||
|
|
||||||
|
Where `10.210.0.12` is IP of one of your k8s minions and `32643` is nodePort of Horizon service.
|
||||||
|
|
||||||
|
* You can do the same for novnc:
|
||||||
|
```bash
|
||||||
|
echo $(kubectl --namespace=openstack get svc/nova-novncproxy -o go-template='{{(index .spec.ports 0).nodePort}}')
|
||||||
|
```
|
36
examples/kubernetes/ccp/run_demo.sh
Normal file
36
examples/kubernetes/ccp/run_demo.sh
Normal file
@ -0,0 +1,36 @@
|
|||||||
|
# This script should be executed inside k8s:
|
||||||
|
# docker run -t -i 127.0.0.1:31500/mcp/nova-base /bin/bash
|
||||||
|
|
||||||
|
export OS_USERNAME=admin
|
||||||
|
export OS_PASSWORD=password
|
||||||
|
export OS_TENANT_NAME=admin
|
||||||
|
export OS_REGION_NAME=RegionOne
|
||||||
|
export OS_AUTH_URL=http://keystone:35357
|
||||||
|
|
||||||
|
# Key
|
||||||
|
nova keypair-add test > test.pem
|
||||||
|
chmod 600 test.pem
|
||||||
|
|
||||||
|
# Flavor
|
||||||
|
nova flavor-create demo --is-public true auto 128 2 1
|
||||||
|
|
||||||
|
# Image
|
||||||
|
curl -O http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
|
||||||
|
glance image-create --name cirros --disk-format qcow2 --container-format bare --file cirros-0.3.4-x86_64-disk.img
|
||||||
|
|
||||||
|
# Aggregates
|
||||||
|
node2=`openstack hypervisor list | grep -o '[a-z]\+-k8s-02'`
|
||||||
|
node3=`openstack hypervisor list | grep -o '[a-z]\+-k8s-03'`
|
||||||
|
nova aggregate-create n2 n2
|
||||||
|
nova aggregate-add-host n2 $node2
|
||||||
|
nova aggregate-create n3 n3
|
||||||
|
nova aggregate-add-host n3 $node3
|
||||||
|
|
||||||
|
# Network
|
||||||
|
neutron net-create net1 --provider:network-type vxlan
|
||||||
|
neutron subnet-create net1 172.20.0.0/24 --name subnet1
|
||||||
|
|
||||||
|
# Instances
|
||||||
|
net_id=`neutron net-list | grep net1 | awk '{print $2}'`
|
||||||
|
nova boot ti02 --image cirros --flavor demo --nic net-id=$net_id --key-name test --availability-zone n2
|
||||||
|
nova boot ti03 --image cirros --flavor demo --nic net-id=$net_id --key-name test --availability-zone n3
|
45
examples/kubernetes/expose-services/README.md
Normal file
45
examples/kubernetes/expose-services/README.md
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
Examples how to expose k8s services
|
||||||
|
===================================
|
||||||
|
|
||||||
|
Exposing dashboard via frontend and externalIPs
|
||||||
|
-----------------------------------------------
|
||||||
|
|
||||||
|
* Edit `kubernetes-dashboard.yaml` and update `externalIPs` to the list of external IPs of your k8s minions
|
||||||
|
|
||||||
|
* Run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
kubectl create -f kubernetes-dashboard.yaml --namespace=kube-system
|
||||||
|
```
|
||||||
|
|
||||||
|
* Access:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl $ANY_MINION_EXTERNAL_IP:9090
|
||||||
|
```
|
||||||
|
|
||||||
|
Exposing dashboard via nodePort
|
||||||
|
-------------------------------
|
||||||
|
|
||||||
|
* Get nodePort of the service:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
echo $(kubectl --namespace=kube-system get svc/kubernetes-dashboard -o go-template='{{(index .spec.ports 0).nodePort}}')
|
||||||
|
```
|
||||||
|
|
||||||
|
* NAT on your router/jump-box to any k8s minion public IP and nodePort to provide external access:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
iptables -t nat -I PREROUTING -p tcp --dport 9090 -j DNAT --to-destination 10.210.0.12:32005
|
||||||
|
iptables -t nat -I POSTROUTING -d 10.210.0.12 ! -s 10.210.0.0/24 -j MASQUERADE
|
||||||
|
iptables -I FORWARD -d 10.210.0.12 -j ACCEPT
|
||||||
|
```
|
||||||
|
|
||||||
|
Where `10.210.0.12` is public IP of one of your k8s minions and `32005` is nodePort of `kubernetes-dashboard` service.
|
||||||
|
|
||||||
|
* Access:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl 10.210.0.12:9090
|
||||||
|
```
|
||||||
|
|
22
examples/kubernetes/expose-services/kubedash.yaml
Normal file
22
examples/kubernetes/expose-services/kubedash.yaml
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: kubedash-frontend
|
||||||
|
labels:
|
||||||
|
app: kubedash-frontend
|
||||||
|
tier: frontend
|
||||||
|
spec:
|
||||||
|
externalIPs:
|
||||||
|
- 10.210.0.12
|
||||||
|
- 10.210.0.13
|
||||||
|
- 10.210.0.14
|
||||||
|
- 10.210.0.15
|
||||||
|
- 10.210.0.16
|
||||||
|
- 10.210.0.17
|
||||||
|
ports:
|
||||||
|
- name: http
|
||||||
|
port: 8289
|
||||||
|
protocol: TCP
|
||||||
|
targetPort: 8289
|
||||||
|
selector:
|
||||||
|
name: kubedash
|
@ -0,0 +1,22 @@
|
|||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: dashboard-frontend
|
||||||
|
labels:
|
||||||
|
app: dashboard-frontend
|
||||||
|
tier: frontend
|
||||||
|
spec:
|
||||||
|
externalIPs:
|
||||||
|
- 10.210.0.12
|
||||||
|
- 10.210.0.13
|
||||||
|
- 10.210.0.14
|
||||||
|
- 10.210.0.15
|
||||||
|
- 10.210.0.16
|
||||||
|
- 10.210.0.17
|
||||||
|
ports:
|
||||||
|
- name: http
|
||||||
|
port: 9090
|
||||||
|
protocol: TCP
|
||||||
|
targetPort: 9090
|
||||||
|
selector:
|
||||||
|
app: kubernetes-dashboard
|
18
examples/kubernetes/external-nginx/README.md
Normal file
18
examples/kubernetes/external-nginx/README.md
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
Nginx example with external IPs
|
||||||
|
===============================
|
||||||
|
|
||||||
|
* Edit `nginx-frontend.yaml` and update `externalIPs` to the list of external IPs of your k8s minions
|
||||||
|
|
||||||
|
* Deploy:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
kubectl create -f nginx-backends.yaml
|
||||||
|
kubectl create -f nginx-frontend.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
* Check:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl $ANY_MINION_EXTERNAL_IP
|
||||||
|
```
|
||||||
|
|
24
examples/kubernetes/external-nginx/nginx-backends.yaml
Normal file
24
examples/kubernetes/external-nginx/nginx-backends.yaml
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
apiVersion: extensions/v1beta1
|
||||||
|
kind: Deployment
|
||||||
|
metadata:
|
||||||
|
name: nginx-backend
|
||||||
|
spec:
|
||||||
|
replicas: 3
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app: nginx-backend
|
||||||
|
tier: backend
|
||||||
|
spec:
|
||||||
|
containers:
|
||||||
|
- name: nginx
|
||||||
|
image: nginx
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
cpu: 100m
|
||||||
|
memory: 100Mi
|
||||||
|
env:
|
||||||
|
- name: GET_HOSTS_FROM
|
||||||
|
value: dns
|
||||||
|
ports:
|
||||||
|
- containerPort: 80
|
22
examples/kubernetes/external-nginx/nginx-frontend.yaml
Normal file
22
examples/kubernetes/external-nginx/nginx-frontend.yaml
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: nginx-frontend
|
||||||
|
labels:
|
||||||
|
app: nginx-frontend
|
||||||
|
tier: frontend
|
||||||
|
spec:
|
||||||
|
externalIPs:
|
||||||
|
- 10.210.0.12
|
||||||
|
- 10.210.0.13
|
||||||
|
- 10.210.0.14
|
||||||
|
- 10.210.0.15
|
||||||
|
- 10.210.0.16
|
||||||
|
- 10.210.0.17
|
||||||
|
ports:
|
||||||
|
- name: http
|
||||||
|
port: 80
|
||||||
|
protocol: TCP
|
||||||
|
targetPort: 80
|
||||||
|
selector:
|
||||||
|
app: nginx-backend
|
97
nodes_to_inv.py
Executable file
97
nodes_to_inv.py
Executable file
@ -0,0 +1,97 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
# A simple dynamic replacemant of 'kargo prepare'
|
||||||
|
# Generates ansible inventory from a list of IPs in 'nodes' file.
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
def read_nodes_from_file(filename):
|
||||||
|
f = open(filename, 'r')
|
||||||
|
content = [x.strip('\n') for x in f.readlines()]
|
||||||
|
return content
|
||||||
|
|
||||||
|
def read_vars_from_file(src="/root/kargo/inventory/group_vars/all.yml"):
|
||||||
|
with open(src, 'r') as f:
|
||||||
|
content = yaml.load(f)
|
||||||
|
return content
|
||||||
|
|
||||||
|
def nodes_to_hash(nodes_list, masters, group_vars):
|
||||||
|
nodes = {
|
||||||
|
'all': {
|
||||||
|
'hosts': [],
|
||||||
|
'vars': group_vars
|
||||||
|
},
|
||||||
|
'etcd': {
|
||||||
|
'hosts': [],
|
||||||
|
},
|
||||||
|
'kube-master': {
|
||||||
|
'hosts': [],
|
||||||
|
},
|
||||||
|
'kube-node': {
|
||||||
|
'hosts': [],
|
||||||
|
},
|
||||||
|
'k8s-cluster': {
|
||||||
|
'children': ['kube-node', 'kube-master']
|
||||||
|
},
|
||||||
|
'_meta': {
|
||||||
|
'hostvars': {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
i = 1
|
||||||
|
|
||||||
|
for node_ip in nodes_list:
|
||||||
|
node_name = "node%s" % i
|
||||||
|
nodes['all']['hosts'].append(node_name)
|
||||||
|
nodes['_meta']['hostvars'][node_name] = {
|
||||||
|
'ansible_ssh_host': node_ip,
|
||||||
|
'ip': node_ip,
|
||||||
|
}
|
||||||
|
nodes['kube-node']['hosts'].append(node_name)
|
||||||
|
if i <= masters:
|
||||||
|
nodes['kube-master']['hosts'].append(node_name)
|
||||||
|
if i <= 3:
|
||||||
|
nodes['etcd']['hosts'].append(node_name)
|
||||||
|
i += 1
|
||||||
|
|
||||||
|
return nodes
|
||||||
|
|
||||||
|
def main():
    """Entry point: emulate an ansible dynamic-inventory script.

    Supports the conventional '--list' and '--host <name>' invocations
    and prints JSON on stdout. Settings come from the environment
    because ansible passes no extra arguments to inventory scripts:
        K8S_NODES_FILE   - file with one node IP per line (default 'nodes')
        K8S_MASTERS      - number of master nodes (default 2)
        KARGO_GROUP_VARS - path to kargo group_vars/all.yml
    """
    parser = argparse.ArgumentParser(description='Kargo inventory simulator')
    parser.add_argument('--list', action='store_true')
    parser.add_argument('--host', default=False)
    args = parser.parse_args()

    # Read params from ENV since ansible does not support passing args
    # to dynamic inventory scripts. 'or default' preserves the original
    # behavior of falling back when the variable is unset or empty.
    nodes_file = os.environ.get('K8S_NODES_FILE') or 'nodes'
    masters = int(os.environ.get('K8S_MASTERS') or 2)
    vars_file = (os.environ.get('KARGO_GROUP_VARS')
                 or "/root/kargo/inventory/group_vars/all.yml")

    nodes_list = read_nodes_from_file(nodes_file)

    if len(nodes_list) < 3:
        # SystemExit writes the message to stderr and exits non-zero;
        # the original printed the error to stdout (where callers parse
        # JSON) and returned success.
        raise SystemExit("Error: requires at least 3 nodes")

    nodes = nodes_to_hash(nodes_list, masters, read_vars_from_file(vars_file))

    # print(...) works under both python 2 and 3; the original used
    # python-2-only print statements despite the generic shebang.
    if args.host:
        print(json.dumps(nodes['_meta']['hostvars'][args.host]))
    else:
        print(json.dumps(nodes))
|
||||||
|
|
||||||
|
# Run as a script (e.g. directly by ansible as a dynamic inventory);
# importing the module has no side effects.
if __name__ == "__main__":
    main()
|
17
playbooks/bootstrap-nodes.yaml
Normal file
17
playbooks/bootstrap-nodes.yaml
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
# Install the baseline package set (debugging and admin tools) on every
# node in the inventory.
- hosts: all
  tasks:
    - name: Install packages
      package:
        name: "{{ item }}"
        state: latest
      with_items:
        - python-pip
        - screen
        - vim
        - telnet
        - tcpdump
        - traceroute
        - iperf3
        - nmap
        - ethtool
        - curl
        - git
        - dnsutils
|
69
playbooks/ccp-build.yaml
Normal file
69
playbooks/ccp-build.yaml
Normal file
@ -0,0 +1,69 @@
|
|||||||
|
# Build fuel-ccp images on the kube-master nodes and push them into an
# in-cluster docker registry.
- hosts: kube-master

  pre_tasks:

    - name: Download fuel-ccp
      git:
        repo: https://git.openstack.org/openstack/fuel-ccp
        dest: /usr/local/src/fuel-ccp
        version: master

    - name: Upload ccp configs to master nodes
      synchronize:
        src: ../ccp/
        dest: /root/ccp/

  tasks:

    - name: Install CCP cli tool
      # 'creates' makes the task idempotent: skipped once the cli exists.
      shell: pip install -U fuel-ccp/
      args:
        chdir: /usr/local/src
        creates: /usr/local/bin/mcp-microservices

    - name: Get pods
      shell: kubectl get pods
      register: get_pod
      run_once: true

    - name: Get services
      shell: kubectl get svc
      register: get_svc
      run_once: true

    - name: Create registry pod
      # Only create the registry if 'kubectl get pods' did not list it.
      shell: kubectl create -f registry_pod.yaml
      args:
        chdir: /root/ccp
      run_once: true
      when: get_pod.stdout.find('registry') == -1

    - name: Create registry svc
      shell: kubectl create -f registry_svc.yaml
      args:
        chdir: /root/ccp
      run_once: true
      when: get_svc.stdout.find('registry') == -1

    - name: Fetch CCP images
      shell: mcp-microservices --config-file=/root/ccp/ccp.conf fetch
      run_once: true

    # Kept disabled: cherry-picks a gerrit review into fuel-ccp-neutron.
    # - name: Patch fuel-ccp-neutron
    #   run_once: true
    #   args:
    #     chdir: /root/microservices-repos/fuel-ccp-neutron
    #   shell: git fetch https://git.openstack.org/openstack/fuel-ccp-neutron {{ item }} && git cherry-pick FETCH_HEAD
    #   with_items:
    #     - "refs/changes/96/340496/6"

    - name: Build CCP images
      shell: mcp-microservices --config-file=/root/ccp/ccp.conf build
      run_once: true

- hosts: k8s-cluster

  tasks:

    - name: Check number of built images
      # Sanity check: registry (nodePort 31500, presumably — confirm
      # against registry_svc.yaml) must expose at least 29 mcp/ images.
      shell: test $(curl -s 127.0.0.1:31500/v2/_catalog | python -mjson.tool | grep mcp/ | wc -l) -ge 29
|
27
playbooks/ccp-deploy.yaml
Normal file
27
playbooks/ccp-deploy.yaml
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
# Deploy the previously-built CCP images into the cluster from a
# kube-master node.
- hosts: kube-master

  pre_tasks:

    - name: Rsync CCP configs
      synchronize:
        src: ../ccp/
        dest: /root/ccp/

  tasks:
    - name: Label nodes
      # label-nodes.sh comes from the synced ccp/ directory above.
      shell: ./label-nodes.sh
      args:
        chdir: /root/ccp
      run_once: true

    - name: Get namespaces
      shell: kubectl get namespace
      register: get_ns
      run_once: true

    - name: Deploy CCP
      # Deploy only if the 'openstack' namespace does not exist yet,
      # making repeated playbook runs safe.
      shell: mcp-microservices --config-file=/root/ccp/ccp.conf deploy
      args:
        chdir: /root/ccp
      run_once: true
      when: get_ns.stdout.find('openstack') == -1
|
24
playbooks/ipro_for_cluster_ips.yaml
Normal file
24
playbooks/ipro_for_cluster_ips.yaml
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
# FIXME: add persistent routing rule
# Route the kubernetes service network on every node via the local
# calico address, and make the 'openstack' namespace resolvable.
- hosts: kube-master
  tasks:
    - name: Get kube service net
      # Extract the CIDR (e.g. 10.233.0.0/18) from the apiserver env file.
      shell: grep KUBE_SERVICE_ADDRESSES /etc/kubernetes/kube-apiserver.env | grep -oE "\b([0-9]{1,3}\.){3}[0-9]{1,3}/[0-9]{1,2}\b"
      register: kube_service_addresses
      run_once: true
- hosts: all
  tasks:
    - name: Get local IP
      shell: "calicoctl status | grep IP: | awk '{print $2}'"
      register: local_ip
    - name: Get route
      shell: ip ro ls | grep "^{{ hostvars[groups['kube-master'][0]]['kube_service_addresses']['stdout'] }}" || echo ""
      register: local_route
    - name: Clean up route
      shell: ip ro del {{ hostvars[groups['kube-master'][0]]['kube_service_addresses']['stdout'] }} || true
      # 'when' is already a jinja expression: reference variables
      # directly instead of nesting '{{ }}' (deprecated, errors on
      # modern ansible).
      when: local_route.stdout.find(local_ip.stdout) == -1
    - name: Setup route
      shell: ip ro add {{ hostvars[groups['kube-master'][0]]['kube_service_addresses']['stdout'] }} via {{ local_ip.stdout }}
      when: local_route.stdout.find(local_ip.stdout) == -1
    - name: Add openstack namespace to resolv.conf
      # Idempotent: only append the search domain if it is not present.
      shell: grep openstack.svc.cluster.local /etc/resolv.conf || sed '/^search / s/$/ openstack.svc.cluster.local/' -i /etc/resolv.conf
|
||||||
|
|
5
playbooks/kubedash.yaml
Normal file
5
playbooks/kubedash.yaml
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
# Deploy the kubedash UI into the kube-system namespace via kpm.
- hosts: kube-master
  tasks:
    # Task renamed: it deploys kubedash, not kubedns (copy-paste name).
    - name: setup-kubedash
      shell: kpm deploy kube-system/kubedash --namespace=kube-system
      run_once: true
|
5
playbooks/kubedashboard.yaml
Normal file
5
playbooks/kubedashboard.yaml
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
# Deploy the kubernetes-dashboard UI into the kube-system namespace via kpm.
- hosts: kube-master
  tasks:
    # Task renamed: it deploys the dashboard, not kubedns (copy-paste name).
    - name: setup-kubernetes-dashboard
      shell: kpm deploy kube-system/kubernetes-dashboard --namespace=kube-system
      run_once: true
|
14
vagrant-scripts/provision-master.sh
Executable file
14
vagrant-scripts/provision-master.sh
Executable file
@ -0,0 +1,14 @@
|
|||||||
|
#!/bin/bash
# Provision the Vagrant "master" node: record the node role, install
# base packages, and hand the vagrant-provided SSH keys to root.
set -e

echo master > /var/tmp/role

# Packages
sudo apt-get --yes update
sudo apt-get --yes upgrade
sudo apt-get --yes install screen git

# SSH keys and config
sudo rm -rf /root/.ssh
sudo mv ~vagrant/ssh /root/.ssh
# BUG FIX: 'sudo echo ... >> /root/.ssh/config' performs the append as
# the *calling* user (the redirection is not run under sudo), so it
# fails when the provision script is not already root. 'sudo tee -a'
# runs the append itself with privileges. printf replaces the
# non-portable 'echo -e'.
printf 'Host 10.*\n\tStrictHostKeyChecking no\n\tUserKnownHostsFile=/dev/null\n' \
  | sudo tee -a /root/.ssh/config >/dev/null
sudo chown -R root: /root/.ssh
|
||||||
|
|
@ -4,10 +4,7 @@ echo node > /var/tmp/role
|
|||||||
# Packages
|
# Packages
|
||||||
sudo apt-get --yes update
|
sudo apt-get --yes update
|
||||||
sudo apt-get --yes upgrade
|
sudo apt-get --yes upgrade
|
||||||
sudo apt-get --yes install screen vim telnet tcpdump python-pip traceroute iperf3 nmap ethtool
|
sudo apt-get --yes install python
|
||||||
|
|
||||||
# Pip
|
|
||||||
sudo pip install kpm
|
|
||||||
|
|
||||||
# SSH
|
# SSH
|
||||||
sudo rm -rf /root/.ssh
|
sudo rm -rf /root/.ssh
|
Reference in New Issue
Block a user