Compare commits
13 Commits
v2.23.1 ... release-2.
e86c0cf036
b50e4c9eb7
e4ac3ab799
0af6dfca5d
d6f688f060
a97fbec320
c3e73aabcf
d498df20db
08467ad6b3
3f41d8b274
0634be4c88
919e666fb9
813576efeb
@@ -14,7 +14,7 @@ variables:
   GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
   ANSIBLE_FORCE_COLOR: "true"
   MAGIC: "ci check this"
-  TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
+  TEST_ID: "$CI_PIPELINE_ID-$CI_JOB_ID"
   CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
   CI_TEST_REGISTRY_MIRROR: "./tests/common/_docker_hub_registry_mirror.yml"
   CI_TEST_SETTING: "./tests/common/_kubespray_test_settings.yml"
@@ -14,7 +14,7 @@ vagrant-validate:
   stage: unit-tests
   tags: [light]
   variables:
-    VAGRANT_VERSION: 2.2.19
+    VAGRANT_VERSION: 2.3.7
   script:
     - ./tests/scripts/vagrant-validate.sh
   except: ['triggers', 'master']
@@ -86,7 +86,7 @@ packet_ubuntu18-crio:
   stage: deploy-part2
   when: manual

-packet_fedora35-crio:
+packet_fedora37-crio:
   extends: .packet_pr
   stage: deploy-part2
   when: manual
@@ -173,10 +173,11 @@ packet_almalinux8-docker:
   extends: .packet_pr
   when: on_success

-packet_fedora36-docker-weave:
+packet_fedora38-docker-weave:
   stage: deploy-part2
   extends: .packet_pr
   when: on_success
+  allow_failure: true

 packet_opensuse-canal:
   stage: deploy-part2
@@ -236,19 +237,19 @@ packet_centos7-canal-ha:
   extends: .packet_pr
   when: manual

-packet_fedora36-docker-calico:
+packet_fedora38-docker-calico:
   stage: deploy-part2
   extends: .packet_periodic
   when: on_success
   variables:
     RESET_CHECK: "true"

-packet_fedora35-calico-selinux:
+packet_fedora37-calico-selinux:
   stage: deploy-part2
   extends: .packet_periodic
   when: on_success

-packet_fedora35-calico-swap-selinux:
+packet_fedora37-calico-swap-selinux:
   stage: deploy-part2
   extends: .packet_pr
   when: manual
@@ -263,7 +264,7 @@ packet_almalinux8-calico-nodelocaldns-secondary:
   extends: .packet_pr
   when: manual

-packet_fedora36-kube-ovn:
+packet_fedora38-kube-ovn:
   stage: deploy-part2
   extends: .packet_periodic
   when: on_success
@@ -56,7 +56,7 @@ vagrant_ubuntu16-kube-router-svc-proxy:
   extends: .vagrant
   when: manual

-vagrant_fedora35-kube-router:
+vagrant_fedora37-kube-router:
   stage: deploy-part2
   extends: .vagrant
   when: on_success
@@ -7,7 +7,7 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

 RUN apt update -y \
     && apt install -y \
-    curl python3 python3-pip sshpass \
+    curl python3 python3-pip sshpass rsync \
     && rm -rf /var/lib/apt/lists/*

 # Some tools like yamllint need this
@@ -29,7 +29,9 @@ RUN python3 -m pip install --no-cache-dir \
     cryptography==3.4.8 \
     jinja2==2.11.3 \
     netaddr==0.7.19 \
+    jmespath==1.0.1 \
     MarkupSafe==1.1.1 \
+    ruamel.yaml==0.17.21 \
     && KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaults/main.yaml) \
     && curl -LO https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$ARCH/kubectl \
     && chmod a+x kubectl \
@@ -134,7 +134,7 @@ vagrant up
 - **Debian** Bullseye, Buster, Jessie, Stretch
 - **Ubuntu** 16.04, 18.04, 20.04, 22.04
 - **CentOS/RHEL** 7, [8, 9](docs/centos.md#centos-8)
-- **Fedora** 35, 36
+- **Fedora** 37, 38
 - **Fedora CoreOS** (see [fcos Note](docs/fcos.md))
 - **openSUSE** Leap 15.x/Tumbleweed
 - **Oracle Linux** 7, [8, 9](docs/centos.md#centos-8)
Vagrantfile (vendored): 16 changes
@@ -29,8 +29,8 @@ SUPPORTED_OS = {
   "almalinux8" => {box: "almalinux/8", user: "vagrant"},
   "almalinux8-bento" => {box: "bento/almalinux-8", user: "vagrant"},
   "rockylinux8" => {box: "generic/rocky8", user: "vagrant"},
-  "fedora35" => {box: "fedora/35-cloud-base", user: "vagrant"},
-  "fedora36" => {box: "fedora/36-cloud-base", user: "vagrant"},
+  "fedora37" => {box: "fedora/37-cloud-base", user: "vagrant"},
+  "fedora38" => {box: "fedora/38-cloud-base", user: "vagrant"},
   "opensuse" => {box: "opensuse/Leap-15.4.x86_64", user: "vagrant"},
   "opensuse-tumbleweed" => {box: "opensuse/Tumbleweed.x86_64", user: "vagrant"},
   "oraclelinux" => {box: "generic/oracle7", user: "vagrant"},
@@ -201,7 +201,8 @@ Vagrant.configure("2") do |config|
       end

       ip = "#{$subnet}.#{i+100}"
-      node.vm.network :private_network, ip: ip,
+      node.vm.network :private_network,
+        :ip => ip,
         :libvirt__guest_ipv6 => 'yes',
         :libvirt__ipv6_address => "#{$subnet_ipv6}::#{i+100}",
         :libvirt__ipv6_prefix => "64",
@@ -216,6 +217,14 @@ Vagrant.configure("2") do |config|
         node.vm.provision "shell", inline: "rm -f /etc/modprobe.d/local.conf"
         node.vm.provision "shell", inline: "sed -i '/net.ipv6.conf.all.disable_ipv6/d' /etc/sysctl.d/99-sysctl.conf /etc/sysctl.conf"
       end
+      # Hack for fedora37/38 to get the IP address of the second interface
+      if ["fedora37", "fedora38"].include? $os
+        config.vm.provision "shell", inline: <<-SHELL
+          nmcli conn modify 'Wired connection 2' ipv4.addresses $(cat /etc/sysconfig/network-scripts/ifcfg-eth1 | grep IPADDR | cut -d "=" -f2)
+          nmcli conn modify 'Wired connection 2' ipv4.method manual
+          service NetworkManager restart
+        SHELL
+      end

       # Disable firewalld on oraclelinux/redhat vms
       if ["oraclelinux","oraclelinux8","rhel7","rhel8"].include? $os
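The added hack re-applies the static address that vagrant-libvirt writes for eth1, which the Fedora 37/38 cloud images otherwise ignore under NetworkManager. A minimal sketch of the extraction step, with hypothetical ifcfg-eth1 contents:

```bash
# Hypothetical /etc/sysconfig/network-scripts/ifcfg-eth1 written by vagrant-libvirt:
#   BOOTPROTO=none
#   IPADDR=172.18.8.101
grep IPADDR /etc/sysconfig/network-scripts/ifcfg-eth1 | cut -d '=' -f2
# -> 172.18.8.101, the value fed to `nmcli conn modify ... ipv4.addresses`
```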
@@ -248,6 +257,7 @@ Vagrant.configure("2") do |config|
       if i == $num_instances
         node.vm.provision "ansible" do |ansible|
           ansible.playbook = $playbook
+          ansible.compatibility_mode = "2.0"
           ansible.verbose = $ansible_verbosity
           $ansible_inventory_path = File.join( $inventory, "hosts.ini")
           if File.exist?($ansible_inventory_path)
@@ -12,7 +12,7 @@ ssh_public_keys = [
 machines = {
   "master-0" : {
     "node_type" : "master",
-    "size" : "Medium",
+    "size" : "standard.medium",
     "boot_disk" : {
       "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
       "root_partition_size" : 50,
@@ -22,7 +22,7 @@ machines = {
   },
   "worker-0" : {
     "node_type" : "worker",
-    "size" : "Large",
+    "size" : "standard.large",
     "boot_disk" : {
       "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
       "root_partition_size" : 50,
@@ -32,7 +32,7 @@ machines = {
   },
   "worker-1" : {
     "node_type" : "worker",
-    "size" : "Large",
+    "size" : "standard.large",
     "boot_disk" : {
       "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
       "root_partition_size" : 50,
@@ -42,7 +42,7 @@ machines = {
   },
   "worker-2" : {
     "node_type" : "worker",
-    "size" : "Large",
+    "size" : "standard.large",
     "boot_disk" : {
       "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
       "root_partition_size" : 50,
@@ -1,29 +1,25 @@
-data "exoscale_compute_template" "os_image" {
+data "exoscale_template" "os_image" {
   for_each = var.machines

   zone = var.zone
   name = each.value.boot_disk.image_name
 }

-data "exoscale_compute" "master_nodes" {
-  for_each = exoscale_compute.master
-
-  id = each.value.id
-
-  # Since private IP address is not assigned until the nics are created we need this
-  depends_on = [exoscale_nic.master_private_network_nic]
+data "exoscale_compute_instance" "master_nodes" {
+  for_each = exoscale_compute_instance.master
+
+  id   = each.value.id
+  zone = var.zone
 }

-data "exoscale_compute" "worker_nodes" {
-  for_each = exoscale_compute.worker
-
-  id = each.value.id
-
-  # Since private IP address is not assigned until the nics are created we need this
-  depends_on = [exoscale_nic.worker_private_network_nic]
+data "exoscale_compute_instance" "worker_nodes" {
+  for_each = exoscale_compute_instance.worker
+
+  id   = each.value.id
+  zone = var.zone
 }

-resource "exoscale_network" "private_network" {
+resource "exoscale_private_network" "private_network" {
   zone = var.zone
   name = "${var.prefix}-network"
@@ -34,25 +30,29 @@ resource "exoscale_network" "private_network" {
   netmask = cidrnetmask(var.private_network_cidr)
 }

-resource "exoscale_compute" "master" {
+resource "exoscale_compute_instance" "master" {
   for_each = {
     for name, machine in var.machines :
     name => machine
     if machine.node_type == "master"
   }

-  display_name    = "${var.prefix}-${each.key}"
-  template_id     = data.exoscale_compute_template.os_image[each.key].id
-  size            = each.value.size
-  disk_size       = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
-  state           = "Running"
-  zone            = var.zone
-  security_groups = [exoscale_security_group.master_sg.name]
+  name               = "${var.prefix}-${each.key}"
+  template_id        = data.exoscale_template.os_image[each.key].id
+  type               = each.value.size
+  disk_size          = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
+  state              = "Running"
+  zone               = var.zone
+  security_group_ids = [exoscale_security_group.master_sg.id]
+  network_interface {
+    network_id = exoscale_private_network.private_network.id
+  }
+  elastic_ip_ids = [exoscale_elastic_ip.control_plane_lb.id]

   user_data = templatefile(
     "${path.module}/templates/cloud-init.tmpl",
     {
-      eip_ip_address            = exoscale_ipaddress.ingress_controller_lb.ip_address
+      eip_ip_address            = exoscale_elastic_ip.ingress_controller_lb.ip_address
       node_local_partition_size = each.value.boot_disk.node_local_partition_size
       ceph_partition_size       = each.value.boot_disk.ceph_partition_size
       root_partition_size      = each.value.boot_disk.root_partition_size
@@ -62,25 +62,29 @@ resource "exoscale_compute" "master" {
   )
 }

-resource "exoscale_compute" "worker" {
+resource "exoscale_compute_instance" "worker" {
   for_each = {
     for name, machine in var.machines :
     name => machine
     if machine.node_type == "worker"
   }

-  display_name    = "${var.prefix}-${each.key}"
-  template_id     = data.exoscale_compute_template.os_image[each.key].id
-  size            = each.value.size
-  disk_size       = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
-  state           = "Running"
-  zone            = var.zone
-  security_groups = [exoscale_security_group.worker_sg.name]
+  name               = "${var.prefix}-${each.key}"
+  template_id        = data.exoscale_template.os_image[each.key].id
+  type               = each.value.size
+  disk_size          = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
+  state              = "Running"
+  zone               = var.zone
+  security_group_ids = [exoscale_security_group.worker_sg.id]
+  network_interface {
+    network_id = exoscale_private_network.private_network.id
+  }
+  elastic_ip_ids = [exoscale_elastic_ip.ingress_controller_lb.id]

   user_data = templatefile(
     "${path.module}/templates/cloud-init.tmpl",
     {
-      eip_ip_address            = exoscale_ipaddress.ingress_controller_lb.ip_address
+      eip_ip_address            = exoscale_elastic_ip.ingress_controller_lb.ip_address
       node_local_partition_size = each.value.boot_disk.node_local_partition_size
       ceph_partition_size       = each.value.boot_disk.ceph_partition_size
       root_partition_size      = each.value.boot_disk.root_partition_size
@@ -90,41 +94,33 @@ resource "exoscale_compute" "worker" {
   )
 }

-resource "exoscale_nic" "master_private_network_nic" {
-  for_each = exoscale_compute.master
-
-  compute_id = each.value.id
-  network_id = exoscale_network.private_network.id
-}
-
-resource "exoscale_nic" "worker_private_network_nic" {
-  for_each = exoscale_compute.worker
-
-  compute_id = each.value.id
-  network_id = exoscale_network.private_network.id
-}
-
 resource "exoscale_security_group" "master_sg" {
   name        = "${var.prefix}-master-sg"
   description = "Security group for Kubernetes masters"
 }

-resource "exoscale_security_group_rules" "master_sg_rules" {
+resource "exoscale_security_group_rule" "master_sg_rule_ssh" {
   security_group_id = exoscale_security_group.master_sg.id

-  # SSH
-  ingress {
-    protocol  = "TCP"
-    cidr_list = var.ssh_whitelist
-    ports     = ["22"]
-  }
+  for_each   = toset(var.ssh_whitelist)
+  type       = "INGRESS"
+  start_port = 22
+  end_port   = 22
+  protocol   = "TCP"
+  cidr       = each.value
+}

-  # Kubernetes API
-  ingress {
-    protocol  = "TCP"
-    cidr_list = var.api_server_whitelist
-    ports     = ["6443"]
-  }
+resource "exoscale_security_group_rule" "master_sg_rule_k8s_api" {
+  security_group_id = exoscale_security_group.master_sg.id
+
+  for_each   = toset(var.api_server_whitelist)
+  type       = "INGRESS"
+  start_port = 6443
+  end_port   = 6443
+  protocol   = "TCP"
+  cidr       = each.value
 }

 resource "exoscale_security_group" "worker_sg" {
@@ -132,62 +128,64 @@ resource "exoscale_security_group" "worker_sg" {
   description = "security group for kubernetes worker nodes"
 }

-resource "exoscale_security_group_rules" "worker_sg_rules" {
+resource "exoscale_security_group_rule" "worker_sg_rule_ssh" {
   security_group_id = exoscale_security_group.worker_sg.id

-  # SSH
-  ingress {
-    protocol  = "TCP"
-    cidr_list = var.ssh_whitelist
-    ports     = ["22"]
-  }
+  for_each   = toset(var.ssh_whitelist)
+  type       = "INGRESS"
+  start_port = 22
+  end_port   = 22
+  protocol   = "TCP"
+  cidr       = each.value
+}

-  # HTTP(S)
-  ingress {
-    protocol  = "TCP"
-    cidr_list = ["0.0.0.0/0"]
-    ports     = ["80", "443"]
-  }
+resource "exoscale_security_group_rule" "worker_sg_rule_http" {
+  security_group_id = exoscale_security_group.worker_sg.id
+
+  # HTTP(S)
+  for_each   = toset(["80", "443"])
+  type       = "INGRESS"
+  start_port = each.value
+  end_port   = each.value
+  protocol   = "TCP"
+  cidr       = "0.0.0.0/0"
+}

-  # Kubernetes Nodeport
-  ingress {
-    protocol  = "TCP"
-    cidr_list = var.nodeport_whitelist
-    ports     = ["30000-32767"]
-  }
-}
+resource "exoscale_security_group_rule" "worker_sg_rule_nodeport" {
+  security_group_id = exoscale_security_group.worker_sg.id
+
+  for_each   = toset(var.nodeport_whitelist)
+  type       = "INGRESS"
+  start_port = 30000
+  end_port   = 32767
+  protocol   = "TCP"
+  cidr       = each.value
+}

-resource "exoscale_ipaddress" "ingress_controller_lb" {
-  zone                     = var.zone
-  healthcheck_mode         = "http"
-  healthcheck_port         = 80
-  healthcheck_path         = "/healthz"
-  healthcheck_interval     = 10
-  healthcheck_timeout      = 2
-  healthcheck_strikes_ok   = 2
-  healthcheck_strikes_fail = 3
-}
-
-resource "exoscale_secondary_ipaddress" "ingress_controller_lb" {
-  for_each = exoscale_compute.worker
-
-  compute_id = each.value.id
-  ip_address = exoscale_ipaddress.ingress_controller_lb.ip_address
-}
+resource "exoscale_elastic_ip" "ingress_controller_lb" {
+  zone = var.zone
+  healthcheck {
+    mode         = "http"
+    port         = 80
+    uri          = "/healthz"
+    interval     = 10
+    timeout      = 2
+    strikes_ok   = 2
+    strikes_fail = 3
+  }
+}

-resource "exoscale_ipaddress" "control_plane_lb" {
-  zone                     = var.zone
-  healthcheck_mode         = "tcp"
-  healthcheck_port         = 6443
-  healthcheck_interval     = 10
-  healthcheck_timeout      = 2
-  healthcheck_strikes_ok   = 2
-  healthcheck_strikes_fail = 3
-}
-
-resource "exoscale_secondary_ipaddress" "control_plane_lb" {
-  for_each = exoscale_compute.master
-
-  compute_id = each.value.id
-  ip_address = exoscale_ipaddress.control_plane_lb.ip_address
-}
+resource "exoscale_elastic_ip" "control_plane_lb" {
+  zone = var.zone
+  healthcheck {
+    mode         = "tcp"
+    port         = 6443
+    interval     = 10
+    timeout      = 2
+    strikes_ok   = 2
+    strikes_fail = 3
+  }
+}
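The switch from one `exoscale_security_group_rules` block to per-CIDR `exoscale_security_group_rule` resources makes each whitelist entry its own addressable resource. A sketch of what the `for_each` expansion produces in state, assuming a hypothetical two-entry `ssh_whitelist`:

```bash
# With ssh_whitelist = ["203.0.113.4/32", "10.0.0.0/8"] (hypothetical values):
terraform state list | grep master_sg_rule_ssh
# exoscale_security_group_rule.master_sg_rule_ssh["10.0.0.0/8"]
# exoscale_security_group_rule.master_sg_rule_ssh["203.0.113.4/32"]
```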
@@ -1,19 +1,19 @@
 output "master_ip_addresses" {
   value = {
-    for key, instance in exoscale_compute.master :
+    for key, instance in exoscale_compute_instance.master :
     instance.name => {
-      "private_ip" = contains(keys(data.exoscale_compute.master_nodes), key) ? data.exoscale_compute.master_nodes[key].private_network_ip_addresses[0] : ""
-      "public_ip"  = exoscale_compute.master[key].ip_address
+      "private_ip" = contains(keys(data.exoscale_compute_instance.master_nodes), key) ? data.exoscale_compute_instance.master_nodes[key].private_network_ip_addresses[0] : ""
+      "public_ip"  = exoscale_compute_instance.master[key].ip_address
     }
   }
 }

 output "worker_ip_addresses" {
   value = {
-    for key, instance in exoscale_compute.worker :
+    for key, instance in exoscale_compute_instance.worker :
     instance.name => {
-      "private_ip" = contains(keys(data.exoscale_compute.worker_nodes), key) ? data.exoscale_compute.worker_nodes[key].private_network_ip_addresses[0] : ""
-      "public_ip"  = exoscale_compute.worker[key].ip_address
+      "private_ip" = contains(keys(data.exoscale_compute_instance.worker_nodes), key) ? data.exoscale_compute_instance.worker_nodes[key].private_network_ip_addresses[0] : ""
+      "public_ip"  = exoscale_compute_instance.worker[key].ip_address
     }
   }
 }

@@ -23,9 +23,9 @@ output "cluster_private_network_cidr" {
 }

 output "ingress_controller_lb_ip_address" {
-  value = exoscale_ipaddress.ingress_controller_lb.ip_address
+  value = exoscale_elastic_ip.ingress_controller_lb.ip_address
 }

 output "control_plane_lb_ip_address" {
-  value = exoscale_ipaddress.control_plane_lb.ip_address
+  value = exoscale_elastic_ip.control_plane_lb.ip_address
 }
@@ -1,7 +1,7 @@
 terraform {
   required_providers {
     exoscale = {
-      source = "exoscale/exoscale"
+      source  = "exoscale/exoscale"
       version = ">= 0.21"
     }
   }
docs/ci.md: 12 changes
@@ -12,8 +12,8 @@ centos7 | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: |
 debian10 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
 debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
-fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
-fedora36 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
+fedora37 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
+fedora38 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
 opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux8 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux9 | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |

@@ -32,8 +32,8 @@ centos7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian11 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-fedora35 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-fedora36 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+fedora37 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+fedora38 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |

@@ -52,8 +52,8 @@ centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian11 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-fedora35 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
-fedora36 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
+fedora37 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
+fedora38 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
 opensuse | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
 rockylinux8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
 rockylinux9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
@@ -100,6 +100,15 @@ containerd_runc_runtime:
 ...
 ```

+Config insecure-registry access to self hosted registries.
+
+```yaml
+containerd_insecure_registries:
+  "test.registry.io": "http://test.registry.io"
+  "172.19.16.11:5000": "http://172.19.16.11:5000"
+  "repo:5000": "http://repo:5000"
+```
+
 [containerd]: https://containerd.io/
 [RuntimeClass]: https://kubernetes.io/docs/concepts/containers/runtime-class/
 [runtime classes in containerd]: https://github.com/containerd/containerd/blob/main/docs/cri/config.md#runtime-classes
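The `containerd_insecure_registries` mapping documented above is rendered into one `hosts.toml` per registry by the role (see the blockinfile task later in this diff). A sketch of the expected result for the first sample entry; the path assumes the default `containerd_cfg_dir`:

```bash
cat /etc/containerd/certs.d/test.registry.io/hosts.toml
# server = "http://test.registry.io"
# [host."http://test.registry.io"]
# capabilities = ["pull", "resolve", "push"]
# skip_verify = true
```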
@@ -7,7 +7,8 @@ following artifacts in advance from another environment where has access to the
 * Some static files (zips and binaries)
 * OS packages (rpm/deb files)
 * Container images used by Kubespray. Exhaustive list depends on your setup
-* [Optional] Python packages used by Kubespray (only required if your OS doesn't provide all python packages/versions listed in `requirements.txt`)
+* [Optional] Python packages used by Kubespray (only required if your OS doesn't provide all python packages/versions
+  listed in `requirements.txt`)
 * [Optional] Helm chart files (only required if `helm_enabled=true`)

 Then you need to setup the following services on your offline environment:
@@ -23,7 +24,8 @@ In addition, you can find some tools for offline deployment under [contrib/offli

 ## Configure Inventory

-Once all artifacts are accessible from your internal network, **adjust** the following variables in [your inventory](/inventory/sample/group_vars/all/offline.yml) to match your environment:
+Once all artifacts are accessible from your internal network, **adjust** the following variables
+in [your inventory](/inventory/sample/group_vars/all/offline.yml) to match your environment:

 ```yaml
 # Registry overrides

@@ -49,7 +51,7 @@ runc_download_url: "{{ files_repo }}/runc.{{ image_arch }}"
 nerdctl_download_url: "{{ files_repo }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
 # Insecure registries for containerd
 containerd_insecure_registries:
-  - "{{ registry_host }}"
+  "{{ registry_addr }}":"{{ registry_host }}"

 # CentOS/Redhat/AlmaLinux/Rocky Linux
 ## Docker / Containerd
@@ -86,20 +88,31 @@ containerd_ubuntu_repo_repokey: 'YOURREPOKEY'
 For the OS specific settings, just define the one matching your OS.
 If you use the settings like the one above, you'll need to define in your inventory the following variables:

-* `registry_host`: Container image registry. If you _don't_ use the same repository path for the container images that the ones defined in [Download's role defaults](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/download/defaults/main.yml), you need to override the `*_image_repo` for these container images. If you want to make your life easier, use the same repository path, you won't have to override anything else.
-* `files_repo`: HTTP webserver or reverse proxy that is able to serve the files listed above. Path is not important, you can store them anywhere as long as it's accessible by kubespray. It's recommended to use `*_version` in the path so that you don't need to modify this setting everytime kubespray upgrades one of these components.
-* `yum_repo`/`debian_repo`/`ubuntu_repo`: OS package repository depending of your OS, should point to your internal repository. Adjust the path accordingly.
+* `registry_host`: Container image registry. If you _don't_ use the same repository path for the container images that
+  the ones defined
+  in [Download's role defaults](https://github.com/kubernetes-sigs/kubespray/blob/master/roles/download/defaults/main.yml)
+  , you need to override the `*_image_repo` for these container images. If you want to make your life easier, use the
+  same repository path, you won't have to override anything else.
+* `registry_addr`: Container image registry, but only have [domain or ip]:[port].
+* `files_repo`: HTTP webserver or reverse proxy that is able to serve the files listed above. Path is not important, you
+  can store them anywhere as long as it's accessible by kubespray. It's recommended to use `*_version` in the path so
+  that you don't need to modify this setting everytime kubespray upgrades one of these components.
+* `yum_repo`/`debian_repo`/`ubuntu_repo`: OS package repository depending of your OS, should point to your internal
+  repository. Adjust the path accordingly.

 ## Install Kubespray Python Packages

 ### Recommended way: Kubespray Container Image

-The easiest way is to use [kubespray container image](https://quay.io/kubespray/kubespray) as all the required packages are baked in the image.
+The easiest way is to use [kubespray container image](https://quay.io/kubespray/kubespray) as all the required packages
+are baked in the image.
 Just copy the container image in your private container image registry and you are all set!

 ### Manual installation

-Look at the `requirements.txt` file and check if your OS provides all packages out-of-the-box (Using the OS package manager). For those missing, you need to either use a proxy that has Internet access (typically from a DMZ) or setup a PyPi server in your network that will host these packages.
+Look at the `requirements.txt` file and check if your OS provides all packages out-of-the-box (Using the OS package
+manager). For those missing, you need to either use a proxy that has Internet access (typically from a DMZ) or setup a
+PyPi server in your network that will host these packages.

 If you're using a HTTP(S) proxy to download your python packages:
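A hedged illustration of how the new `registry_addr` variable relates to `registry_host` in the hunk above (values are hypothetical): `registry_addr` is the bare `host:port` that containerd uses as a lookup key, while `registry_host` carries the scheme.

```bash
# Hypothetical offline.yml values:
#   registry_host: "http://myregistry.example.com:5000"
#   registry_addr: "myregistry.example.com:5000"
# Stripping the scheme yields the addr form:
echo "http://myregistry.example.com:5000" | sed -E 's#^[a-z]+://##'
# -> myregistry.example.com:5000
```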
@@ -119,13 +132,15 @@ pip install -i https://pypiserver/pypi package_you_miss

 ## Run Kubespray as usual

-Once all artifacts are in place and your inventory properly set up, you can run kubespray with the regular `cluster.yaml` command:
+Once all artifacts are in place and your inventory properly set up, you can run kubespray with the
+regular `cluster.yaml` command:

 ```bash
 ansible-playbook -i inventory/my_airgap_cluster/hosts.yaml -b cluster.yml
 ```

-If you use [Kubespray Container Image](#recommended-way:-kubespray-container-image), you can mount your inventory inside the container:
+If you use [Kubespray Container Image](#recommended-way:-kubespray-container-image), you can mount your inventory inside
+the container:

 ```bash
 docker run --rm -it -v path_to_inventory/my_airgap_cluster:inventory/my_airgap_cluster myprivateregisry.com/kubespray/kubespray:v2.14.0 ansible-playbook -i inventory/my_airgap_cluster/hosts.yaml -b cluster.yml
@@ -5,7 +5,7 @@ ARG ARCH=amd64
 ARG TZ=Etc/UTC
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

-ENV VAGRANT_VERSION=2.2.19
+ENV VAGRANT_VERSION=2.3.7
 ENV VAGRANT_DEFAULT_PROVIDER=libvirt
 ENV VAGRANT_ANSIBLE_TAGS=facts
@@ -41,9 +41,9 @@ RUN KUBE_VERSION=$(sed -n 's/^kube_version: //p' roles/kubespray-defaults/defaul
     && mv kubectl /usr/local/bin/kubectl

 # Install Vagrant
-RUN wget https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}_x86_64.deb && \
-    dpkg -i vagrant_${VAGRANT_VERSION}_x86_64.deb && \
-    rm vagrant_${VAGRANT_VERSION}_x86_64.deb && \
+RUN wget https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}-1_amd64.deb && \
+    dpkg -i vagrant_${VAGRANT_VERSION}-1_amd64.deb && \
+    rm vagrant_${VAGRANT_VERSION}-1_amd64.deb && \
     vagrant plugin install vagrant-libvirt

 # Install Kubernetes collections
@@ -73,3 +73,6 @@ containerd_limit_proc_num: "infinity"
 containerd_limit_core: "infinity"
 containerd_limit_open_file_num: "infinity"
 containerd_limit_mem_lock: "infinity"
+
+# If enabled it will use config_path and disable use mirrors config
+containerd_use_config_path: false
@@ -111,28 +111,26 @@
     mode: 0640
   notify: restart containerd

-- name: containerd | Create registry directories
-  file:
-    path: "{{ containerd_cfg_dir }}/certs.d/{{ item.key }}"
-    state: directory
-    mode: 0755
-    recurse: true
-  with_items: "{{ containerd_insecure_registries }}"
-  when: containerd_insecure_registries is defined
-
-- name: containerd | Write hosts.toml file
-  blockinfile:
-    path: "{{ containerd_cfg_dir }}/certs.d/{{ item.key }}/hosts.toml"
-    owner: "root"
-    mode: 0640
-    create: true
-    block: |
-      server = "{{ item.value }}"
-      [host."{{ item.value }}"]
-      capabilities = ["pull", "resolve", "push"]
-      skip_verify = true
-  with_items: "{{ containerd_insecure_registries }}"
-  when: containerd_insecure_registries is defined
+- block:
+    - name: containerd | Create registry directories
+      file:
+        path: "{{ containerd_cfg_dir }}/certs.d/{{ item.key }}"
+        state: directory
+        mode: 0755
+        recurse: true
+      with_dict: "{{ containerd_insecure_registries }}"
+    - name: containerd | Write hosts.toml file
+      blockinfile:
+        path: "{{ containerd_cfg_dir }}/certs.d/{{ item.key }}/hosts.toml"
+        mode: 0640
+        create: true
+        block: |
+          server = "{{ item.value }}"
+          [host."{{ item.value }}"]
+          capabilities = ["pull", "resolve", "push"]
+          skip_verify = true
+      with_dict: "{{ containerd_insecure_registries }}"
+  when: containerd_use_config_path is defined and containerd_use_config_path|bool and containerd_insecure_registries is defined

 # you can sometimes end up in a state where everything is installed
 # but containerd was not started / enabled
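Assuming the sample mapping shown in docs/containerd.md above, the `with_dict` loop creates one directory (and `hosts.toml`) per dict key. A sketch of the resulting layout:

```bash
ls /etc/containerd/certs.d/
# 172.19.16.11:5000  repo:5000  test.registry.io
```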
@@ -47,9 +47,9 @@ oom_score = {{ containerd_oom_score }}
     runtime_type = "io.containerd.runsc.v1"
 {% endif %}
     [plugins."io.containerd.grpc.v1.cri".registry]
-{% if containerd_insecure_registries is defined and containerd_insecure_registries|length>0 %}
+{% if containerd_use_config_path is defined and containerd_use_config_path|bool %}
       config_path = "{{ containerd_cfg_dir }}/certs.d"
-{% endif %}
+{% else %}
       [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
 {% for registry, addr in containerd_registries.items() %}
       [plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ registry }}"]

@@ -60,8 +60,9 @@ oom_score = {{ containerd_oom_score }}
       [plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ registry }}"]
         endpoint = ["{{ ([ addr ] | flatten ) | join('","') }}"]
 {% endfor %}
+{% endif %}
 {% for addr in containerd_insecure_registries.values() | flatten | unique %}
-      [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ addr }}".tls]
+      [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ addr | urlsplit('netloc') }}".tls]
        insecure_skip_verify = true
 {% endfor %}
 {% endif %}
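The added `urlsplit('netloc')` filter matters because the dict values now carry a scheme, while containerd keys its `registry.configs` entries by bare `host[:port]`. A quick check of what the filter returns, using the equivalent Python call with one of the sample values:

```bash
python3 -c "from urllib.parse import urlsplit; print(urlsplit('http://172.19.16.11:5000').netloc)"
# -> 172.19.16.11:5000
```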
@@ -24,7 +24,7 @@ platforms:
       - kube_node
       - k8s_cluster
   - name: fedora
-    box: fedora/36-cloud-base
+    box: fedora/38-cloud-base
     cpus: 2
     memory: 2048
     groups:
@@ -114,7 +114,6 @@ flannel_version: "v0.20.2"
 flannel_cni_version: "v1.2.0"
 cni_version: "v1.2.0"
 weave_version: 2.8.1
-pod_infra_version: "3.8"

 cilium_version: "v1.12.1"
 cilium_cli_version: "v0.12.5"

@@ -132,6 +131,12 @@ skopeo_version: v1.10.0
 # Get kubernetes major version (i.e. 1.17.4 => 1.17)
 kube_major_version: "{{ kube_version | regex_replace('^v([0-9])+\\.([0-9]+)\\.[0-9]+', 'v\\1.\\2') }}"

+pod_infra_supported_version:
+  v1.25: "3.8"
+  v1.24: "3.7"
+  v1.23: "3.6"
+pod_infra_version: "{{ pod_infra_supported_version[kube_major_version] }}"
+
 etcd_supported_versions:
   v1.25: "v3.5.6"
   v1.24: "v3.5.6"

@@ -1108,7 +1113,7 @@ haproxy_image_tag: 2.6.6-alpine
 # Coredns version should be supported by corefile-migration (or at least work with)
 # bundle with kubeadm; if not 'basic' upgrade can sometimes fail

-coredns_version: "v1.9.3"
+coredns_version: "{{ 'v1.9.3' if (kube_version is version('v1.25.0','>=')) else 'v1.8.6' }}"
 coredns_image_is_namespaced: "{{ (coredns_version is version('v1.7.1','>=')) }}"

 coredns_image_repo: "{{ kube_image_repo }}{{'/coredns/coredns' if (coredns_image_is_namespaced | bool) else '/coredns' }}"
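The new `pod_infra_version` default is a table lookup keyed by `kube_major_version`, mirroring the existing `etcd_supported_versions` pattern. Sketched in shell for illustration:

```bash
kube_version="v1.25.6"; kube_major_version="${kube_version%.*}"        # -> v1.25
declare -A pod_infra_supported_version=([v1.25]="3.8" [v1.24]="3.7" [v1.23]="3.6")
echo "${pod_infra_supported_version[$kube_major_version]}"             # -> 3.8
```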
@@ -16,7 +16,7 @@ authorization:
   mode: AlwaysAllow
 {% endif %}
 {% if kubelet_enforce_node_allocatable is defined and kubelet_enforce_node_allocatable != "\"\"" %}
-{% set kubelet_enforce_node_allocatable_list = kubelet_enforce_node_allocatable.split() %}
+{% set kubelet_enforce_node_allocatable_list = kubelet_enforce_node_allocatable.split(",") %}
 enforceNodeAllocatable:
 {% for item in kubelet_enforce_node_allocatable_list %}
 - {{ item }}
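Why `split(",")` instead of the bare `split()`: the variable is a comma-separated string, so whitespace splitting would leave it as a single item. Sketched in shell with a hypothetical value:

```bash
value="pods,kube-reserved"
IFS=',' read -ra items <<< "$value"   # comma split, analogous to Jinja's split(",")
printf -- '- %s\n' "${items[@]}"      # -> "- pods" / "- kube-reserved"
```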
@@ -7,6 +7,8 @@
     value: "{{ nameserverentries }}"
     mode: '0600'
     backup: yes
+  when:
+    - nameserverentries != "127.0.0.53" or systemd_resolved_enabled.rc != 0
   notify: Preinstall | update resolvconf for networkmanager

 - name: set default dns if remove_default_searchdomains is false
@@ -358,9 +358,9 @@ containerd_use_systemd_cgroup: true
 ## example define mirror.registry.io or 172.19.16.11:5000
 ## Port number is also needed if the default HTTPS port is not used.
 # containerd_insecure_registries:
-#   - mirror.registry.io
-#   - 172.19.16.11:5000
-containerd_insecure_registries: []
+#   "mirror.registry.io":"http://mirror.registry.io"
+#   "172.19.16.11:5000":"http://172.19.16.11:5000"
+containerd_insecure_registries: {}

 # Containerd conf default dir
 containerd_storage_dir: "/var/lib/containerd"
@@ -233,10 +233,20 @@
       }

 - name: Calico | Process calico network pool
-  set_fact:
-    _calico_pool: "{{ _calico_pool_cmd.stdout | from_json | combine(_calico_pool, recursive=True) }}"
   when:
     - _calico_pool_cmd is success
+  block:
+    - name: Calico | Get current calico network pool blocksize
+      set_fact:
+        _calico_blocksize: >
+          {
+            "spec": {
+              "blockSize": {{ (_calico_pool_cmd.stdout | from_json).spec.blockSize }}
+            }
+          }
+    - name: Calico | Merge calico network pool
+      set_fact:
+        _calico_pool: "{{ _calico_pool_cmd.stdout | from_json | combine(_calico_pool, _calico_blocksize, recursive=True) }}"

 - name: Calico | Configure calico network pool
   command:

@@ -272,10 +282,20 @@
       }

 - name: Calico | Process calico ipv6 network pool
-  set_fact:
-    _calico_pool_ipv6: "{{ _calico_pool_ipv6_cmd.stdout | from_json | combine(_calico_pool_ipv6, recursive=True) }}"
   when:
     - _calico_pool_ipv6_cmd is success
+  block:
+    - name: Calico | Get current calico ipv6 network pool blocksize
+      set_fact:
+        _calico_blocksize_ipv6: >
+          {
+            "spec": {
+              "blockSize": {{ (_calico_pool_ipv6_cmd.stdout | from_json).spec.blockSize }}
+            }
+          }
+    - name: Calico | Merge calico ipv6 network pool
+      set_fact:
+        _calico_pool_ipv6: "{{ _calico_pool_ipv6_cmd.stdout | from_json | combine(_calico_pool_ipv6, _calico_blocksize_ipv6, recursive=True) }}"

 - name: Calico | Configure calico ipv6 network pool
   command:
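What the new block preserves: `blockSize` cannot be changed on an existing Calico IP pool, so the merge re-asserts the live value last. A sketch of the merge order with jq standing in for Ansible's `combine` (the pool JSON is hypothetical):

```bash
current='{"spec":{"cidr":"10.233.64.0/18","blockSize":26,"ipipMode":"Never"}}'
desired='{"spec":{"ipipMode":"Always"}}'
echo "$current" | jq --argjson d "$desired" \
  '. * $d * {"spec":{"blockSize": .spec.blockSize}}'
# -> ipipMode becomes "Always", blockSize stays 26
```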
@@ -19,69 +19,48 @@ data:
   disable-server-tls: {% if cilium_hubble_tls_generate %}false{% else %}true{% endif %}
   disable-client-tls: {% if cilium_hubble_tls_generate %}false{% else %}true{% endif %}
 ---
-# Source: cilium/templates/hubble-ui-configmap.yaml
+# Source: cilium/templates/hubble-ui/configmap.yaml
 apiVersion: v1
 kind: ConfigMap
 metadata:
-  name: hubble-ui-envoy
+  name: hubble-ui-nginx
   namespace: kube-system
 data:
-  envoy.yaml: |
-    static_resources:
-      listeners:
-        - name: listener_hubble_ui
-          address:
-            socket_address:
-              address: 0.0.0.0
-              port_value: 8081
-          filter_chains:
-            - filters:
-                - name: envoy.filters.network.http_connection_manager
-                  config:
-                    codec_type: auto
-                    stat_prefix: ingress_http
-                    route_config:
-                      name: local_route
-                      virtual_hosts:
-                        - name: local_service
-                          domains: ['*']
-                          routes:
-                            - match:
-                                prefix: '/api/'
-                              route:
-                                cluster: backend
-                                max_grpc_timeout: 0s
-                                prefix_rewrite: '/'
-                            - match:
-                                prefix: '/'
-                              route:
-                                cluster: frontend
-                          cors:
-                            allow_origin_string_match:
-                              - prefix: '*'
-                            allow_methods: GET, PUT, DELETE, POST, OPTIONS
-                            allow_headers: keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout
-                            max_age: '1728000'
-                            expose_headers: grpc-status,grpc-message
-          http_filters:
-            - name: envoy.filters.http.grpc_web
-            - name: envoy.filters.http.cors
-            - name: envoy.filters.http.router
-      clusters:
-        - name: frontend
-          connect_timeout: 0.25s
-          type: strict_dns
-          lb_policy: round_robin
-          hosts:
-            - socket_address:
-                address: 127.0.0.1
-                port_value: 8080
-        - name: backend
-          connect_timeout: 0.25s
-          type: logical_dns
-          lb_policy: round_robin
-          http2_protocol_options: {}
-          hosts:
-            - socket_address:
-                address: 127.0.0.1
-                port_value: 8090
+  nginx.conf: |
+    server {
+        listen       8081;
+{% if cilium_enable_ipv6 %}
+        listen       [::]:8081;
+{% endif %}
+        server_name  localhost;
+        root /app;
+        index index.html;
+        client_max_body_size 1G;
+
+        location / {
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+
+            # CORS
+            add_header Access-Control-Allow-Methods "GET, POST, PUT, HEAD, DELETE, OPTIONS";
+            add_header Access-Control-Allow-Origin *;
+            add_header Access-Control-Max-Age 1728000;
+            add_header Access-Control-Expose-Headers content-length,grpc-status,grpc-message;
+            add_header Access-Control-Allow-Headers range,keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout;
+            if ($request_method = OPTIONS) {
+                return 204;
+            }
+            # /CORS
+
+            location /api {
+                proxy_http_version 1.1;
+                proxy_pass_request_headers on;
+                proxy_hide_header Access-Control-Allow-Origin;
+                proxy_pass http://127.0.0.1:8090;
+            }
+
+            location / {
+                try_files $uri $uri/ /index.html;
+            }
+        }
+    }
@@ -30,12 +30,10 @@ spec:
       # the values used in past runs by inspecting the completed pod.
       args:
         - "--cilium-namespace=kube-system"
-        - "--hubble-ca-reuse-secret=true"
-        - "--hubble-ca-secret-name=hubble-ca-secret"
-        - "--hubble-ca-generate=true"
-        - "--hubble-ca-validity-duration=94608000s"
-        - "--hubble-ca-config-map-create=true"
-        - "--hubble-ca-config-map-name=hubble-ca-cert"
+        - "--ca-reuse-secret=true"
+        - "--ca-secret-name=hubble-ca-secret"
+        - "--ca-generate=true"
+        - "--ca-validity-duration=94608000s"
         - "--hubble-server-cert-generate=true"
         - "--hubble-server-cert-common-name=*.{{ cilium_cluster_name }}.hubble-grpc.cilium.io"
         - "--hubble-server-cert-validity-duration=94608000s"
@@ -90,7 +90,7 @@ spec:
             path: hubble-server-ca.crt
         name: tls
 ---
-# Source: cilium/templates/hubble-ui-deployment.yaml
+# Source: cilium/templates/hubble-ui/deployment.yaml
 kind: Deployment
 apiVersion: apps/v1
 metadata:

@@ -118,8 +118,14 @@ spec:
         image: "{{ cilium_hubble_ui_image_repo }}:{{ cilium_hubble_ui_image_tag }}"
         imagePullPolicy: {{ k8s_image_pull_policy }}
         ports:
-        - containerPort: 8080
+        - containerPort: 8081
           name: http
+        volumeMounts:
+          - name: hubble-ui-nginx-conf
+            mountPath: /etc/nginx/conf.d/default.conf
+            subPath: nginx.conf
+          - name: tmp-dir
+            mountPath: /tmp
         resources:
           {}
       - name: backend

@@ -135,27 +141,10 @@ spec:
           name: grpc
         resources:
           {}
-      - name: proxy
-        image: "{{ cilium_hubble_envoy_image_repo }}:{{ cilium_hubble_envoy_image_tag }}"
-        imagePullPolicy: {{ k8s_image_pull_policy }}
-        ports:
-        - containerPort: 8081
-          name: http
-        resources:
-          {}
-        command: ["envoy"]
-        args:
-          [
-            "-c",
-            "/etc/envoy.yaml",
-            "-l",
-            "info"
-          ]
-        volumeMounts:
-          - name: hubble-ui-envoy-yaml
-            mountPath: /etc/envoy.yaml
-            subPath: envoy.yaml
       volumes:
-      - name: hubble-ui-envoy-yaml
-        configMap:
-          name: hubble-ui-envoy
+      - configMap:
+          defaultMode: 420
+          name: hubble-ui-nginx
+        name: hubble-ui-nginx-conf
+      - emptyDir: {}
+        name: tmp-dir
@@ -26,12 +26,10 @@ spec:
       # the values used in past runs by inspecting the completed pod.
       args:
         - "--cilium-namespace=kube-system"
-        - "--hubble-ca-reuse-secret=true"
-        - "--hubble-ca-secret-name=hubble-ca-secret"
-        - "--hubble-ca-generate=true"
-        - "--hubble-ca-validity-duration=94608000s"
-        - "--hubble-ca-config-map-create=true"
-        - "--hubble-ca-config-map-name=hubble-ca-cert"
+        - "--ca-reuse-secret=true"
+        - "--ca-secret-name=hubble-ca-secret"
+        - "--ca-generate=true"
+        - "--ca-validity-duration=94608000s"
         - "--hubble-server-cert-generate=true"
         - "--hubble-server-cert-common-name=*.{{ cilium_cluster_name }}.hubble-grpc.cilium.io"
        - "--hubble-server-cert-validity-duration=94608000s"
@@ -2,18 +2,18 @@
 # A naive premoderation script to allow Gitlab CI pipeline on a specific PRs' comment
 # Exits with 0, if the pipeline is good to go
 # Exits with 1, if the user is not allowed to start pipeline
-# Exits with 2, if script is unable to get issue id from CI_BUILD_REF_NAME variable
+# Exits with 2, if script is unable to get issue id from CI_COMMIT_REF_NAME variable
 # Exits with 3, if missing the magic comment in the pipeline to start the pipeline

 CURL_ARGS="-fs --retry 4 --retry-delay 5"
 MAGIC="${MAGIC:-ci check this}"
 exit_code=0

-# Get PR number from CI_BUILD_REF_NAME
-issue=$(echo ${CI_BUILD_REF_NAME} | perl -ne '/^pr-(\d+)-\S+$/ && print $1')
+# Get PR number from CI_COMMIT_REF_NAME
+issue=$(echo ${CI_COMMIT_REF_NAME} | perl -ne '/^pr-(\d+)-\S+$/ && print $1')

 if [ "$issue" = "" ]; then
-  echo "Unable to get issue id from: $CI_BUILD_REF_NAME"
+  echo "Unable to get issue id from: $CI_COMMIT_REF_NAME"
   exit 2
 fi
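The PR-number extraction relies on the `pr-<id>-<suffix>` branch naming convention; a quick demonstration with a hypothetical branch name:

```bash
echo "pr-1234-fix-foo" | perl -ne '/^pr-(\d+)-\S+$/ && print $1'   # prints 1234
echo "master" | perl -ne '/^pr-(\d+)-\S+$/ && print $1'            # prints nothing -> the exit 2 path
```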
@@ -34,17 +34,17 @@ images:
     converted: false
     tag: "latest"

-  fedora-35:
-    filename: Fedora-Cloud-Base-35-1.2.x86_64.qcow2
-    url: https://download.fedoraproject.org/pub/fedora/linux/releases/35/Cloud/x86_64/images/Fedora-Cloud-Base-35-1.2.x86_64.qcow2
-    checksum: sha256:fe84502779b3477284a8d4c86731f642ca10dd3984d2b5eccdf82630a9ca2de6
+  fedora-37:
+    filename: Fedora-Cloud-Base-37-1.7.x86_64.qcow2
+    url: https://download.fedoraproject.org/pub/fedora/linux/releases/37/Cloud/x86_64/images/Fedora-Cloud-Base-37-1.7.x86_64.qcow2
+    checksum: sha256:b5b9bec91eee65489a5745f6ee620573b23337cbb1eb4501ce200b157a01f3a0
     converted: true
     tag: "latest"

-  fedora-36:
-    filename: Fedora-Cloud-Base-36-1.5.x86_64.qcow2
-    url: https://download.fedoraproject.org/pub/fedora/linux/releases/36/Cloud/x86_64/images/Fedora-Cloud-Base-36-1.5.x86_64.qcow2
-    checksum: sha256:ca9e514cc2f4a7a0188e7c68af60eb4e573d2e6850cc65b464697223f46b4605
+  fedora-38:
+    filename: Fedora-Cloud-Base-38-1.6.x86_64.qcow2
+    url: https://download.fedoraproject.org/pub/fedora/linux/releases/38/Cloud/x86_64/images/Fedora-Cloud-Base-38-1.6.x86_64.qcow2
+    checksum: sha256:d334670401ff3d5b4129fcc662cf64f5a6e568228af59076cc449a4945318482
     converted: true
     tag: "latest"
@@ -57,8 +57,8 @@ images:

   centos-7:
     filename: CentOS-7-x86_64-GenericCloud-2009.qcow2
-    url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-2009.qcow2
-    checksum: sha256:e38bab0475cc6d004d2e17015969c659e5a308111851b0e2715e84646035bdd3
+    url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-2211.qcow2
+    checksum: sha256:284aab2b23d91318f169ff464bce4d53404a15a0618ceb34562838c59af4adea
     converted: true
     tag: "latest"
@@ -3,7 +3,7 @@
 ARG KUBESPRAY_VERSION
 FROM quay.io/kubespray/kubespray:${KUBESPRAY_VERSION}

-ENV VAGRANT_VERSION=2.2.19
+ENV VAGRANT_VERSION=2.3.7
 ENV VAGRANT_DEFAULT_PROVIDER=libvirt
 ENV VAGRANT_ANSIBLE_TAGS=facts
@@ -31,8 +31,8 @@ cloud_init:
   debian-9: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1"
   debian-10: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1"
   debian-11: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1"
-  fedora-35: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IGZlZG9yYQp1c2VyczoKIC0gbmFtZToga3ViZXNwcmF5CiAgIGdyb3Vwczogd2hlZWwKICAgc3VkbzogJ0FMTD0oQUxMKSBOT1BBU1NXRDpBTEwnCiAgIHNoZWxsOiAvYmluL2Jhc2gKICAgbG9ja19wYXNzd2Q6IEZhbHNlCiAgIGhvbWU6IC9ob21lL2t1YmVzcHJheQogICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgIC0gc3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDYW5UaS9lS3gwK3RIWUpBZURocStzRlMyT2JVUDEvSTY5ZjdpVjNVdGtLbFQyMEpmVzFmNkZlWHQvMDRWZjI3V1FxK05xczZ2R0JxRDlRWFNZdWYrdDAvczdFUExqVGVpOW1lMW1wcXIrdVRlK0tEdFRQMzlwZkQzL2VWQ2FlQjcyNkdQMkZrYUQwRnpwbUViNjZPM05xaHhPUTk2R3gvOVhUdXcvSzNsbGo0T1ZENkdyalIzQjdjNFh0RUJzWmNacHBNSi9vSDFtR3lHWGRoMzFtV1FTcUFSTy9QOFU4R3d0MCtIR3BVd2gvaGR5M3QrU1lvVEIyR3dWYjB6b3lWd3RWdmZEUXpzbThmcTNhdjRLdmV6OGtZdU5ESnYwNXg0bHZVWmdSMTVaRFJYc0FuZGhReXFvWGRDTEFlMCtlYUtYcTlCa1d4S0ZiOWhQZTBBVWpqYTU="
-  fedora-36: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IGZlZG9yYQp1c2VyczoKIC0gbmFtZToga3ViZXNwcmF5CiAgIGdyb3Vwczogd2hlZWwKICAgc3VkbzogJ0FMTD0oQUxMKSBOT1BBU1NXRDpBTEwnCiAgIHNoZWxsOiAvYmluL2Jhc2gKICAgbG9ja19wYXNzd2Q6IEZhbHNlCiAgIGhvbWU6IC9ob21lL2t1YmVzcHJheQogICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgIC0gc3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDYW5UaS9lS3gwK3RIWUpBZURocStzRlMyT2JVUDEvSTY5ZjdpVjNVdGtLbFQyMEpmVzFmNkZlWHQvMDRWZjI3V1FxK05xczZ2R0JxRDlRWFNZdWYrdDAvczdFUExqVGVpOW1lMW1wcXIrdVRlK0tEdFRQMzlwZkQzL2VWQ2FlQjcyNkdQMkZrYUQwRnpwbUViNjZPM05xaHhPUTk2R3gvOVhUdXcvSzNsbGo0T1ZENkdyalIzQjdjNFh0RUJzWmNacHBNSi9vSDFtR3lHWGRoMzFtV1FTcUFSTy9QOFU4R3d0MCtIR3BVd2gvaGR5M3QrU1lvVEIyR3dWYjB6b3lWd3RWdmZEUXpzbThmcTNhdjRLdmV6OGtZdU5ESnYwNXg0bHZVWmdSMTVaRFJYc0FuZGhReXFvWGRDTEFlMCtlYUtYcTlCa1d4S0ZiOWhQZTBBVWpqYTU="
+  fedora-37: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IGZlZG9yYQp1c2VyczoKIC0gbmFtZToga3ViZXNwcmF5CiAgIGdyb3Vwczogd2hlZWwKICAgc3VkbzogJ0FMTD0oQUxMKSBOT1BBU1NXRDpBTEwnCiAgIHNoZWxsOiAvYmluL2Jhc2gKICAgbG9ja19wYXNzd2Q6IEZhbHNlCiAgIGhvbWU6IC9ob21lL2t1YmVzcHJheQogICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgIC0gc3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDYW5UaS9lS3gwK3RIWUpBZURocStzRlMyT2JVUDEvSTY5ZjdpVjNVdGtLbFQyMEpmVzFmNkZlWHQvMDRWZjI3V1FxK05xczZ2R0JxRDlRWFNZdWYrdDAvczdFUExqVGVpOW1lMW1wcXIrdVRlK0tEdFRQMzlwZkQzL2VWQ2FlQjcyNkdQMkZrYUQwRnpwbUViNjZPM05xaHhPUTk2R3gvOVhUdXcvSzNsbGo0T1ZENkdyalIzQjdjNFh0RUJzWmNacHBNSi9vSDFtR3lHWGRoMzFtV1FTcUFSTy9QOFU4R3d0MCtIR3BVd2gvaGR5M3QrU1lvVEIyR3dWYjB6b3lWd3RWdmZEUXpzbThmcTNhdjRLdmV6OGtZdU5ESnYwNXg0bHZVWmdSMTVaRFJYc0FuZGhReXFvWGRDTEFlMCtlYUtYcTlCa1d4S0ZiOWhQZTBBVWpqYTU="
+  fedora-38: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IGZlZG9yYQp1c2VyczoKIC0gbmFtZToga3ViZXNwcmF5CiAgIGdyb3Vwczogd2hlZWwKICAgc3VkbzogJ0FMTD0oQUxMKSBOT1BBU1NXRDpBTEwnCiAgIHNoZWxsOiAvYmluL2Jhc2gKICAgbG9ja19wYXNzd2Q6IEZhbHNlCiAgIGhvbWU6IC9ob21lL2t1YmVzcHJheQogICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgIC0gc3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCQVFDYW5UaS9lS3gwK3RIWUpBZURocStzRlMyT2JVUDEvSTY5ZjdpVjNVdGtLbFQyMEpmVzFmNkZlWHQvMDRWZjI3V1FxK05xczZ2R0JxRDlRWFNZdWYrdDAvczdFUExqVGVpOW1lMW1wcXIrdVRlK0tEdFRQMzlwZkQzL2VWQ2FlQjcyNkdQMkZrYUQwRnpwbUViNjZPM05xaHhPUTk2R3gvOVhUdXcvSzNsbGo0T1ZENkdyalIzQjdjNFh0RUJzWmNacHBNSi9vSDFtR3lHWGRoMzFtV1FTcUFSTy9QOFU4R3d0MCtIR3BVd2gvaGR5M3QrU1lvVEIyR3dWYjB6b3lWd3RWdmZEUXpzbThmcTNhdjRLdmV6OGtZdU5ESnYwNXg0bHZVWmdSMTVaRFJYc0FuZGhReXFvWGRDTEFlMCtlYUtYcTlCa1d4S0ZiOWhQZTBBVWpqYTU="
   opensuse-leap-15: "I2Nsb3VkLWNvbmZpZwogdXNlcnM6CiAgLSBuYW1lOiBrdWJlc3ByYXkKICAgIHN1ZG86IEFMTD0oQUxMKSBOT1BBU1NXRDpBTEwKICAgIHNoZWxsOiAvYmluL2Jhc2gKICAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICAgaG9tZTogL2hvbWUva3ViZXNwcmF5CiAgICBzc2hfYXV0aG9yaXplZF9rZXlzOgogICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1"
   rhel-server-7: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo="
   amazon-linux-2: "I2Nsb3VkLWNvbmZpZwpzeXN0ZW1faW5mbzoKICBkaXN0cm86IHJoZWwKdXNlcnM6CiAtIG5hbWU6IGt1YmVzcHJheQogICBncm91cHM6IHdoZWVsCiAgIHN1ZG86ICdBTEw9KEFMTCkgTk9QQVNTV0Q6QUxMJwogICBzaGVsbDogL2Jpbi9iYXNoCiAgIGxvY2tfcGFzc3dkOiBGYWxzZQogICBob21lOiAvaG9tZS9rdWJlc3ByYXkKICAgc3NoX2F1dGhvcml6ZWRfa2V5czoKICAgICAtIHNzaC1yc2EgQUFBQUIzTnphQzF5YzJFQUFBQURBUUFCQUFBQkFRQ2FuVGkvZUt4MCt0SFlKQWVEaHErc0ZTMk9iVVAxL0k2OWY3aVYzVXRrS2xUMjBKZlcxZjZGZVh0LzA0VmYyN1dRcStOcXM2dkdCcUQ5UVhTWXVmK3QwL3M3RVBMalRlaTltZTFtcHFyK3VUZStLRHRUUDM5cGZEMy9lVkNhZUI3MjZHUDJGa2FEMEZ6cG1FYjY2TzNOcWh4T1E5Nkd4LzlYVHV3L0szbGxqNE9WRDZHcmpSM0I3YzRYdEVCc1pjWnBwTUovb0gxbUd5R1hkaDMxbVdRU3FBUk8vUDhVOEd3dDArSEdwVXdoL2hkeTN0K1NZb1RCMkd3VmIwem95Vnd0VnZmRFF6c204ZnEzYXY0S3ZlejhrWXVOREp2MDV4NGx2VVpnUjE1WkRSWHNBbmRoUXlxb1hkQ0xBZTArZWFLWHE5QmtXeEtGYjloUGUwQVVqamE1Cgo="
@@ -1,6 +1,6 @@
 ---
 # Instance settings
-cloud_image: fedora-35
+cloud_image: fedora-37
 mode: default

 # Kubespray settings

@@ -1,6 +1,6 @@
 ---
 # Instance settings
-cloud_image: fedora-35
+cloud_image: fedora-37
 mode: default

 # Kubespray settings

@@ -1,6 +1,6 @@
 ---
 # Instance settings
-cloud_image: fedora-35
+cloud_image: fedora-37
 mode: default

 # Kubespray settings

@@ -1,6 +1,6 @@
 ---
 # Instance settings
-cloud_image: fedora-36
+cloud_image: fedora-38
 mode: default

 # Kubespray settings

@@ -1,6 +1,6 @@
 ---
 # Instance settings
-cloud_image: fedora-36
+cloud_image: fedora-38
 mode: default

 # Kubespray settings

@@ -1,6 +1,6 @@
 ---
 # Instance settings
-cloud_image: fedora-36
+cloud_image: fedora-38
 mode: default

 # Kubespray settings

@@ -1,6 +1,6 @@
 $num_instances = 2
 $vm_memory ||= 2048
-$os = "fedora35"
+$os = "fedora37"

 $kube_master_instances = 1
 $etcd_instances = 1

@@ -1,6 +1,6 @@
 ---
 # Instance settings
-cloud_image: fedora-35
+cloud_image: fedora-37
 mode: default

 # Kubespray settings
@@ -1,4 +1,5 @@
+-r ../requirements-2.11.txt
 pyyaml==5.3.1
 yamllint==1.19.0
 apache-libcloud==2.2.1
 tox==3.11.1

@@ -1,4 +1,5 @@
+-r ../requirements-2.12.txt
 pyyaml==5.3.1
 yamllint==1.19.0
 apache-libcloud==2.2.1
 tox==3.11.1
@@ -57,9 +57,9 @@ fi
 # Check out latest tag if testing upgrade
 test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout "$KUBESPRAY_VERSION"
 # Checkout the CI vars file so it is available
-test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
-test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" ${CI_TEST_REGISTRY_MIRROR}
-test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" ${CI_TEST_SETTING}
+test "${UPGRADE_TEST}" != "false" && git checkout "${CI_COMMIT_SHA}" tests/files/${CI_JOB_NAME}.yml
+test "${UPGRADE_TEST}" != "false" && git checkout "${CI_COMMIT_SHA}" ${CI_TEST_REGISTRY_MIRROR}
+test "${UPGRADE_TEST}" != "false" && git checkout "${CI_COMMIT_SHA}" ${CI_TEST_SETTING}

 # Create cluster
 ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" cluster.yml

@@ -68,7 +68,7 @@ ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGIS
 if [ "${UPGRADE_TEST}" != "false" ]; then
   test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml"
   test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml"
-  git checkout "${CI_BUILD_REF}"
+  git checkout "${CI_COMMIT_SHA}"
   ansible-playbook ${ANSIBLE_LOG_LEVEL} -e @${CI_TEST_SETTING} -e @${CI_TEST_REGISTRY_MIRROR} -e @${CI_TEST_VARS} -e local_release_dir=${PWD}/downloads --limit "all:!fake_hosts" $PLAYBOOK
 fi
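These renames track GitLab's move from the deprecated `CI_BUILD_*` predefined variables to `CI_COMMIT_*`/`CI_JOB_*`. If a script had to run on both old and new runners, a fallback like this (a sketch, not part of the diff) would keep it working:

```bash
: "${CI_COMMIT_SHA:=${CI_BUILD_REF:-}}"              # fall back to the legacy name
: "${CI_COMMIT_REF_NAME:=${CI_BUILD_REF_NAME:-}}"
```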
@@ -1,6 +1,6 @@
 #!/bin/bash
 set -euxo pipefail

-curl -sL "https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}_x86_64.deb" -o "/tmp/vagrant_${VAGRANT_VERSION}_x86_64.deb"
-dpkg -i "/tmp/vagrant_${VAGRANT_VERSION}_x86_64.deb"
+curl -sL "https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}-1_amd64.deb" -o "/tmp/vagrant_${VAGRANT_VERSION}-1_amd64.deb"
+dpkg -i "/tmp/vagrant_${VAGRANT_VERSION}-1_amd64.deb"
 vagrant validate --ignore-provider