Merge branch 'feat-additions-and-idempotent' into 'development'

feat: some additions and ensure idempotent

See merge request nofusscomputing/projects/ansible/kubernetes!35
2024-03-13 08:50:45 +00:00
29 changed files with 8025 additions and 1006 deletions

CONTRIBUTING.md

@ -0,0 +1,24 @@
# Contribution Guide
## Updating components with a remote source
Some components within this role are sourced from a remote location. To update them to the latest release, use the following commands.
> Before committing the updated remote files to the repository, ensure that no previously added features have been removed.
### Kubevirt
``` bash
export KUBEVIRT_RELEASE='<kubevirt release, e.g. v1.2.0>'
# From within the templates directory
wget https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_RELEASE}/kubevirt-operator.yaml -O kubevirt-operator.yaml.j2
# From within the templates directory
wget https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_RELEASE}/kubevirt-cr.yaml -O kubevirt-cr.yaml.j2
```


@ -6,10 +6,10 @@
calico_image_tag: v3.25.0 # Deprecated
# EoF Deprecated
# SoF New Variables
nfc_kubernetes_calico_version: v3.27.0
nfc_kubernetes_tigera_operator_registry: quay.io
nfc_kubernetes_tigera_operator_image: tigera/operator
nfc_kubernetes_tigera_operator_tag: v1.32.3 # Calico v3.27.0
nfc_role_kubernetes_calico_version: v3.27.0
# nfc_kubernetes_tigera_operator_registry: quay.io
# nfc_kubernetes_tigera_operator_image: tigera/operator
# nfc_kubernetes_tigera_operator_tag: v1.32.3 # Calico v3.27.0
# EoF New Variables, EoF Deprecated
@ -17,26 +17,54 @@ nfc_kubernetes_enable_metallb: false
nfc_kubernetes_enable_servicelb: false
nfc_role_kubernetes_container_images:
kubevirt_operator:
name: Kubevirt Operator
registry: quay.io
image: kubevirt/virt-operator
tag: v1.2.0
tigera_operator:
name: Tigera Operator
registry: quay.io
image: tigera/operator
tag: v1.32.3 # Calico v3.27.0
nfc_role_kubernetes_cluster_domain: cluster.local
nfc_role_kubernetes_etcd_enabled: false
nfc_role_kubernetes_install_olm: false
nfc_role_kubernetes_install_helm: true
nfc_role_kubernetes_install_kubevirt: false
nfc_role_kubernetes_kubevirt_operator_replicas: 1
nfc_role_kubernetes_oidc_enabled: false
nfc_role_kubernetes_pod_subnet: 172.16.248.0/21
nfc_role_kubernetes_service_subnet: 172.16.244.0/22
nfc_role_kubernetes_prime: true
nfc_role_kubernetes_master: true
nfc_role_kubernetes_worker: false
############################################################################################################
#
# Old Vars requiring refactoring
#
# ############################################################################################################
# KubernetesPodSubnet: 10.85.0.0/16
# KubernetesServiceSubnet: 10.86.0.0/16
Kubernetes_Prime: false # Optional, Boolean. Is the current host the Prime master?
Kubernetes_Master: false # Optional, Boolean. Is the current host a master host?
ContainerDioVersion: 1.6.20-1
KubernetesVersion: '1.26.2' # must match the repository release version
kubernetes_version_olm: '0.26.0'
KubernetesVersion: '1.26.12' # must match the repository release version
kubernetes_version_olm: '0.27.0'
KubernetesVersion_k8s_prefix: '-00'
KubernetesVersion_k3s_prefix: '+k3s1'
kubernetes_private_container_registry: [] # Optional, Array. if none use `[]`
@ -46,16 +74,12 @@ kubernetes_etcd_snapshot_retention: 5
# host_external_ip: '' # Optional, String. External IP Address for host.
kubernetes_type: k8s # Mandatory, String. choice K8s | k3s
kube_apiserver_arg_audit_log_maxage: 2
kubelet_arg_system_reserved_cpu: 450m
kubelet_arg_system_reserved_memory: 512Mi
kubelet_arg_system_reserved_storage: 8Gi
# kubernetes_olm_install: true # optional, boolean. default=true
nfc_kubernetes:
enable_firewall: true # Optional, bool enable firewall rules from role 'nfc_firewall'
@ -76,7 +100,7 @@ k3s:
kind: Policy
rules:
- level: Request
when: "{{ Kubernetes_Master | default(false) }}"
when: "{{ nfc_role_kubernetes_master }}"
- name: 90-kubelet.conf
path: /etc/sysctl.d
@ -108,7 +132,7 @@ k3s:
# usernames: []
# runtimeClasses: []
# namespaces: [kube-system]
when: "{{ kubernetes_config.cluster.prime.name == inventory_hostname }}"
when: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname }}"
#############################################################################################
@ -123,7 +147,7 @@ k3s:
# - 'my.dnshostname.com'
# - '2001:4860:4860::8888'
# - '192.168.1.1'
# Name: earth # Mandatory, String. Cluster Name
# domain_name: earth # Mandatory, String. Cluster Domain Name
# group_name: # Mandatory, String. name of the ansible inventory group containing all cluster hosts
# prime:
# name: k3s-prod # Mandatory, String. Ansible inventory_host that will
@ -132,10 +156,21 @@ k3s:
# encrypt: true # Optional, Boolean. default `false`. Install wireguard for inter-node encryption
# podSubnet: 172.16.70.0/24 # Mandatory, String. CIDR
# ServiceSubnet: 172.16.72.0/24 # Mandatory, String. CIDR
# # Mandatory, String. Token to join nodes to the cluster
# node_token: !vault |
# $ANSIBLE_VAULT;1.2;AES256;kubernetes/cluster/production
# {rest_of encrypted key}
#
#
# helm:
# enabled: true # Optional, Boolean. default=false. Install Helm Binary
#
#
# kube_virt:
# enabled: false # Optional, Boolean. default=false. Install KubeVirt
#
# nodes: [] # Optional, List of String. default=inventory_hostname. List of nodes to install kubevirt on.
#
# operator:
# replicas: 2 # Optional, Integer. How many virt_operators to deploy.
#
#
# oidc: # Used to configure Kubernetes with OIDC Authentication.
# enabled: true # Mandatory, boolean. Speaks for itself.
# issuer_url: https://domainname.com/realms/realm-name # Mandatory, String. URL of OIDC Provider
@ -144,14 +179,14 @@ k3s:
# username_prefix: oidc # Optional, String. What to prefix to username
# groups_claim: roles # Mandatory, String. Claim name containing groups
# groups_prefix: '' # Optional, String. string to prepend to groups
# hosts:
# my-host-name:
# labels:
# mylabel: myvalue
# taints:
# - effect: NoSchedule
# key: taintkey
# value: taintvalue
#
# hosts:
#
# my-host-name:
# labels:
# mylabel: myvalue
#
# taints:
# - effect: NoSchedule
# key: taintkey
# value: taintvalue


@ -25,7 +25,7 @@ There are many ways to layout your inventory within Ansible. To take full advant
!!! info Info
The nfc_kubernetes role uses this field for any configuration that requires a hostname. You are strongly encouraged to use a DNS name, and that DNS name must be resolvable by every host that accesses the host in question. Using a DNS hostname is of paramount importance for hosts whose addresses are assigned dynamically via DHCP.
- variable `Kubernetes_Master` _boolean_ set for all hosts that are master nodes.
- variable `nfc_role_kubernetes_master` _boolean_ set for all hosts that are master nodes.
- hosts that require Kubernetes API access are added to variable `kubernetes_config.cluster.access`. A minimal inventory sketch follows this list.
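
A minimal inventory sketch illustrating the above follows. Host and group names are hypothetical; only the variable names come from this role's documentation.

``` yaml
all:
  children:
    kubernetes_cluster_prod:               # hypothetical cluster group; referenced
      hosts:                               # via kubernetes_config.cluster.group_name
        k3s-prod:                          # prime master node
          nfc_role_kubernetes_master: true
        k3s-prod-worker-01:                # worker node
          nfc_role_kubernetes_master: false
# Hosts requiring API access would additionally be listed under
# kubernetes_config.cluster.access (structure not shown here).
```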


@ -6,20 +6,37 @@ template: project.html
about: https://gitlab.com/nofusscomputing/projects/ansible/roles/kubernetes
---
This Ansible role is designed to deploy a K3s Kubernetes cluster. After adding your configuration, the cluster will deploy and have a configured CNI (calico) and be in a state ready to use. This role can be used with our [our playbooks](../../playbooks/index.md) or comes included, along with the playbook within our [Ansible Execution Environment](../../execution_environment/index.md).
This Ansible role is designed to deploy a K3s Kubernetes cluster. Without any cluster configuration, this role installs K3s as a single-node cluster. To deploy a multi-node cluster, add your configuration and K3s will be installed on all nodes. On completion, you will have a fully configured cluster in a state ready to use. This role can be used with [our playbooks](../../playbooks/index.md), or comes included, along with the playbook, within our [Ansible Execution Environment](../../execution_environment/index.md).
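As a rough illustration, a minimal play applying this role might look like the following; the inventory group name is an assumption, while `become` and fact gathering are requirements listed in the table below.

``` yaml
- name: Deploy K3s Kubernetes cluster
  hosts: kubernetes          # hypothetical inventory group
  become: true               # required by the role
  gather_facts: true         # required by the role
  roles:
    - nfc_kubernetes         # role name as referenced in these docs
```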
## Role Details
| Item| Value | Description |
|:---|:---:|:---|
| Dependent Roles | _None_ | |
| Optional Roles | _nfc_firewall_ | Used to setup the firewall for kubernetes. |
| Idempotent | _Yes_ | |
| Stats Available | _Not Yet_ | |
| Tags | _Nil_ | |
| Requirements | _Gather Facts_ | |
| | _become_ | |
## Features
- CNI Setup
- CNI Setup, Calico, including the `calicoctl` kubectl plugin
> `kubectl calico ....` instead of `calicoctl ....`
- Configurable:
- Container Registries
- etcd snapshot cron schedule
- etcd deployment
- etcd snapshot cron schedule
- etcd snapshot retention
- etcd snapshot retention
- Cluster Domain
@ -47,6 +64,12 @@ This Ansible role is designed to deploy a K3s Kubernetes cluster. After adding y
- Install MetalLB
- Install KubeVirt including `virtctl` plugin
> `kubectl virt ....` instead of `virtctl ....`
- Install the Helm Binary
## Role Workflow
@ -66,6 +89,8 @@ For a more probable than not success this role first installs/configures prime m
1. Configure Kubernetes
1. Install Kubevirt
If the playbook is set up as per [our recommendation](ansible.md), steps 2 onward are first run on master nodes, then on worker nodes.
!!! tip
@ -79,6 +104,7 @@ If the playbook is setup as per [our recommendation](ansible.md) step 2 onwards
```
_See the default variables below for an explanation of each variable where it is not self-evident._
## Default Variables


@ -11,6 +11,12 @@ This document details any changes that have occured that may impact users of thi
## Changes with an impact
- _**13 Mar 2024**_ Container images are now a dictionary. This role has two images: `kubevirt_operator` and `tigera_operator`.
- All images are stored in dictionary `nfc_role_kubernetes_container_images`, with each image using its own dictionary with mandatory keys `registry`, `image` and `tag`. This change caters for those who store their images within their inventory as a dict of dicts. For instance, to use your own inventory image, declare variable `nfc_role_kubernetes_container_images.kubevirt_operator: my_images.my_kubevirt_dict` (see the sketch after this list).
- A lot of variables have been updated. To view what has changed, please see `defaults/main.yaml` in [MR !35](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/merge_requests/35)
- _**31 Jan 2024**_ Calico CNI deployment has been migrated to use the calico operator.
- All new cluster installations will be deployed with the operator
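
As a rough sketch of the new structure (values mirror the role defaults in this MR; `my_images.my_kubevirt_dict` is the hypothetical user-side dictionary from the example above):

``` yaml
nfc_role_kubernetes_container_images:
  kubevirt_operator:            # or substitute your own inventory dictionary,
    name: Kubevirt Operator     # e.g. "{{ my_images.my_kubevirt_dict }}"
    registry: quay.io
    image: kubevirt/virt-operator
    tag: v1.2.0
  tigera_operator:
    name: Tigera Operator
    registry: quay.io
    image: tigera/operator
    tag: v1.32.3                # Calico v3.27.0
```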


@ -1,303 +0,0 @@
---
- name: "{{ role_name }} Install Software"
include_role:
name: nfc_common
vars:
common_gather_facts: false
aptSigningKeys:
- name: docker
url: https://download.docker.com/linux/debian/gpg
save_directory: /usr/share/keyrings
file_extension: asc
- name: kubernetes
url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
save_directory: /usr/share/keyrings
file_extension: asc
aptRepositories:
- name: docker
repo: deb [arch={{ dynamic_processor_architecture }} signed-by=/usr/share/keyrings/docker.asc] http://download.docker.com/linux/{{ ansible_os_family | lower }} {{ ansible_lsb.codename | lower }} stable
- name: kubernetes
repo: deb [signed-by=/usr/share/keyrings/kubernetes.asc] http://apt.kubernetes.io/ kubernetes-xenial main
aptInstall:
- name: gnupg2
- name: apt-transport-https
- name: software-properties-common
- name: ca-certificates
- name: iptables
- name: python3-pip
- name: python3-virtualenv
- name: containerd.io
version: "{{ ContainerDioVersion }}"
- name: kubectl
version: "{{ KubernetesVersion }}"
- name: kubelet
version: "{{ KubernetesVersion }}"
- name: kubeadm
version: "{{ KubernetesVersion }}"
tags:
- install
# containerd.io=1.6.22-1 kubectl=1.26.9-00 kubelet=1.26.9-00 kubeadm=1.26.9-00
- name: Remove swapfile from /etc/fstab
mount:
name: "{{ item }}"
fstype: swap
state: absent
with_items:
- swap
- none
when:
- ansible_os_family == 'Debian' # ansible_lsb.codename = bullseye, ansible_lsb.major_release = 11
tags:
- install
- name: Disable swap
command: swapoff -a
changed_when: true == false
when:
#- ansible_swaptotal_mb > 0
- ansible_os_family == 'Debian'
tags:
- install
- name: Check an armbian os system
stat:
path: /etc/default/armbian-zram-config
register: armbian_stat_result
- name: Armbian Disable Swap
ansible.builtin.shell:
cmd: |
sed -i 's/\# SWAP=false/SWAP=false/g' /etc/default/armbian-zram-config;
sed -i 's/ENABLED=true/ENABLED=false/g' /etc/default/armbian-zram-config;
args:
executable: bash
changed_when: false
# failed_when: false
#notify: RebootHost # doesnt need to reboot as swapoff -a covers the deployment
when: armbian_stat_result.stat.exists
- name: Add the overlay module
community.general.modprobe:
name: overlay
state: present
when:
- ansible_os_family == 'Debian'
tags:
- install
- name: Add the br_netfilter module
community.general.modprobe:
name: br_netfilter
state: present
when:
- ansible_os_family == 'Debian'
tags:
- install
- name: check if containerd installed
ansible.builtin.shell:
cmd: which containerd
failed_when: false
changed_when: false
register: containerd_installed
- name: "Containerd.io Started?"
service:
name: containerd
state: started
tags:
- configure
- install
when: >
ansible_os_family == 'Debian'
and
containerd_installed.rc | default(1) | int == 0
- name: containerd load modules config
template:
src: "etc_module_containerd.conf"
dest: /etc/modules-load.d/containerd.conf
owner: root
mode: 0700
notify: "restart ContainerD"
when:
- ansible_os_family == 'Debian'
tags:
- install
- name: Create containerD host directories.
become_method: sudo
become: yes
file:
path: /etc/containerd/certs.d/{{ item.name }}
state: directory
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: 0700
with_items: "{{ containerd.repositories }}"
tags:
- install
- containerRegistry
- name: containerD registry host
template:
src: "containerd-registry-hosts.toml.j2"
dest: /etc/containerd/certs.d/{{ item.name }}/hosts.toml
owner: root
mode: 0700
notify: "restart ContainerD"
with_items: "{{ containerd.repositories }}"
when:
- ansible_os_family == 'Debian'
tags:
- install
- containerRegistry
- name: containerD default config
template:
src: "etc_containerd_containerd.toml"
dest: /etc/containerd/config.toml
owner: root
mode: 0700
notify: "restart ContainerD"
register: containerd_config
when:
- ansible_os_family == 'Debian'
tags:
- install
- containerRegistry
- name: Install required python modules
ansible.builtin.pip:
name: kubernetes
state: forcereinstall
#virtualenv: /tmp/venv_ansible
when: inventory_hostname != 'op1'
tags:
- install
- name: sysctl net.bridge.bridge-nf-call-ip6tables
sysctl:
name: net.bridge.bridge-nf-call-ip6tables
value: '1'
sysctl_set: yes
state: present
reload: yes
when:
- ansible_os_family == 'Debian'
tags:
- install
- name: sysctl net.bridge.bridge-nf-call-iptables
sysctl:
name: net.bridge.bridge-nf-call-iptables
value: '1'
sysctl_set: yes
state: present
reload: yes
when:
- ansible_os_family == 'Debian'
tags:
- install
- name: sysctl net.ipv4.ip_forward
sysctl:
name: net.ipv4.ip_forward
value: '1'
sysctl_set: yes
state: present
reload: yes
when:
- ansible_os_family == 'Debian'
tags:
- install
# - name: Check if kubernetes has been Initialized
# stat:
# path: /etc/kubernetes/admin.conf
# register: KubernetesInit
# when:
# - kubernetes_config.cluster.prime.name == inventory_hostname
- name: check if iptables is installed
ansible.builtin.shell: |-
dpkg -s iptables &> /dev/null
changed_when: true == false
register: iptables_installed
when:
- ansible_os_family == 'Debian'
tags:
- install
- iptables
- firewall
- name: Add kubernetes Firewall Rules - '/etc/iptables-kubernetes.rules'
template:
src: iptables-kubernetes.rules.j2
dest: "/etc/iptables-kubernetes.rules"
owner: root
mode: 0700
force: yes
notify: "Apply Firewall Rules"
when:
- ansible_os_family == 'Debian'
- iptables_installed.rc == 0
tags:
- install
- iptables
- firewall
- name: File - '/etc/network/if-pre-up.d/firewall-kubernetes'
template:
src: firewall-kubernetes.j2
dest: "/etc/network/if-pre-up.d/firewall-kubernetes"
owner: root
mode: 0700
force: yes
when:
- ansible_os_family == 'Debian'
- iptables_installed.rc == 0
tags:
- install
- iptables
- firewall
- name: Create local workdir
file:
path: "{{ item }}"
state: directory
mode: 0700
delegate_to: localhost
connection: local
run_once: true
changed_when: true == false
with_items:
- /tmp/ansible/
tags:
- always

tasks/helm/main.yaml

@ -0,0 +1,27 @@
---
- name: Fetch Helm APT Key
ansible.builtin.get_url:
url: https://baltocdn.com/helm/signing.asc
dest: /usr/share/keyrings/helm.asc
mode: '740'
- name: Add Helm Repository
ansible.builtin.apt_repository:
repo: >-
deb [arch={%- if ansible_architecture == 'aarch64' -%}
arm64
{%- else -%}
amd64
{%- endif %} signed-by=/usr/share/keyrings/helm.asc] http://baltocdn.com/helm/stable/{{
ansible_os_family | lower }}/ all main
state: present
filename: helm
- name: Install Helm
ansible.builtin.apt:
package:
- helm
state: present


@ -1,29 +0,0 @@
---
# kubernetes_installed
- name: K3s Install
ansible.builtin.include_tasks:
file: k3s/install.yaml
apply:
tags:
- always
when: >
install_kubernetes | default(true) | bool
and
not kubernetes_installed | default(false) | bool
tags:
- always
- name: K3s Configure
ansible.builtin.include_tasks:
file: k3s/configure.yaml
apply:
tags:
- always
when: >
install_kubernetes | default(true) | bool
and
kubernetes_installed | default(false) | bool
tags:
- always


@ -12,6 +12,12 @@
when: item.when | default(false) | bool
- name: Check if FW dir exists
ansible.builtin.stat:
name: /etc/iptables.rules.d
register: firewall_rules_dir_metadata
- name: Copy Templates
ansible.builtin.template:
src: "{{ item.src }}"
@ -28,11 +34,12 @@
- src: kubernetes-manifest-rbac.yaml.j2
dest: /var/lib/rancher/k3s/server/manifests/rbac-authorization-common.yaml
when: "{{ kubernetes_config.cluster.prime.name == inventory_hostname }}"
when: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname }}"
- src: iptables-kubernetes.rules.j2
dest: "/etc/iptables.rules.d/iptables-kubernetes.rules"
notify: firewall_reloader
when: "{{ firewall_rules_dir_metadata.stat.exists }}"
- name: Add Kubernetes Node Labels


@ -3,14 +3,12 @@
- name: Check for calico deployment manifest
ansible.builtin.stat:
name: /var/lib/rancher/k3s/server/manifests/calico.yaml
become: true
register: file_calico_yaml_metadata
- name: Check for calico Operator deployment manifest
ansible.builtin.stat:
name: /var/lib/rancher/k3s/ansible/deployment-manifest-calico_operator.yaml
become: true
register: file_calico_operator_yaml_metadata
@ -106,6 +104,8 @@
value: '524288'
- name: fs.inotify.max_user_instances
value: '512'
- name: net.ipv6.conf.all.disable_ipv6
value: '1'
when:
- ansible_os_family == 'Debian'
@ -113,7 +113,6 @@
- name: Check for Network Manager Directory
ansible.builtin.stat:
name: /etc/NetworkManager/conf.d
become: true
register: directory_network_manager_metadata
@ -133,7 +132,6 @@
mode: '770'
owner: root
group: root
become: true
diff: true
when: directory_network_manager_metadata.stat.exists
@ -146,11 +144,20 @@
changed_when: false
failed_when: false
register: k3s_installed
when: >
nfc_role_kubernetes_master | default(false) | bool
- name: Check Machine Architecture
ansible.builtin.set_fact:
nfc_kubernetes_install_architectures: "{{ nfc_kubernetes_install_architectures | default({}) | combine({ansible_architecture: ''}) }}"
- name: Check if K3s Installed
ansible.builtin.shell:
cmd: |
if [[ $(service k3s-agent status) ]]; then exit 0; else exit 1; fi
executable: /bin/bash
changed_when: false
failed_when: false
register: k3s_installed
when: >
not nfc_role_kubernetes_master | default(false) | bool
- name: Download Install Scripts
@ -168,7 +175,10 @@
delegate_to: localhost
run_once: true
# no_log: true
when: ansible_os_family == 'Debian'
when: >
ansible_os_family == 'Debian'
and
item.when | default(true) | bool
loop: "{{ download_files }}"
vars:
ansible_connection: local
@ -177,6 +187,7 @@
url: https://get.k3s.io
- dest: /tmp/install_olm.sh
url: https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/v{{ kubernetes_version_olm }}/scripts/install.sh
when: "{{ nfc_role_kubernetes_install_olm }}"
- name: Download K3s Binary
@ -228,15 +239,16 @@
ansible.builtin.copy:
src: "/tmp/k3s.{{ ansible_architecture }}"
dest: "/usr/local/bin/k3s"
mode: '740'
mode: '741'
owner: root
group: root
when: hash_sha256_k3s_existing_binary.stat.checksum | default('0') != hash_sha256_k3s_downloaded_binary
- name: Copy install scripts to Host
ansible.builtin.copy:
src: "{{ item }}"
dest: "{{ item }}"
src: "{{ item.path }}"
dest: "{{ item.path }}"
mode: '755'
owner: root
group: root
@ -244,9 +256,12 @@
loop: "{{ install_scripts }}"
vars:
install_scripts:
- "/tmp/install.sh"
- "/tmp/install_olm.sh"
# when: hash_sha256_k3s_existing_binary.stat.checksum | default('0') != hash_sha256_k3s_downloaded_binary
- path: "/tmp/install.sh"
- path: "/tmp/install_olm.sh"
when: "{{ nfc_role_kubernetes_install_olm }}"
when: >
item.when | default(true) | bool
- name: Required Initial config files
ansible.builtin.copy:
@ -283,7 +298,7 @@
dest: /var/lib/rancher/k3s/server/manifests/calico.yaml
when: >
{{
kubernetes_config.cluster.prime.name == inventory_hostname
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
and
(
(
@ -325,10 +340,10 @@
cmd: |
INSTALL_K3S_SKIP_DOWNLOAD=true \
INSTALL_K3S_VERSION="v{{ KubernetesVersion }}{{ KubernetesVersion_k3s_prefix }}" \
/tmp/install.sh --cluster-init
/tmp/install.sh {% if nfc_role_kubernetes_etcd_enabled %}--cluster-init{% endif %}
changed_when: false
when: >
kubernetes_config.cluster.prime.name == inventory_hostname
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
and
k3s_installed.rc == 1
@ -352,7 +367,7 @@
and
'calico_manifest' not in ansible_run_tags
and
kubernetes_config.cluster.prime.name == inventory_hostname
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
- name: Install MetalLB Operator
@ -371,7 +386,7 @@
when: >-
nfc_kubernetes_enable_metallb | default(false) | bool
and
kubernetes_config.cluster.prime.name == inventory_hostname
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
- name: Wait for kubernetes prime to be ready
@ -385,7 +400,7 @@
exit 127;
fi
executable: /bin/bash
delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}"
run_once: true
register: kubernetes_ready_check
retries: 30
@ -409,9 +424,9 @@
install_olm.rc == 1
register: install_olm
when: >
kubernetes_config.cluster.prime.name == inventory_hostname
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
and
kubernetes_olm_install | default(false) | bool
nfc_role_kubernetes_install_olm | default(false) | bool
- name: Uninstall OLM
@ -422,7 +437,7 @@
kubectl delete -n olm deployment olm-operator;
kubectl delete crd catalogsources.operators.coreos.com;
kubectl delete` crd clusterserviceversions.operators.coreos.com;
kubectl delete crd clusterserviceversions.operators.coreos.com;
kubectl delete crd installplans.operators.coreos.com;
kubectl delete crd olmconfigs.operators.coreos.com;
kubectl delete crd operatorconditions.operators.coreos.com;
@ -437,9 +452,9 @@
failed_when: false
register: install_olm
when: >
kubernetes_config.cluster.prime.name == inventory_hostname
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
and
'olm_uninstall' not in ansible_run_tags
'olm_uninstall' in ansible_run_tags
- name: Enable Cluster Encryption
@ -448,7 +463,7 @@
changed_when: false
failed_when: false # New cluster will fail
when: >
kubernetes_config.cluster.prime.name == inventory_hostname
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
and
kubernetes_config.cluster.networking.encrypt | default(false) | bool
and
@ -466,7 +481,7 @@
- name: Fetch Join Token
ansible.builtin.slurp:
src: /var/lib/rancher/k3s/server/token
delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}"
run_once: true
register: k3s_join_token
no_log: true # Value is sensitive
@ -475,7 +490,7 @@
- name: Create Token fact
ansible.builtin.set_fact:
k3s_join_token: "{{ k3s_join_token.content | b64decode | replace('\n', '') }}"
delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}"
run_once: true
no_log: true # Value is sensitive
@ -491,9 +506,9 @@
executable: /bin/bash
changed_when: false
when: >
Kubernetes_Master | default(false) | bool
nfc_role_kubernetes_master | default(false) | bool
and
not kubernetes_config.cluster.prime.name == inventory_hostname
not kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
and
k3s_installed.rc == 1
@ -506,12 +521,14 @@
INSTALL_K3S_SKIP_DOWNLOAD=true \
INSTALL_K3S_VERSION="v{{ KubernetesVersion }}{{ KubernetesVersion_k3s_prefix }}" \
K3S_TOKEN="{{ k3s_join_token }}" \
K3S_URL="https://{{ hostvars[kubernetes_config.cluster.prime.name].ansible_host }}:6443" \
K3S_URL="https://{{ hostvars[kubernetes_config.cluster.prime.name | default(inventory_hostname)].ansible_host }}:6443" \
/tmp/install.sh -
executable: /bin/bash
changed_when: false
when: >
not Kubernetes_Master | default(false) | bool
not nfc_role_kubernetes_master | default(false) | bool
and
not kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
and
k3s_installed.rc == 1
@ -521,4 +538,3 @@
kubernetes_installed: true
# Clear Token as no longer required and due to being a sensitive value
k3s_join_token: null
nfc_kubernetes_install_architectures: {}


@ -96,7 +96,7 @@
- name: Fetch Calico Kubectl Plugin
ansible.builtin.uri:
url: |-
https://github.com/projectcalico/calico/releases/download/{{ nfc_kubernetes_calico_version }}/calicoctl-linux-
https://github.com/projectcalico/calico/releases/download/{{ nfc_role_kubernetes_calico_version }}/calicoctl-linux-
{%- if cpu_arch.key == 'aarch64' -%}
arm64
{%- else -%}
@ -127,7 +127,7 @@
owner: root
group: 'root'
become: true
when: inventory_hostname in groups['kubernetes_master']
when: nfc_role_kubernetes_master
- name: Setup Automagic Host Endpoints
@ -150,10 +150,10 @@
projectcalico.org/operator-node-migration-
executable: bash
become: true
delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}"
changed_when: false
failed_when: false
loop: "{{ groups[kubernetes_config.cluster.group_name] }}"
loop: "{{ groups[kubernetes_config.cluster.group_name | default('make_me_optional')] | default([]) }}"
# kubectl label node ip-10-229-92-202.eu-west-1.compute.internal projectcalico.org/operator-node-migration-
# migration started


@ -1,103 +0,0 @@
---
- name: Common Tasks
include_tasks: common.yaml
# tags:
# - install
- name: Check if kubernetes has been Initialized
stat:
path: /etc/kubernetes/admin.conf
register: KubernetesInitialized
tags:
- always
- name: kubernetes prime
include_tasks: prime.yaml
when: kubernetes_config.cluster.prime.name == inventory_hostname
- name: kubernetes workers
include_tasks: workers.yaml
when: kubernetes_config.cluster.prime.name != inventory_hostname
- name: Add Kubernetes Node Labels
kubernetes.core.k8s:
definition:
apiVersion: v1
kind: Node
metadata:
name: "{{ inventory_hostname }}"
labels:
"{{ item | from_yaml_all }}"
delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
with_items:
- "{{ kubernetes_config.hosts[inventory_hostname].labels }}"
when:
- ( kubernetes_config.hosts[inventory_hostname].labels is defined and
kubernetes_config.hosts[inventory_hostname].labels|default('')|length > 0 )
tags:
- install
- nodelabels
- name: Add Node Taints
kubernetes.core.k8s_taint:
state: "present"
name: "{{ inventory_hostname }}"
taints:
- "{{ item | from_yaml_all }}"
delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
with_items:
- "{{ kubernetes_config.hosts[inventory_hostname].taints.present }}"
when:
- (kubernetes_config.hosts[inventory_hostname].taints.present is defined and
kubernetes_config.hosts[inventory_hostname].taints.present|default('')|length > 0 )
tags:
- install
- taints
- name: Remove Node Taints
kubernetes.core.k8s_taint:
state: "absent"
name: "{{ inventory_hostname }}"
taints:
- "{{ item | from_yaml_all }}"
delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
with_items:
- "{{ kubernetes_config.hosts[inventory_hostname].taints.absent }}"
when:
- ( kubernetes_config.hosts[inventory_hostname].taints.absent is defined and
kubernetes_config.hosts[inventory_hostname].taints.absent|default('')|length > 0 )
tags:
- install
- taints
- name: Create Cluster Namespaces
kubernetes.core.k8s:
state: present
definition:
apiVersion: v1
kind: Namespace
metadata:
name: "{{ item.name }}"
labels:
#app.kubernetes.io/version: # App version
#app.kubernetes.io/component:
#app.kubernetes.io/part-of:
app.kubernetes.io/managed-by: Ansible
#meta.kubernetes.io/description: "{{ item.description | default('') }}"
meta.kubernetes.io/version: "{{ deployment_git_current_short_hash | default('') }}"
with_items:
- "{{ kubernetes_config.namespaces }}"
when:
( kubernetes_config.namespaces is defined and
kubernetes_config.namespaces | default('') | length > 0 and
kubernetes_config.cluster.prime.name == inventory_hostname )
tags:
- install
- namespaces

tasks/kubevirt/main.yaml

@ -0,0 +1,72 @@
---
- name: Validate Virtualization Support
ansible.builtin.include_tasks:
file: kubevirt/validate.yaml
apply:
tags:
- always
tags:
- always
- name: Deploy KubeVirt
ansible.builtin.template:
src: "{{ item }}"
dest: "/var/lib/rancher/k3s/server/manifests/{{ item | replace('.j2', '') | lower }}"
owner: root
mode: '700'
force: true
notify: "{{ item.notify | default(omit) }}"
loop: "{{ templates_to_apply }}"
diff: true
vars:
templates_to_apply:
- kubevirt-operator.yaml.j2
- kubevirt-cr.yaml.j2
- name: Fetch virtctl Kubectl Plugin
ansible.builtin.uri:
url: |-
https://github.com/kubevirt/kubevirt/releases/download/{{
nfc_role_kubernetes_container_images.kubevirt_operator.tag }}/virtctl-{{
nfc_role_kubernetes_container_images.kubevirt_operator.tag }}-linux-
{%- if cpu_arch.key == 'aarch64' -%}
arm64
{%- else -%}
amd64
{%- endif %}
status_code:
- 200
- 304
dest: "/tmp/kubectl-virtctl.{{ cpu_arch.key }}"
mode: '777'
owner: root
group: 'root'
changed_when: false
become: true
delegate_to: localhost
loop: "{{ nfc_kubernetes_install_architectures | dict2items }}"
loop_control:
loop_var: cpu_arch
vars:
ansible_connection: local
- name: Add virtctl Plugin
ansible.builtin.copy:
src: "/tmp/kubectl-virtctl.{{ ansible_architecture }}"
dest: /usr/local/bin/kubectl-virt
mode: '770'
owner: root
group: 'root'
become: true
when: nfc_role_kubernetes_master
- name: Wait for KubeVirt to initialize
ansible.builtin.command:
cmd: kubectl -n kubevirt wait kv kubevirt --for condition=Available
changed_when: false
failed_when: false


@ -0,0 +1,25 @@
---
- name: Install LibVirt-Clients
ansible.builtin.apt:
name: libvirt-clients
state: present
- name: Confirm Virtualization Support
ansible.builtin.command:
cmd: virt-host-validate qemu
changed_when: false
failed_when: false
register: virt_support_check_command
- name: Confirm No QEMU failures
ansible.builtin.assert:
that:
- (": FAIL" | string) not in (item | string)
- |
(": PASS" | string) in (item | string)
or
(": WARN" | string) in (item | string)
loop: "{{ virt_support_check_command.stdout_lines }}"


@ -1,5 +1,24 @@
---
- name: Get Hostname
ansible.builtin.command:
cmd: hostname
changed_when: false
register: hostname_to_check
- name: Hostname Check
ansible.builtin.assert:
that:
- hostname_to_check.stdout == inventory_hostname
msg: The hostname must match the inventory_hostname
- name: Check Machine Architecture
ansible.builtin.set_fact:
nfc_kubernetes_install_architectures: "{{ nfc_kubernetes_install_architectures | default({}) | combine({ansible_architecture: ''}) }}"
- name: Firewall Rules
ansible.builtin.include_role:
name: nfc_firewall
@ -20,19 +39,63 @@
- install
- name: K8s Cluster
ansible.builtin.include_tasks: k8s.yaml
when: kubernetes_type == 'k8s'
# kubernetes_installed
- name: K3s Install
ansible.builtin.include_tasks:
file: k3s/install.yaml
apply:
tags:
- always
when: >
install_kubernetes | default(true) | bool
and
not kubernetes_installed | default(false) | bool
tags:
- never
- install
- always
- name: K3s Cluster
ansible.builtin.include_tasks: k3s.yaml
when: kubernetes_type == 'k3s'
- name: K3s Configure
ansible.builtin.include_tasks:
file: k3s/configure.yaml
apply:
tags:
- always
when: >
install_kubernetes | default(true) | bool
and
kubernetes_installed | default(false) | bool
tags:
- never
- install
- operator_calico
- operator_migrate_calico
- always
- name: KubeVirt
ansible.builtin.include_tasks:
file: kubevirt/main.yaml
apply:
tags:
- always
when: >
kubernetes_installed | default(false) | bool
and
kubernetes_config.kube_virt.enabled | default(nfc_role_kubernetes_install_kubevirt)
and
inventory_hostname in kubernetes_config.kube_virt.nodes | default([ inventory_hostname ]) | list
tags:
- always
- name: Helm
ansible.builtin.include_tasks:
file: helm/main.yaml
apply:
tags:
- always
when: >
kubernetes_installed | default(false) | bool
and
kubernetes_config.helm.enabled | default(nfc_role_kubernetes_install_helm)
and
nfc_role_kubernetes_master
tags:
- always


@ -1,146 +0,0 @@
---
- name: initialize Kubernetes cluster
block:
- name: Initializing Kubernetes Cluster
#command: kubeadm init --pod-network-cidr "{{ KubernetesPodSubnet }}" --apiserver-advertise-address "{{ ansible_default_ipv4.address }}" --ignore-preflight-errors Mem --cri-socket=unix:///var/run/crio/crio.sock
command: kubeadm init --pod-network-cidr "{{ KubernetesPodSubnet }}" --service-cidr "{{ KubernetesServiceSubnet }}" --apiserver-advertise-address "0.0.0.0" --ignore-preflight-errors Mem #--cri-socket=unix:///var/run/containerd/containerd.sock
when:
- not KubernetesInitialized.stat.exists
rescue:
- name: Reset Kubeadm
ansible.builtin.shell: "{{ item }}"
#register: kube_reset
failed_when: item.rc != 0
with_items:
- kubeadm reset --force
- rm -Rf /etc/cni/net.d
- name: Check if kubernetes has been Initialized
stat:
path: /etc/kubernetes/admin.conf
register: KubernetesInitialized
tags:
- always
- name: fetch kubernetes health
ansible.builtin.shell: " wget http://localhost:10248/healthz -q -O - || true"
register: KubernetesHealth
changed_when: true == false
when: KubernetesInitialized.stat.exists
tags:
- always
- name: set kubernetes health fact
set_fact:
kube_health: "{{ KubernetesHealth.stdout | default(false) == 'ok' }}"
changed_when: true == false
tags:
- always
- name: Create directory for kube config.
become_method: sudo
become: yes
file:
#path: /home/{{ ansible_user }}/.kube
path: ~/.kube
state: directory
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: 0700
# when: Kubernetes_Master
tags:
- always
- name: Copy Kube config for local user
copy:
remote_src: yes
src: /etc/kubernetes/admin.conf
#dest: /home/{{ ansible_user }}/.kube/config
dest: ~/.kube/config
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: 0700
tags:
- always
- name: Add calico networking.
template:
src: "calico.yaml.j2"
dest: /etc/kubernetes/manifests/calico.yaml
owner: root
mode: 0744
- name: apply calico manifest
command: kubectl apply -f /etc/kubernetes/manifests/calico.yaml
tags:
- install
- manifest
- name: create remote workdir
file:
path: "{{ item }}"
state: directory
mode: 0700
with_items:
- /tmp/ansible/
tags:
- always
- name: Create local workdir
file:
path: "{{ item }}"
state: directory
mode: 0700
delegate_to: localhost
connection: local
with_items:
- /tmp/ansible/
tags:
- always
- name: get join command
ansible.builtin.shell: kubeadm token create --print-join-command > /tmp/ansible/join_kubernetes.sh
changed_when: true == false
tags:
- always
- name: download join command
fetch:
src: /tmp/ansible/join_kubernetes.sh
dest: /tmp/ansible/
flat: yes
changed_when: true == false
tags:
- always
# always:
# - name: remove remote workdir
# file:
# path: "{{ item }}"
# state: absent
# with_items:
# - /tmp/ansible/join_kubernetes.sh
# changed_when: true == false
# when:
# #- Kubernetes_Prime
# #- KubernetesInit.stat.exists
# - kubernetes_config.cluster.prime.name == inventory_hostname


@ -1,46 +0,0 @@
---
# - name: configure non-prime nodes - check node health
# shell: "curl http://localhost:10248/healthz || true"
# register: health
# changed_when: true == false
# - set_fact:
# kube_joined: "{{ health.stdout == 'ok' }}"
# changed_when: true == false
# # when:
# # - not Kubernetes_Prime
- name: configure non-prime nodes - create remote workdir
file:
path: "{{ item }}"
state: directory
mode: 0700
changed_when: true == false
with_items:
- /tmp/ansible/
- ansible.builtin.shell: " wget http://localhost:10248/healthz -q -O - || true"
register: health
changed_when: true == false
- set_fact:
kube_joined: "{{ health.stdout == 'ok' }}"
changed_when: true == false
- name: get join command from ansible controller
copy:
src: /tmp/ansible/join_kubernetes.sh
dest: /tmp/ansible/join_kubernetes.sh
mode: 0700
changed_when: true == false
when:
- not kube_joined
- name: configure non-prime nodes - join node to kubernetes cluster
command: sh /tmp/ansible/join_kubernetes.sh
when:
- not kube_joined


@ -6,6 +6,6 @@ metadata:
namespace: tigera-operator
data:
KUBERNETES_SERVICE_HOST: "
{%- set octet = kubernetes_config.cluster.networking.ServiceSubnet | split('.') -%}
{%- set octet = kubernetes_config.cluster.networking.ServiceSubnet | default(nfc_role_kubernetes_service_subnet) | split('.') -%}
{{- octet[0] }}.{{- octet[1] }}.{{- octet[2] }}.1"
KUBERNETES_SERVICE_PORT: '443'


@ -25272,7 +25272,10 @@ spec:
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: tigera-operator
image: {{ nfc_kubernetes_tigera_operator_registry }}/{{ nfc_kubernetes_tigera_operator_image}}:{{ nfc_kubernetes_tigera_operator_tag }}
image: {{
nfc_role_kubernetes_container_images.tigera_operator.registry }}/{{
nfc_role_kubernetes_container_images.tigera_operator.image}}:{{
nfc_role_kubernetes_container_images.tigera_operator.tag }}
imagePullPolicy: IfNotPresent
command:
- operator
@ -25290,7 +25293,7 @@ spec:
- name: OPERATOR_NAME
value: "tigera-operator"
- name: TIGERA_OPERATOR_INIT_IMAGE_VERSION
value: {{ nfc_kubernetes_tigera_operator_tag }}
value: {{ nfc_role_kubernetes_container_images.tigera_operator.tag }}
envFrom:
- configMapRef:
name: kubernetes-services-endpoint


@ -9,7 +9,7 @@ spec:
- Workload
- Tunnel
blockSize: 26
cidr: {{ kubernetes_config.cluster.networking.podSubnet }}
cidr: {{ kubernetes_config.cluster.networking.podSubnet | default(nfc_role_kubernetes_pod_subnet) }}
ipipMode: Never
natOutgoing: true
nodeSelector: all()


@ -11,7 +11,7 @@ spec:
hostPorts: Enabled
ipPools:
- blockSize: 26
cidr: {{ kubernetes_config.cluster.networking.podSubnet }}
cidr: {{ kubernetes_config.cluster.networking.podSubnet | default(nfc_role_kubernetes_pod_subnet) }}
disableBGPExport: false
encapsulation: VXLAN
natOutgoing: Enabled
@ -41,7 +41,7 @@ spec:
type: RollingUpdate
nonPrivileged: Disabled
serviceCIDRs:
- {{ kubernetes_config.cluster.networking.ServiceSubnet }}
- {{ kubernetes_config.cluster.networking.ServiceSubnet | default(nfc_role_kubernetes_service_subnet) }}
typhaDeployment:
spec:
template:


@ -4810,7 +4810,7 @@ spec:
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
- name: CALICO_IPV4POOL_CIDR
value: "{{ KubernetesPodSubnet }}"
value: "{{ nfc_role_kubernetes_pod_subnet }}"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"


@ -1,10 +0,0 @@
#
# {{ item.name }} Container Registry Configuration
# Managed by: Ansible
#
server = "{{ item.server }}"
[host."{{ item.url }}"]
capabilities = {{ item.capabilities | from_yaml_all }}
skip_verify = {{ item.skip_verify | default(false) | lower }}


@ -1,250 +0,0 @@
disabled_plugins = []
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/containerd"
state = "/run/containerd"
temp = ""
version = 2
[cgroup]
path = ""
[debug]
address = ""
format = ""
gid = 0
level = ""
uid = 0
[grpc]
address = "/run/containerd/containerd.sock"
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
tcp_address = ""
tcp_tls_ca = ""
tcp_tls_cert = ""
tcp_tls_key = ""
uid = 0
[metrics]
address = ""
grpc_histogram = false
[plugins]
[plugins."io.containerd.gc.v1.scheduler"]
deletion_threshold = 0
mutation_threshold = 100
pause_threshold = 0.02
schedule_delay = "0s"
startup_delay = "100ms"
[plugins."io.containerd.grpc.v1.cri"]
device_ownership_from_security_context = false
disable_apparmor = false
disable_cgroup = false
disable_hugetlb_controller = true
disable_proc_mount = false
disable_tcp_service = true
enable_selinux = false
enable_tls_streaming = false
enable_unprivileged_icmp = false
enable_unprivileged_ports = false
ignore_image_defined_volumes = false
max_concurrent_downloads = 3
max_container_log_line_size = 16384
netns_mounts_under_state_dir = false
restrict_oom_score_adj = false
sandbox_image = "registry.k8s.io/pause:3.6"
selinux_category_range = 1024
stats_collect_period = 10
stream_idle_timeout = "4h0m0s"
stream_server_address = "127.0.0.1"
stream_server_port = "0"
systemd_cgroup = false
tolerate_missing_hugetlb_controller = true
unset_seccomp_profile = ""
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
conf_template = ""
ip_pref = ""
max_conf_num = 1
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "runc"
disable_snapshot_annotations = true
discard_unpacked_layers = false
ignore_rdt_not_enabled_errors = false
no_pivot = false
snapshotter = "overlayfs"
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
BinaryName = ""
CriuImagePath = ""
CriuPath = ""
CriuWorkPath = ""
IoGid = 0
IoUid = 0
NoNewKeyring = false
NoPivotRoot = false
Root = ""
ShimCgroup = ""
SystemdCgroup = true
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]
[plugins."io.containerd.grpc.v1.cri".image_decryption]
key_model = "node"
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
[plugins."io.containerd.grpc.v1.cri".registry.auths]
[plugins."io.containerd.grpc.v1.cri".registry.configs]
[plugins."io.containerd.grpc.v1.cri".registry.headers]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
tls_cert_file = ""
tls_key_file = ""
[plugins."io.containerd.internal.v1.opt"]
path = "/opt/containerd"
[plugins."io.containerd.internal.v1.restart"]
interval = "10s"
[plugins."io.containerd.internal.v1.tracing"]
sampling_ratio = 1.0
service_name = "containerd"
[plugins."io.containerd.metadata.v1.bolt"]
content_sharing_policy = "shared"
[plugins."io.containerd.monitor.v1.cgroups"]
no_prometheus = false
[plugins."io.containerd.runtime.v1.linux"]
no_shim = false
runtime = "runc"
runtime_root = ""
shim = "containerd-shim"
shim_debug = false
[plugins."io.containerd.runtime.v2.task"]
platforms = ["linux/amd64"]
sched_core = false
[plugins."io.containerd.service.v1.diff-service"]
default = ["walking"]
[plugins."io.containerd.service.v1.tasks-service"]
rdt_config_file = ""
[plugins."io.containerd.snapshotter.v1.aufs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.btrfs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.devmapper"]
async_remove = false
base_image_size = ""
discard_blocks = false
fs_options = ""
fs_type = ""
pool_name = ""
root_path = ""
[plugins."io.containerd.snapshotter.v1.native"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.overlayfs"]
root_path = ""
upperdir_label = false
[plugins."io.containerd.snapshotter.v1.zfs"]
root_path = ""
[plugins."io.containerd.tracing.processor.v1.otlp"]
endpoint = ""
insecure = false
protocol = ""
[proxy_plugins]
[stream_processors]
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar"
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar+gzip"
[timeouts]
"io.containerd.timeout.bolt.open" = "0s"
"io.containerd.timeout.shim.cleanup" = "5s"
"io.containerd.timeout.shim.load" = "5s"
"io.containerd.timeout.shim.shutdown" = "3s"
"io.containerd.timeout.task.state" = "2s"
[ttrpc]
address = ""
gid = 0
uid = 0


@ -1,2 +0,0 @@
overlay
br_netfilter


@ -31,7 +31,7 @@
{%- endif -%}
{%- for kubernetes_host in groups[kubernetes_config.cluster.group_name] -%}
{%- for kubernetes_host in groups[kubernetes_config.cluster.group_name | default('me_is_optional')] | default([]) -%}
{%- set kubernetes_host = hostvars[kubernetes_host].ansible_host -%}
@ -63,7 +63,7 @@
{%- for master_host in groups['kubernetes_master'] -%}
{%- if master_host in groups[kubernetes_config.cluster.group_name] -%}
{%- if master_host in groups[kubernetes_config.cluster.group_name | default('me_is_optional')] | default([]) -%}
{%- set master_host = hostvars[master_host].ansible_host -%}
@ -88,7 +88,7 @@
{%- endif -%}
{%- if Kubernetes_Master | default(false) | bool -%}
{%- if nfc_role_kubernetes_master | default(false) | bool -%}
{%- if
master_host == kubernetes_host
@ -150,7 +150,7 @@
{#- All cluster Hosts -#}
{%- if Kubernetes_Master | default(false) | bool -%}
{%- if nfc_role_kubernetes_master | default(false) | bool -%}
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-api -s ' + kubernetes_host + ' -j ACCEPT'] -%}
@ -182,7 +182,7 @@
{%- endfor -%}
{%- if Kubernetes_Master | default(false) | bool -%}
{%- if nfc_role_kubernetes_master | default(false) | bool -%}
{%- if host_external_ip is defined -%}


@ -6,7 +6,12 @@
# Dont edit this file directly as it will be overwritten.
#
{%- if inventory_hostname in groups['kubernetes_master'] -%}
{%- if
inventory_hostname in groups['kubernetes_master']
or
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
-%}
{%
set kube_apiserver_arg = [
@ -19,31 +24,38 @@
-%}
{%
set servers_config = {
"cluster-cidr": KubernetesPodSubnet,
"cluster-cidr": nfc_role_kubernetes_pod_subnet,
"disable": [
"traefik"
],
"disable-network-policy": true,
"etcd-snapshot-retention": kubernetes_etcd_snapshot_retention | int,
"etcd-snapshot-schedule-cron": kubernetes_etcd_snapshot_cron_schedule | string,
"flannel-backend": "none",
"service-cidr": KubernetesServiceSubnet
"service-cidr": nfc_role_kubernetes_service_subnet
}
-%}
{%- if
kubernetes_config.cluster.domain_name is defined
and
kubernetes_config.cluster.domain_name | default('') != ''
-%}
{%- if nfc_role_kubernetes_etcd_enabled -%}
{%- set servers_config = servers_config | combine({
"cluster-domain": kubernetes_config.cluster.domain_name
"etcd-snapshot-retention": kubernetes_etcd_snapshot_retention | int,
"etcd-snapshot-schedule-cron": kubernetes_etcd_snapshot_cron_schedule | string,
}) -%}
{%- endif -%}
{%- if kubernetes_config.cluster.oidc.enabled | default(false) | bool -%}
{%- if
kubernetes_config.cluster.domain_name | default(nfc_role_kubernetes_cluster_domain) is defined
and
kubernetes_config.cluster.domain_name | default(nfc_role_kubernetes_cluster_domain) != ''
-%}
{%- set servers_config = servers_config | combine({
"cluster-domain": kubernetes_config.cluster.domain_name | default(nfc_role_kubernetes_cluster_domain)
}) -%}
{%- endif -%}
{%- if kubernetes_config.cluster.oidc.enabled | default(nfc_role_kubernetes_oidc_enabled) | default(false) | bool -%}
{%-
set kube_apiserver_arg = kube_apiserver_arg + [
@ -129,7 +141,7 @@
-%}
{%- if groups[kubernetes_config.cluster.group_name] | default([]) | list | length > 0 -%}
{%- if groups[kubernetes_config.cluster.group_name | default('make_me_optional')] | default([]) | list | length > 0 -%}
{%- if k3s_installed.rc == 0 -%}
@ -215,7 +227,11 @@
{# EoF All Nodes #}
{%- if inventory_hostname in groups['kubernetes_master'] -%}
{%- if
inventory_hostname in groups['kubernetes_master']
or
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
-%}
{%- set servers_config = servers_config | combine( all_nodes_config ) -%}


@ -0,0 +1,16 @@
---
apiVersion: kubevirt.io/v1
kind: KubeVirt
metadata:
name: kubevirt
namespace: kubevirt
spec:
certificateRotateStrategy: {}
configuration:
developerConfiguration:
featureGates: []
customizeComponents: {}
imagePullPolicy: IfNotPresent
workloadUpdateStrategy:
workloadUpdateMethods:
- LiveMigrate

File diff suppressed because it is too large.