feat: restructure repository as ansible collection

BREAKING CHANGE: Repository restructure from Ansible Role to Ansible Collection

!37
2024-03-13 19:14:08 +09:30
parent 27eaff7547
commit b063db8dc1
37 changed files with 138 additions and 42 deletions

View File

@@ -0,0 +1,192 @@
# Deprecated:
# Calico is being migrated to use the Calico operator.
# In a near-future release, this method of deploying Calico
# will be removed. Use the tag `operator_migrate_calico` to migrate.
calico_image_tag: v3.25.0 # Deprecated
# EoF Deprecated
# SoF New Variables
nfc_role_kubernetes_calico_version: v3.27.0
# nfc_kubernetes_tigera_operator_registry: quay.io
# nfc_kubernetes_tigera_operator_image: tigera/operator
# nfc_kubernetes_tigera_operator_tag: v1.32.3 # Calico v3.27.0
# EoF New Variables, EoF Deprecated
nfc_kubernetes_enable_metallb: false
nfc_kubernetes_enable_servicelb: false
nfc_role_kubernetes_container_images:
kubevirt_operator:
name: Kubevirt Operator
registry: quay.io
image: kubevirt/virt-operator
tag: v1.2.0
tigera_operator:
name: Tigera Operator
registry: quay.io
image: tigera/operator
tag: v1.32.3 # Calico v3.27.0
nfc_role_kubernetes_cluster_domain: cluster.local
nfc_role_kubernetes_etcd_enabled: false
nfc_role_kubernetes_install_olm: false
nfc_role_kubernetes_install_helm: true
nfc_role_kubernetes_install_kubevirt: false
nfc_role_kubernetes_kubevirt_operator_replicas: 1
nfc_role_kubernetes_oidc_enabled: false
nfc_role_kubernetes_pod_subnet: 172.16.248.0/21
nfc_role_kubernetes_service_subnet: 172.16.244.0/22
nfc_role_kubernetes_prime: true
nfc_role_kubernetes_master: true
nfc_role_kubernetes_worker: false
############################################################################################################
#
# Old Vars requiring refactoring
#
# ############################################################################################################
KubernetesVersion: '1.26.12' # must match the repository release version
kubernetes_version_olm: '0.27.0'
KubernetesVersion_k3s_prefix: '+k3s1'
kubernetes_private_container_registry: [] # Optional, Array. if none use `[]`
kubernetes_etcd_snapshot_cron_schedule: '0 */12 * * *'
kubernetes_etcd_snapshot_retention: 5
# host_external_ip: '' # Optional, String. External IP Address for host.
kube_apiserver_arg_audit_log_maxage: 2
kubelet_arg_system_reserved_cpu: 450m
kubelet_arg_system_reserved_memory: 512Mi
kubelet_arg_system_reserved_storage: 8Gi
nfc_kubernetes:
enable_firewall: true # Optional, Boolean. Enable firewall rules from role 'nfc_firewall'
nfc_kubernetes_no_restart: false # Set to true to prevent role from restarting kubernetes on the host(s)
nfc_kubernetes_no_restart_master: false # Set to true to prevent role from restarting kubernetes on master host(s)
nfc_kubernetes_no_restart_prime: false # Set to true to prevent role from restarting kubernetes on prime host
nfc_kubernetes_no_restart_slave: false # Set to true to prevent role from restarting kubernetes on slave host(s)
k3s:
files:
- name: audit.yaml
path: /var/lib/rancher/k3s/server
content: |
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
- level: Request
when: "{{ nfc_role_kubernetes_master }}"
- name: 90-kubelet.conf
path: /etc/sysctl.d
content: |
vm.panic_on_oom=0
vm.overcommit_memory=1
kernel.panic=10
kernel.panic_on_oops=1
kernel.keys.root_maxbytes=25000000
- name: psa.yaml
path: /var/lib/rancher/k3s/server
content: ""
# apiVersion: apiserver.config.k8s.io/v1
# kind: AdmissionConfiguration
# plugins:
# - name: PodSecurity
# configuration:
# apiVersion: pod-security.admission.config.k8s.io/v1beta1
# kind: PodSecurityConfiguration
# defaults:
# enforce: "restricted"
# enforce-version: "latest"
# audit: "restricted"
# audit-version: "latest"
# warn: "restricted"
# warn-version: "latest"
# exemptions:
# usernames: []
# runtimeClasses: []
# namespaces: [kube-system]
when: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname }}"
#############################################################################################
# Cluster Config when stored in Inventory
#
# One required per cluster. Recommended: create one Ansible host group per cluster.
#############################################################################################
# kubernetes_config: # Dict. Cluster Config
# cluster:
# access: # Mandatory. List, DNS host name or IPv4/IPv6 Address.
# # if none use '[]'
# - 'my.dnshostname.com'
# - '2001:4860:4860::8888'
# - '192.168.1.1'
# domain_name: earth # Mandatory, String. Cluster Domain Name
# group_name: # Mandatory, String. name of the ansible inventory group containing all cluster hosts
# prime:
# name: k3s-prod # Mandatory, String. Ansible inventory_host that will
# # act as the prime master node.
# networking:
# encrypt: true # Optional, Boolean. default `false`. Install wireguard for inter-node encryption
# podSubnet: 172.16.70.0/24 # Mandatory, String. CIDR
# ServiceSubnet: 172.16.72.0/24 # Mandatory, String. CIDR
#
#
# helm:
# enabled: true # Optional, Boolean. default=false. Install Helm Binary
#
#
# kube_virt:
# enabled: false # Optional, Boolean. default=false. Install KubeVirt
#
# nodes: [] # Optional, List of String. default=inventory_hostname. List of nodes to install KubeVirt on.
#
# operator:
# replicas: 2 # Optional, Integer. How many virt_operators to deploy.
#
#
# oidc: # Used to configure Kubernetes with OIDC Authentication.
# enabled: true # Mandatory, Boolean. Enables OIDC authentication.
# issuer_url: https://domainname.com/realms/realm-name # Mandatory, String. URL of OIDC Provider
# client_id: kubernetes-test # Mandatory, string. OIDC Client ID
# username_claim: preferred_username # Mandatory, String. Claim name containing username.
# username_prefix: oidc # Optional, String. What to prefix to username
# groups_claim: roles # Mandatory, String. Claim name containing groups
# groups_prefix: '' # Optional, String. String to prefix to group names
#
# hosts:
#
# my-host-name:
# labels:
# mylabel: myvalue
#
# taints:
# - effect: NoSchedule
# key: taintkey
# value: taintvalue
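For quick reference, a minimal sketch of the above as group_vars for a single cluster (group name, host names and subnets are illustrative placeholders):

kubernetes_config:
  cluster:
    access:
      - '192.168.1.1'
    domain_name: earth
    group_name: kubernetes_cluster_earth
    prime:
      name: k3s-prod
    networking:
      encrypt: true
      podSubnet: 172.16.70.0/24
      ServiceSubnet: 172.16.72.0/24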

View File

@@ -0,0 +1,53 @@
---
- name: "restart ContainerD"
service:
name: containerd
state: restarted
when: >
containerd_config.changed | default(false) | bool
and
containerd_installed.rc | default(1) | int == 0
and
kubernetes_type == 'k8s'
tags:
- configure
- install
- name: Restart Kubernetes
ansible.builtin.service:
name: |-
{%- if kubernetes_type == 'k3s' -%}
{%- if Kubernetes_Master | default(false) | bool -%}
k3s
{%- else -%}
k3s-agent
{%- endif -%}
{%- else -%}
kubelet
{%- endif %}
state: restarted
listen: kubernetes_restart
when: |-
not (
nfc_kubernetes_no_restart
or
(
inventory_hostname in groups['kubernetes_master']
and
nfc_kubernetes_no_restart_master
)
or
(
inventory_hostname == kubernetes_config.cluster.prime.name
and
nfc_kubernetes_no_restart_prime
)
or
(
inventory_hostname in groups['kubernetes_worker']
and
nfc_kubernetes_no_restart_slave
)
)
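The restart handler is keyed on its `listen` topic rather than its task name, so callers notify `kubernetes_restart`. A minimal sketch of a notifying task, mirroring the config template task later in this commit:

- name: Write K3s config
  ansible.builtin.template:
    src: k3s-config.yaml.j2
    dest: /etc/rancher/k3s/config.yaml
  notify: kubernetes_restart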

View File

@@ -0,0 +1,23 @@
galaxy_info:
role_name: nfc_kubernetes
author: No Fuss Computing
description: template role to install and configure Kubernetes on a host
issue_tracker_url: https://gitlab.com/nofusscomputing/projects/ansible/kubernetes
license: https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/blob/master/LICENSE
min_ansible_version: '2.15'
platforms:
- name: Debian
versions:
- bullseye
- name: Ubuntu
versions:
- 21
galaxy_tags:
- k3s
- kubernetes
- container
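A sketch of pulling the role into a play, assuming it remains importable by its galaxy `role_name` (the collection namespace is not shown in this diff):

- hosts: kubernetes_cluster   # hypothetical inventory group
  become: true
  roles:
    - nfc_kubernetes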

View File

@@ -0,0 +1,27 @@
---
- name: Fetch Helm APT Key
ansible.builtin.get_url:
url: https://baltocdn.com/helm/signing.asc
dest: /usr/share/keyrings/helm.asc
mode: '740'
- name: Add Helm Repository
ansible.builtin.apt_repository:
repo: >-
deb [arch={%- if ansible_architecture == 'aarch64' -%}
arm64
{%- else -%}
amd64
{%- endif %} signed-by=/usr/share/keyrings/helm.asc] http://baltocdn.com/helm/stable/{{
ansible_os_family | lower }}/ all main
state: present
filename: helm
- name: Install Helm
ansible.builtin.apt:
package:
- helm
state: present

View File

@@ -0,0 +1,69 @@
---
- name: Additional config files
ansible.builtin.copy:
content: |
{{ item.content }}
dest: "{{ item.path }}/{{ item.name }}"
mode: '740'
owner: root
group: root
loop: "{{ k3s.files }}"
when: item.when | default(false) | bool
- name: Check if FW dir exists
ansible.builtin.stat:
name: /etc/iptables.rules.d
register: firewall_rules_dir_metadata
- name: Copy Templates
ansible.builtin.template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
owner: root
mode: '700'
force: true
notify: "{{ item.notify | default(omit) }}"
loop: "{{ templates_to_apply }}"
when: >
item.when | default(true) | bool
vars:
templates_to_apply:
- src: kubernetes-manifest-rbac.yaml.j2
dest: /var/lib/rancher/k3s/server/manifests/rbac-authorization-common.yaml
when: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname }}"
- src: iptables-kubernetes.rules.j2
dest: "/etc/iptables.rules.d/iptables-kubernetes.rules"
notify: firewall_reloader
when: "{{ firewall_rules_dir_metadata.stat.exists }}"
- name: Add Kubernetes Node Labels
ansible.builtin.copy:
content: |-
apiVersion: v1
kind: Node
metadata:
name: "{{ inventory_hostname }}"
{% if kubernetes_config.hosts[inventory_hostname].labels | default([]) | list | length > 0 -%}
labels:
{{ kubernetes_config.hosts[inventory_hostname].labels | to_nice_yaml | indent(4) }}
{%- endif +%}
{% if kubernetes_config.hosts[inventory_hostname].taints | default([]) | list | length > 0 -%}
spec:
taints:
{{ kubernetes_config.hosts[inventory_hostname].taints | to_nice_yaml(indent=0) | indent(4) }}
{% endif %}
dest: /var/lib/rancher/k3s/server/manifests/node-manifest-{{ inventory_hostname }}.yaml
owner: root
group: root
mode: '700'
delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
when:
kubernetes_config.hosts[inventory_hostname].labels | default([]) | list | length > 0
or
kubernetes_config.hosts[inventory_hostname].taints | default([]) | list | length > 0

View File

@@ -0,0 +1,540 @@
---
- name: Check for calico deployment manifest
ansible.builtin.stat:
name: /var/lib/rancher/k3s/server/manifests/calico.yaml
register: file_calico_yaml_metadata
- name: Check for calico Operator deployment manifest
ansible.builtin.stat:
name: /var/lib/rancher/k3s/ansible/deployment-manifest-calico_operator.yaml
register: file_calico_operator_yaml_metadata
- name: Install Software
ansible.builtin.include_role:
name: nfc_common
vars:
common_gather_facts: false
initial_common_tasks: true # Don't run init tasks
aptInstall:
- name: curl
- name: iptables
- name: jq
- name: wireguard
- name: Remove swapfile from /etc/fstab
ansible.posix.mount:
name: "{{ item }}"
fstype: swap
state: absent
with_items:
- swap
- none
when:
- ansible_os_family == 'Debian' # ansible_lsb.codename = bullseye, ansible_lsb.major_release = 11
tags:
- install
- name: Disable swap
ansible.builtin.command:
cmd: swapoff -a
changed_when: false
when:
- ansible_os_family == 'Debian'
tags:
- install
- name: Check for an Armbian OS system
ansible.builtin.stat:
path: /etc/default/armbian-zram-config
register: armbian_stat_result
- name: Armbian Disable Swap
ansible.builtin.shell:
cmd: |
sed -i 's/\# SWAP=false/SWAP=false/g' /etc/default/armbian-zram-config;
sed -i 's/ENABLED=true/ENABLED=false/g' /etc/default/armbian-zram-config;
args:
executable: bash
changed_when: false
when: armbian_stat_result.stat.exists
- name: Create Required directories
ansible.builtin.file:
name: "{{ item.name }}"
state: "{{ item.state }}"
mode: "{{ item.mode }}"
loop: "{{ dirs }}"
vars:
dirs:
- name: /etc/rancher/k3s
state: directory
mode: '700'
- name: /var/lib/rancher/k3s/server/logs
state: directory
mode: '700'
- name: /var/lib/rancher/k3s/server/manifests
state: directory
mode: '700'
- name: /var/lib/rancher/k3s/ansible
state: directory
mode: '700'
- name: Add sysctl net.ipv4.ip_forward
ansible.posix.sysctl:
name: "{{ item.name }}"
value: "{{ item.value }}"
sysctl_set: true
state: present
reload: true
loop: "{{ settings }}"
notify: reboot_host # On change reboot
vars:
settings:
- name: net.ipv4.ip_forward
value: '1'
- name: fs.inotify.max_user_watches
value: '524288'
- name: fs.inotify.max_user_instances
value: '512'
- name: net.ipv6.conf.all.disable_ipv6
value: '1'
when:
- ansible_os_family == 'Debian'
- name: Check for Network Manager Directory
ansible.builtin.stat:
name: /etc/NetworkManager/conf.d
register: directory_network_manager_metadata
- name: Network Manager Setup
ansible.builtin.copy:
content: |-
#
# K3s Configuration for Network Manager
#
# Managed By ansible/role/nfc_kubernetes
#
# Don't edit this file directly as it will be overwritten.
#
[keyfile]
unmanaged-devices=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico;interface-name:vxlan-v6.calico;interface-name:wireguard.cali;interface-name:wg-v6.cali
dest: /etc/NetworkManager/conf.d/calico.conf
mode: '770'
owner: root
group: root
diff: true
when: directory_network_manager_metadata.stat.exists
- name: Check if K3s Installed
ansible.builtin.shell:
cmd: |
if [[ $(service k3s status) ]]; then exit 0; else exit 1; fi
executable: /bin/bash
changed_when: false
failed_when: false
register: k3s_installed
when: >
nfc_role_kubernetes_master | default(false) | bool
- name: Check if K3s Installed
ansible.builtin.shell:
cmd: |
if [[ $(service k3s-agent status) ]]; then exit 0; else exit 1; fi
executable: /bin/bash
changed_when: false
failed_when: false
register: k3s_installed
when: >
not nfc_role_kubernetes_master | default(false) | bool
- name: Download Install Scripts
ansible.builtin.uri:
url: "{{ item.url }}"
method: GET
return_content: true
status_code:
- 200
- 304
dest: "{{ item.dest }}"
mode: "744"
changed_when: false
register: k3s_download_script
delegate_to: localhost
run_once: true
# no_log: true
when: >
ansible_os_family == 'Debian'
and
item.when | default(true) | bool
loop: "{{ download_files }}"
vars:
ansible_connection: local
download_files:
- dest: /tmp/install.sh
url: https://get.k3s.io
- dest: /tmp/install_olm.sh
url: https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/v{{ kubernetes_version_olm }}/scripts/install.sh
when: "{{ nfc_role_kubernetes_install_olm }}"
- name: Download K3s Binary
ansible.builtin.uri:
url: |-
https://github.com/k3s-io/k3s/releases/download/v
{{- KubernetesVersion + KubernetesVersion_k3s_prefix | urlencode -}}
/k3s
{%- if cpu_arch.key == 'aarch64' -%}
-arm64
{%- endif %}
method: GET
return_content: false
status_code:
- 200
- 304
dest: "/tmp/k3s.{{ cpu_arch.key }}"
mode: "744"
changed_when: false
register: k3s_download_files
delegate_to: localhost
run_once: true
# no_log: true
when: ansible_os_family == 'Debian'
loop: "{{ nfc_kubernetes_install_architectures | dict2items }}"
loop_control:
loop_var: cpu_arch
vars:
ansible_connection: local
- name: "[TRACE] Downloaded File SHA256"
ansible.builtin.set_fact:
hash_sha256_k3s_downloaded_binary: "{{ lookup('ansible.builtin.file', '/tmp/k3s.' + cpu_arch.key) | hash('sha256') | string }}"
delegate_to: localhost
loop: "{{ nfc_kubernetes_install_architectures | dict2items }}"
loop_control:
loop_var: cpu_arch
- name: Existing k3s File hash
ansible.builtin.stat:
checksum_algorithm: sha256
name: /usr/local/bin/k3s
register: hash_sha256_k3s_existing_binary
- name: Copy K3s binary to Host
ansible.builtin.copy:
src: "/tmp/k3s.{{ ansible_architecture }}"
dest: "/usr/local/bin/k3s"
mode: '741'
owner: root
group: root
when: hash_sha256_k3s_existing_binary.stat.checksum | default('0') != hash_sha256_k3s_downloaded_binary
- name: Copy install scripts to Host
ansible.builtin.copy:
src: "{{ item.path }}"
dest: "{{ item.path }}"
mode: '755'
owner: root
group: root
changed_when: false
loop: "{{ install_scripts }}"
vars:
install_scripts:
- path: "/tmp/install.sh"
- path: "/tmp/install_olm.sh"
when: "{{ nfc_role_kubernetes_install_olm }}"
when: >
item.when | default(true) | bool
- name: Required Initial config files
ansible.builtin.copy:
content: |
{{ item.content }}
dest: "{{ item.path }}/{{ item.name }}"
mode: '740'
owner: root
group: root
loop: "{{ k3s.files }}"
when: >
item.when | default(true) | bool
# kubernetes_config.cluster.prime.name == inventory_hostname
- name: Copy Initial required templates
ansible.builtin.template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
owner: root
mode: '700'
force: true
notify: "{{ item.notify | default(omit) }}"
loop: "{{ templates_to_apply }}"
diff: true
when: >
item.when | default(true) | bool
vars:
templates_to_apply:
- src: k3s-config.yaml.j2
dest: /etc/rancher/k3s/config.yaml
notify: kubernetes_restart
- src: "calico.yaml.j2"
dest: /var/lib/rancher/k3s/server/manifests/calico.yaml
when: >
{{
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
and
(
(
not file_calico_operator_yaml_metadata.stat.exists
and
file_calico_yaml_metadata.stat.exists
and
k3s_installed.rc == 0
)
or
'calico_manifest' in ansible_run_tags
)
}}
- src: k3s-registries.yaml.j2
dest: /etc/rancher/k3s/registries.yaml
notify: kubernetes_restart
when: "{{ (kubernetes_private_container_registry | default([])) | from_yaml | list | length > 0 }}"
# - name: Templates IPv6
# ansible.builtin.template:
# src: iptables-kubernetes.rules.j2
# dest: "/etc/ip6tables.rules.d/ip6tables-kubernetes.rules"
# owner: root
# mode: '700'
# force: true
# vars:
# ipv6: true
- name: Set IPTables to legacy mode
ansible.builtin.command:
cmd: update-alternatives --set iptables /usr/sbin/iptables-legacy
changed_when: false
- name: Install K3s (prime master)
ansible.builtin.shell:
cmd: |
INSTALL_K3S_SKIP_DOWNLOAD=true \
INSTALL_K3S_VERSION="v{{ KubernetesVersion }}{{ KubernetesVersion_k3s_prefix }}" \
/tmp/install.sh {% if nfc_role_kubernetes_etcd_enabled %}--cluster-init{% endif %}
changed_when: false
when: >
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
and
k3s_installed.rc == 1
- name: Install Calico Operator
ansible.builtin.include_tasks:
file: migrate_to_operator.yaml
apply:
tags:
- always
when: >-
(
(
'operator_migrate_calico' in ansible_run_tags
or
'operator_calico' in ansible_run_tags
)
or
not file_calico_yaml_metadata.stat.exists
)
and
'calico_manifest' not in ansible_run_tags
and
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
- name: Install MetalLB Operator
ansible.builtin.include_tasks:
file: manifest_apply.yaml
apply:
tags:
- always
loop: "{{ manifests }}"
loop_control:
loop_var: manifest
vars:
manifests:
- name: MetalLB Operator
template: Deployment-manifest-MetalLB_Operator.yaml
when: >-
nfc_kubernetes_enable_metallb | default(false) | bool
and
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
- name: Wait for kubernetes prime to be ready
ansible.builtin.shell:
cmd: |
set -o pipefail
if [ `which jq` ]; then
echo $(kubectl get no $(hostname) -o json | jq -r '.status.conditions[] | select(.type=="Ready") | .status');
else
echo jq command not found;
exit 127;
fi
executable: /bin/bash
delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}"
run_once: true
register: kubernetes_ready_check
retries: 30
delay: 10
until: >
kubernetes_ready_check.stdout | default(false) | bool
or
kubernetes_ready_check.rc != 0
changed_when: false
failed_when: kubernetes_ready_check.rc != 0
- name: Install olm
ansible.builtin.shell:
cmd: |
/tmp/install_olm.sh v{{ kubernetes_version_olm }}
changed_when: false
failed_when: >
'already installed' not in install_olm.stdout
and
install_olm.rc == 1
register: install_olm
when: >
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
and
nfc_role_kubernetes_install_olm | default(false) | bool
- name: Uninstall OLM
ansible.builtin.shell:
cmd: |
kubectl delete -n olm deployment packageserver;
kubectl delete -n olm deployment catalog-operator;
kubectl delete -n olm deployment olm-operator;
kubectl delete crd catalogsources.operators.coreos.com;
kubectl delete crd clusterserviceversions.operators.coreos.com;
kubectl delete crd installplans.operators.coreos.com;
kubectl delete crd olmconfigs.operators.coreos.com;
kubectl delete crd operatorconditions.operators.coreos.com;
kubectl delete crd operatorgroups.operators.coreos.com;
kubectl delete crd operators.operators.coreos.com;
kubectl delete crd subscriptions.operators.coreos.com;
kubectl delete namespace operators --force;
kubectl delete namespace olm --force;
changed_when: false
failed_when: false
register: install_olm
when: >
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
and
'olm_uninstall' in ansible_run_tags
- name: Enable Cluster Encryption
ansible.builtin.command:
cmd: kubectl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabled":true,"wireguardEnabledV6":true}}'
changed_when: false
failed_when: false # New cluster will fail
when: >
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
and
kubernetes_config.cluster.networking.encrypt | default(false) | bool
and
(
'calico_manifest' in ansible_run_tags
or
(
'operator_migrate_calico' not in ansible_run_tags
or
'operator_calico' not in ansible_run_tags
)
)
- name: Fetch Join Token
ansible.builtin.slurp:
src: /var/lib/rancher/k3s/server/token
delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}"
run_once: true
register: k3s_join_token
no_log: true # Value is sensitive
- name: Create Token fact
ansible.builtin.set_fact:
k3s_join_token: "{{ k3s_join_token.content | b64decode | replace('\n', '') }}"
delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}"
run_once: true
no_log: true # Value is sensitive
- name: Install K3s (master nodes)
ansible.builtin.shell:
cmd: |
INSTALL_K3S_EXEC="server" \
INSTALL_K3S_SKIP_DOWNLOAD=true \
INSTALL_K3S_VERSION="v{{ KubernetesVersion }}{{ KubernetesVersion_k3s_prefix }}" \
K3S_TOKEN="{{ k3s_join_token }}" \
/tmp/install.sh
executable: /bin/bash
changed_when: false
when: >
nfc_role_kubernetes_master | default(false) | bool
and
not kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
and
k3s_installed.rc == 1
- name: Install K3s (worker nodes)
ansible.builtin.shell:
cmd: |
set -o pipefail
INSTALL_K3S_EXEC="agent" \
INSTALL_K3S_SKIP_DOWNLOAD=true \
INSTALL_K3S_VERSION="v{{ KubernetesVersion }}{{ KubernetesVersion_k3s_prefix }}" \
K3S_TOKEN="{{ k3s_join_token }}" \
K3S_URL="https://{{ hostvars[kubernetes_config.cluster.prime.name | default(inventory_hostname)].ansible_host }}:6443" \
/tmp/install.sh -
executable: /bin/bash
changed_when: false
when: >
not nfc_role_kubernetes_master | default(false) | bool
and
not kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
and
k3s_installed.rc == 1
- name: Set Kubernetes Final Install Fact
ansible.builtin.set_fact:
kubernetes_installed: true
# Clear Token as no longer required and due to being a sensitive value
k3s_join_token: null

View File

@@ -0,0 +1,49 @@
---
# Save the manifests in a dir so that diff's can be shown for changes
- name: Copy Manifest for addition - {{ manifest.name }}
ansible.builtin.template:
src: "{{ manifest.template }}"
dest: "/var/lib/rancher/k3s/ansible/{{ manifest.template | lower | replace('.j2', '') }}"
mode: '744'
become: true
diff: true
- name: Try / Catch
block:
# Try to create first, if fail use replace.
- name: Apply Manifest Create - {{ manifest.name }}
ansible.builtin.command:
cmd: "kubectl create -f /var/lib/rancher/k3s/ansible/{{ manifest.template | lower | replace('.j2', '') }}"
become: true
changed_when: false
failed_when: >
'Error from server' in manifest_stdout.stderr
register: manifest_stdout
rescue:
- name: TRACE - Manifest Create - {{ manifest.name }}
ansible.builtin.debug:
msg: "{{ manifest_stdout }}"
- name: Replace Manifests - "Rescue" - {{ manifest.name }}
ansible.builtin.command:
cmd: "kubectl replace -f /var/lib/rancher/k3s/ansible/{{ manifest.template | lower | replace('.j2', '') }}"
become: true
changed_when: false
failed_when: >
'Error from server' in manifest_stdout.stderr
and
'ensure CRDs are installed first' in manifest_stdout.stderr
register: manifest_stdout
- name: TRACE - Replace Manifest - "Rescue" - {{ manifest.name }}
ansible.builtin.debug:
msg: "{{ manifest_stdout }}"
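This task file expects a `manifest` variable carrying `name` and `template` keys, exactly as the MetalLB install task supplies via its loop. A sketch of a single-manifest caller:

- name: Apply Manifest
  ansible.builtin.include_tasks:
    file: manifest_apply.yaml
  vars:
    manifest:
      name: MetalLB Operator
      template: Deployment-manifest-MetalLB_Operator.yaml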

View File

@@ -0,0 +1,198 @@
---
# Reference https://docs.tigera.io/calico/3.25/operations/operator-migration
# Script creation of imageset: https://docs.tigera.io/calico/latest/operations/image-options/imageset#create-an-imageset
# above may pull sha for arch of machine who ran the script
- name: Try / Catch
vars:
operator_manifests:
- Deployment-manifest-Calico_Operator.yaml.j2
- Installation-manifest-Calico_Cluster.yaml.j2
- FelixConfiguration-manifest-Calico_Cluster.yaml
- IPPool-manifest-Calico_Cluster.yaml.j2
- APIServer-manifest-Calico_Cluster.yaml
- ConfigMap-manifest-Calico_Service_Endpoint.yaml.j2
block:
- name: Move Calico Manifest from addons directory
ansible.builtin.command:
cmd: mv /var/lib/rancher/k3s/server/manifests/calico.yaml /tmp/
become: true
changed_when: false
when: file_calico_yaml_metadata.stat.exists
- name: Remove addon from Kubernetes
ansible.builtin.command:
cmd: kubectl delete addon -n kube-system calico
become: true
changed_when: false
when: file_calico_yaml_metadata.stat.exists
- name: Uninstall Calico
ansible.builtin.command:
cmd: kubectl delete -f /tmp/calico.yaml
become: true
changed_when: false
when: file_calico_yaml_metadata.stat.exists
# Save the manifests in a dir so that diff's can be shown for changes
- name: Copy Manifest for addition
ansible.builtin.template:
src: "{{ item }}"
dest: "/var/lib/rancher/k3s/ansible/{{ item | lower | replace('.j2', '') }}"
mode: '744'
become: true
diff: true
loop: "{{ operator_manifests }}"
- name: Try / Catch
block:
- name: Apply Operator Manifests
ansible.builtin.command:
cmd: "kubectl create -f /var/lib/rancher/k3s/ansible/{{ item | lower | replace('.j2', '') }}"
become: true
changed_when: false
failed_when: >
'Error from server' in operator_manifest_stdout.stderr
loop: "{{ operator_manifests }}"
register: operator_manifest_stdout
rescue:
- name: TRACE - Operator manifest apply
ansible.builtin.debug:
msg: "{{ operator_manifest_stdout }}"
- name: Apply Operator Manifests - "Rescue"
ansible.builtin.command:
cmd: "kubectl replace -f /var/lib/rancher/k3s/ansible/{{ item | lower | replace('.j2', '') }}"
become: true
changed_when: false
failed_when: >
'Error from server' in operator_manifest_stdout.stderr
and
'ensure CRDs are installed first' in operator_manifest_stdout.stderr
loop: "{{ operator_manifests }}"
register: operator_manifest_stdout
- name: TRACE - Operator manifest apply. Rescued
ansible.builtin.debug:
msg: "{{ operator_manifest_stdout }}"
- name: Fetch Calico Kubectl Plugin
ansible.builtin.uri:
url: |-
https://github.com/projectcalico/calico/releases/download/{{ nfc_role_kubernetes_calico_version }}/calicoctl-linux-
{%- if cpu_arch.key == 'aarch64' -%}
arm64
{%- else -%}
amd64
{%- endif %}
status_code:
- 200
- 304
dest: "/tmp/kubectl-calico.{{ cpu_arch.key }}"
mode: '777'
owner: root
group: 'root'
changed_when: false
become: true
delegate_to: localhost
loop: "{{ nfc_kubernetes_install_architectures | dict2items }}"
loop_control:
loop_var: cpu_arch
vars:
ansible_connection: local
- name: Add calico Plugin
ansible.builtin.copy:
src: "/tmp/kubectl-calico.{{ ansible_architecture }}"
dest: /usr/local/bin/kubectl-calico
mode: '770'
owner: root
group: 'root'
become: true
when: nfc_role_kubernetes_master
- name: Setup Automagic Host Endpoints
ansible.builtin.shell:
cmd: |-
kubectl calico \
patch kubecontrollersconfiguration \
default --patch='{"spec": {"controllers": {"node": {"hostEndpoint": {"autoCreate": "Enabled"}}}}}'
executable: bash
become: true
changed_when: false
failed_when: false # fixme
- name: Remove calico migration label
ansible.builtin.shell:
cmd: |-
kubectl label \
node {{ item }} \
projectcalico.org/operator-node-migration-
executable: bash
become: true
delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}"
changed_when: false
failed_when: false
loop: "{{ groups[kubernetes_config.cluster.group_name | default('make_me_optional')] | default([]) }}"
# kubectl label node ip-10-229-92-202.eu-west-1.compute.internal projectcalico.org/operator-node-migration-
# migration started
rescue:
- name: Remove Operator Manifests
ansible.builtin.command:
cmd: "kubectl delete -f /var/lib/rancher/k3s/ansible/{{ item | lower | replace('.j2', '') }}"
become: true
changed_when: false
failed_when: false
loop: "{{ operator_manifests }}"
when: file_calico_yaml_metadata.stat.exists # Only rescue if it was a migration
- name: Move Calico Manifest from addons directory
ansible.builtin.command:
cmd: mv /tmp/calico.yaml /var/lib/rancher/k3s/server/manifests/
become: true
changed_when: false
when: file_calico_yaml_metadata.stat.exists
- name: Re-install Calico
ansible.builtin.command:
cmd: kubectl apply -f /var/lib/rancher/k3s/server/manifests/calico.yaml
become: true
changed_when: false
when: file_calico_yaml_metadata.stat.exists
always:
- name: Clean-up Temp File
ansible.builtin.file:
name: /tmp/calico.yaml
state: absent
become: true
when: file_calico_yaml_metadata.stat.exists

View File

@@ -0,0 +1,72 @@
---
- name: Validate Virtualization Support
ansible.builtin.include_tasks:
file: kubevirt/validate.yaml
apply:
tags:
- always
tags:
- always
- name: Deploy KubeVirt
ansible.builtin.template:
src: "{{ item }}"
dest: "/var/lib/rancher/k3s/server/manifests/{{ item | replace('.j2', '') | lower }}"
owner: root
mode: '700'
force: true
notify: "{{ item.notify | default(omit) }}"
loop: "{{ templates_to_apply }}"
diff: true
vars:
templates_to_apply:
- kubevirt-operator.yaml.j2
- kubevirt-cr.yaml.j2
- name: Fetch virtctl Kubectl Plugin
ansible.builtin.uri:
url: |-
https://github.com/kubevirt/kubevirt/releases/download/{{
nfc_role_kubernetes_container_images.kubevirt_operator.tag }}/virtctl-{{
nfc_role_kubernetes_container_images.kubevirt_operator.tag }}-linux-
{%- if cpu_arch.key == 'aarch64' -%}
arm64
{%- else -%}
amd64
{%- endif %}
status_code:
- 200
- 304
dest: "/tmp/kubectl-virtctl.{{ cpu_arch.key }}"
mode: '777'
owner: root
group: 'root'
changed_when: false
become: true
delegate_to: localhost
loop: "{{ nfc_kubernetes_install_architectures | dict2items }}"
loop_control:
loop_var: cpu_arch
vars:
ansible_connection: local
- name: Add virtctl Plugin
ansible.builtin.copy:
src: "/tmp/kubectl-virtctl.{{ ansible_architecture }}"
dest: /usr/local/bin/kubectl-virt
mode: '770'
owner: root
group: 'root'
become: true
when: nfc_role_kubernetes_master
- name: Wait for KubeVirt to initialize
ansible.builtin.command:
cmd: kubectl -n kubevirt wait kv kubevirt --for condition=Available
changed_when: false
failed_when: false

View File

@@ -0,0 +1,25 @@
---
- name: Install LibVirt-Clients
ansible.builtin.apt:
name: libvirt-clients
state: present
- name: Confirm Virtualization Support
ansible.builtin.command:
cmd: virt-host-validate qemu
changed_when: false
failed_when: false
register: virt_support_check_command
- name: Confirm No QEMU failures
ansible.builtin.assert:
that:
- (": FAIL" | string) not in (item | string)
- |
(": PASS" | string) in (item | string)
or
(": WARN" | string) in (item | string)
loop: "{{ virt_support_check_command.stdout_lines }}"
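The assert scans each line of output for a verdict marker; `virt-host-validate qemu` emits one check per line in roughly this form (illustrative):

QEMU: Checking for hardware virtualization                : PASS
QEMU: Checking if device /dev/kvm exists                  : PASS
QEMU: Checking for cgroup 'cpu' controller support        : WARN (...)

A WARN line passes the assert; any FAIL line aborts the play.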

View File

@@ -0,0 +1,101 @@
---
- name: Get Hostname
ansible.builtin.command:
cmd: hostname
changed_when: false
register: hostname_to_check
- name: Hostname Check
ansible.builtin.assert:
that:
- hostname_to_check.stdout == inventory_hostname
msg: The hostname must match the inventory_hostname
- name: Check Machine Architecture
ansible.builtin.set_fact:
nfc_kubernetes_install_architectures: "{{ nfc_kubernetes_install_architectures | default({}) | combine({ansible_architecture: ''}) }}"
- name: Firewall Rules
ansible.builtin.include_role:
name: nfc_firewall
vars:
nfc_firewall_enabled_kubernetes: "{{ nfc_kubernetes.enable_firewall | default(false) | bool }}"
tags:
- never
- install
# fix, reload firewall `iptables-reloader`
- name: Reload iptables
ansible.builtin.command:
cmd: bash -c /usr/bin/iptables-reloader
changed_when: false
tags:
- never
- install
# kubernetes_installed
- name: K3s Install
ansible.builtin.include_tasks:
file: k3s/install.yaml
apply:
tags:
- always
when: >
install_kubernetes | default(true) | bool
and
not kubernetes_installed | default(false) | bool
tags:
- always
- name: K3s Configure
ansible.builtin.include_tasks:
file: k3s/configure.yaml
apply:
tags:
- always
when: >
install_kubernetes | default(true) | bool
and
kubernetes_installed | default(false) | bool
tags:
- always
- name: KubeVirt
ansible.builtin.include_tasks:
file: kubevirt/main.yaml
apply:
tags:
- always
when: >
kubernetes_installed | default(false) | bool
and
kubernetes_config.kube_virt.enabled | default(nfc_role_kubernetes_install_kubevirt)
and
inventory_hostname in kubernetes_config.kube_virt.nodes | default([ inventory_hostname ]) | list
tags:
- always
- name: Helm
ansible.builtin.include_tasks:
file: helm/main.yaml
apply:
tags:
- always
when: >
kubernetes_installed | default(false) | bool
and
kubernetes_config.helm.enabled | default(nfc_role_kubernetes_install_helm)
and
nfc_role_kubernetes_master
tags:
- always

View File

@@ -0,0 +1,51 @@
# ---
# apiVersion: kyverno.io/v1
# kind: ClusterPolicy
# metadata:
# name: add-networkpolicy
# labels:
# <<: {{ kubernetes_config.defaults.labels.deployment_labels | from_yaml }}
# annotations:
# ansible.kubernetes.io/path: {{ item }}
# policies.kyverno.io/title: Add Network Policy
# policies.kyverno.io/category: Multi-Tenancy, EKS Best Practices
# policies.kyverno.io/subject: NetworkPolicy
# policies.kyverno.io/minversion: 1.6.0
# policies.kyverno.io/description: >-
# By default, Kubernetes allows communications across all Pods within a cluster.
# The NetworkPolicy resource and a CNI plug-in that supports NetworkPolicy must be used to restrict
# communications. A default NetworkPolicy should be configured for each Namespace to
# default deny all ingress and egress traffic to the Pods in the Namespace. Application
# teams can then configure additional NetworkPolicy resources to allow desired traffic
# to application Pods from select sources. This policy will create a new NetworkPolicy resource
# named `default-deny` which will deny all traffic anytime a new Namespace is created.
# spec:
# rules:
# - name: default-deny
# match:
# any:
# - resources:
# kinds:
# - Namespace
# exclude:
# any:
# - resources:
# namespaces:
# - kube-metrics
# - kube-policy
# - kube-system
# - default
# generate:
# apiVersion: networking.k8s.io/v1
# kind: NetworkPolicy
# name: default-deny
# namespace: "{{'{{request.object.metadata.name}}'}}"
# synchronize: true
# data:
# spec:
# # select all pods in the namespace
# podSelector: {}
# # deny all traffic
# policyTypes:
# - Ingress
# - Egress

View File

@@ -0,0 +1,60 @@
# ---
# apiVersion: kyverno.io/v1
# kind: ClusterPolicy
# metadata:
# name: add-networkpolicy-dns
# labels:
# <<: {{ kubernetes_config.defaults.labels.deployment_labels | from_yaml }}
# annotations:
# ansible.kubernetes.io/path: {{ item }}
# policies.kyverno.io/title: Add Network Policy for DNS
# policies.kyverno.io/category: Multi-Tenancy, EKS Best Practices
# policies.kyverno.io/subject: NetworkPolicy
# kyverno.io/kyverno-version: 1.6.2
# policies.kyverno.io/minversion: 1.6.0
# kyverno.io/kubernetes-version: "1.23"
# policies.kyverno.io/description: >-
# By default, Kubernetes allows communications across all Pods within a cluster.
# The NetworkPolicy resource and a CNI plug-in that supports NetworkPolicy must be used to restrict
# communications. A default NetworkPolicy should be configured for each Namespace to
# default deny all ingress and egress traffic to the Pods in the Namespace. Application
# teams can then configure additional NetworkPolicy resources to allow desired traffic
# to application Pods from select sources. This policy will create a new NetworkPolicy resource
# named `default-deny` which will deny all traffic anytime a new Namespace is created.
# spec:
# generateExistingOnPolicyUpdate: true
# rules:
# - name: add-netpol-dns
# match:
# any:
# - resources:
# kinds:
# - Namespace
# exclude:
# any:
# - resources:
# namespaces:
# - kube-metrics
# - kube-policy
# - kube-system
# - default
# generate:
# apiVersion: networking.k8s.io/v1
# kind: NetworkPolicy
# name: allow-dns
# namespace: "{{'{{request.object.metadata.name}}'}}"
# synchronize: true
# data:
# spec:
# podSelector:
# matchLabels: {}
# policyTypes:
# - Egress
# egress:
# - to:
# - namespaceSelector:
# matchLabels:
# name: kube-system
# ports:
# - protocol: UDP
# port: 53

View File

@@ -0,0 +1,48 @@
---
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: disallow-mutable-tag
labels:
<<: {{ kubernetes_config.defaults.labels.deployment_labels | from_yaml }}
annotations:
ansible.kubernetes.io/path: {{ item }}
policies.kyverno.io/title: Disallow mutable Tag
policies.kyverno.io/category: Best Practices
policies.kyverno.io/minversion: 1.6.0
policies.kyverno.io/severity: medium
policies.kyverno.io/subject: Pod
policies.kyverno.io/description: >-
The ':latest', ':master' and ':dev(elopment)' tags are mutable and can lead to unexpected errors if the
image changes. A best practice is to use an immutable tag that maps to
a specific version of an application Pod. This policy validates that the image
specifies a tag and that it is not called `latest`, `master` or `dev(elopment)`.
spec:
#failurePolicy: Fail
validationFailureAction: Audit
background: true
rules:
- name: require-image-tag
match:
any:
- resources:
kinds:
- Pod
validate:
message: "An image tag is required."
pattern:
spec:
containers:
- image: "*:*"
- name: validate-image-tag
match:
any:
- resources:
kinds:
- Pod
validate:
message: "Using a mutable image tag e.g. 'latest', 'master' or 'dev[elopment]' is not allowed."
pattern:
spec:
containers:
- image: "!*:[latest|master|dev|development]"

View File

@@ -0,0 +1,52 @@
---
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: disallow-default-namespace
labels:
<<: {{ kubernetes_config.defaults.labels.deployment_labels | from_yaml }}
annotations:
pod-policies.kyverno.io/autogen-controllers: none
policies.kyverno.io/title: Disallow Default Namespace
policies.kyverno.io/minversion: 1.6.0
policies.kyverno.io/category: Multi-Tenancy
policies.kyverno.io/severity: medium
policies.kyverno.io/subject: Pod
policies.kyverno.io/description: >-
Kubernetes Namespaces are an optional feature that provide a way to segment and
isolate cluster resources across multiple applications and users. As a best
practice, workloads should be isolated with Namespaces. Namespaces should be required
and the default (empty) Namespace should not be used. This policy validates that Pods
specify a Namespace name other than `default`. Rule auto-generation is disabled here
because Pod controllers need to specify the `namespace` field under the top-level `metadata`
object and not at the Pod template level.
spec:
#failurePolicy: Fail
validationFailureAction: Audit
background: true
rules:
- name: validate-namespace
match:
any:
- resources:
kinds:
- Pod
validate:
message: "Using 'default' namespace is not allowed."
pattern:
metadata:
namespace: "!default"
- name: validate-podcontroller-namespace
match:
any:
- resources:
kinds:
- DaemonSet
- Deployment
- Job
- StatefulSet
validate:
message: "Using 'default' namespace is not allowed for pod controllers."
pattern:
metadata:
namespace: "!default"

View File

@@ -0,0 +1,48 @@
---
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: spread-pods
labels:
<<: {{ kubernetes_config.defaults.labels.deployment_labels | from_yaml }}
annotations:
policies.kyverno.io/title: Spread Pods Across Nodes
policies.kyverno.io/category: Sample
policies.kyverno.io/subject: Deployment, Pod
policies.kyverno.io/minversion: 1.6.0
policies.kyverno.io/description: >-
Deployments to a Kubernetes cluster with multiple availability zones often need to
distribute those replicas to align with those zones to ensure site-level failures
do not impact availability. This policy matches Deployments and StatefulSets with
two or more replicas and mutates them to spread Pods across nodes.
spec:
generateExistingOnPolicyUpdate: true
background: true
rules:
- name: spread-pods-across-nodes
# Matches any Deployment or StatefulSet
match:
any:
- resources:
kinds:
- Deployment
- StatefulSet
preconditions:
all:
- key: "{{ '{{ request.object.spec.replicas }}' }}"
operator: GreaterThanOrEquals
value: 2
# Mutates the incoming Deployment.
mutate:
patchStrategicMerge:
spec:
template:
spec:
# Adds the topologySpreadConstraints field if non-existent in the request.
+(topologySpreadConstraints):
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app.kubernetes.io/name: "{% raw %} '{{ request.object.metadata.labels.\"app.kubernetes.io/name\" }}' {% endraw %}"
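A sketch of a Deployment this rule would mutate (any Deployment or StatefulSet with two or more replicas qualifies; names are illustrative):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo
  labels:
    app.kubernetes.io/name: demo
spec:
  replicas: 2
  selector:
    matchLabels:
      app.kubernetes.io/name: demo
  template:
    metadata:
      labels:
        app.kubernetes.io/name: demo
    spec:
      containers:
        - name: demo
          image: demo:1.0.0

The mutation injects a topologySpreadConstraints entry keyed on the Pod's app.kubernetes.io/name label, spreading replicas across node hostnames.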

View File

@@ -0,0 +1,38 @@
# apiVersion: networking.k8s.io/v1
# kind: NetworkPolicy
# metadata:
# name: kube-metrics
# namespace: kube-metrics
# labels:
# app.kubernetes.io/name: kube-metrics
# # app.kubernetes.io/instance: { .Release.Name }}
# # app.kubernetes.io/version: { .Chart.Version | quote }}
# # app.kubernetes.io/managed-by: { .Release.Service }}
# app.kubernetes.io/component: loki
# app.kubernetes.io/part-of: metrics
# spec:
# egress:
# - to:
# #- podSelector:
# - namespaceSelector:
# matchLabels:
# kubernetes.io/metadata.name: "default"
# ports:
# - port: 443
# protocol: TCP
# # ingress:
# # - from:
# # #- podSelector:
# # - namespaceSelector:
# # matchLabels:
# # #app.kubernetes.io/name: prometheus
# # #app.kubernetes.io/instance: k8s
# # #app.kubernetes.io/managed-by: prometheus-operator
# # app.kubernetes.io/name: grafana-agent
# # #app.kubernetes.io/part-of: kube-prometheus
# # #app: grafana
# policyTypes:
# - Egress
# #- Ingress

View File

@@ -0,0 +1,6 @@
---
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
name: default
spec: {}

View File

@@ -0,0 +1,11 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kubernetes-services-endpoint
namespace: tigera-operator
data:
KUBERNETES_SERVICE_HOST: "
{%- set octet = kubernetes_config.cluster.networking.ServiceSubnet | default(nfc_role_kubernetes_service_subnet) | split('.') -%}
{{- octet[0] }}.{{- octet[1] }}.{{- octet[2] }}.1"
KUBERNETES_SERVICE_PORT: '443'
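The Jinja block splits the service subnet into octets and takes the `.1` address. With the default `nfc_role_kubernetes_service_subnet` of 172.16.244.0/22 the data section renders as:

KUBERNETES_SERVICE_HOST: "172.16.244.1"
KUBERNETES_SERVICE_PORT: '443'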

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,17 @@
---
apiVersion: crd.projectcalico.org/v1
kind: FelixConfiguration
metadata:
name: default
spec:
# bpfConnectTimeLoadBalancing: TCP
# bpfExternalServiceMode: DSR
# bpfHostNetworkedNATWithoutCTLB: Enabled
bpfLogLevel: ""
floatingIPs: Disabled
healthPort: 9099
logSeverityScreen: Info
reportingInterval: 0s
wireguardEnabled: true
wireguardEnabledV6: true

View File

@@ -0,0 +1,16 @@
---
apiVersion: crd.projectcalico.org/v1
kind: IPPool
metadata:
name: default-ipv4-ippool
spec:
allowedUses:
- Workload
- Tunnel
blockSize: 26
cidr: {{ kubernetes_config.cluster.networking.podSubnet | default(nfc_role_kubernetes_pod_subnet) }}
ipipMode: Never
natOutgoing: true
nodeSelector: all()
vxlanMode: Always

View File

@@ -0,0 +1,53 @@
---
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
name: default
spec:
calicoNetwork:
bgp: Disabled
containerIPForwarding: Enabled
hostPorts: Enabled
ipPools:
- blockSize: 26
cidr: {{ kubernetes_config.cluster.networking.podSubnet | default(nfc_role_kubernetes_pod_subnet) }}
disableBGPExport: false
encapsulation: VXLAN
natOutgoing: Enabled
nodeSelector: all()
# linuxDataplane: Iptables
linuxDataplane: BPF
mtu: 0
multiInterfaceMode: None
nodeAddressAutodetectionV4:
kubernetes: NodeInternalIP
cni:
ipam:
type: Calico
type: Calico
componentResources:
- componentName: Node
resourceRequirements:
requests:
cpu: 250m
controlPlaneReplicas: 3
flexVolumePath: None
kubeletVolumePluginPath: None
nodeUpdateStrategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
nonPrivileged: Disabled
serviceCIDRs:
- {{ kubernetes_config.cluster.networking.ServiceSubnet | default(nfc_role_kubernetes_service_subnet) }}
typhaDeployment:
spec:
template:
spec:
tolerations:
- effect: NoExecute
key: CriticalAddonsOnly
value: "true"
variant: Calico

File diff suppressed because it is too large

View File

@@ -0,0 +1,20 @@
{
"cniVersion": "0.3.1",
"name": "crio",
"type": "bridge",
"bridge": "cni0",
"isGateway": true,
"ipMasq": true,
"hairpinMode": true,
"ipam": {
"type": "host-local",
"routes": [
{ "dst": "0.0.0.0/0" },
{ "dst": "1100:200::1/24" }
],
"ranges": [
[{ "subnet": "{{ KubernetesPodSubnet }}" }],
[{ "subnet": "1100:200::/24" }]
]
}
}

View File

@@ -0,0 +1,315 @@
#
# IP Tables Firewall Rules for Kubernetes
#
# Managed By ansible/role/nfc_kubernetes
#
# Don't edit this file directly as it will be overwritten. To grant a host API access
# edit the cluster config, adding the hostname/ip to path kubernetes_config.cluster.access
#
# This file is periodically called by cron
#
{% set data = namespace(firewall_rules=[]) -%}
{%- if ansible_host is regex('^[a-z]') and ':' not in ansible_host -%} {#- Convert DNs name to IP Address -#}
{%- if ipv6 | default(false) -%}
{%- set ansible_host = query('community.dns.lookup', ansible_host + '.', type='AAAA' ) -%}
{%- else -%}
{%- set ansible_host = query('community.dns.lookup', ansible_host + '.', type='A' ) -%}
{%- endif -%}
{%- if ansible_host | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
{%- set ansible_host = ansible_host | from_yaml_all | list -%}
{%- set ansible_host = ansible_host[0] -%}
{%- endif -%}
{%- endif -%}
{%- for kubernetes_host in groups[kubernetes_config.cluster.group_name | default('me_is_optional')] | default([]) -%}
{%- set kubernetes_host = hostvars[kubernetes_host].ansible_host -%}
{%- if kubernetes_host is regex('^[a-z]') and ':' not in kubernetes_host -%} {#- Convert DNs name to IP Address -#}
{%- if ipv6 | default(false) -%}
{%- set kubernetes_host = query('community.dns.lookup', kubernetes_host + '.', type='AAAA' ) -%}
{%- else -%}
{%- set kubernetes_host = query('community.dns.lookup', kubernetes_host + '.', type='A' ) -%}
{%- endif -%}
{%- if
kubernetes_host is iterable
and
kubernetes_host is not string
-%} {#- Convert dns lookup to list, and select the first item -#}
{%- set kubernetes_host = kubernetes_host | from_yaml_all | list -%}
{%- set kubernetes_host = kubernetes_host[0] | default('') -%}
{%- endif -%}
{%- endif -%}
{%- if kubernetes_host != '' -%}
{%- for master_host in groups['kubernetes_master'] -%}
{%- if master_host in groups[kubernetes_config.cluster.group_name | default('me_is_optional')] | default([]) -%}
{%- set master_host = hostvars[master_host].ansible_host -%}
{%- if master_host is regex('^[a-z]') and ':' not in master_host -%} {#- Convert DNs name to IP Address -#}
{%- if ipv6 | default(false) -%}
{%- set master_host = query('community.dns.lookup', master_host + '.', type='AAAA' ) -%}
{%- else -%}
{%- set master_host = query('community.dns.lookup', master_host + '.', type='A' ) -%}
{%- endif -%}
{%- if master_host | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
{%- set master_host = master_host | from_yaml_all | list -%}
{%- set master_host = master_host[0] -%}
{%- endif -%}
{%- endif -%}
{%- if nfc_role_kubernetes_master | default(false) | bool -%}
{%- if
master_host == kubernetes_host
and
master_host != ansible_host
and
(
(
ipv6 | default(false)
and
':' in master_host
)
or
(
not ipv6 | default(false)
and
'.' in master_host
)
)
-%}
{#- master hosts only -#}
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-embedded-etcd -s ' + master_host + ' -j ACCEPT'] -%}
{# {%- set data.firewall_rules = data.firewall_rules + ['-I INPUT -s ' + master_host + ' -p tcp -m multiport --dports 2380 -j ACCEPT'] -%} #}
{%- if '-I kubernetes-api -s ' + master_host + ' -j ACCEPT' not in data.firewall_rules -%}
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-api -s ' + master_host + ' -j ACCEPT'] -%}
{%- endif -%}
{%- endif -%}
{%- endif -%}
{%- endif -%}
{%- endfor -%}
{%- if
ansible_host != kubernetes_host
and
(
(
ipv6 | default(false)
and
':' in kubernetes_host
)
or
(
not ipv6 | default(false)
and
'.' in kubernetes_host
)
)
-%}
{#- All cluster Hosts -#}
{%- if nfc_role_kubernetes_master | default(false) | bool -%}
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-api -s ' + kubernetes_host + ' -j ACCEPT'] -%}
{%- endif -%}
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-flannel-vxlan -s ' + kubernetes_host + ' -j ACCEPT'] -%}
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-kubelet-metrics -s ' + kubernetes_host + ' -j ACCEPT'] -%}
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-flannel-wg-four -s ' + kubernetes_host + ' -j ACCEPT'] -%}
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-flannel-wg-six -s ' + kubernetes_host + ' -j ACCEPT'] -%}
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-calico-bgp -s ' + kubernetes_host + ' -j ACCEPT'] -%}
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-calico-typha -s ' + kubernetes_host + ' -j ACCEPT'] -%}
{%- if nfc_kubernetes_enable_metallb | default(false) -%}
{%- set data.firewall_rules = data.firewall_rules + ['-I metallb-l2-tcp -s ' + kubernetes_host + ' -j ACCEPT'] -%}
{%- set data.firewall_rules = data.firewall_rules + ['-I metallb-l2-udp -s ' + kubernetes_host + ' -j ACCEPT'] -%}
{%- endif -%}
{%- endif -%}
{%- endif -%}
{%- endfor -%}
{%- if nfc_role_kubernetes_master | default(false) | bool -%}
{%- if host_external_ip is defined -%}
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-api -s ' + host_external_ip + ' -m comment --comment "hosts configured external IP" -j ACCEPT'] -%}
{%- endif -%}
{%- for api_client in kubernetes_config.cluster.access | default([]) -%}
{%- if api_client is regex('^[a-z]') and ':' not in api_client -%} {#- Convert DNs name to IP Address -#}
{%- set api_client_dns_name = api_client -%}
{%- if ipv6 | default(false) -%}
{%- set api_client = query('community.dns.lookup', api_client + '.', type='AAAA' ) -%}
{%- else -%}
{%- set api_client = query('community.dns.lookup', api_client + '.', type='A' ) -%}
{%- endif -%}
{%- if api_client | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
{%- set api_client = api_client | from_yaml_all | list -%}
{%- set api_client = api_client[0] -%}
{%- endif -%}
{%- endif -%}
{%- if
api_client != ansible_host
and
(
(
ipv6 | default(false)
and
':' in api_client
)
or
(
not ipv6 | default(false)
and
'.' in api_client
)
)
-%}
{#- Hosts allowed to access API -#}
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-api -s ' + api_client + ' -m comment --comment "host: ' + api_client_dns_name | default(api_client) + '" -j ACCEPT'] -%}
{%- endif -%}
{%- endfor %}
{%- endif %}
*filter
{# -N kubernetes-embedded-etcd
-A kubernetes-embedded-etcd -j RETURN
-A INPUT -p tcp -m multiport --dports 2379,2380 -m comment --comment "etcd. Servers only" -j kubernetes-embedded-etcd
-N kubernetes-api
-A kubernetes-api -j RETURN
-A INPUT -p tcp --dport 6443 -m comment --comment "Kubernetes API access. All Cluster hosts and end users" -j kubernetes-api
-N kubernetes-flannel-vxlan
-A kubernetes-flannel-vxlan -j RETURN
-A INPUT -p udp --dport 8472 -m comment --comment "Flannel. All cluster hosts" -j kubernetes-flannel-vxlan
-N kubernetes-kubelet-metrics
-A kubernetes-kubelet-metrics -j RETURN
-A INPUT -p tcp --dport 10250 -m comment --comment "Kubernetes Metrics. All cluster hosts" -j kubernetes-kubelet-metrics
-N kubernetes-flannel-wg-four
-A kubernetes-flannel-wg-four -j RETURN
-A INPUT -p udp --dport 51820 -m comment --comment "Flannel WireGuard IPv4. All cluster hosts" -j kubernetes-flannel-wg-four
-N kubernetes-flannel-wg-six
-A kubernetes-flannel-wg-six -j RETURN
-A INPUT -p udp --dport 51821 -m comment --comment "Flannel WireGuard IPv6. All cluster hosts" -j kubernetes-flannel-wg-six #}
{% if data.firewall_rules | length | int > 0 -%}
{% for rule in data.firewall_rules -%}
{{ rule }}
{% endfor -%}
{% endif -%}
{#- #-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 6443 -j ACCEPT
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 179 -j ACCEPT
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 10250 -j ACCEPT
#-I INPUT -s 192.168.1.0/24 -p udp -m multiport --dports 4789 -j ACCEPT
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 2379 -j ACCEPT
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 2380 -j ACCEPT
-I INPUT -p tcp -m multiport --dports 6443 -j ACCEPT
-I INPUT -p tcp -m multiport --dports 179 -j ACCEPT
-I INPUT -p tcp -m multiport --dports 10250 -j ACCEPT
-I INPUT -p udp -m multiport --dports 4789 -j ACCEPT
-I INPUT -p tcp -m multiport --dports 2379 -j ACCEPT
-I INPUT -p tcp -m multiport --dports 2380 -j ACCEPT #}
COMMIT
{# iptables -I kubernetes-api -s nww-au1.networkedweb.com -j ACCEPT #}
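As the header states, API access is granted through the cluster config rather than by editing the rendered rules. A sketch of the relevant inventory fragment:

kubernetes_config:
  cluster:
    access:
      - 'my.dnshostname.com'
      - '192.168.1.1'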

View File

@@ -0,0 +1,244 @@
#
# K3s Configuration for running Kubernetes
#
# Managed By ansible/role/nfc_kubernetes
#
# Don't edit this file directly as it will be overwritten.
#
{%- if
inventory_hostname in groups['kubernetes_master']
or
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
-%}
{%
set kube_apiserver_arg = [
"audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log",
"audit-log-maxage=" + kube_apiserver_arg_audit_log_maxage | string,
"audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml",
]
-%}
{%
set servers_config = {
"cluster-cidr": nfc_role_kubernetes_pod_subnet,
"disable": [
"traefik"
],
"disable-network-policy": true,
"flannel-backend": "none",
"service-cidr": nfc_role_kubernetes_service_subnet
}
-%}
{%- if nfc_role_kubernetes_etcd_enabled -%}
{%- set servers_config = servers_config | combine({
"etcd-snapshot-retention": kubernetes_etcd_snapshot_retention | int,
"etcd-snapshot-schedule-cron": kubernetes_etcd_snapshot_cron_schedule | string,
}) -%}
{%- endif -%}
{%- if
kubernetes_config.cluster.domain_name | default(nfc_role_kubernetes_cluster_domain) is defined
and
kubernetes_config.cluster.domain_name | default(nfc_role_kubernetes_cluster_domain) != ''
-%}
{%- set servers_config = servers_config | combine({
"cluster-domain": kubernetes_config.cluster.domain_name | default(nfc_role_kubernetes_cluster_domain)
}) -%}
{%- endif -%}
{%- if kubernetes_config.cluster.oidc.enabled | default(nfc_role_kubernetes_oidc_enabled) | default(false) | bool -%}
{%-
set kube_apiserver_arg = kube_apiserver_arg + [
"oidc-client-id=" + kubernetes_config.cluster.oidc.client_id,
"oidc-groups-claim=" + kubernetes_config.cluster.oidc.groups_claim,
"oidc-issuer-url=" + kubernetes_config.cluster.oidc.issuer_url,
"oidc-username-claim=" + kubernetes_config.cluster.oidc.username_claim
] -%}
{%- if kubernetes_config.cluster.oidc.oidc_username_prefix | default('') != '' -%}
{%- set kube_apiserver_arg = kube_apiserver_arg + [
"oidc-username-prefix=" + kubernetes_config.cluster.oidc.oidc_username_prefix
] -%}
{%- endif -%}
{%- if kubernetes_config.cluster.oidc.groups_prefix | default('') != '' -%}
{%- set kube_apiserver_arg = kube_apiserver_arg + [
"oidc-groups-prefix=" + kubernetes_config.cluster.oidc.groups_prefix
]
-%}
{%- endif -%}
{%- endif -%}
{%- if (
nfc_kubernetes_enable_metallb | default(false)
or
not nfc_kubernetes_enable_servicelb | default(false)
) -%}
{%- set disable = servers_config.disable + [ "servicelb" ] -%}
{%
set servers_config = servers_config | combine({
"disable": disable
})
-%}
{%- endif -%}
{%- if (
not nfc_kubernetes_enable_metallb | default(false)
and
nfc_kubernetes_enable_servicelb | default(false)
) -%}
{%- set servers_config = servers_config | combine({
"servicelb-namespace": kubernetes_config.cluster.networking.service_load_balancer_namespace | default('kube-system')
}) -%}
{%- endif -%}
{# Combine Remaining Server Objects #}
{%
set servers_config = servers_config | combine({
"kube-apiserver-arg": kube_apiserver_arg
})
-%}
{%- endif -%}
{# Eof Server Nodes #}
{# SoF All Nodes #}
{%
set all_nodes_config = {
"kubelet-arg": [
"system-reserved=cpu=" + kubelet_arg_system_reserved_cpu + ",memory=" + kubelet_arg_system_reserved_memory +
",ephemeral-storage=" + kubelet_arg_system_reserved_storage
],
"node-name": inventory_hostname,
}
-%}
{%- if groups[kubernetes_config.cluster.group_name | default('make_me_optional')] | default([]) | list | length > 0 -%}
{%- if k3s_installed.rc == 0 -%}
{%- set ns = namespace(server=[]) -%}
{%- for cluster_node in groups[kubernetes_config.cluster.group_name] -%}
{%- if cluster_node in groups['kubernetes_master'] -%}
{%- if hostvars[cluster_node].host_external_ip is defined -%}
{%- if
hostvars[cluster_node].host_external_ip != ansible_default_ipv4.address
and
cluster_node == inventory_hostname
-%} {# Server self, use internal ip if external ip exists #}
{%- set server_node = ansible_default_ipv4.address -%}
{%- else -%}
{%- set server_node = hostvars[cluster_node].host_external_ip -%}
{%- endif -%}
{%- else -%}
{%- set server_node = hostvars[cluster_node].ansible_host -%}
{%- endif -%}
{%- set ns.server = (ns.server | default([])) + [
"https://" + server_node + ":6443"
] -%}
{%- endif -%}
{%- endfor -%}
{%- set all_nodes_config = all_nodes_config | combine({
"server": ns.server,
}) -%}
{%- elif
  kubernetes_config.cluster.prime.name | default(inventory_hostname) != inventory_hostname
  and
  k3s_installed.rc == 1
-%}
{%- set server = (server | default([])) + [
"https://" + hostvars[kubernetes_config.cluster.prime.name].ansible_host + ":6443"
] -%}
{%- set all_nodes_config = all_nodes_config | combine({
"server": server,
}) -%}
{%- endif -%}
{%- endif -%}
{%- if
host_external_ip is defined
and
ansible_default_ipv4.address != host_external_ip
-%}
{%- set all_nodes_config = all_nodes_config | combine({
"node-external-ip": host_external_ip
}) -%}
{%- else -%}
{%- set all_nodes_config = all_nodes_config | combine({
"node-ip": ansible_default_ipv4.address
}) -%}
{%- endif -%}
{# EoF All Nodes #}
{%- if
inventory_hostname in groups['kubernetes_master']
or
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
-%}
{%- set servers_config = servers_config | combine( all_nodes_config ) -%}
{{ servers_config | to_nice_yaml(indent=2) }}
{%- else -%}
{{ all_nodes_config | to_nice_yaml(indent=2) }}
{%- endif -%}
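For orientation, a minimal sketch of what this template might render for a prime control-plane node using the role defaults above (the hostname and IP are placeholders; k3s conventionally reads this file as /etc/rancher/k3s/config.yaml):

cluster-cidr: 172.16.248.0/21
cluster-domain: cluster.local
disable:
- traefik
- servicelb
disable-network-policy: true
flannel-backend: none
kube-apiserver-arg:
- audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log
- audit-log-maxage=2
- audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml
kubelet-arg:
- system-reserved=cpu=450m,memory=512Mi,ephemeral-storage=8Gi
node-ip: 192.0.2.10          # placeholder address
node-name: k3s-prime         # placeholder hostname
service-cidr: 172.16.244.0/22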

View File

@ -0,0 +1,19 @@
#
# Private Container Registries for Kubernetes
#
# Managed By ansible/role/nfc_kubernetes
#
# Don't edit this file directly as it will be overwritten.
#
{% set registries = kubernetes_private_container_registry | default([]) -%}
{% if registries | length > 0 %}mirrors:
{% for entry in registries %}
{{ entry.name }}:
endpoint:
- "{{ entry.url }}"
{%- endfor %}
{% endif %}
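As an illustration only: assuming a hypothetical inventory entry such as

kubernetes_private_container_registry:
  - name: registry.example.com
    url: registry.example.com:5000

the template above would render approximately:

mirrors:
  registry.example.com:
    endpoint:
      - "registry.example.com:5000"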

View File

@ -0,0 +1,294 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
authorization/description: >-
      Provide full access to everything.
      Using this ClusterRole should be avoided; instead, create additional
      cluster roles that meet the specific authorization requirements.
authorization/target: cluster, namespace
labels:
app.kubernetes.io/part-of: nfc_kubernetes
app.kubernetes.io/managed-by: ansible
app.kubernetes.io/version: ''
name: authorization:full
rules:
- apiGroups:
- "*"
resources:
- "*"
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
authorization/description: |-
      Provide access for reading ALL non-secret items; this includes reading pod and node metrics.
      This role is designed for users who require access to audit/view/diagnose at either the
      cluster level (`ClusterRoleBinding`) or the namespace level (`RoleBinding`)
    authorization/target: cluster, namespace
labels:
app.kubernetes.io/part-of: nfc_kubernetes
app.kubernetes.io/managed-by: ansible
app.kubernetes.io/version: ''
name: authorization:namespace:read
rules:
- apiGroups: # Get Metrics
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
  - apiGroups: # Read-only access to resources
      - "*"
    resources:
      - awx
      - cronjobs
      - daemonsets
      - deployments
      - helmcharts
      - helmchartconfigs
      - ingresses
      - jobs
      - namespaces
      - pods
      - persistentvolumes
      - persistentvolumeclaims
      - serviceaccounts
      - services
      - statefulsets
      - storageclasses
      - configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
authorization/description: |-
      Provide read-write access to common namespaced resources.
      This role is designed for namespace owners and is intended to be
      bound to a namespace using a `RoleBinding`
authorization/target: namespace
labels:
app.kubernetes.io/part-of: nfc_kubernetes
app.kubernetes.io/managed-by: ansible
app.kubernetes.io/version: ''
name: authorization:namespace:owner
rules:
  - apiGroups: # Read-Write access to resources
      - "*"
    resources:
      - cronjobs
      - daemonsets
      - deployments
      - helmcharts
      - helmchartconfigs
      - jobs
      - pods
      - persistentvolumeclaims
      - roles
      - rolebindings
      - secrets
      - serviceaccounts
      - services
      - statefulsets
      - configmaps
verbs:
- create
- get
- list
- watch
- delete
- apiGroups: # Read-Remove access
- "*"
resources:
      - ingresses
verbs:
- get
- list
- watch
- delete
- apiGroups: # Read access
- "*"
resources:
- storageclasses
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
authorization/description: |-
      Provide access for adding/editing/removing Ingresses.
      This role is designed for a user who is responsible for the
      cluster's ingress.
authorization/target: namespace
name: authorization:cluster:ingress-admin
rules:
- apiGroups:
- "*"
resources:
      - ingresses
verbs:
- create
- get
- list
- watch
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: authorization:cluster:view-metrics
rules:
- apiGroups:
- metrics.k8s.io
- "" # Without this metrics don't work. this also grants access to view nodes
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: authorization:read
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: authorization:namespace:read
subjects:
- kind: Group
name: administrators
- kind: Group
name: technician
- kind: Group
name: NodeRED
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: authorization:view-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: authorization:cluster:view-metrics
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: administrators
- kind: Group
name: technician
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: authorization:ingress-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: authorization:cluster:ingress-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: administrators
# ---
# kind: ClusterRoleBinding
# apiVersion: rbac.authorization.k8s.io/v1
# metadata:
# name: authorization:full
# roleRef:
# apiGroup: rbac.authorization.k8s.io
# kind: ClusterRole
# name: authorization:full
# subjects:
# - kind: Group
# name: administrators
# - kind: Group
# name: technician
###################################################################################################################
# Namespace role binding
# ---
# apiVersion: rbac.authorization.k8s.io/v1
# kind: RoleBinding
# metadata:
# # labels:
# name: authorization:full
# namespace: development
# roleRef:
# apiGroup: rbac.authorization.k8s.io
# kind: Role
# name: authorization:full
# subjects:
# - kind: Group
# name: administrators
# namespace: development
# - kind: Group
# name: technician
# - kind: Group
# name: NodeRED
# ---
# - apiVersion: rbac.authorization.k8s.io/v1
# kind: Role
# metadata:
# labels:
# app.kubernetes.io/description: |-
# provide full access to the testing namespace
# name: authorization:full
# namespace: development
# rules:
# - apiGroups:
# - ""
# resources:
# - ""
# verbs:
#         - create
#         - delete
#         - update
# - get
# - list
# - watch
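A minimal sketch of scoping `authorization:namespace:owner` to a single namespace with a `RoleBinding`, as the role's annotation intends (the namespace and group names here are hypothetical):

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: authorization:namespace:owner
  namespace: my-team  # hypothetical namespace
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: authorization:namespace:owner
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: Group
    name: my-team-owners  # hypothetical group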

View File

@ -0,0 +1,16 @@
---
apiVersion: kubevirt.io/v1
kind: KubeVirt
metadata:
name: kubevirt
namespace: kubevirt
spec:
certificateRotateStrategy: {}
configuration:
developerConfiguration:
featureGates: []
customizeComponents: {}
imagePullPolicy: IfNotPresent
workloadUpdateStrategy:
workloadUpdateMethods:
- LiveMigrate
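The empty `featureGates` list above is where optional KubeVirt functionality would be switched on. A hedged illustration using the upstream `Snapshot` gate (verify it against the deployed KubeVirt version before use; not part of this commit):

spec:
  configuration:
    developerConfiguration:
      featureGates:
        - Snapshot  # illustrative only; check availability for your KubeVirt release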

File diff suppressed because it is too large