diff --git a/defaults/main.yml b/defaults/main.yml index 0ee3fbe..e31d808 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -16,16 +16,29 @@ nfc_kubernetes_tigera_operator_tag: v1.32.3 # Calico v3.27.0 nfc_kubernetes_enable_metallb: false nfc_kubernetes_enable_servicelb: false + + +nfc_role_kubernetes_cluster_domain: cluster.local + +nfc_role_kubernetes_etcd_enabled: false + nfc_role_kubernetes_install_olm: false +nfc_role_kubernetes_oidc_enabled: false + +nfc_role_kubernetes_pod_subnet: 172.16.248.0/21 +nfc_role_kubernetes_service_subnet: 172.16.244.0/22 + +nfc_role_kubernetes_prime: true +nfc_role_kubernetes_master: true +nfc_role_kubernetes_worker: false ############################################################################################################ # # Old Vars requiring refactoring # # ############################################################################################################ -# KubernetesPodSubnet: 10.85.0.0/16 -# KubernetesServiceSubnet: 10.86.0.0/16 + Kubernetes_Prime: false # Optional, Boolean. Is the current host the Prime master? 
@@ -74,7 +87,7 @@ k3s: kind: Policy rules: - level: Request - when: "{{ Kubernetes_Master | default(false) }}" + when: "{{ nfc_role_kubernetes_master }}" - name: 90-kubelet.conf path: /etc/sysctl.d @@ -106,7 +119,7 @@ k3s: # usernames: [] # runtimeClasses: [] # namespaces: [kube-system] - when: "{{ kubernetes_config.cluster.prime.name == inventory_hostname }}" + when: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname }}" ############################################################################################# diff --git a/tasks/k3s/configure.yaml b/tasks/k3s/configure.yaml index f6d1f58..0c933e7 100644 --- a/tasks/k3s/configure.yaml +++ b/tasks/k3s/configure.yaml @@ -28,7 +28,7 @@ - src: kubernetes-manifest-rbac.yaml.j2 dest: /var/lib/rancher/k3s/server/manifests/rbac-authorization-common.yaml - when: "{{ kubernetes_config.cluster.prime.name == inventory_hostname }}" + when: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname }}" - src: iptables-kubernetes.rules.j2 dest: "/etc/iptables.rules.d/iptables-kubernetes.rules" diff --git a/tasks/k3s/install.yaml b/tasks/k3s/install.yaml index 72a5dc3..25da808 100644 --- a/tasks/k3s/install.yaml +++ b/tasks/k3s/install.yaml @@ -3,14 +3,12 @@ - name: Check for calico deployment manifest ansible.builtin.stat: name: /var/lib/rancher/k3s/server/manifests/calico.yaml - become: true register: file_calico_yaml_metadata - name: Check for calico Operator deployment manifest ansible.builtin.stat: name: /var/lib/rancher/k3s/ansible/deployment-manifest-calico_operator.yaml - become: true register: file_calico_operator_yaml_metadata @@ -113,7 +111,6 @@ - name: Check for Network Manager Directory ansible.builtin.stat: name: /etc/NetworkManager/conf.d - become: true register: directory_network_manager_metadata @@ -133,7 +130,6 @@ mode: '770' owner: root group: root - become: true diff: true when: directory_network_manager_metadata.stat.exists @@ 
-159,7 +155,7 @@ failed_when: false register: k3s_installed when: > - not Kubernetes_Master | default(false) | bool + not nfc_role_kubernetes_worker | default(false) | bool - name: Check Machine Architecture @@ -185,7 +181,7 @@ when: > ansible_os_family == 'Debian' and - {{ item.when | default(true) | bool }} + item.when | default(true) | bool loop: "{{ download_files }}" vars: ansible_connection: local @@ -246,7 +242,7 @@ ansible.builtin.copy: src: "/tmp/k3s.{{ ansible_architecture }}" dest: "/usr/local/bin/k3s" - mode: '740' + mode: '741' owner: root group: root when: hash_sha256_k3s_existing_binary.stat.checksum | default('0') != hash_sha256_k3s_downloaded_binary @@ -254,8 +250,8 @@ - name: Copy install scripts to Host ansible.builtin.copy: - src: "{{ item }}" - dest: "{{ item }}" + src: "{{ item.path }}" + dest: "{{ item.path }}" mode: '755' owner: root group: root @@ -267,7 +263,7 @@ - path: "/tmp/install_olm.sh" when: "{{ nfc_role_kubernetes_install_olm }}" when: > - {{ item.when | default(true) | bool }} + item.when | default(true) | bool - name: Required Initial config files @@ -305,7 +301,7 @@ dest: /var/lib/rancher/k3s/server/manifests/calico.yaml when: > {{ - kubernetes_config.cluster.prime.name == inventory_hostname + kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname and ( ( @@ -350,7 +346,7 @@ /tmp/install.sh --cluster-init changed_when: false when: > - kubernetes_config.cluster.prime.name == inventory_hostname + kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname and k3s_installed.rc == 1 @@ -374,7 +370,7 @@ and 'calico_manifest' not in ansible_run_tags and - kubernetes_config.cluster.prime.name == inventory_hostname + kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname - name: Install MetalLB Operator @@ -393,7 +389,7 @@ when: >- nfc_kubernetes_enable_metallb | default(false) | bool and - kubernetes_config.cluster.prime.name == inventory_hostname + 
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname - name: Wait for kubernetes prime to be ready @@ -407,7 +403,7 @@ exit 127; fi executable: /bin/bash - delegate_to: "{{ kubernetes_config.cluster.prime.name }}" + delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}" run_once: true register: kubernetes_ready_check retries: 30 @@ -431,7 +427,7 @@ install_olm.rc == 1 register: install_olm when: > - kubernetes_config.cluster.prime.name == inventory_hostname + kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname and nfc_role_kubernetes_install_olm | default(false) | bool @@ -459,7 +455,7 @@ failed_when: false register: install_olm when: > - kubernetes_config.cluster.prime.name == inventory_hostname + kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname and 'olm_uninstall' in ansible_run_tags @@ -470,7 +466,7 @@ changed_when: false failed_when: false # New cluster will fail when: > - kubernetes_config.cluster.prime.name == inventory_hostname + kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname and kubernetes_config.cluster.networking.encrypt | default(false) | bool and @@ -488,7 +484,7 @@ - name: Fetch Join Token ansible.builtin.slurp: src: /var/lib/rancher/k3s/server/token - delegate_to: "{{ kubernetes_config.cluster.prime.name }}" + delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}" run_once: true register: k3s_join_token no_log: true # Value is sensitive @@ -497,7 +493,7 @@ - name: Create Token fact ansible.builtin.set_fact: k3s_join_token: "{{ k3s_join_token.content | b64decode | replace('\n', '') }}" - delegate_to: "{{ kubernetes_config.cluster.prime.name }}" + delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}" run_once: true no_log: true # Value is sensitive @@ -515,7 +511,7 @@ when: > Kubernetes_Master | 
default(false) | bool and - not kubernetes_config.cluster.prime.name == inventory_hostname + not kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname and k3s_installed.rc == 1 @@ -528,13 +524,15 @@ INSTALL_K3S_SKIP_DOWNLOAD=true \ INSTALL_K3S_VERSION="v{{ KubernetesVersion }}{{ KubernetesVersion_k3s_prefix }}" \ K3S_TOKEN="{{ k3s_join_token }}" \ - K3S_URL="https://{{ hostvars[kubernetes_config.cluster.prime.name].ansible_host }}:6443" \ + K3S_URL="https://{{ hostvars[kubernetes_config.cluster.prime.name | default(inventory_hostname)].ansible_host }}:6443" \ /tmp/install.sh - executable: /bin/bash changed_when: false when: > not Kubernetes_Master | default(false) | bool and + not kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname + and k3s_installed.rc == 1 diff --git a/tasks/k3s/migrate_to_operator.yaml b/tasks/k3s/migrate_to_operator.yaml index 9f49754..977b9fc 100644 --- a/tasks/k3s/migrate_to_operator.yaml +++ b/tasks/k3s/migrate_to_operator.yaml @@ -150,10 +150,10 @@ projectcalico.org/operator-node-migration- executable: bash become: true - delegate_to: "{{ kubernetes_config.cluster.prime.name }}" + delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}" changed_when: false failed_when: false - loop: "{{ groups[kubernetes_config.cluster.group_name] }}" + loop: "{{ groups[kubernetes_config.cluster.group_name | default('make_me_optional')] | default([]) }}" # kubectl label node ip-10-229-92-202.eu-west-1.compute.internal projectcalico.org/operator-node-migration- # migration started diff --git a/templates/ConfigMap-manifest-Calico_Service_Endpoint.yaml.j2 b/templates/ConfigMap-manifest-Calico_Service_Endpoint.yaml.j2 index 0572639..58225b7 100644 --- a/templates/ConfigMap-manifest-Calico_Service_Endpoint.yaml.j2 +++ b/templates/ConfigMap-manifest-Calico_Service_Endpoint.yaml.j2 @@ -6,6 +6,6 @@ metadata: namespace: tigera-operator data: 
KUBERNETES_SERVICE_HOST: " - {%- set octet = kubernetes_config.cluster.networking.ServiceSubnet | split('.') -%} + {%- set octet = kubernetes_config.cluster.networking.ServiceSubnet | default(nfc_role_kubernetes_service_subnet) | split('.') -%} {{- octet[0] }}.{{- octet[1] }}.{{- octet[2] }}.1" KUBERNETES_SERVICE_PORT: '443' diff --git a/templates/IPPool-manifest-Calico_Cluster.yaml.j2 b/templates/IPPool-manifest-Calico_Cluster.yaml.j2 index c2e9b00..a86a343 100644 --- a/templates/IPPool-manifest-Calico_Cluster.yaml.j2 +++ b/templates/IPPool-manifest-Calico_Cluster.yaml.j2 @@ -9,7 +9,7 @@ spec: - Workload - Tunnel blockSize: 26 - cidr: {{ kubernetes_config.cluster.networking.podSubnet }} + cidr: {{ kubernetes_config.cluster.networking.podSubnet | default(nfc_role_kubernetes_pod_subnet) }} ipipMode: Never natOutgoing: true nodeSelector: all() diff --git a/templates/Installation-manifest-Calico_Cluster.yaml.j2 b/templates/Installation-manifest-Calico_Cluster.yaml.j2 index 55547dd..aac7b9e 100644 --- a/templates/Installation-manifest-Calico_Cluster.yaml.j2 +++ b/templates/Installation-manifest-Calico_Cluster.yaml.j2 @@ -11,7 +11,7 @@ spec: hostPorts: Enabled ipPools: - blockSize: 26 - cidr: {{ kubernetes_config.cluster.networking.podSubnet }} + cidr: {{ kubernetes_config.cluster.networking.podSubnet | default(nfc_role_kubernetes_pod_subnet) }} disableBGPExport: false encapsulation: VXLAN natOutgoing: Enabled @@ -41,7 +41,7 @@ spec: type: RollingUpdate nonPrivileged: Disabled serviceCIDRs: - - {{ kubernetes_config.cluster.networking.ServiceSubnet }} + - {{ kubernetes_config.cluster.networking.ServiceSubnet | default(nfc_role_kubernetes_service_subnet) }} typhaDeployment: spec: template: diff --git a/templates/calico.yaml.j2 b/templates/calico.yaml.j2 index 85cae09..529696c 100644 --- a/templates/calico.yaml.j2 +++ b/templates/calico.yaml.j2 @@ -4810,7 +4810,7 @@ spec: # chosen from this range. Changing this value after installation will have # no effect. 
This should fall within `--cluster-cidr`. - name: CALICO_IPV4POOL_CIDR - value: "{{ KubernetesPodSubnet }}" + value: "{{ nfc_role_kubernetes_pod_subnet }}" # Disable file logging so `kubectl logs` works. - name: CALICO_DISABLE_FILE_LOGGING value: "true" diff --git a/templates/iptables-kubernetes.rules.j2 b/templates/iptables-kubernetes.rules.j2 index 35912ed..3fa9f60 100644 --- a/templates/iptables-kubernetes.rules.j2 +++ b/templates/iptables-kubernetes.rules.j2 @@ -31,7 +31,7 @@ {%- endif -%} -{%- for kubernetes_host in groups[kubernetes_config.cluster.group_name] -%} +{%- for kubernetes_host in groups[kubernetes_config.cluster.group_name | default('make_me_optional')] | default([]) -%} {%- set kubernetes_host = hostvars[kubernetes_host].ansible_host -%} @@ -63,7 +63,7 @@ {%- for master_host in groups['kubernetes_master'] -%} - {%- if master_host in groups[kubernetes_config.cluster.group_name] -%} + {%- if master_host in groups[kubernetes_config.cluster.group_name | default('make_me_optional')] | default([]) -%} {%- set master_host = hostvars[master_host].ansible_host -%} diff --git a/templates/k3s-config.yaml.j2 b/templates/k3s-config.yaml.j2 index f6a2f8e..c0ccef7 100644 --- a/templates/k3s-config.yaml.j2 +++ b/templates/k3s-config.yaml.j2 @@ -6,7 +6,12 @@ # Dont edit this file directly as it will be overwritten.
# -{%- if inventory_hostname in groups['kubernetes_master'] -%} +{%- if + inventory_hostname in groups['kubernetes_master'] + or + kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname +-%} + {% set kube_apiserver_arg = [ @@ -19,7 +24,7 @@ -%} {% set servers_config = { - "cluster-cidr": KubernetesPodSubnet, + "cluster-cidr": nfc_role_kubernetes_pod_subnet, "disable": [ "traefik" ], @@ -27,23 +32,23 @@ "etcd-snapshot-retention": kubernetes_etcd_snapshot_retention | int, "etcd-snapshot-schedule-cron": kubernetes_etcd_snapshot_cron_schedule | string, "flannel-backend": "none", - "service-cidr": KubernetesServiceSubnet + "service-cidr": nfc_role_kubernetes_service_subnet } -%} {%- if - kubernetes_config.cluster.domain_name is defined + kubernetes_config.cluster.domain_name | default(nfc_role_kubernetes_cluster_domain) is defined and - kubernetes_config.cluster.domain_name | default('') != '' + kubernetes_config.cluster.domain_name | default(nfc_role_kubernetes_cluster_domain) != '' -%} {%- set servers_config = servers_config | combine({ - "cluster-domain": kubernetes_config.cluster.domain_name + "cluster-domain": kubernetes_config.cluster.domain_name | default(nfc_role_kubernetes_cluster_domain) }) -%} {%- endif -%} - {%- if kubernetes_config.cluster.oidc.enabled | default(false) | bool -%} + {%- if kubernetes_config.cluster.oidc.enabled | default(nfc_role_kubernetes_oidc_enabled) | default(false) | bool -%} {%- set kube_apiserver_arg = kube_apiserver_arg + [ @@ -129,7 +134,7 @@ -%} -{%- if groups[kubernetes_config.cluster.group_name] | default([]) | list | length > 0 -%} +{%- if groups[kubernetes_config.cluster.group_name | default('make_me_optional')] | default([]) | list | length > 0 -%} {%- if k3s_installed.rc == 0 -%} @@ -215,7 +220,11 @@ {# EoF All Nodes #} -{%- if inventory_hostname in groups['kubernetes_master'] -%} +{%- if + inventory_hostname in groups['kubernetes_master'] + or + kubernetes_config.cluster.prime.name | 
default(inventory_hostname) == inventory_hostname +-%} {%- set servers_config = servers_config | combine( all_nodes_config ) -%}