@@ -16,16 +16,29 @@ nfc_kubernetes_tigera_operator_tag: v1.32.3 # Calico v3.27.0
 nfc_kubernetes_enable_metallb: false
 nfc_kubernetes_enable_servicelb: false



+nfc_role_kubernetes_cluster_domain: cluster.local

+nfc_role_kubernetes_etcd_enabled: false

 nfc_role_kubernetes_install_olm: false

+nfc_role_kubernetes_oidc_enabled: false

+nfc_role_kubernetes_pod_subnet: 172.16.248.0/21
+nfc_role_kubernetes_service_subnet: 172.16.244.0/22

+nfc_role_kubernetes_prime: true
+nfc_role_kubernetes_master: true
+nfc_role_kubernetes_worker: false

 ############################################################################################################
 #
 # Old Vars requiring refactoring
 #
 # ############################################################################################################
-# KubernetesPodSubnet: 10.85.0.0/16
-# KubernetesServiceSubnet: 10.86.0.0/16


 Kubernetes_Prime: false # Optional, Boolean. Is the current host the Prime master?

@@ -74,7 +87,7 @@ k3s:
 kind: Policy
 rules:
 - level: Request
-when: "{{ Kubernetes_Master | default(false) }}"
+when: "{{ nfc_role_kubernetes_master }}"

 - name: 90-kubelet.conf
 path: /etc/sysctl.d

@@ -106,7 +119,7 @@ k3s:
 # usernames: []
 # runtimeClasses: []
 # namespaces: [kube-system]
-when: "{{ kubernetes_config.cluster.prime.name == inventory_hostname }}"
+when: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname }}"


 #############################################################################################
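The defaults hunks above introduce inventory-overridable role variables. A minimal sketch, assuming normal Ansible variable precedence and a hypothetical group_vars file (filename and values are illustrative, not from this commit), of overriding them per cluster:

    # group_vars/kubernetes/cluster.yaml  (hypothetical)
    nfc_role_kubernetes_cluster_domain: example.internal
    nfc_role_kubernetes_pod_subnet: 10.85.0.0/16
    nfc_role_kubernetes_service_subnet: 10.86.0.0/16
    nfc_role_kubernetes_etcd_enabled: true
    nfc_role_kubernetes_worker: true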
@@ -28,7 +28,7 @@

 - src: kubernetes-manifest-rbac.yaml.j2
 dest: /var/lib/rancher/k3s/server/manifests/rbac-authorization-common.yaml
-when: "{{ kubernetes_config.cluster.prime.name == inventory_hostname }}"
+when: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname }}"

 - src: iptables-kubernetes.rules.j2
 dest: "/etc/iptables.rules.d/iptables-kubernetes.rules"
@@ -3,14 +3,12 @@
 - name: Check for calico deployment manifest
 ansible.builtin.stat:
 name: /var/lib/rancher/k3s/server/manifests/calico.yaml
-become: true
 register: file_calico_yaml_metadata


 - name: Check for calico Operator deployment manifest
 ansible.builtin.stat:
 name: /var/lib/rancher/k3s/ansible/deployment-manifest-calico_operator.yaml
-become: true
 register: file_calico_operator_yaml_metadata


@@ -113,7 +111,6 @@
 - name: Check for Network Manager Directory
 ansible.builtin.stat:
 name: /etc/NetworkManager/conf.d
-become: true
 register: directory_network_manager_metadata


@@ -133,7 +130,6 @@
 mode: '770'
 owner: root
 group: root
-become: true
 diff: true
 when: directory_network_manager_metadata.stat.exists

@@ -159,7 +155,7 @@
 failed_when: false
 register: k3s_installed
 when: >
-not Kubernetes_Master | default(false) | bool
+not Kubernetes_worker | default(false) | bool


 - name: Check Machine Architecture

@@ -185,7 +181,7 @@
 when: >
 ansible_os_family == 'Debian'
 and
-{{ item.when | default(true) | bool }}
+item.when | default(true) | bool
 loop: "{{ download_files }}"
 vars:
 ansible_connection: local

@@ -246,7 +242,7 @@
 ansible.builtin.copy:
 src: "/tmp/k3s.{{ ansible_architecture }}"
 dest: "/usr/local/bin/k3s"
-mode: '740'
+mode: '741'
 owner: root
 group: root
 when: hash_sha256_k3s_existing_binary.stat.checksum | default('0') != hash_sha256_k3s_downloaded_binary

@@ -254,8 +250,8 @@

 - name: Copy install scripts to Host
 ansible.builtin.copy:
-src: "{{ item }}"
-dest: "{{ item }}"
+src: "{{ item.path }}"
+dest: "{{ item.path }}"
 mode: '755'
 owner: root
 group: root

@@ -267,7 +263,7 @@
 - path: "/tmp/install_olm.sh"
 when: "{{ nfc_role_kubernetes_install_olm }}"
 when: >
-{{ item.when | default(true) | bool }}
+item.when | default(true) | bool


 - name: Required Initial config files

@@ -305,7 +301,7 @@
 dest: /var/lib/rancher/k3s/server/manifests/calico.yaml
 when: >
 {{
-kubernetes_config.cluster.prime.name == inventory_hostname
+kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
 and
 (
 (

@@ -350,7 +346,7 @@
 /tmp/install.sh --cluster-init
 changed_when: false
 when: >
-kubernetes_config.cluster.prime.name == inventory_hostname
+kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
 and
 k3s_installed.rc == 1


@@ -374,7 +370,7 @@
 and
 'calico_manifest' not in ansible_run_tags
 and
-kubernetes_config.cluster.prime.name == inventory_hostname
+kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname


 - name: Install MetalLB Operator

@@ -393,7 +389,7 @@
 when: >-
 nfc_kubernetes_enable_metallb | default(false) | bool
 and
-kubernetes_config.cluster.prime.name == inventory_hostname
+kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname


 - name: Wait for kubernetes prime to be ready

@@ -407,7 +403,7 @@
 exit 127;
 fi
 executable: /bin/bash
-delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
+delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}"
 run_once: true
 register: kubernetes_ready_check
 retries: 30

@@ -431,7 +427,7 @@
 install_olm.rc == 1
 register: install_olm
 when: >
-kubernetes_config.cluster.prime.name == inventory_hostname
+kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
 and
 nfc_role_kubernetes_install_olm | default(false) | bool


@@ -459,7 +455,7 @@
 failed_when: false
 register: install_olm
 when: >
-kubernetes_config.cluster.prime.name == inventory_hostname
+kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
 and
 'olm_uninstall' in ansible_run_tags


@@ -470,7 +466,7 @@
 changed_when: false
 failed_when: false # New cluster will fail
 when: >
-kubernetes_config.cluster.prime.name == inventory_hostname
+kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
 and
 kubernetes_config.cluster.networking.encrypt | default(false) | bool
 and

@@ -488,7 +484,7 @@
 - name: Fetch Join Token
 ansible.builtin.slurp:
 src: /var/lib/rancher/k3s/server/token
-delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
+delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}"
 run_once: true
 register: k3s_join_token
 no_log: true # Value is sensitive

@@ -497,7 +493,7 @@
 - name: Create Token fact
 ansible.builtin.set_fact:
 k3s_join_token: "{{ k3s_join_token.content | b64decode | replace('\n', '') }}"
-delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
+delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}"
 run_once: true
 no_log: true # Value is sensitive


@@ -515,7 +511,7 @@
 when: >
 Kubernetes_Master | default(false) | bool
 and
-not kubernetes_config.cluster.prime.name == inventory_hostname
+not kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
 and
 k3s_installed.rc == 1


@@ -528,13 +524,15 @@
 INSTALL_K3S_SKIP_DOWNLOAD=true \
 INSTALL_K3S_VERSION="v{{ KubernetesVersion }}{{ KubernetesVersion_k3s_prefix }}" \
 K3S_TOKEN="{{ k3s_join_token }}" \
-K3S_URL="https://{{ hostvars[kubernetes_config.cluster.prime.name].ansible_host }}:6443" \
+K3S_URL="https://{{ hostvars[kubernetes_config.cluster.prime.name | default(inventory_hostname)].ansible_host }}:6443" \
 /tmp/install.sh -
 executable: /bin/bash
 changed_when: false
 when: >
 not Kubernetes_Master | default(false) | bool
 and
+not kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
+and
 k3s_installed.rc == 1

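Most of the task changes above apply one pattern: kubernetes_config.cluster.prime.name | default(inventory_hostname), so prime-only steps still run when no prime host is declared (for example a single-node install). A minimal sketch of the pattern, using an illustrative task that is not part of the role:

    - name: Example prime-only task
      ansible.builtin.debug:
        msg: running on the prime (or only) node
      when: >
        kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname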
@@ -150,10 +150,10 @@
 projectcalico.org/operator-node-migration-
 executable: bash
 become: true
-delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
+delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}"
 changed_when: false
 failed_when: false
-loop: "{{ groups[kubernetes_config.cluster.group_name] }}"
+loop: "{{ groups[kubernetes_config.cluster.group_name | default('make_me_optional')] | default([]) }}"

 # kubectl label node ip-10-229-92-202.eu-west-1.compute.internal projectcalico.org/operator-node-migration-
 # migration started
@@ -6,6 +6,6 @@ metadata:
 namespace: tigera-operator
 data:
 KUBERNETES_SERVICE_HOST: "
-{%- set octet = kubernetes_config.cluster.networking.ServiceSubnet | split('.') -%}
+{%- set octet = kubernetes_config.cluster.networking.ServiceSubnet | default(nfc_role_kubernetes_service_subnet) | split('.') -%}
 {{- octet[0] }}.{{- octet[1] }}.{{- octet[2] }}.1"
 KUBERNETES_SERVICE_PORT: '443'
@@ -9,7 +9,7 @@ spec:
 - Workload
 - Tunnel
 blockSize: 26
-cidr: {{ kubernetes_config.cluster.networking.podSubnet }}
+cidr: {{ kubernetes_config.cluster.networking.podSubnet | default(nfc_role_kubernetes_pod_subnet) }}
 ipipMode: Never
 natOutgoing: true
 nodeSelector: all()
@@ -11,7 +11,7 @@ spec:
 hostPorts: Enabled
 ipPools:
 - blockSize: 26
-cidr: {{ kubernetes_config.cluster.networking.podSubnet }}
+cidr: {{ kubernetes_config.cluster.networking.podSubnet | default(nfc_role_kubernetes_pod_subnet) }}
 disableBGPExport: false
 encapsulation: VXLAN
 natOutgoing: Enabled

@@ -41,7 +41,7 @@ spec:
 type: RollingUpdate
 nonPrivileged: Disabled
 serviceCIDRs:
-- {{ kubernetes_config.cluster.networking.ServiceSubnet }}
+- {{ kubernetes_config.cluster.networking.ServiceSubnet | default(nfc_role_kubernetes_service_subnet) }}
 typhaDeployment:
 spec:
 template:
|
|||||||
# chosen from this range. Changing this value after installation will have
|
# chosen from this range. Changing this value after installation will have
|
||||||
# no effect. This should fall within `--cluster-cidr`.
|
# no effect. This should fall within `--cluster-cidr`.
|
||||||
- name: CALICO_IPV4POOL_CIDR
|
- name: CALICO_IPV4POOL_CIDR
|
||||||
value: "{{ KubernetesPodSubnet }}"
|
value: "{{ nfc_role_kubernetes_pod_subnet }}"
|
||||||
# Disable file logging so `kubectl logs` works.
|
# Disable file logging so `kubectl logs` works.
|
||||||
- name: CALICO_DISABLE_FILE_LOGGING
|
- name: CALICO_DISABLE_FILE_LOGGING
|
||||||
value: "true"
|
value: "true"
|
||||||
|
|||||||
@@ -31,7 +31,7 @@

 {%- endif -%}

-{%- for kubernetes_host in groups[kubernetes_config.cluster.group_name] -%}
+{%- for kubernetes_host in groups[kubernetes_config.cluster.group_name | default('me_is_optional')] | default([]) -%}

 {%- set kubernetes_host = hostvars[kubernetes_host].ansible_host -%}


@@ -63,7 +63,7 @@

 {%- for master_host in groups['kubernetes_master'] -%}

-{%- if master_host in groups[kubernetes_config.cluster.group_name] -%}
+{%- if master_host in groups[kubernetes_config.cluster.group_name | default('me_is_optional')] | default([]) -%}

 {%- set master_host = hostvars[master_host].ansible_host -%}

@@ -6,7 +6,12 @@
 # Dont edit this file directly as it will be overwritten.
 #

-{%- if inventory_hostname in groups['kubernetes_master'] -%}
+{%- if
+inventory_hostname in groups['kubernetes_master']
+or
+kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
+-%}

 {%

 set kube_apiserver_arg = [

@@ -19,7 +24,7 @@
 -%}
 {%
 set servers_config = {
-"cluster-cidr": KubernetesPodSubnet,
+"cluster-cidr": nfc_role_kubernetes_pod_subnet,
 "disable": [
 "traefik"
 ],

@@ -27,23 +32,23 @@
 "etcd-snapshot-retention": kubernetes_etcd_snapshot_retention | int,
 "etcd-snapshot-schedule-cron": kubernetes_etcd_snapshot_cron_schedule | string,
 "flannel-backend": "none",
-"service-cidr": KubernetesServiceSubnet
+"service-cidr": nfc_role_kubernetes_service_subnet
 }
 -%}

 {%- if
-kubernetes_config.cluster.domain_name is defined
+kubernetes_config.cluster.domain_name | default(nfc_role_kubernetes_cluster_domain) is defined
 and
-kubernetes_config.cluster.domain_name | default('') != ''
+kubernetes_config.cluster.domain_name | default(nfc_role_kubernetes_cluster_domain) != ''
 -%}

 {%- set servers_config = servers_config | combine({
-"cluster-domain": kubernetes_config.cluster.domain_name
+"cluster-domain": kubernetes_config.cluster.domain_name | default(nfc_role_kubernetes_cluster_domain)
 }) -%}

 {%- endif -%}

-{%- if kubernetes_config.cluster.oidc.enabled | default(false) | bool -%}
+{%- if kubernetes_config.cluster.oidc.enabled | default(nfc_role_kubernetes_oidc_enabled) | default(false) | bool -%}

 {%-
 set kube_apiserver_arg = kube_apiserver_arg + [

@@ -129,7 +134,7 @@
 -%}


-{%- if groups[kubernetes_config.cluster.group_name] | default([]) | list | length > 0 -%}
+{%- if groups[kubernetes_config.cluster.group_name | default('make_me_optional')] | default([]) | list | length > 0 -%}

 {%- if k3s_installed.rc == 0 -%}


@@ -215,7 +220,11 @@
 {# EoF All Nodes #}


-{%- if inventory_hostname in groups['kubernetes_master'] -%}
+{%- if
+inventory_hostname in groups['kubernetes_master']
+or
+kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
+-%}

 {%- set servers_config = servers_config | combine( all_nodes_config ) -%}

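With these defaults, the server options built by the template above fall back to the role values whenever kubernetes_config does not override them. An illustrative, hand-written fragment of the resulting k3s server configuration (assuming no inventory overrides; not generated output from this commit):

    cluster-cidr: 172.16.248.0/21
    service-cidr: 172.16.244.0/22
    cluster-domain: cluster.local
    flannel-backend: none
    disable:
      - traefik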