chore: migrated from internal repo

!1 nofusscomputing/infrastructure/config!28
This commit is contained in:
2023-10-27 21:47:03 +09:30
parent e45190fab4
commit 93b63308ef
30 changed files with 7326 additions and 0 deletions

README-orig.md Normal file

@ -0,0 +1,39 @@
# Kubernetes Ansible Playbook
## Additional changes
- `SystemdCgroup = false` -> `SystemdCgroup = true` [See this comment](https://github.com/kubernetes/kubernetes/issues/110177#issuecomment-1161647736)
## Tags
This role has been set up to take advantage of Ansible tags, which give finer control over which tasks are run. By design, when you specify a tag, only the tasks required for that tag are run.

The available tags are as follows:

- `containerregistry` Apply container/docker registry settings
- `firewall` Apply firewall settings (firewall name/type independent)
- `install` Run every task within the role. This is the same as omitting `--tags`
- `iptables` Apply iptables settings
- `manifest` Apply/remove kubernetes manifests
- `namespace` Apply/remove kubernetes namespaces
- `nodelabels` Apply/remove kubernetes node labels
- `taints` Apply/remove kubernetes taints
!!! tip
    If you intend to run the `install` tag, you can omit the `--tags` flag altogether.

!!! alert
    The first time this playbook is run, if the CLI switch `--extra-vars "init=true"` is used (with `init` a boolean true/false), the manifests will not be applied. This is to allow Kubernetes to be fully set up prior to applying manifests that may prevent successful completion of the play. See the example below.
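For illustration, a first run followed by a routine run might look like the following. This is a minimal sketch; the playbook name `site.yml` and the inventory path are placeholders, not part of this role:

``` bash
# first run: set the cluster up without applying manifests
ansible-playbook -i inventory/production site.yml --extra-vars "init=true"

# subsequent runs: apply only the manifests and node labels
ansible-playbook -i inventory/production site.yml --tags manifest,nodelabels
```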
## Command Cheatsheet

- `crictl --runtime-endpoint unix:///run/containerd/containerd.sock images` List all container images on the host
## Links / References

- ContainerD Configuration
    - [Registry Configuration](https://github.com/containerd/containerd/blob/7cd72cce99c8d3b938c1b763c2744a0b699028ab/docs/cri/config.md#registry-configuration)
    - [Configuring ContainerD registries](https://github.com/containerd/containerd/blob/7cd72cce99c8d3b938c1b763c2744a0b699028ab/docs/hosts.md#cri)

defaults/main.yml Normal file

@ -0,0 +1,125 @@
KubernetesPodSubnet: 10.85.0.0/16
KubernetesServiceSubnet: 10.86.0.0/16
Kubernetes_Prime: false # Optional, Boolean. Is the current host the Prime master?
Kubernetes_Master: false # Optional, Boolean. Is the current host a master host?
ContainerDioVersion: 1.6.20-1
KubernetesVersion: '1.26.2' # must match the repository release version
KubernetesVersion_k8s_prefix: '-00'
KubernetesVersion_k3s_prefix: '+k3s1'
kubernetes_private_container_registry: [] # Optional, Array. if none use `[]`
# host_external_ip: '' # Optional, String. External IP Address for host.
# Optional, Dict. Used to configure Kubernetes with OIDC Authentication.
# kubernetes_oidc:
#   enabled: true                                         # Mandatory, Boolean.
#   issuer_url: https://domainname.com/realms/realm-name  # Mandatory, String. URL of OIDC Provider
#   client_id: kubernetes-test                            # Mandatory, String. OIDC Client ID
#   username_claim: preferred_username                    # Mandatory, String. Claim name containing username.
#   username_prefix: oidc                                 # Optional, String. What to prefix to username
#   groups_claim: roles                                   # Mandatory, String. Claim name containing groups
#   groups_prefix: ''                                     # Optional, String. What to prefix to groups

kubernetes_type: k8s                                      # Mandatory, String. Choice of k8s | k3s
nfc_kubernetes:
  enable_firewall: true          # Optional, Boolean. Enable firewall rules from role 'nfc_firewall'

k3s:
  files:

    # - name: config.yaml
    #   path: /etc/rancher/k3s
    #   content: |
    #     flannel-backend: none
    #     cluster-cidr: "{{ KubernetesPodSubnet }}"
    #     cluster-init: true
    #     {% if not Kubernetes_Prime | default(false) | bool -%}server: https://{{ hostvars[kubernetes_config.cluster.prime.name].ansible_host }}:6443{% endif %}
    #     service-cidr: "{{ KubernetesServiceSubnet }}"
    #     disable-network-policy: true
    #     disable:
    #     - traefik
    #     kube-apiserver-arg:
    #     - audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log
    #     - audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml
    #     # - admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml
    #     {% if kubernetes_oidc.enabled | default(false) | bool -%}
    #     - oidc-issuer-url={{ kubernetes_oidc.issuer_url }}
    #     - oidc-client-id={{ kubernetes_oidc.client_id }}
    #     - oidc-username-claim={{ kubernetes_oidc.username_claim }}
    #     - {% if kubernetes_oidc.oidc_username_prefix | default('') != '' %}oidc-username-prefix={{ kubernetes_oidc.oidc_username_prefix }}{% endif %}
    #     - oidc-groups-claim={{ kubernetes_oidc.groups_claim }}
    #     {% if kubernetes_oidc.groups_prefix | default('') != '' %}- oidc-groups-prefix={{ kubernetes_oidc.groups_prefix }}{% endif %}
    #     {% endif %}
    #     node-external-ip: "{{ host_external_ip }}"

    - name: audit.yaml
      path: /var/lib/rancher/k3s/server
      content: |
        apiVersion: audit.k8s.io/v1
        kind: Policy
        rules:
        - level: Request

    - name: 90-kubelet.conf
      path: /etc/sysctl.d
      content: |
        vm.panic_on_oom=0
        vm.overcommit_memory=1
        kernel.panic=10
        kernel.panic_on_oops=1
        kernel.keys.root_maxbytes=25000000

    - name: psa.yaml
      path: /var/lib/rancher/k3s/server
      content: ""
        # apiVersion: apiserver.config.k8s.io/v1
        # kind: AdmissionConfiguration
        # plugins:
        # - name: PodSecurity
        #   configuration:
        #     apiVersion: pod-security.admission.config.k8s.io/v1beta1
        #     kind: PodSecurityConfiguration
        #     defaults:
        #       enforce: "restricted"
        #       enforce-version: "latest"
        #       audit: "restricted"
        #       audit-version: "latest"
        #       warn: "restricted"
        #       warn-version: "latest"
        #     exemptions:
        #       usernames: []
        #       runtimeClasses: []
        #       namespaces: [kube-system]
#############################################################################################
#   Cluster Config when stored in Inventory
#
#   One is required per cluster. It is recommended to create one Ansible host group per cluster.
#############################################################################################
# kubernetes_config:                  # Dict. Cluster Config
#   cluster:
#     access:                         # Mandatory. List, DNS host name or IPv4/IPv6 Address.
#                                     #   if none use '[]'
#       - 'my.dnshostname.com'
#       - '2001:4860:4860::8888'
#       - '192.168.1.1'
#     Name: earth                     # Mandatory, String. Cluster Name
#     prime:
#       name: k3s-prod                # Mandatory, String. Ansible inventory_host that will
#                                     #   act as the prime master node.
#     networking:
#       podSubnet: 172.16.70.0/24     # Mandatory, String. CIDR
#       ServiceSubnet: 172.16.72.0/24 # Mandatory, String. CIDR
#     # Mandatory, String. Token to join nodes to the cluster
#     node_token: !vault |
#       $ANSIBLE_VAULT;1.2;AES256;kubernetes/cluster/production
#       {rest_of encrypted key}

docs/firewall.md Normal file

@ -0,0 +1,72 @@
---
title: Firewall - nfc_kubernetes
description: No Fuss Computing's Ansible role nfc_kubernetes
date: 2023-10-24
template: project.html
about: https://gitlab.com/nofusscomputing/projects/ansible/roles/kubernetes
---
This role includes logic to generate iptables firewall rules, for both IPv4 and IPv6. To survive reboots or network cable disconnects, a script is created and added to `if-up.d`, so that each time the interface is brought up the firewall rules are re-applied. For a list of the firewall rules applied, see the [K3s documentation](https://docs.k3s.io/installation/requirements#inbound-rules-for-k3s-server-nodes).
Rules generation workflow:

- iterates over all Kubernetes hosts
- if the host is a master, adds rules granting access for the workers
- if the host is a worker, adds rules granting access for all nodes
- adds rules for additional hosts that require access to the Kubernetes API

What you end up with:

- a chain for each area of access to the cluster
- the `INPUT` chain contains a jump to each chain, based on destination port and protocol
- each chain returns to the `INPUT` chain for further processing (see the sketch below)
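A rough sketch of the resulting structure, assuming the chains have already been created; the chain names and source address here are illustrative, not a verbatim copy of the generated rules:

``` bash
# INPUT jumps to a per-service chain based on destination port and protocol
iptables -A INPUT -p tcp --dport 6443 -m comment --comment "Kubernetes API" -j kubernetes-api
iptables -A INPUT -p tcp --dport 10250 -m comment --comment "Kubelet metrics" -j kubelet-metrics

# each chain ACCEPTs its permitted sources, and its final rule RETURNs to INPUT
iptables -I kubernetes-api -s 192.168.1.1 -j ACCEPT
iptables -A kubernetes-api -j RETURN
```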
!!! danger Security
    The way the rules are created and applied, they all return to the `INPUT` chain for further processing. If the `INPUT` chain's default policy is `ACCEPT`, then regardless of the firewall rules in place, any host with network access to the Kubernetes host can access the desired service without needing a rule to grant access.

    **Recommendation:** Set the `INPUT` chain's default policy to `DROP`.
!!! info Info
    If a DNS name is used for any of the addresses, a DNS lookup is done for both IPv4 and IPv6, and the first address found is added to the applicable chain.
## Providing access to the cluster

No special skill is required; simply add the host to the appropriate list variable, i.e.

``` yaml
kubernetes_config:
  cluster:
    access:
      - '192.168.1.1'
      - 'my.hostname.com'
```
Any host that is added to the `access` list will be granted access to the Kubernetes API. Hosts in this list are intended to be the hosts your end users are on. If you join a new node to the cluster, the applicable firewall rules will automagically be generated and added to each host's firewall. It's important that when adding a new node to the cluster, the playbook is run against all nodes of the cluster, not just the new node. Failing to do so will have the existing nodes block access to the new node due to missing firewall rules.
!!! tip Tip
    When manually adding a host, use insert (`-I`), not append (`-A`), as the last rule must be `-j RETURN`.

    Example: `-I {chain_name} -s {hostname/ipaddress} -j ACCEPT`
| Protocol | Port      | Source    | Destination | Description                                   |
|----------|-----------|-----------|-------------|-----------------------------------------------|
| TCP      | 2379-2380 | Servers   | Servers     | Required only for HA with embedded etcd       |
| TCP      | 6443      | Agents    | Servers     | K3s supervisor and Kubernetes API Server      |
| UDP      | 8472      | All nodes | All nodes   | Required only for Flannel VXLAN               |
| TCP      | 10250     | All nodes | All nodes   | Kubelet metrics                               |
| UDP      | 51820     | All nodes | All nodes   | Required only for Flannel Wireguard with IPv4 |
| UDP      | 51821     | All nodes | All nodes   | Required only for Flannel Wireguard with IPv6 |
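To check what ended up on a node, standard iptables inspection commands are enough; output will vary by cluster:

``` bash
# list the kubernetes-related rules and chains currently loaded
iptables -S | grep -i kube

# confirm the INPUT chain's default policy (DROP is recommended, see above)
iptables -L INPUT -n | head -n 1
```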

docs/index.md Normal file

@ -0,0 +1,19 @@
---
title: Kubernetes Ansible Role
description: No Fuss Computing's Ansible role nfc_kubernetes
date: 2023-10-24
template: project.html
about: https://gitlab.com/nofusscomputing/projects/ansible/roles/kubernetes
---
Expected inventory setup:

- each host has a host_vars file with `ansible_host` defined. _This can be either a DNS name or an IPv4/IPv6 address_
- a `k3s` host group containing all cluster hosts
- a `kubernetes_master` host group containing all master nodes
- variable `Kubernetes_Master` (_boolean_) set for all hosts that are master nodes
- hosts that require Kubernetes API access added to variable `kubernetes_config.cluster.access`
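A quick way to confirm the inventory matches this layout is to query it directly. This is a minimal sketch; the inventory path and the host name `k3s-prod` are placeholders taken from the example defaults, not requirements:

``` bash
# show which hosts resolve into the expected groups
ansible-inventory -i inventory/production --graph k3s
ansible-inventory -i inventory/production --graph kubernetes_master

# confirm a master node has Kubernetes_Master and ansible_host defined
ansible-inventory -i inventory/production --host k3s-prod
```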

handlers/main.yml Normal file

@ -0,0 +1,29 @@
---
- name: "restart ContainerD"
service:
name: containerd
state: restarted
# when: opensshd_installed is defined
when: >
containerd_config.changed | default(false) | bool
and
containerd_installed.rc | default(1) | int == 0
and
kubernetes_type == 'k8s'
tags:
- configure
- install
- name: "Apply Firewall Rules"
ansible.builtin.shell: |
/sbin/iptables-restore < /etc/iptables-kubernetes.rules
changed_when: false
listen: kubernetes_firewall_rules
# when: "ansible_os_family == 'Debian' and iptables_installed.rc == 0"
- name: Restart Kubernetes
ansible.builtin.service:
name: "{% if kubernetes_type == 'k3s' %}k3s{% else %}kubelet{% endif %}"
state: restarted
listen: kubernetes_restart

meta/main.yml Normal file

@ -0,0 +1,20 @@
galaxy_info:
  role_name: Kubernetes
  author: No Fuss Computing
  description: template role to install kubernetes on a host
  issue_tracker_url: https://gitlab.com/nofusscomputing/infrastructure/ansible-roles
  license: https://gitlab.com/nofusscomputing/infrastructure/ansible-roles/-/blob/master/LICENSE
  min_ansible_version: 1.2
  platforms:
    - name: Debian
      versions:
        - 11
  galaxy_tags:
    - kubernetes
    - k8s

tasks/common.yaml Normal file

@ -0,0 +1,318 @@
---
- name: "{{ role_name }} Install Software"
include_role:
name: nfc_common
vars:
common_gather_facts: false
aptSigningKeys:
- name: docker
url: https://download.docker.com/linux/debian/gpg
save_directory: /usr/share/keyrings
file_extension: asc
- name: kubernetes
url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
save_directory: /usr/share/keyrings
file_extension: asc
aptRepositories:
- name: docker
repo: deb [arch={{ dynamic_processor_architecture }} signed-by=/usr/share/keyrings/docker.asc] http://download.docker.com/linux/{{ ansible_os_family | lower }} {{ ansible_lsb.codename | lower }} stable
- name: kubernetes
repo: deb [signed-by=/usr/share/keyrings/kubernetes.asc] http://apt.kubernetes.io/ kubernetes-xenial main
aptInstall:
- name: gnupg2
- name: apt-transport-https
- name: software-properties-common
- name: ca-certificates
- name: iptables
- name: python3-pip
- name: python3-virtualenv
- name: containerd.io
version: "{{ ContainerDioVersion }}"
- name: kubectl
version: "{{ KubernetesVersion }}"
- name: kubelet
version: "{{ KubernetesVersion }}"
- name: kubeadm
version: "{{ KubernetesVersion }}"
tags:
- install
# containerd.io=1.6.22-1 kubectl=1.26.9-00 kubelet=1.26.9-00 kubeadm=1.26.9-00
- name: Remove swapfile from /etc/fstab
mount:
name: "{{ item }}"
fstype: swap
state: absent
with_items:
- swap
- none
when:
- ansible_os_family == 'Debian' # ansible_lsb.codename = bullseye, ansible_lsb.major_release = 11
tags:
- install
- name: Disable swap
command: swapoff -a
changed_when: true == false
when:
#- ansible_swaptotal_mb > 0
- ansible_os_family == 'Debian'
tags:
- install
- name: Check an armbian os system
stat:
path: /etc/default/armbian-zram-config
register: armbian_stat_result
- name: Armbian Disable Swap
ansible.builtin.shell:
cmd: |
sed -i 's/\# SWAP=false/SWAP=false/g' /etc/default/armbian-zram-config;
sed -i 's/ENABLED=true/ENABLED=false/g' /etc/default/armbian-zram-config;
args:
executable: bash
changed_when: false
# failed_when: false
#notify: RebootHost # doesnt need to reboot as swapoff -a covers the deployment
when: armbian_stat_result.stat.exists
- name: Add the overlay module
community.general.modprobe:
name: overlay
state: present
when:
- ansible_os_family == 'Debian'
tags:
- install
- name: Add the br_netfilter module
community.general.modprobe:
name: br_netfilter
state: present
when:
- ansible_os_family == 'Debian'
tags:
- install
- name: check if containerd installed
ansible.builtin.shell:
cmd: which containerd
failed_when: false
changed_when: false
register: containerd_installed
- name: "Containerd.io Started?"
service:
name: containerd
state: started
tags:
- configure
- install
when: >
ansible_os_family == 'Debian'
and
containerd_installed.rc | default(1) | int == 0
- name: containerd load modules config
template:
src: "etc_module_containerd.conf"
dest: /etc/modules-load.d/containerd.conf
owner: root
mode: 0700
notify: "restart ContainerD"
when:
- ansible_os_family == 'Debian'
tags:
- install
- name: Create containerD host directories.
become_method: sudo
become: yes
file:
path: /etc/containerd/certs.d/{{ item.name }}
state: directory
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: 0700
with_items: "{{ containerd.repositories }}"
tags:
- install
- containerRegistry
- name: containerD registry host
template:
src: "containerd-registry-hosts.toml.j2"
dest: /etc/containerd/certs.d/{{ item.name }}/hosts.toml
owner: root
mode: 0700
notify: "restart ContainerD"
with_items: "{{ containerd.repositories }}"
when:
- ansible_os_family == 'Debian'
tags:
- install
- containerRegistry
- name: containerD default config
template:
src: "etc_containerd_containerd.toml"
dest: /etc/containerd/config.toml
owner: root
mode: 0700
notify: "restart ContainerD"
register: containerd_config
when:
- ansible_os_family == 'Debian'
tags:
- install
- containerRegistry
- name: Restart ContainerD if required
meta: flush_handlers
tags:
- install
- containerRegistry
- name: Install required python modules
ansible.builtin.pip:
name: kubernetes
state: forcereinstall
#virtualenv: /tmp/venv_ansible
when: inventory_hostname != 'op1'
tags:
- install
- name: sysctl net.bridge.bridge-nf-call-ip6tables
sysctl:
name: net.bridge.bridge-nf-call-ip6tables
value: '1'
sysctl_set: yes
state: present
reload: yes
when:
- ansible_os_family == 'Debian'
tags:
- install
- name: sysctl net.bridge.bridge-nf-call-iptables
sysctl:
name: net.bridge.bridge-nf-call-iptables
value: '1'
sysctl_set: yes
state: present
reload: yes
when:
- ansible_os_family == 'Debian'
tags:
- install
- name: sysctl net.ipv4.ip_forward
sysctl:
name: net.ipv4.ip_forward
value: '1'
sysctl_set: yes
state: present
reload: yes
when:
- ansible_os_family == 'Debian'
tags:
- install
# - name: Check if kubernetes has been Initialized
# stat:
# path: /etc/kubernetes/admin.conf
# register: KubernetesInit
# when:
# - kubernetes_config.cluster.prime.name == inventory_hostname
- name: check if iptables is installed
ansible.builtin.shell: |-
dpkg -s iptables &> /dev/null
changed_when: true == false
register: iptables_installed
when:
- ansible_os_family == 'Debian'
tags:
- install
- iptables
- firewall
- name: Add kubernetes Firewall Rules - '/etc/iptables-kubernetes.rules'
template:
src: iptables-kubernetes.rules.j2
dest: "/etc/iptables-kubernetes.rules"
owner: root
mode: 0700
force: yes
notify: "Apply Firewall Rules"
when:
- ansible_os_family == 'Debian'
- iptables_installed.rc == 0
tags:
- install
- iptables
- firewall
- name: File - '/etc/network/if-pre-up.d/firewall-kubernetes'
template:
src: firewall-kubernetes.j2
dest: "/etc/network/if-pre-up.d/firewall-kubernetes"
owner: root
mode: 0700
force: yes
when:
- ansible_os_family == 'Debian'
- iptables_installed.rc == 0
tags:
- install
- iptables
- firewall
- name: Apply new firewall rules, if required
meta: flush_handlers
tags:
- install
- iptables
- firewall
- name: Create local workdir
file:
path: "{{ item }}"
state: directory
mode: 0700
delegate_to: localhost
connection: local
run_once: true
changed_when: true == false
with_items:
- /tmp/ansible/
tags:
- always

tasks/k3s.yaml Normal file

@ -0,0 +1,241 @@
---
- name: Install Software
ansible.builtin.include_role:
name: nfc_common
vars:
common_gather_facts: false
aptInstall:
- name: curl
- name: iptables
- name: Create Required directories
ansible.builtin.file:
name: "{{ item.name }}"
state: "{{ item.state }}"
mode: "{{ item.mode }}"
loop: "{{ dirs }}"
vars:
dirs:
- name: /etc/rancher/k3s
state: directory
mode: 700
- name: /var/lib/rancher/k3s/server/logs
state: directory
mode: 700
- name: /var/lib/rancher/k3s/server/manifests
state: directory
mode: 700
# - name: Local Container Registry
# ansible.builtin.copy:
# content: |
# #
# # Private Container Registries for Kubernetes
# #
# # Managed By ansible/role/nfc_kubernetes
# #
# # Dont edit this file directly as it will be overwritten.
# #
# {% set registries = kubernetes_private_container_registry | default([]) -%}
# {% if registries | length > 0 %}mirrors:
# {% for entry in registries %}
# {{ entry.name }}:
# endpoint:
# - "{{ entry.url }}"
# {%- endfor %}
# {% endif %}
# dest: /etc/rancher/k3s/registries.yaml
# owner: root
# mode: '700'
# # notify: "restart ContainerD"
# # with_items: "{{ containerd.repositories }}"
# # when:
# # ansible_os_family == 'Debian'
# # and
# # Kubernetes_private_container_registry | default([]) | length > 0
- name: Add sysctl net.ipv4.ip_forward
sysctl:
name: net.ipv4.ip_forward
value: '1'
sysctl_set: true
state: present
reload: true
notify: reboot_host
when:
- ansible_os_family == 'Debian'
# On change reboot
- name: Check if K3s Installed
ansible.builtin.shell:
cmd: |
if [[ $(service k3s status) ]]; then exit 0; else exit 1; fi
executable: /bin/bash
changed_when: false
failed_when: false
register: k3s_installed
# - name: Download K3s Binary
# ansible.builtin.uri:
# url: "{{ item.url }}"
# method: GET
# return_content: true
# # body: ""
# status_code:
# - 200
# - 304
# # headers:
# # App-Token: "{{ glpi.app_token }}"
# # Authorization: "user_token {{ glpi.user_token }}"
# #body_format: json
# # validate_certs: false
# dest: "{{ item.dest }}"
# mode: "744"
# register: k3s_download_files
# delegate_to: localhost
# # no_log: true
# when: ansible_os_family == 'Debian'
# loop: "{{ download_files }}"
# vars:
# ansible_connection: local
# download_files:
# - dest: /tmp/install.sh
# url: https://get.k3s.io
# - dest: "/tmp/k3s"
# url: "https://github.com/k3s-io/k3s/releases/download/v{{ KubernetesVersion + KubernetesVersion_k3s_prefix | urlencode }}/k3s"
# - name: "[TRACE] Downloaded File SHA256"
# ansible.builtin.set_fact:
# hash_sha256_k3s_downloaded_binary: "{{ lookup('ansible.builtin.file', '/tmp/k3s') | hash('sha256') | string }}"
# delegate_to: localhost
# - name: Existing k3s File hash
# ansible.builtin.stat:
# checksum_algorithm: sha256
# name: /usr/local/bin/k3s
# register: hash_sha256_k3s_existing_binary
# - name: Copy K3s binary to Host
# ansible.builtin.copy:
# src: "/tmp/k3s"
# dest: "/usr/local/bin/k3s"
# mode: '740'
# owner: root
# group: root
# when: hash_sha256_k3s_existing_binary.stat.checksum | default('0') != hash_sha256_k3s_downloaded_binary
# - name: Copy install script to Host
# ansible.builtin.copy:
# src: "/tmp/install.sh"
# dest: "/tmp/install.sh"
# mode: '755'
# owner: root
# group: root
# # when: hash_sha256_k3s_existing_binary.stat.checksum | default('0') != hash_sha256_k3s_downloaded_binary
- name: Additional config files
ansible.builtin.copy:
content: |
{{ item.content }}
dest: "{{ item.path }}/{{ item.name }}"
mode: '740'
owner: root
group: root
loop: "{{ k3s.files }}"
- name: Copy Templates
ansible.builtin.template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
owner: root
mode: '700'
force: true
notify: "{{ item.notify | default(omit) }}"
loop: "{{ templates_to_apply }}"
vars:
templates_to_apply:
- src: "calico.yaml.j2"
dest: /var/lib/rancher/k3s/server/manifests/calico.yaml
- src: kubernetes-manifest-rbac.yaml.j2
dest: /var/lib/rancher/k3s/server/manifests/rbac-authorization-common.yaml
# - src: firewall-kubernetes.j2
# dest: "/etc/network/if-up.d/firewall-kubernetes"
- src: iptables-kubernetes.rules.j2
dest: "/etc/iptables.rules.d/iptables-kubernetes.rules"
notify: firewall_reloader
- src: k3s-registries.yaml.j2
dest: /etc/rancher/k3s/registries.yaml
notify: kubernetes_restart
- src: k3s-config.yaml.j2
dest: /etc/rancher/k3s/config.yaml
notify: kubernetes_restart
# - name: Templates IPv6
# ansible.builtin.template:
# src: iptables-kubernetes.rules.j2
# dest: "/etc/ip6tables.rules.d/ip6tables-kubernetes.rules"
# owner: root
# mode: '700'
# force: true
# vars:
# ipv6: true
# # - name: Set IPTables to legacy mode
# # ansible.builtin.command:
# # cmd: update-alternatives --set iptables /usr/sbin/iptables-legacy
# # changed_when: false
# - name: Server install K3s
# ansible.builtin.shell:
# cmd: |
# # INSTALL_K3S_SKIP_DOWNLOAD=true \
# # INSTALL_K3S_VERSION="v{{ KubernetesVersion }}{{ KubernetesVersion_k3s_prefix }}" \
# # /tmp/install.sh
# curl -sfL https://get.k3s.io | \
# INSTALL_K3S_VERSION="v1.26.9+k3s1" \
# sh -
# failed_when: false
# # when: >
# # k3s_installed.rc | int == 1
# # and
# # Kubernetes_Master | default(false)
# when: Kubernetes_Master | default(false)
# - name: Agent install K3s
# ansible.builtin.shell:
# cmd: |
# INSTALL_K3S_SKIP_DOWNLOAD=true \
# INSTALL_K3S_VERSION="v{{ KubernetesVersion }}{{ KubernetesVersion_k3s_prefix }}" \
# K3S_URL=https://{{ hostvars[kubernetes_config.cluster.prime.name].ansible_host }}:6443 \
# K3S_TOKEN={{ node_token }} \
# /tmp/install.sh
# when: >
# k3s_installed.rc | int == 1
# and
# not Kubernetes_Master | default(false)
# # - name: Look up AAAA (IPv4) records for example.org
# # ansible.builtin.debug:
# # msg: "{{ query('community.dns.lookup', 'nww-au1.networkedweb.com.', type='A') }}"
# # - name: Look up AAAA (IPv6) records for example.org
# # ansible.builtin.debug:
# # msg: "{{ query('community.dns.lookup', 'nww-au1.networkedweb.com.', type='AAAA') }}"

tasks/k8s.yaml Normal file

@ -0,0 +1,103 @@
---
- name: Common Tasks
include_tasks: common.yaml
# tags:
# - install
- name: Check if kubernetes has been Initialized
stat:
path: /etc/kubernetes/admin.conf
register: KubernetesInitialized
tags:
- always
- name: kubernetes prime
include_tasks: prime.yaml
when: kubernetes_config.cluster.prime.name == inventory_hostname
- name: kubernetes workers
include_tasks: workers.yaml
when: kubernetes_config.cluster.prime.name != inventory_hostname
- name: Add Kubernetes Node Labels
kubernetes.core.k8s:
definition:
apiVersion: v1
kind: Node
metadata:
name: "{{ inventory_hostname }}"
labels:
"{{ item | from_yaml_all }}"
delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
with_items:
- "{{ kubernetes_config.hosts[inventory_hostname].labels }}"
when:
- ( kubernetes_config.hosts[inventory_hostname].labels is defined and
kubernetes_config.hosts[inventory_hostname].labels|default('')|length > 0 )
tags:
- install
- nodelabels
- name: Add Node Taints
kubernetes.core.k8s_taint:
state: "present"
name: "{{ inventory_hostname }}"
taints:
- "{{ item | from_yaml_all }}"
delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
with_items:
- "{{ kubernetes_config.hosts[inventory_hostname].taints.present }}"
when:
- (kubernetes_config.hosts[inventory_hostname].taints.present is defined and
kubernetes_config.hosts[inventory_hostname].taints.present|default('')|length > 0 )
tags:
- install
- taints
- name: Remove Node Taints
kubernetes.core.k8s_taint:
state: "absent"
name: "{{ inventory_hostname }}"
taints:
- "{{ item | from_yaml_all }}"
delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
with_items:
- "{{ kubernetes_config.hosts[inventory_hostname].taints.absent }}"
when:
- ( kubernetes_config.hosts[inventory_hostname].taints.absent is defined and
kubernetes_config.hosts[inventory_hostname].taints.absent|default('')|length > 0 )
tags:
- install
- taints
- name: Create Cluster Namespaces
kubernetes.core.k8s:
state: present
definition:
apiVersion: v1
kind: Namespace
metadata:
name: "{{ item.name }}"
labels:
#app.kubernetes.io/version: # App version
#app.kubernetes.io/component:
#app.kubernetes.io/part-of:
app.kubernetes.io/managed-by: Ansible
#meta.kubernetes.io/description: "{{ item.description | default('') }}"
meta.kubernetes.io/version: "{{ deployment_git_current_short_hash | default('') }}"
with_items:
- "{{ kubernetes_config.namespaces }}"
when:
( kubernetes_config.namespaces is defined and
kubernetes_config.namespaces | default('') | length > 0 and
kubernetes_config.cluster.prime.name == inventory_hostname )
tags:
- install
- namespaces

tasks/main.yml Normal file

@ -0,0 +1,14 @@
---
- name: Firewall Rules
  ansible.builtin.include_role:
    name: nfc_firewall
  vars:
    nfc_firewall_enabled_kubernetes: "{{ nfc_kubernetes.enable_firewall | default(false) | bool }}"

- name: K8s Cluster
  ansible.builtin.include_tasks: k8s.yaml
  when: kubernetes_type == 'k8s'

- name: K3s Cluster
  ansible.builtin.include_tasks: k3s.yaml
  when: kubernetes_type == 'k3s'

tasks/prime.yaml Normal file

@ -0,0 +1,146 @@
---
- name: initialize Kubernetes cluster
block:
- name: Initializing Kubernetes Cluster
#command: kubeadm init --pod-network-cidr "{{ KubernetesPodSubnet }}" --apiserver-advertise-address "{{ ansible_default_ipv4.address }}" --ignore-preflight-errors Mem --cri-socket=unix:///var/run/crio/crio.sock
command: kubeadm init --pod-network-cidr "{{ KubernetesPodSubnet }}" --service-cidr "{{ KubernetesServiceSubnet }}" --apiserver-advertise-address "0.0.0.0" --ignore-preflight-errors Mem #--cri-socket=unix:///var/run/containerd/containerd.sock
when:
- not KubernetesInitialized.stat.exists
rescue:
- name: Reset Kubeadm
ansible.builtin.shell: "{{ item }}"
#register: kube_reset
failed_when: item.rc != 0
with_items:
- kubeadm reset --force
- rm -Rf /etc/cni/net.d
- name: Check if kubernetes has been Initialized
stat:
path: /etc/kubernetes/admin.conf
register: KubernetesInitialized
tags:
- always
- name: fetch kubernetes health
ansible.builtin.shell: " wget http://localhost:10248/healthz -q -O - || true"
register: KubernetesHealth
changed_when: true == false
when: KubernetesInitialized.stat.exists
tags:
- always
- name: set kubernetes health fact
set_fact:
kube_health: "{{ KubernetesHealth.stdout | default(false) == 'ok' }}"
changed_when: true == false
tags:
- always
- name: Create directory for kube config.
become_method: sudo
become: yes
file:
#path: /home/{{ ansible_user }}/.kube
path: ~/.kube
state: directory
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: 0700
# when: Kubernetes_Master
tags:
- always
- name: Copy Kube config for local user
copy:
remote_src: yes
src: /etc/kubernetes/admin.conf
#dest: /home/{{ ansible_user }}/.kube/config
dest: ~/.kube/config
owner: "{{ ansible_user }}"
group: "{{ ansible_user }}"
mode: 0700
tags:
- always
- name: Add calico networking.
template:
src: "calico.yaml.j2"
dest: /etc/kubernetes/manifests/calico.yaml
owner: root
mode: 0744
- name: apply calico manifest
command: kubectl apply -f /etc/kubernetes/manifests/calico.yaml
tags:
- install
- manifest
- name: create remote workdir
file:
path: "{{ item }}"
state: directory
mode: 0700
with_items:
- /tmp/ansible/
tags:
- always
- name: Create local workdir
file:
path: "{{ item }}"
state: directory
mode: 0700
delegate_to: localhost
connection: local
with_items:
- /tmp/ansible/
tags:
- always
- name: get join command
ansible.builtin.shell: kubeadm token create --print-join-command > /tmp/ansible/join_kubernetes.sh
changed_when: true == false
tags:
- always
- name: download join command
fetch:
src: /tmp/ansible/join_kubernetes.sh
dest: /tmp/ansible/
flat: yes
changed_when: true == false
tags:
- always
# always:
# - name: remove remote workdir
# file:
# path: "{{ item }}"
# state: absent
# with_items:
# - /tmp/ansible/join_kubernetes.sh
# changed_when: true == false
# when:
# #- Kubernetes_Prime
# #- KubernetesInit.stat.exists
# - kubernetes_config.cluster.prime.name == inventory_hostname

tasks/workers.yaml Normal file

@ -0,0 +1,46 @@
---
# - name: configure non-prime nodes - check node health
# shell: "curl http://localhost:10248/healthz || true"
# register: health
# changed_when: true == false
# - set_fact:
# kube_joined: "{{ health.stdout == 'ok' }}"
# changed_when: true == false
# # when:
# # - not Kubernetes_Prime
- name: configure non-prime nodes - create remote workdir
file:
path: "{{ item }}"
state: directory
mode: 0700
changed_when: true == false
with_items:
- /tmp/ansible/
- ansible.builtin.shell: " wget http://localhost:10248/healthz -q -O - || true"
register: health
changed_when: true == false
- set_fact:
kube_joined: "{{ health.stdout == 'ok' }}"
changed_when: true == false
- name: get join command from ansible controller
copy:
src: /tmp/ansible/join_kubernetes.sh
dest: /tmp/ansible/join_kubernetes.sh
mode: 0700
changed_when: true == false
when:
- not kube_joined
- name: configure non-prime nodes - join node to kubernetes cluster
command: sh /tmp/ansible/join_kubernetes.sh
when:
- not kube_joined


@ -0,0 +1,51 @@
# ---
# apiVersion: kyverno.io/v1
# kind: ClusterPolicy
# metadata:
# name: add-networkpolicy
# labels:
# <<: {{ kubernetes_config.defaults.labels.deployment_labels | from_yaml }}
# annotations:
# ansible.kubernetes.io/path: {{ item }}
# policies.kyverno.io/title: Add Network Policy
# policies.kyverno.io/category: Multi-Tenancy, EKS Best Practices
# policies.kyverno.io/subject: NetworkPolicy
# policies.kyverno.io/minversion: 1.6.0
# policies.kyverno.io/description: >-
# By default, Kubernetes allows communications across all Pods within a cluster.
# The NetworkPolicy resource and a CNI plug-in that supports NetworkPolicy must be used to restrict
# communications. A default NetworkPolicy should be configured for each Namespace to
# default deny all ingress and egress traffic to the Pods in the Namespace. Application
# teams can then configure additional NetworkPolicy resources to allow desired traffic
# to application Pods from select sources. This policy will create a new NetworkPolicy resource
# named `default-deny` which will deny all traffic anytime a new Namespace is created.
# spec:
# rules:
# - name: default-deny
# match:
# any:
# - resources:
# kinds:
# - Namespace
# exclude:
# any:
# - resources:
# namespaces:
# - kube-metrics
# - kube-policy
# - kube-system
# - default
# generate:
# apiVersion: networking.k8s.io/v1
# kind: NetworkPolicy
# name: default-deny
# namespace: "{{'{{request.object.metadata.name}}'}}"
# synchronize: true
# data:
# spec:
# # select all pods in the namespace
# podSelector: {}
# # deny all traffic
# policyTypes:
# - Ingress
# - Egress


@ -0,0 +1,60 @@
# ---
# apiVersion: kyverno.io/v1
# kind: ClusterPolicy
# metadata:
# name: add-networkpolicy-dns
# labels:
# <<: {{ kubernetes_config.defaults.labels.deployment_labels | from_yaml }}
# annotations:
# ansible.kubernetes.io/path: {{ item }}
# policies.kyverno.io/title: Add Network Policy for DNS
# policies.kyverno.io/category: Multi-Tenancy, EKS Best Practices
# policies.kyverno.io/subject: NetworkPolicy
# kyverno.io/kyverno-version: 1.6.2
# policies.kyverno.io/minversion: 1.6.0
# kyverno.io/kubernetes-version: "1.23"
# policies.kyverno.io/description: >-
# By default, Kubernetes allows communications across all Pods within a cluster.
# The NetworkPolicy resource and a CNI plug-in that supports NetworkPolicy must be used to restrict
# communications. A default NetworkPolicy should be configured for each Namespace to
# default deny all ingress and egress traffic to the Pods in the Namespace. Application
# teams can then configure additional NetworkPolicy resources to allow desired traffic
# to application Pods from select sources. This policy will create a new NetworkPolicy resource
# named `default-deny` which will deny all traffic anytime a new Namespace is created.
# spec:
# generateExistingOnPolicyUpdate: true
# rules:
# - name: add-netpol-dns
# match:
# any:
# - resources:
# kinds:
# - Namespace
# exclude:
# any:
# - resources:
# namespaces:
# - kube-metrics
# - kube-policy
# - kube-system
# - default
# generate:
# apiVersion: networking.k8s.io/v1
# kind: NetworkPolicy
# name: allow-dns
# namespace: "{{'{{request.object.metadata.name}}'}}"
# synchronize: true
# data:
# spec:
# podSelector:
# matchLabels: {}
# policyTypes:
# - Egress
# egress:
# - to:
# - namespaceSelector:
# matchLabels:
# name: kube-system
# ports:
# - protocol: UDP
# port: 53


@ -0,0 +1,48 @@
---
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: disallow-mutable-tag
labels:
<<: {{ kubernetes_config.defaults.labels.deployment_labels | from_yaml }}
annotations:
ansible.kubernetes.io/path: {{ item }}
policies.kyverno.io/title: Disallow mutable Tag
policies.kyverno.io/category: Best Practices
policies.kyverno.io/minversion: 1.6.0
policies.kyverno.io/severity: medium
policies.kyverno.io/subject: Pod
policies.kyverno.io/description: >-
The ':latest', ':master' and ':dev(elopment)' tags are mutable and can lead to unexpected errors if the
image changes. A best practice is to use an immutable tag that maps to
a specific version of an application Pod. This policy validates that the image
specifies a tag and that it is not called `latest` `master` or`dev(elopment)`.
spec:
#failurePolicy: Fail
validationFailureAction: Audit
background: true
rules:
- name: require-image-tag
match:
any:
- resources:
kinds:
- Pod
validate:
message: "An image tag is required."
pattern:
spec:
containers:
- image: "*:*"
- name: validate-image-tag
match:
any:
- resources:
kinds:
- Pod
validate:
message: "Using a mutable image tag e.g. 'latest', 'master' or 'dev[elopment]' is not allowed."
pattern:
spec:
containers:
- image: "!*:[latest|master|dev|development]"


@ -0,0 +1,52 @@
---
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: disallow-default-namespace
labels:
<<: {{ kubernetes_config.defaults.labels.deployment_labels | from_yaml }}
annotations:
pod-policies.kyverno.io/autogen-controllers: none
policies.kyverno.io/title: Disallow Default Namespace
policies.kyverno.io/minversion: 1.6.0
policies.kyverno.io/category: Multi-Tenancy
policies.kyverno.io/severity: medium
policies.kyverno.io/subject: Pod
policies.kyverno.io/description: >-
Kubernetes Namespaces are an optional feature that provide a way to segment and
isolate cluster resources across multiple applications and users. As a best
practice, workloads should be isolated with Namespaces. Namespaces should be required
and the default (empty) Namespace should not be used. This policy validates that Pods
specify a Namespace name other than `default`. Rule auto-generation is disabled here
due to Pod controllers need to specify the `namespace` field under the top-level `metadata`
object and not at the Pod template level.
spec:
#failurePolicy: Fail
validationFailureAction: Audit
background: true
rules:
- name: validate-namespace
match:
any:
- resources:
kinds:
- Pod
validate:
message: "Using 'default' namespace is not allowed."
pattern:
metadata:
namespace: "!default"
- name: validate-podcontroller-namespace
match:
any:
- resources:
kinds:
- DaemonSet
- Deployment
- Job
- StatefulSet
validate:
message: "Using 'default' namespace is not allowed for pod controllers."
pattern:
metadata:
namespace: "!default"


@ -0,0 +1,48 @@
---
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: spread-pods
labels:
<<: {{ kubernetes_config.defaults.labels.deployment_labels | from_yaml }}
annotations:
policies.kyverno.io/title: Spread Pods Across Nodes
policies.kyverno.io/category: Sample
policies.kyverno.io/subject: Deployment, Pod
policies.kyverno.io/minversion: 1.6.0
policies.kyverno.io/description: >-
Deployments to a Kubernetes cluster with multiple availability zones often need to
distribute those replicas to align with those zones to ensure site-level failures
do not impact availability. This policy matches Deployments with the label
`distributed=required` and mutates them to spread Pods across zones.
spec:
generateExistingOnPolicyUpdate: true
background: true
rules:
- name: spread-pods-across-nodes
# Matches any Deployment with the label `distributed=required`
match:
any:
- resources:
kinds:
- Deployment
- StatefulSet
preconditions:
all:
- key: "{{ '{{ request.object.spec.replicas }}' }}"
operator: GreaterThanOrEquals
value: 2
# Mutates the incoming Deployment.
mutate:
patchStrategicMerge:
spec:
template:
spec:
# Adds the topologySpreadConstraints field if non-existent in the request.
+(topologySpreadConstraints):
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app.kubernetes.io/name: "{% raw %} '{{ request.object.metadata.labels.\"app.kubernetes.io/name\" }}' {% endraw %}"


@ -0,0 +1,38 @@
# apiVersion: networking.k8s.io/v1
# kind: NetworkPolicy
# metadata:
# name: kube-metrics
# namespace: kube-metrics
# labels:
# app.kubernetes.io/name: kube-metrics
# # app.kubernetes.io/instance: { .Release.Name }}
# # app.kubernetes.io/version: { .Chart.Version | quote }}
# # app.kubernetes.io/managed-by: { .Release.Service }}
# app.kubernetes.io/component: loki
# app.kubernetes.io/part-of: metrics
# spec:
# egress:
# - to:
# #- podSelector:
# - namespaceSelector:
# matchLabels:
# kubernetes.io/metadata.name: "default"
# ports:
# - port: 443
# protocol: TCP
# # ingress:
# # - from:
# # #- podSelector:
# # - namespaceSelector:
# # matchLabels:
# # #app.kubernetes.io/name: prometheus
# # #app.kubernetes.io/instance: k8s
# # #app.kubernetes.io/managed-by: prometheus-operator
# # app.kubernetes.io/name: grafana-agent
# # #app.kubernetes.io/part-of: kube-prometheus
# # #app: grafana
# policyTypes:
# - Egress
# #- Ingress

templates/calico.yaml.j2 Normal file

File diff suppressed because it is too large


@ -0,0 +1,10 @@
#
# {{ item.name }} Container Registry Configuration
# Managed by: Ansible
#
server = "{{ item.server }}"
[host."{{ item.url }}"]
capabilities = {{ item.capabilities | from_yaml_all }}
skip_verify = {{ item.skip_verify | default(false) | lower }}


@ -0,0 +1,20 @@
{
"cniVersion": "0.3.1",
"name": "crio",
"type": "bridge",
"bridge": "cni0",
"isGateway": true,
"ipMasq": true,
"hairpinMode": true,
"ipam": {
"type": "host-local",
"routes": [
{ "dst": "0.0.0.0/0" },
{ "dst": "1100:200::1/24" }
],
"ranges": [
[{ "subnet": "{{ KubernetesPodSubnet }}" }],
[{ "subnet": "1100:200::/24" }]
]
}
}


@ -0,0 +1,250 @@
disabled_plugins = []
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/containerd"
state = "/run/containerd"
temp = ""
version = 2
[cgroup]
path = ""
[debug]
address = ""
format = ""
gid = 0
level = ""
uid = 0
[grpc]
address = "/run/containerd/containerd.sock"
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
tcp_address = ""
tcp_tls_ca = ""
tcp_tls_cert = ""
tcp_tls_key = ""
uid = 0
[metrics]
address = ""
grpc_histogram = false
[plugins]
[plugins."io.containerd.gc.v1.scheduler"]
deletion_threshold = 0
mutation_threshold = 100
pause_threshold = 0.02
schedule_delay = "0s"
startup_delay = "100ms"
[plugins."io.containerd.grpc.v1.cri"]
device_ownership_from_security_context = false
disable_apparmor = false
disable_cgroup = false
disable_hugetlb_controller = true
disable_proc_mount = false
disable_tcp_service = true
enable_selinux = false
enable_tls_streaming = false
enable_unprivileged_icmp = false
enable_unprivileged_ports = false
ignore_image_defined_volumes = false
max_concurrent_downloads = 3
max_container_log_line_size = 16384
netns_mounts_under_state_dir = false
restrict_oom_score_adj = false
sandbox_image = "registry.k8s.io/pause:3.6"
selinux_category_range = 1024
stats_collect_period = 10
stream_idle_timeout = "4h0m0s"
stream_server_address = "127.0.0.1"
stream_server_port = "0"
systemd_cgroup = false
tolerate_missing_hugetlb_controller = true
unset_seccomp_profile = ""
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
conf_template = ""
ip_pref = ""
max_conf_num = 1
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "runc"
disable_snapshot_annotations = true
discard_unpacked_layers = false
ignore_rdt_not_enabled_errors = false
no_pivot = false
snapshotter = "overlayfs"
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
BinaryName = ""
CriuImagePath = ""
CriuPath = ""
CriuWorkPath = ""
IoGid = 0
IoUid = 0
NoNewKeyring = false
NoPivotRoot = false
Root = ""
ShimCgroup = ""
SystemdCgroup = true
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]
[plugins."io.containerd.grpc.v1.cri".image_decryption]
key_model = "node"
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
[plugins."io.containerd.grpc.v1.cri".registry.auths]
[plugins."io.containerd.grpc.v1.cri".registry.configs]
[plugins."io.containerd.grpc.v1.cri".registry.headers]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
tls_cert_file = ""
tls_key_file = ""
[plugins."io.containerd.internal.v1.opt"]
path = "/opt/containerd"
[plugins."io.containerd.internal.v1.restart"]
interval = "10s"
[plugins."io.containerd.internal.v1.tracing"]
sampling_ratio = 1.0
service_name = "containerd"
[plugins."io.containerd.metadata.v1.bolt"]
content_sharing_policy = "shared"
[plugins."io.containerd.monitor.v1.cgroups"]
no_prometheus = false
[plugins."io.containerd.runtime.v1.linux"]
no_shim = false
runtime = "runc"
runtime_root = ""
shim = "containerd-shim"
shim_debug = false
[plugins."io.containerd.runtime.v2.task"]
platforms = ["linux/amd64"]
sched_core = false
[plugins."io.containerd.service.v1.diff-service"]
default = ["walking"]
[plugins."io.containerd.service.v1.tasks-service"]
rdt_config_file = ""
[plugins."io.containerd.snapshotter.v1.aufs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.btrfs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.devmapper"]
async_remove = false
base_image_size = ""
discard_blocks = false
fs_options = ""
fs_type = ""
pool_name = ""
root_path = ""
[plugins."io.containerd.snapshotter.v1.native"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.overlayfs"]
root_path = ""
upperdir_label = false
[plugins."io.containerd.snapshotter.v1.zfs"]
root_path = ""
[plugins."io.containerd.tracing.processor.v1.otlp"]
endpoint = ""
insecure = false
protocol = ""
[proxy_plugins]
[stream_processors]
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar"
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar+gzip"
[timeouts]
"io.containerd.timeout.bolt.open" = "0s"
"io.containerd.timeout.shim.cleanup" = "5s"
"io.containerd.timeout.shim.load" = "5s"
"io.containerd.timeout.shim.shutdown" = "3s"
"io.containerd.timeout.task.state" = "2s"
[ttrpc]
address = ""
gid = 0
uid = 0


@ -0,0 +1,2 @@
overlay
br_netfilter


@ -0,0 +1,4 @@
#!/bin/bash
/sbin/iptables-restore < /etc/iptables-kubernetes.rules;
/sbin/ip6tables-restore < /etc/ip6tables-kubernetes.rules;


@ -0,0 +1,23 @@
#
# IP Tables Firewall Rules for Kubernetes
#
# Managed By ansible/role/nfc_kubernetes
#
# Dont edit this file directly as it will be overwritten. To grant a host API access
# edit the cluster config, adding the hostname/ip to path kubernetes_config.cluster.access
#
*filter
iptables -N sshd
iptables -A sshd -j RETURN
iptables -A INPUT -p tcp --dport 22 -m comment --comment "OpenSSH Server" -j sshd
iptables -I sshd -m comment --comment "allow All Hosts" -j ACCEPT
COMMIT


@ -0,0 +1,263 @@
#
# IP Tables Firewall Rules for Kubernetes
#
# Managed By ansible/role/nfc_kubernetes
#
# Dont edit this file directly as it will be overwritten. To grant a host API access
# edit the cluster config, adding the hostname/ip to path kubernetes_config.cluster.access
#
# This file is periodically called by cron
#
{% set data = namespace(firewall_rules=[]) -%}
{%- if ansible_host is regex('^[a-z]') and ':' not in ansible_host -%} {#- Convert DNs name to IP Address -#}
{%- if ipv6 | default(false) -%}
{%- set ansible_host = query('community.dns.lookup', ansible_host + '.', type='AAAA' ) -%}
{%- else -%}
{%- set ansible_host = query('community.dns.lookup', ansible_host + '.', type='A' ) -%}
{%- endif -%}
{%- if ansible_host | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
{%- set ansible_host = ansible_host | from_yaml_all | list -%}
{%- set ansible_host = ansible_host[0] -%}
{%- endif -%}
{%- endif -%}
{%- for kubernetes_host in groups[kubernetes_type] -%}
{%- if kubernetes_host is regex('^[a-z]') and ':' not in kubernetes_host -%} {#- Convert DNs name to IP Address -#}
{%- if ipv6 | default(false) -%}
{%- set kubernetes_host = query('community.dns.lookup', kubernetes_host + '.', type='AAAA' ) -%}
{%- else -%}
{%- set kubernetes_host = query('community.dns.lookup', kubernetes_host + '.', type='A' ) -%}
{%- endif -%}
{%- if kubernetes_host | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
{%- set kubernetes_host = kubernetes_host | from_yaml_all | list -%}
{%- set kubernetes_host = kubernetes_host[0] -%}
{%- endif -%}
{%- endif -%}
{%- for master_host in groups['kubernetes_master'] -%}
{%- if master_host is regex('^[a-z]') and ':' not in master_host -%} {#- Convert DNs name to IP Address -#}
{%- if ipv6 | default(false) -%}
{%- set master_host = query('community.dns.lookup', master_host + '.', type='AAAA' ) -%}
{%- else -%}
{%- set master_host = query('community.dns.lookup', master_host + '.', type='A' ) -%}
{%- endif -%}
{%- if master_host | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
{%- set master_host = master_host | from_yaml_all | list -%}
{%- set master_host = master_host[0] -%}
{%- endif -%}
{%- endif -%}
{%- if Kubernetes_Master | default(false) | bool -%}
{%- if
master_host == kubernetes_host
and
master_host != ansible_host
and
(
(
ipv6 | default(false)
and
':' in master_host
)
or
(
not ipv6 | default(false)
and
'.' in master_host
)
)
-%}
{#- master hosts only -#}
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-embedded-etcd -s ' + master_host + ' -j ACCEPT'] -%}
{# {%- set data.firewall_rules = data.firewall_rules + ['-I INPUT -s ' + master_host + ' -p tcp -m multiport --dports 2380 -j ACCEPT'] -%} #}
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-api -s ' + master_host + ' -j ACCEPT'] -%}
{%- endif -%}
{%- endif -%}
{%- endfor -%}
{%- if
ansible_host != kubernetes_host
and
(
(
ipv6 | default(false)
and
':' in kubernetes_host
)
or
(
not ipv6 | default(false)
and
'.' in kubernetes_host
)
)
-%}
{#- All cluster Hosts -#}
{%- set data.firewall_rules = data.firewall_rules + ['-I kubelet-metrics -s ' + kubernetes_host + ' -j ACCEPT'] -%}
{%- endif -%}
{%- endfor -%}
{%- for api_client in kubernetes_config.cluster.access | default([]) -%}
{%- if api_client is regex('^[a-z]') and ':' not in api_client -%} {#- Convert DNs name to IP Address -#}
{%- set api_client_dns_name = api_client -%}
{%- if ipv6 | default(false) -%}
{%- set api_client = query('community.dns.lookup', api_client + '.', type='AAAA' ) -%}
{%- else -%}
{%- set api_client = query('community.dns.lookup', api_client + '.', type='A' ) -%}
{%- endif -%}
{%- if api_client | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
{%- set api_client = api_client | from_yaml_all | list -%}
{%- set api_client = api_client[0] -%}
{%- endif -%}
{%- endif -%}
{%- if
api_client != ansible_host
and
(
(
ipv6 | default(false)
and
':' in api_client
)
or
(
not ipv6 | default(false)
and
'.' in api_client
)
)
-%}
{#- Hosts allowed to access API -#}
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-api -s ' + api_client + ' -m comment --comment "host: ' + api_client_dns_name | default(api_client) + '" -j ACCEPT'] -%}
{%- endif -%}
{%- endfor %}
*filter
{# -N kubernetes-embedded-etcd
-A kubernetes-embedded-etcd -j RETURN
-A INPUT -p tcp -m multiport --dports 2379,2380 -m comment --comment "etcd. Servers only" -j kubernetes-embedded-etcd
-N kubernetes-api
-A kubernetes-api -j RETURN
-A INPUT -p tcp --dport 6443 -m comment --comment "Kubernetes API access. All Cluster hosts and end users" -j kubernetes-api
-N kubernetes-flannel-vxlan
-A kubernetes-flannel-vxlan -j RETURN
-A INPUT -p udp --dport 8472 -m comment --comment "Flannel. All cluster hosts" -j kubernetes-flannel-vxlan
-N kubernetes-kubelet-metrics
-A kubernetes-kubelet-metrics -j RETURN
-A INPUT -p tcp --dport 10250 -m comment --comment "Kubernetes Metrics. All cluster hosts" -j kubernetes-kubelet-metrics
-N kubernetes-flannel-wg-four
-A kubernetes-flannel-wg-four -j RETURN
-A INPUT -p udp --dport 51820 -m comment --comment "Flannel Wiregaurd IPv4. All cluster hosts" -j kubernetes-flannel-wg-four
-N kubernetes-flannel-wg-six
-A kubernetes-flannel-wg-six -j RETURN
-A INPUT -p udp --dport 51821 -m comment --comment "Flannel Wiregaurd IPv6. All cluster hosts" -j kubernetes-flannel-wg-six #}
{% if data.firewall_rules | length | int > 0 -%}
{% for rule in data.firewall_rules -%}
{{ rule }}
{% endfor -%}
{% endif -%}
{#- #-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 6443 -j ACCEPT
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 179 -j ACCEPT
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 10250 -j ACCEPT
#-I INPUT -s 192.168.1.0/24 -p udp -m multiport --dports 4789 -j ACCEPT
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 2379 -j ACCEPT
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 2380 -j ACCEPT
-I INPUT -p tcp -m multiport --dports 6443 -j ACCEPT
-I INPUT -p tcp -m multiport --dports 179 -j ACCEPT
-I INPUT -p tcp -m multiport --dports 10250 -j ACCEPT
-I INPUT -p udp -m multiport --dports 4789 -j ACCEPT
-I INPUT -p tcp -m multiport --dports 2379 -j ACCEPT
-I INPUT -p tcp -m multiport --dports 2380 -j ACCEPT #}
COMMIT
{# iptables -I kubernetes-api -s nww-au1.networkedweb.com -j ACCEPT #}


@ -0,0 +1,29 @@
#
# K3s Configuration for running Kubernetes
#
# Managed By ansible/role/nfc_kubernetes
#
# Dont edit this file directly as it will be overwritten.
#
flannel-backend: none
cluster-cidr: "{{ KubernetesPodSubnet }}"
cluster-init: true
{% if not Kubernetes_Prime | default(false) | bool -%}server: https://{{ hostvars[kubernetes_config.cluster.prime.name].ansible_host }}:6443{% endif %}
service-cidr: "{{ KubernetesServiceSubnet }}"
disable-network-policy: true
disable:
- traefik
kube-apiserver-arg:
- audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log
- audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml
# - admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml
{% if kubernetes_oidc.enabled | default(false) | bool -%}
- oidc-issuer-url={{ kubernetes_oidc.issuer_url }}
- oidc-client-id={{ kubernetes_oidc.client_id }}
- oidc-username-claim={{ kubernetes_oidc.username_claim }}
{% if kubernetes_oidc.oidc_username_prefix | default('') != '' -%} - oidc-username-prefix={{ kubernetes_oidc.oidc_username_prefix }}{% endif %}
- oidc-groups-claim={{ kubernetes_oidc.groups_claim }}
{% if kubernetes_oidc.groups_prefix | default('') != '' %} - oidc-groups-prefix={{ kubernetes_oidc.groups_prefix }}{% endif %}
{% endif %}
{% if host_external_ip | default('') %} node-external-ip: "{{ host_external_ip }}"{% endif %}


@ -0,0 +1,19 @@
#
# Private Container Registries for Kubernetes
#
# Managed By ansible/role/nfc_kubernetes
#
# Dont edit this file directly as it will be overwritten.
#
{% set registries = kubernetes_private_container_registry | default([]) -%}
{% if registries | length > 0 %}mirrors:
{% for entry in registries %}
{{ entry.name }}:
endpoint:
- "{{ entry.url }}"
{%- endfor %}
{% endif %}


@ -0,0 +1,240 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
authorization/description: >-
provide full access to everything.
Using this Cluster role should be avoided with additional cluster roles
created to meet the additional authorization requirements.
authorization/target: cluster, namespace
labels:
app.kubernetes.io/part-of: nfc_kubernetes
app.kubernetes.io/managed-by: ansible
app.kubernetes.io/version: ''
name: authorization:common:full
rules:
- apiGroups:
- "*"
resources:
- "*"
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
authorization/description: |-
Provide Access for reading ALL non-secret items, this includes reading pod and node metrics.
This role is designed for users who require access to audit/view/diagnose at either the
cluster level `ClusterRoleBinding` or namespace level `RoleBinding`
authorization/target: namespace
labels:
app.kubernetes.io/part-of: nfc_kubernetes
app.kubernetes.io/managed-by: ansible
app.kubernetes.io/version: ''
name: authorization:common:namespace:read
rules:
- apiGroups: # Get Metrics
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
- apiGroups: # Read-only access to resources
- "*"
resources:
- awx
- cronjobs
- daemonset
- deployments
- helmcharts
- helmchartconfigs
- ingress
- jobs
- namespaces
- pods
- pv
- pvc
- serviceaccount
- services
- statefuleset
- storageclasses
- configmap
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
authorization/description: |-
Provide access for reading ALL items.
This role is designed for users who own and is designed to be
bound to a namespace using a `RoleBinding`
authorization/target: namespace
labels:
app.kubernetes.io/part-of: nfc_kubernetes
app.kubernetes.io/managed-by: ansible
app.kubernetes.io/version: ''
name: authorization:common:namespace:owner
rules:
- apiGroups: # Read-only access to resources
- "*"
resources:
- awx
- cronjobs
- daemonset
- deployments
- helmcharts
- helmchartconfigs
- ingress
- jobs
- pods
- pvc
- roles
- rolebindings
- secrets
- serviceaccount
- services
- statefuleset
- storageclasses
- configmap
verbs:
- create
- get
- list
- watch
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: authorization:common:cluster:view-metrics
rules:
- apiGroups:
- metrics.k8s.io
- "" # Without this metrics don't work. this also grants access to view nodes
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: authorization:read
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: authorization:common:namespace:read
subjects:
- kind: Group
name: administrators
- kind: Group
name: technician
- kind: Group
name: NodeRED
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: authorization:view-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: authorization:common:cluster:view-metrics
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: administrators
- kind: Group
name: technician
# ---
# kind: ClusterRoleBinding
# apiVersion: rbac.authorization.k8s.io/v1
# metadata:
# name: authorization:full
# roleRef:
# apiGroup: rbac.authorization.k8s.io
# kind: ClusterRole
# name: authorization:full
# subjects:
# - kind: Group
# name: administrators
# - kind: Group
# name: technician
###################################################################################################################
# Namespace role binding
# ---
# apiVersion: rbac.authorization.k8s.io/v1
# kind: RoleBinding
# metadata:
# # labels:
# name: authorization:full
# namespace: development
# roleRef:
# apiGroup: rbac.authorization.k8s.io
# kind: Role
# name: authorization:full
# subjects:
# - kind: Group
# name: administrators
# namespace: development
# - kind: Group
# name: technician
# - kind: Group
# name: NodeRED
# ---
# - apiVersion: rbac.authorization.k8s.io/v1
# kind: Role
# metadata:
# labels:
# app.kubernetes.io/description: |-
# provide full access to the testing namespace
# name: authorization:full
# namespace: development
# rules:
# - apiGroups:
# - ""
# resources:
# - ""
# verbs:
# - add
# - delete
# - edit
# - get
# - list
# - watch


@ -0,0 +1,14 @@
/var/lib/docker/containers/*/*.log {
daily
missingok
rotate 7
compress
delaycompress
notifempty
postrotate
docker restart $(docker ps -q)
endscript
}