Merge branch 'development' into 'master'

chore: release to stable

See merge request nofusscomputing/projects/ansible/kubernetes!3
7  .cz.yaml  Normal file
@@ -0,0 +1,7 @@
commitizen:
  bump_message: "build(version): bump version $current_version \u2192 $new_version"
  changelog_incremental: false
  name: cz_conventional_commits
  tag_format: $major.$minor.$patch$prerelease
  update_changelog_on_bump: true
  version: 0.0.1
21  .gitlab-ci.yml  Normal file
@@ -0,0 +1,21 @@
---
include:
  - project: nofusscomputing/projects/gitlab-ci
    ref: development
    file:
      - .gitlab-ci_common.yaml
      - template/automagic.gitlab-ci.yaml

variables:
  MY_PROJECT_ID: "51640029"
  GIT_SYNC_URL: "https://$GITHUB_USERNAME_ROBOT:$GITHUB_TOKEN_ROBOT@github.com/NoFussComputing/ansible_role_nfc_kubernetes.git"
  PAGES_ENVIRONMENT_PATH: projects/ansible/roles/kubernetes/


Ansible-roles.Submodule.Deploy:
  extends: .submodule_update_trigger
  variables:
    SUBMODULE_UPDATE_TRIGGER_PROJECT: nofusscomputing/projects/ansible/ansible-roles
    GIT_COMMIT_TYPE: feat
    GIT_COMMIT_TYPE_CATEGORY: $CI_PROJECT_NAME
    GIT_CONFIG_SUBMODULE_NAME: nfc_kubernetes
8  .gitmodules  vendored  Normal file
@@ -0,0 +1,8 @@
[submodule "gitlab-ci"]
	path = gitlab-ci
	url = https://gitlab.com/nofusscomputing/projects/gitlab-ci.git
	branch = development
[submodule "website-template"]
	path = website-template
	url = https://gitlab.com/nofusscomputing/infrastructure/website-template.git
	branch = development
10  .nfc_automation.yaml  Normal file
@@ -0,0 +1,10 @@
---

role_git_conf:
  gitlab:
    submodule_branch: "development"
    default_branch: development
    mr_labels: ~"type::automation" ~"impact::0" ~"priority::0"
    auto_merge: true
    merge_request:
      patch_labels: '~"code review::not started"'
39  README-orig.md  Normal file
@@ -0,0 +1,39 @@
# Kubernetes Ansible Playbook


## Additional changes

- `SystemdCgroup = false` -> `SystemdCgroup = true` [See this comment](https://github.com/kubernetes/kubernetes/issues/110177#issuecomment-1161647736)


## Tags

This role has been set up to take advantage of Ansible tags. The use of these tags enables finer control over what tasks are run. By design, when you set a tag, only what is required for that tag is run.

Available tags are as follows:

- `containerregistry` Apply container/docker registry settings _(see the sketch after this list)_

- `firewall` Apply firewall settings (firewall name/type independent)

- `install` Run every task within the role. This is the same as omitting `--tags`

- `iptables` Apply iptables settings

- `manifest` Apply/remove Kubernetes manifests

- `namespace` Apply/remove Kubernetes namespaces

- `nodelabels` Apply/remove Kubernetes node labels

- `taints` Apply/remove Kubernetes taints
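For the `containerregistry` tag, the registry list comes from the `kubernetes_private_container_registry` variable (defaults to `[]` in `defaults/main.yml`). A minimal sketch, assuming a hypothetical private registry; the `name`/`url` keys follow what the role's k3s registries template consumes:

``` yaml
# Sketch only: the registry host below is a hypothetical placeholder.
kubernetes_private_container_registry:
  - name: registry.example.com          # registry name, used as the mirror key
    url: https://registry.example.com   # endpoint the mirror points at
```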
!!! tip
    If you intend on running the `install` tag, you can omit the `--tags` flag altogether.

!!! alert
    On the first run of this playbook, if the CLI switch `--extra-vars "init=true"` is used (`init` is a boolean true/false), the manifests will not be applied. This allows Kubernetes to be fully set up before applying manifests that might otherwise prevent successful completion of the play.

## Command Cheatsheet

- `crictl --runtime-endpoint unix:///run/containerd/containerd.sock images` list all container images on the host

## Links / References

- ContainerD Configuration

    - [Registry Configuration](https://github.com/containerd/containerd/blob/7cd72cce99c8d3b938c1b763c2744a0b699028ab/docs/cri/config.md#registry-configuration)

    - [Configuring ContainerD registries](https://github.com/containerd/containerd/blob/7cd72cce99c8d3b938c1b763c2744a0b699028ab/docs/hosts.md#cri)
126  defaults/main.yml  Normal file
@@ -0,0 +1,126 @@
KubernetesPodSubnet: 10.85.0.0/16
KubernetesServiceSubnet: 10.86.0.0/16


Kubernetes_Prime: false          # Optional, Boolean. Is the current host the Prime master?
Kubernetes_Master: false         # Optional, Boolean. Is the current host a master host?

ContainerDioVersion: 1.6.20-1
KubernetesVersion: '1.26.2'      # must match the repository release version

KubernetesVersion_k8s_prefix: '-00'
KubernetesVersion_k3s_prefix: '+k3s1'

kubernetes_private_container_registry: []    # Optional, Array. if none use `[]`

# host_external_ip: ''           # Optional, String. External IP Address for host.

# Optional, Dict. Used to configure Kubernetes with OIDC Authentication.
# kubernetes_oidc:
#   enabled: true                        # Mandatory, boolean. Speaks for itself.
#   issuer_url: https://domainname.com/realms/realm-name   # Mandatory, String. URL of OIDC Provider
#   client_id: kubernetes-test           # Mandatory, string. OIDC Client ID
#   username_claim: preferred_username   # Mandatory, String. Claim name containing username.
#   username_prefix: oidc                # Optional, String. What to prefix to username
#   groups_claim: roles                  # Mandatory, String. Claim name containing groups
#   groups_prefix: ''                    # Optional, String. String to append to groups

kubernetes_type: k8s             # Mandatory, String. Choice: k8s | k3s


nfc_kubernetes:
  enable_firewall: true          # Optional, bool. Enable firewall rules from role 'nfc_firewall'


k3s:
  files:
    # - name: config.yaml
    #   path: /etc/rancher/k3s
    #   content: |
    #     flannel-backend: none
    #     cluster-cidr: "{{ KubernetesPodSubnet }}"
    #     cluster-init: true
    #     {% if not Kubernetes_Prime | default(false) | bool -%}server: https://{{ hostvars[kubernetes_config.cluster.prime.name].ansible_host }}:6443{% endif %}
    #     service-cidr: "{{ KubernetesServiceSubnet }}"
    #     disable-network-policy: true
    #     disable:
    #       - traefik
    #     kube-apiserver-arg:
    #       - audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log
    #       - audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml
    #       # - admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml
    #       {% if kubernetes_oidc.enabled | default(false) | bool -%}
    #       - oidc-issuer-url={{ kubernetes_oidc.issuer_url }}
    #       - oidc-client-id={{ kubernetes_oidc.client_id }}
    #       - oidc-username-claim={{ kubernetes_oidc.username_claim }}
    #       - {% if kubernetes_oidc.oidc_username_prefix | default('') != '' %}oidc-username-prefix={{ kubernetes_oidc.oidc_username_prefix }}{% endif %}
    #       - oidc-groups-claim={{ kubernetes_oidc.groups_claim }}
    #       {% if kubernetes_oidc.groups_prefix | default('') != '' %}- oidc-groups-prefix={{ kubernetes_oidc.groups_prefix }}{% endif %}
    #       {% endif %}
    #     node-external-ip: "{{ host_external_ip }}"

    - name: audit.yaml
      path: /var/lib/rancher/k3s/server
      content: |
        apiVersion: audit.k8s.io/v1
        kind: Policy
        rules:
          - level: Request

    - name: 90-kubelet.conf
      path: /etc/sysctl.d
      content: |
        vm.panic_on_oom=0
        vm.overcommit_memory=1
        kernel.panic=10
        kernel.panic_on_oops=1
        kernel.keys.root_maxbytes=25000000

    - name: psa.yaml
      path: /var/lib/rancher/k3s/server
      content: ""
      # apiVersion: apiserver.config.k8s.io/v1
      # kind: AdmissionConfiguration
      # plugins:
      #   - name: PodSecurity
      #     configuration:
      #       apiVersion: pod-security.admission.config.k8s.io/v1beta1
      #       kind: PodSecurityConfiguration
      #       defaults:
      #         enforce: "restricted"
      #         enforce-version: "latest"
      #         audit: "restricted"
      #         audit-version: "latest"
      #         warn: "restricted"
      #         warn-version: "latest"
      #       exemptions:
      #         usernames: []
      #         runtimeClasses: []
      #         namespaces: [kube-system]



#############################################################################################
#   Cluster Config when stored in Inventory
#
#   One required per cluster. Recommend creating one Ansible host group per cluster.
#############################################################################################
# kubernetes_config:                 # Dict. Cluster Config
#   cluster:
#     access:                        # Mandatory. List, DNS host name or IPv4/IPv6 Address.
#                                    #   if none use '[]'
#       - 'my.dnshostname.com'
#       - '2001:4860:4860::8888'
#       - '192.168.1.1'
#     Name: earth                    # Mandatory, String. Cluster Name
#     prime:
#       name: k3s-prod               # Mandatory, String. Ansible inventory_host that will
#                                    #   act as the prime master node.
#     networking:
#       encrypt: true                # Optional, Boolean. default `false`. Install wireguard for inter-node encryption
#       podSubnet: 172.16.70.0/24    # Mandatory, String. CIDR
#       ServiceSubnet: 172.16.72.0/24  # Mandatory, String. CIDR
#   # Mandatory, String. Token to join nodes to the cluster
#   node_token: !vault |
#     $ANSIBLE_VAULT;1.2;AES256;kubernetes/cluster/production
#     {rest_of encrypted key}
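For reference, the commented `kubernetes_oidc` block in these defaults maps one-to-one onto a group_vars entry; a minimal sketch with only the mandatory keys set (the issuer and client values are hypothetical placeholders):

``` yaml
# Sketch only: issuer_url and client_id are hypothetical placeholders.
kubernetes_oidc:
  enabled: true
  issuer_url: https://sso.example.com/realms/kubernetes
  client_id: kubernetes
  username_claim: preferred_username
  groups_claim: roles
```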
0  docs/articles/index.md  Normal file
0  docs/contact.md  Normal file
0  docs/index.md  Normal file
0  docs/operations/index.md  Normal file
0  docs/projects/ansible/index.md  Normal file
0  docs/projects/ansible/playbooks/index.md  Normal file
0  docs/projects/ansible/roles/index.md  Normal file
146  docs/projects/ansible/roles/kubernetes/ansible.md  Normal file
@@ -0,0 +1,146 @@
---
title: Ansible
description: No Fuss Computing's Ansible role nfc_kubernetes Ansible docs
date: 2023-10-24
template: project.html
about: https://gitlab.com/nofusscomputing/projects/ansible/roles/kubernetes
---

This page intends to describe/explain the setup of Ansible for this role.


## Inventory Setup

There are many ways to lay out your inventory within Ansible. To take full advantage of this role, the following could be used:

- A group containing all master nodes

- A group containing all worker nodes

- A group containing all nodes for a cluster

- All of the groups above made subordinate to a master group

- Variable `ansible_host` set for each host. _Can be either a DNS name or an IPv4/IPv6 address_

!!! info Info
    The nfc_kubernetes role uses this field for any configuration that requires a hostname. You are strongly encouraged to use a DNS name, and that DNS name must be resolvable by each host that accesses the host in question. Using a DNS host name is of paramount importance for a host with a dynamically assigned (DHCP) address.

- Variable `Kubernetes_Master` _boolean_ set for all hosts that are master nodes.

- Hosts that require Kubernetes API access added to variable `kubernetes_config.cluster.access`

An example inventory file that would suffice:

``` yaml
all:
  hosts:
    localhost:
  vars:
    ansible_connection: local
  children:

    kubernetes:
      children:

        k3s:
          hosts:

        k8s:
          hosts:

        kubernetes_cluster:
          children:

            kubernetes_cluster_{cluster_name_here}:
              hosts:

        kubernetes_master:
          hosts:

        kubernetes_worker:
          hosts:
```

The reasoning for the layout above is:

- Group `kubernetes` is used as a selector within a playbook, or as a limiter when running a playbook, to cover all Kubernetes hosts.

- Groups `kubernetes`, `k3s`, `k8s` and `kubernetes_cluster_{cluster_name_here}` are used for variable files (`inventory/group_vars/{group_name}.yaml`), with the latter containing all settings for the cluster in question.

- Hosts are added to ALL groups relevant to them.


The following group variable files will also need to be created:

- `inventory/group_vars/all.yaml` Variables applicable to all hosts

- `inventory/group_vars/kubernetes.yaml` Software versions for Kubernetes

- `inventory/group_vars/kubernetes_cluster_{cluster_name_here}.yaml` Cluster configuration
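A minimal sketch of the cluster configuration file, assuming a hypothetical cluster named `earth`; the structure follows the commented `kubernetes_config` reference in `defaults/main.yml`:

``` yaml
# inventory/group_vars/kubernetes_cluster_earth.yaml
# Sketch only: all values below are placeholders.
kubernetes_config:
  cluster:
    access:                         # hosts granted Kubernetes API access
      - 'my.dnshostname.com'
    Name: earth
    prime:
      name: k3s-prod                # inventory_host acting as the prime master node
    networking:
      podSubnet: 172.16.70.0/24
      ServiceSubnet: 172.16.72.0/24
```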
## Playbooks Setup

Whilst there are many ways to skin a cat, using the inventory layout defined above together with the playbooks detailed below is a possible solution that covers most uses of this role.

playbooks/kubernetes.yaml

``` yaml
---
- name: Kubernetes Group and sub-groups
  hosts: "{{ groups.kubernetes }}"
  gather_facts: true

  roles: []


- name: Kubernetes Master
  import_playbook: kubernetes/master.yaml


- name: Kubernetes Worker
  import_playbook: kubernetes/worker.yaml
```

playbooks/kubernetes/master.yaml

``` yaml
---
- name: Kubernetes Master Nodes
  hosts: "{{ kubernetes_master }}"
  gather_facts: true

  roles:
    - name: Kubernetes Setup
      role: nfc_kubernetes
```

playbooks/kubernetes/worker.yaml

``` yaml
---
- name: Kubernetes worker Nodes
  hosts: "{{ kubernetes_worker }}"
  gather_facts: true

  roles:
    - name: Kubernetes Setup
      role: nfc_kubernetes
```

Running the above playbooks with this inventory setup allows the following and more:

- Setup Kubernetes on all applicable Kubernetes hosts

    > `ansible-playbook -i inventory/production playbooks/kubernetes.yaml`

- Setup Kubernetes cluster `{cluster_name}`

    > `ansible-playbook --limit kubernetes_cluster_{cluster_name_here} -i inventory/production playbooks/kubernetes.yaml`

- Setup all Kubernetes master nodes, regardless of cluster

    > `ansible-playbook --limit kubernetes_master -i inventory/production playbooks/kubernetes.yaml`
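Since standard Ansible `--limit` accepts any inventory host as well as a group, a single node can be targeted the same way (the hostname below is a placeholder):

- Setup a single node

    > `ansible-playbook --limit {node_name_here} -i inventory/production playbooks/kubernetes.yaml`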
72  docs/projects/ansible/roles/kubernetes/firewall.md  Normal file
@@ -0,0 +1,72 @@
---
title: Firewall
description: No Fuss Computing's Ansible role nfc_kubernetes
date: 2023-10-24
template: project.html
about: https://gitlab.com/nofusscomputing/projects/ansible/roles/kubernetes
---

This role includes logic to generate firewall rules for iptables. Both IPv4 and IPv6 rules are generated. To survive reboots or network cable disconnects, a script is created and added to `if-up.d`. This ensures that each time the interface is brought up, the firewall rules are applied. For a list of the firewall rules applied see the [K3s documentation](https://docs.k3s.io/installation/requirements#inbound-rules-for-k3s-server-nodes).

Rule generation workflow:

- Iterates over all Kubernetes hosts

- Adds rules for worker access if the host is a master

- Adds rules for all-node access if the host is a worker

- Adds rules for additional hosts to access the Kubernetes API

What you end up with:

- Chains for each area of access to the cluster

- The INPUT table contains the jump to each chain, based off of destination port and protocol

- Each chain returns to the INPUT table for further processing.

!!! danger Security
    The way the rules are created and applied, they all return to the `INPUT` table for further processing. If the `INPUT` table's default policy is `ACCEPT`, then regardless of the firewall rules in place, any host with network access to the Kubernetes host can access the desired service without needing a rule to grant access.

    **Recommendation:** Set the `INPUT` table's default policy to `DROP`.

!!! info Info
    If a DNS name is used for any of the addresses, a DNS lookup is done for both IPv4 and IPv6, adding the first host found to the applicable chain.


## Providing access to the cluster

No special skill is required apart from adding the host to grant access to the right list variable, i.e.

``` yaml
kubernetes_config:
  cluster:
    access:
      - '192.168.1.1'
      - 'my.hostname.com'
```

Any host that is added to the `access` list will be granted access to the Kubernetes API. Hosts in this list are intended to be the hosts your end users are on. If you join a new node to the cluster, the applicable firewall rules will automagically be generated and added to each host's firewall. It's important when adding a new node to the cluster that the playbook is run against all nodes of the cluster, not just the new node. Failing to do so will have the existing nodes block access to the new node due to missing firewall rules.


!!! tip Tip
    When manually adding a host, use insert `-I` not append `-A`, as the last rule must be `-j RETURN`.

    example: `-I {chain_name} -s {hostname/ipaddress} -j ACCEPT`


| Protocol | Port      | Source    | Destination | Description                                   |
|----------|-----------|-----------|-------------|-----------------------------------------------|
| TCP      | 2379-2380 | Servers   | Servers     | Required only for HA with embedded etcd       |
| TCP      | 6443      | Agents    | Servers     | K3s supervisor and Kubernetes API Server      |
| UDP      | 8472      | All nodes | All nodes   | Required only for Flannel VXLAN               |
| TCP      | 10250     | All nodes | All nodes   | Kubelet metrics                               |
| UDP      | 51820     | All nodes | All nodes   | Required only for Flannel Wireguard with IPv4 |
| UDP      | 51821     | All nodes | All nodes   | Required only for Flannel Wireguard with IPv6 |
docs/projects/ansible/roles/kubernetes/index.md
Normal file
59
docs/projects/ansible/roles/kubernetes/index.md
Normal file
@ -0,0 +1,59 @@
|
||||
---
|
||||
title: Kubernetes
|
||||
description: No Fuss Computings Ansible role nfc_kubernetes
|
||||
date: 2023-10-24
|
||||
template: project.html
|
||||
about: https://gitlab.com/nofusscomputing/projects/ansible/roles/kubernetes
|
||||
---
|
||||
|
||||
This Ansible roles purpose is to install and configure Kubernetes with configuration from code. You can also use [our playbooks](../../playbooks/index.md) to deploy using this role. this is especially useful if you are also using [our Ansible Execution Environment](../../execution_environment/index.md)
|
||||
|
||||
|
||||
## Features
|
||||
|
||||
This role deploys a K3s cluster. In addition it has the following features:
|
||||
|
||||
- CNI Setup
|
||||
|
||||
- Configurable Container Registries
|
||||
|
||||
- _[ToDo-#3](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/issues/3)_ Encryption between nodes (Wireguard)
|
||||
|
||||
- [Firewall configured for kubernetes host](firewall.md)
|
||||
|
||||
- _[ToDo-#2](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/issues/2)_ Multi-node Deployment
|
||||
|
||||
- OpenID Connect SSO Authentication
|
||||
|
||||
- [Basic RBAC `ClusterRoles` and Bindings](rbac.md)
|
||||
|
||||
- _[ToDo-#5](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/issues/5)_ Restore backup on fresh install of a cluster
|
||||
|
||||
|
||||
## Role Workflow
|
||||
|
||||
The roles workflow is as follows
|
||||
|
||||
1. Download both install script and k3s binary to ansible controller
|
||||
|
||||
1. copy install script and k3s binary to host
|
||||
|
||||
1. Create required config files needed for installation
|
||||
|
||||
1. _(kubernetes prime only)_ Add install required config files
|
||||
|
||||
1. Install kubernetes
|
||||
|
||||
1. Configure Kubernetes
|
||||
|
||||
If the playbook is setup as per [our recommendation](ansible.md) step 2 onwards is first done on master nodes then worker nodes.
|
||||
|
||||
|
||||
## Default Variables
|
||||
|
||||
|
||||
``` yaml title="defaults/main.yaml" linenums="1"
|
||||
|
||||
--8<-- "defaults/main.yaml"
|
||||
|
||||
```
|
37  docs/projects/ansible/roles/kubernetes/rbac.md  Normal file
@@ -0,0 +1,37 @@
---
title: RBAC
description: No Fuss Computing's Ansible role nfc_kubernetes RBAC documentation.
date: 2023-10-29
template: project.html
about: https://gitlab.com/nofusscomputing/projects/ansible/roles/kubernetes
---

As part of this role's workflow, a set of Cluster Roles and Cluster Role Bindings are deployed and ready to use. The intent of these roles is to create a default set of roles that only require the authorization system to provide the user's groups. As they are defined as Cluster Roles, you can bind them at the cluster and/or namespace level.
A minimum-required-access principle has been adopted in the creation of these roles, with the roles designed for whoever would access/use the cluster (an end user).

!!! tip
    All deployed `ClusterRole` objects include labels `authorization/description` and `authorization/target` explaining their intended purpose and where they are recommended for binding.


Currently the following roles are deployed as part of this Ansible role:

- authorization:namespace:read

    > Full read access to all objects except secrets

- authorization:full

    > Full read/write access to all objects including secrets

- authorization:namespace:owner

    > Full read/write access to all objects including secrets

- authorization:cluster:view-metrics

    > View node and pod metrics

- **[ToDo-#6](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/issues/6)** authorization:cluster:admin

    > Configure the cluster, not including anything that can be deployed.
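To illustrate binding at the namespace level, here is a minimal sketch of a `RoleBinding` that grants a hypothetical OIDC group read access via the deployed `authorization:namespace:read` ClusterRole (the group and namespace names are assumptions, not part of the role):

``` yaml
# Sketch only: 'developers' and 'my-app' are hypothetical placeholders.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: developers-namespace-read
  namespace: my-app
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: authorization:namespace:read    # ClusterRole deployed by this role
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: Group
    name: developers                    # group as provided by the authorization system
```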
0  docs/projects/index.md  Normal file
0  docs/tags.md  Normal file
1  gitlab-ci  Submodule
Submodule gitlab-ci added at 52f4ebda54
21  handlers/main.yml  Normal file
@@ -0,0 +1,21 @@
---
- name: "restart ContainerD"
  service:
    name: containerd
    state: restarted
  when: >
    containerd_config.changed | default(false) | bool
    and
    containerd_installed.rc | default(1) | int == 0
    and
    kubernetes_type == 'k8s'
  tags:
    - configure
    - install


- name: Restart Kubernetes
  ansible.builtin.service:
    name: "{% if kubernetes_type == 'k3s' %}k3s{% else %}kubelet{% endif %}"
    state: restarted
  listen: kubernetes_restart
24  meta/main.yml  Normal file
@@ -0,0 +1,24 @@
galaxy_info:
  role_name: nfc_kubernetes
  author: No Fuss Computing
  description: template role to install and configure Kubernetes on a host

  issue_tracker_url: https://gitlab.com/nofusscomputing/projects/ansible/kubernetes

  license: https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/blob/master/LICENSE

  min_ansible_version: '2.15'

  platforms:
    - name: Debian
      versions:
        - bullseye
    - name: Ubuntu
      versions:
        - 21

galaxy_tags:
  - k3s
  - k8s
  - kubernetes
  - container
52  mkdocs.yml  Normal file
@@ -0,0 +1,52 @@
INHERIT: website-template/mkdocs.yml

docs_dir: 'docs'

repo_name: Kubernetes Ansible Role
repo_url: https://gitlab.com/nofusscomputing/projects/ansible/kubernetes
edit_uri: '/-/ide/project/nofusscomputing/projects/ansible/kubernetes/edit/development/-/docs/'

nav:
  - Home: index.md

  - Articles:

      - articles/index.md

  - Projects:

      - projects/index.md

      - Ansible:

          - projects/ansible/index.md

          - Execution Environment:

              - projects/ansible/execution_environment/index.md

          - Playbooks:

              - projects/ansible/playbooks/index.md

          - Roles:

              - projects/ansible/roles/index.md

              - Kubernetes:

                  - projects/ansible/roles/kubernetes/index.md

                  - projects/ansible/roles/kubernetes/ansible.md

                  - projects/ansible/roles/kubernetes/firewall.md

                  - projects/ansible/roles/kubernetes/rbac.md


  - Operations:

      - operations/index.md

  - Contact Us: contact.md
303  tasks/common.yaml  Normal file
@@ -0,0 +1,303 @@
---

- name: "{{ role_name }} Install Software"
  include_role:
    name: nfc_common
  vars:
    common_gather_facts: false
    aptSigningKeys:
      - name: docker
        url: https://download.docker.com/linux/debian/gpg
        save_directory: /usr/share/keyrings
        file_extension: asc

      - name: kubernetes
        url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
        save_directory: /usr/share/keyrings
        file_extension: asc

    aptRepositories:
      - name: docker
        repo: deb [arch={{ dynamic_processor_architecture }} signed-by=/usr/share/keyrings/docker.asc] http://download.docker.com/linux/{{ ansible_os_family | lower }} {{ ansible_lsb.codename | lower }} stable

      - name: kubernetes
        repo: deb [signed-by=/usr/share/keyrings/kubernetes.asc] http://apt.kubernetes.io/ kubernetes-xenial main

    aptInstall:
      - name: gnupg2
      - name: apt-transport-https
      - name: software-properties-common
      - name: ca-certificates
      - name: iptables
      - name: python3-pip
      - name: python3-virtualenv

      - name: containerd.io
        version: "{{ ContainerDioVersion }}"

      - name: kubectl
        version: "{{ KubernetesVersion }}"
      - name: kubelet
        version: "{{ KubernetesVersion }}"
      - name: kubeadm
        version: "{{ KubernetesVersion }}"
  tags:
    - install

# containerd.io=1.6.22-1 kubectl=1.26.9-00 kubelet=1.26.9-00 kubeadm=1.26.9-00

- name: Remove swapfile from /etc/fstab
  mount:
    name: "{{ item }}"
    fstype: swap
    state: absent
  with_items:
    - swap
    - none
  when:
    - ansible_os_family == 'Debian' # ansible_lsb.codename = bullseye, ansible_lsb.major_release = 11
  tags:
    - install


- name: Disable swap
  command: swapoff -a
  changed_when: true == false
  when:
    #- ansible_swaptotal_mb > 0
    - ansible_os_family == 'Debian'
  tags:
    - install

- name: Check an armbian os system
  stat:
    path: /etc/default/armbian-zram-config
  register: armbian_stat_result


- name: Armbian Disable Swap
  ansible.builtin.shell:
    cmd: |
      sed -i 's/\# SWAP=false/SWAP=false/g' /etc/default/armbian-zram-config;
      sed -i 's/ENABLED=true/ENABLED=false/g' /etc/default/armbian-zram-config;
  args:
    executable: bash
  changed_when: false
  # failed_when: false
  #notify: RebootHost # doesnt need to reboot as swapoff -a covers the deployment
  when: armbian_stat_result.stat.exists


- name: Add the overlay module
  community.general.modprobe:
    name: overlay
    state: present
  when:
    - ansible_os_family == 'Debian'
  tags:
    - install


- name: Add the br_netfilter module
  community.general.modprobe:
    name: br_netfilter
    state: present
  when:
    - ansible_os_family == 'Debian'
  tags:
    - install

- name: check if containerd installed
  ansible.builtin.shell:
    cmd: which containerd
  failed_when: false
  changed_when: false
  register: containerd_installed


- name: "Containerd.io Started?"
  service:
    name: containerd
    state: started
  tags:
    - configure
    - install
  when: >
    ansible_os_family == 'Debian'
    and
    containerd_installed.rc | default(1) | int == 0


- name: containerd load modules config
  template:
    src: "etc_module_containerd.conf"
    dest: /etc/modules-load.d/containerd.conf
    owner: root
    mode: 0700
  notify: "restart ContainerD"
  when:
    - ansible_os_family == 'Debian'
  tags:
    - install


- name: Create containerD host directories.
  become_method: sudo
  become: yes
  file:
    path: /etc/containerd/certs.d/{{ item.name }}
    state: directory
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: 0700
  with_items: "{{ containerd.repositories }}"
  tags:
    - install
    - containerRegistry


- name: containerD registry host
  template:
    src: "containerd-registry-hosts.toml.j2"
    dest: /etc/containerd/certs.d/{{ item.name }}/hosts.toml
    owner: root
    mode: 0700
  notify: "restart ContainerD"
  with_items: "{{ containerd.repositories }}"
  when:
    - ansible_os_family == 'Debian'
  tags:
    - install
    - containerRegistry


- name: containerD default config
  template:
    src: "etc_containerd_containerd.toml"
    dest: /etc/containerd/config.toml
    owner: root
    mode: 0700
  notify: "restart ContainerD"
  register: containerd_config
  when:
    - ansible_os_family == 'Debian'
  tags:
    - install
    - containerRegistry


- name: Install required python modules
  ansible.builtin.pip:
    name: kubernetes
    state: forcereinstall
    #virtualenv: /tmp/venv_ansible
  when: inventory_hostname != 'op1'
  tags:
    - install


- name: sysctl net.bridge.bridge-nf-call-ip6tables
  sysctl:
    name: net.bridge.bridge-nf-call-ip6tables
    value: '1'
    sysctl_set: yes
    state: present
    reload: yes
  when:
    - ansible_os_family == 'Debian'
  tags:
    - install


- name: sysctl net.bridge.bridge-nf-call-iptables
  sysctl:
    name: net.bridge.bridge-nf-call-iptables
    value: '1'
    sysctl_set: yes
    state: present
    reload: yes
  when:
    - ansible_os_family == 'Debian'
  tags:
    - install


- name: sysctl net.ipv4.ip_forward
  sysctl:
    name: net.ipv4.ip_forward
    value: '1'
    sysctl_set: yes
    state: present
    reload: yes
  when:
    - ansible_os_family == 'Debian'
  tags:
    - install


# - name: Check if kubernetes has been Initialized
#   stat:
#     path: /etc/kubernetes/admin.conf
#   register: KubernetesInit
#   when:
#     - kubernetes_config.cluster.prime.name == inventory_hostname


- name: check if iptables is installed
  ansible.builtin.shell: |-
    dpkg -s iptables &> /dev/null
  changed_when: true == false
  register: iptables_installed
  when:
    - ansible_os_family == 'Debian'
  tags:
    - install
    - iptables
    - firewall


- name: Add kubernetes Firewall Rules - '/etc/iptables-kubernetes.rules'
  template:
    src: iptables-kubernetes.rules.j2
    dest: "/etc/iptables-kubernetes.rules"
    owner: root
    mode: 0700
    force: yes
  notify: "Apply Firewall Rules"
  when:
    - ansible_os_family == 'Debian'
    - iptables_installed.rc == 0
  tags:
    - install
    - iptables
    - firewall


- name: File - '/etc/network/if-pre-up.d/firewall-kubernetes'
  template:
    src: firewall-kubernetes.j2
    dest: "/etc/network/if-pre-up.d/firewall-kubernetes"
    owner: root
    mode: 0700
    force: yes
  when:
    - ansible_os_family == 'Debian'
    - iptables_installed.rc == 0
  tags:
    - install
    - iptables
    - firewall


- name: Create local workdir
  file:
    path: "{{ item }}"
    state: directory
    mode: 0700
  delegate_to: localhost
  connection: local
  run_once: true
  changed_when: true == false
  with_items:
    - /tmp/ansible/
  tags:
    - always
32  tasks/k3s.yaml  Normal file
@@ -0,0 +1,32 @@
---
# kubernetes_installed

- name: K3s Install
  ansible.builtin.include_tasks:
    file: k3s/install.yaml
  when: >
    install_kubernetes | default(true) | bool
    and
    not kubernetes_installed | default(false) | bool


- name: K3s Configure
  ansible.builtin.include_tasks:
    file: k3s/configure.yaml
  when: >
    install_kubernetes | default(true) | bool
    and
    kubernetes_installed | default(false) | bool


- name: Wireguard Cluster Encryption
  ansible.builtin.include_tasks:
    file: k3s/wireguard.yaml
  when: >
    install_kubernetes | default(true) | bool
    and
    kubernetes_installed | default(false) | bool
    and
    not kubernetes_installed_encryption | default(false) | bool
    and
    kubernetes_config.cluster.networking.encrypt | default(false) | bool
74  tasks/k3s/configure.yaml  Normal file
@@ -0,0 +1,74 @@
---

- name: Local Container Registry
  ansible.builtin.copy:
    content: |
      #
      # Private Container Registries for Kubernetes
      #
      # Managed By ansible/role/nfc_kubernetes
      #
      # Dont edit this file directly as it will be overwritten.
      #

      {% set registries = kubernetes_private_container_registry | default([]) -%}

      {% if registries | length > 0 %}mirrors:
      {% for entry in registries %}

        {{ entry.name }}:
          endpoint:
            - "{{ entry.url }}"

      {%- endfor %}
      {% endif %}
    dest: /etc/rancher/k3s/registries.yaml
    owner: root
    mode: '700'
  # notify: "restart ContainerD"
  # with_items: "{{ containerd.repositories }}"
  # when:
  #   ansible_os_family == 'Debian'
  #   and
  #   Kubernetes_private_container_registry | default([]) | length > 0


- name: Additional config files
  ansible.builtin.copy:
    content: |
      {{ item.content }}
    dest: "{{ item.path }}/{{ item.name }}"
    mode: '740'
    owner: root
    group: root
  loop: "{{ k3s.files }}"


- name: Copy Templates
  ansible.builtin.template:
    src: "{{ item.src }}"
    dest: "{{ item.dest }}"
    owner: root
    mode: '700'
    force: true
  notify: "{{ item.notify | default(omit) }}"
  loop: "{{ templates_to_apply }}"
  vars:
    templates_to_apply:
      - src: "calico.yaml.j2"
        dest: /var/lib/rancher/k3s/server/manifests/calico.yaml

      - src: kubernetes-manifest-rbac.yaml.j2
        dest: /var/lib/rancher/k3s/server/manifests/rbac-authorization-common.yaml

      - src: iptables-kubernetes.rules.j2
        dest: "/etc/iptables.rules.d/iptables-kubernetes.rules"
        notify: firewall_reloader

      - src: k3s-registries.yaml.j2
        dest: /etc/rancher/k3s/registries.yaml
        notify: kubernetes_restart

      - src: k3s-config.yaml.j2
        dest: /etc/rancher/k3s/config.yaml
        notify: kubernetes_restart
188  tasks/k3s/install.yaml  Normal file
@@ -0,0 +1,188 @@
---
- name: Install Software
  ansible.builtin.include_role:
    name: nfc_common
  vars:
    common_gather_facts: false
    aptInstall:
      - name: curl
      - name: iptables


- name: Create Required directories
  ansible.builtin.file:
    name: "{{ item.name }}"
    state: "{{ item.state }}"
    mode: "{{ item.mode }}"
  loop: "{{ dirs }}"
  vars:
    dirs:
      - name: /etc/rancher/k3s
        state: directory
        mode: 700
      - name: /var/lib/rancher/k3s/server/logs
        state: directory
        mode: 700
      - name: /var/lib/rancher/k3s/server/manifests
        state: directory
        mode: 700


- name: Add sysctl net.ipv4.ip_forward
  ansible.posix.sysctl:
    name: net.ipv4.ip_forward
    value: '1'
    sysctl_set: true
    state: present
    reload: true
  notify: reboot_host
  when:
    - ansible_os_family == 'Debian'
  # On change reboot


- name: Check if K3s Installed
  ansible.builtin.shell:
    cmd: |
      if [[ $(service k3s status) ]]; then exit 0; else exit 1; fi
    executable: /bin/bash
  changed_when: false
  failed_when: false
  register: k3s_installed


- name: Download K3s Binary
  ansible.builtin.uri:
    url: "{{ item.url }}"
    method: GET
    return_content: true
    status_code:
      - 200
      - 304
    dest: "{{ item.dest }}"
    mode: "744"
  register: k3s_download_files
  delegate_to: localhost
  # no_log: true
  when: ansible_os_family == 'Debian'
  loop: "{{ download_files }}"
  vars:
    ansible_connection: local
    download_files:
      - dest: /tmp/install.sh
        url: https://get.k3s.io
      - dest: "/tmp/k3s"
        url: "https://github.com/k3s-io/k3s/releases/download/v{{ KubernetesVersion + KubernetesVersion_k3s_prefix | urlencode }}/k3s"


- name: "[TRACE] Downloaded File SHA256"
  ansible.builtin.set_fact:
    hash_sha256_k3s_downloaded_binary: "{{ lookup('ansible.builtin.file', '/tmp/k3s') | hash('sha256') | string }}"
  delegate_to: localhost


- name: Existing k3s File hash
  ansible.builtin.stat:
    checksum_algorithm: sha256
    name: /usr/local/bin/k3s
  register: hash_sha256_k3s_existing_binary


- name: Copy K3s binary to Host
  ansible.builtin.copy:
    src: "/tmp/k3s"
    dest: "/usr/local/bin/k3s"
    mode: '740'
    owner: root
    group: root
  when: hash_sha256_k3s_existing_binary.stat.checksum | default('0') != hash_sha256_k3s_downloaded_binary

- name: Copy install script to Host
  ansible.builtin.copy:
    src: "/tmp/install.sh"
    dest: "/tmp/install.sh"
    mode: '755'
    owner: root
    group: root
  # when: hash_sha256_k3s_existing_binary.stat.checksum | default('0') != hash_sha256_k3s_downloaded_binary

- name: Required Initial config files
  ansible.builtin.copy:
    content: |
      {{ item.content }}
    dest: "{{ item.path }}/{{ item.name }}"
    mode: '740'
    owner: root
    group: root
  loop: "{{ k3s.files }}"
  when: >
    kubernetes_config.cluster.prime.name == inventory_hostname


- name: Copy Initial required templates
  ansible.builtin.template:
    src: "{{ item.src }}"
    dest: "{{ item.dest }}"
    owner: root
    mode: '700'
    force: true
  notify: "{{ item.notify | default(omit) }}"
  loop: "{{ templates_to_apply }}"
  vars:
    templates_to_apply:
      - src: k3s-config.yaml.j2
        dest: /etc/rancher/k3s/config.yaml
        notify: kubernetes_restart
  when: >
    kubernetes_config.cluster.prime.name == inventory_hostname

# - name: Templates IPv6
#   ansible.builtin.template:
#     src: iptables-kubernetes.rules.j2
#     dest: "/etc/ip6tables.rules.d/ip6tables-kubernetes.rules"
#     owner: root
#     mode: '700'
#     force: true
#   vars:
#     ipv6: true


- name: Set IPTables to legacy mode
  ansible.builtin.command:
    cmd: update-alternatives --set iptables /usr/sbin/iptables-legacy
  changed_when: false


- name: Server install K3s
  ansible.builtin.shell:
    cmd: |
      # INSTALL_K3S_SKIP_DOWNLOAD=true \
      # INSTALL_K3S_VERSION="v{{ KubernetesVersion }}{{ KubernetesVersion_k3s_prefix }}" \
      # /tmp/install.sh
      curl -sfL https://get.k3s.io | \
        INSTALL_K3S_VERSION="v1.26.9+k3s1" \
        sh -
  failed_when: false
  # when: >
  #   k3s_installed.rc | int == 1
  #   and
  #   Kubernetes_Master | default(false)
  when: Kubernetes_Master | default(false) | bool


- name: Agent install K3s
  ansible.builtin.shell:
    cmd: |
      INSTALL_K3S_SKIP_DOWNLOAD=true \
      INSTALL_K3S_VERSION="v{{ KubernetesVersion }}{{ KubernetesVersion_k3s_prefix }}" \
      K3S_URL=https://{{ hostvars[kubernetes_config.cluster.prime.name].ansible_host }}:6443 \
      K3S_TOKEN={{ node_token }} \
      /tmp/install.sh
  when: >
    k3s_installed.rc | int == 1
    and
    not Kubernetes_Master | default(false) | bool

- name: Set Kubernetes Final Install Fact
  ansible.builtin.set_fact:
    kubernetes_installed: true
22  tasks/k3s/wireguard.yaml  Normal file
@@ -0,0 +1,22 @@
---
- name: Install Wireguard
  ansible.builtin.apt:
    name:
      - wireguard
    update_cache: false
  when: >
    ansible_os_family == 'Debian'
  # and
  # kubernetes.networking.encrypt | default(false) | bool


- name: Enable Cluster Encryption
  ansible.builtin.command:
    cmd: kubectl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabled":true,"wireguardEnabledV6":true}}'
  changed_when: false
  when: >
    kubernetes_config.cluster.prime.name == inventory_hostname

- name: Set Kubernetes Encryption Final Install Fact
  ansible.builtin.set_fact:
    kubernetes_installed_encryption: true
103  tasks/k8s.yaml  Normal file
@@ -0,0 +1,103 @@
---
- name: Common Tasks
  include_tasks: common.yaml
  # tags:
  #   - install

- name: Check if kubernetes has been Initialized
  stat:
    path: /etc/kubernetes/admin.conf
  register: KubernetesInitialized
  tags:
    - always

- name: kubernetes prime
  include_tasks: prime.yaml
  when: kubernetes_config.cluster.prime.name == inventory_hostname


- name: kubernetes workers
  include_tasks: workers.yaml
  when: kubernetes_config.cluster.prime.name != inventory_hostname


- name: Add Kubernetes Node Labels
  kubernetes.core.k8s:
    definition:
      apiVersion: v1
      kind: Node
      metadata:
        name: "{{ inventory_hostname }}"
        labels:
          "{{ item | from_yaml_all }}"
  delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
  with_items:
    - "{{ kubernetes_config.hosts[inventory_hostname].labels }}"
  when:
    - ( kubernetes_config.hosts[inventory_hostname].labels is defined and
        kubernetes_config.hosts[inventory_hostname].labels | default('') | length > 0 )
  tags:
    - install
    - nodelabels


- name: Add Node Taints
  kubernetes.core.k8s_taint:
    state: "present"
    name: "{{ inventory_hostname }}"
    taints:
      - "{{ item | from_yaml_all }}"
  delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
  with_items:
    - "{{ kubernetes_config.hosts[inventory_hostname].taints.present }}"
  when:
    - ( kubernetes_config.hosts[inventory_hostname].taints.present is defined and
        kubernetes_config.hosts[inventory_hostname].taints.present | default('') | length > 0 )
  tags:
    - install
    - taints


- name: Remove Node Taints
  kubernetes.core.k8s_taint:
    state: "absent"
    name: "{{ inventory_hostname }}"
    taints:
      - "{{ item | from_yaml_all }}"
  delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
  with_items:
    - "{{ kubernetes_config.hosts[inventory_hostname].taints.absent }}"
  when:
    - ( kubernetes_config.hosts[inventory_hostname].taints.absent is defined and
        kubernetes_config.hosts[inventory_hostname].taints.absent | default('') | length > 0 )
  tags:
    - install
    - taints


- name: Create Cluster Namespaces
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: v1
      kind: Namespace
      metadata:
        name: "{{ item.name }}"
        labels:
          #app.kubernetes.io/version:      # App version
          #app.kubernetes.io/component:
          #app.kubernetes.io/part-of:
          app.kubernetes.io/managed-by: Ansible
          #meta.kubernetes.io/description: "{{ item.description | default('') }}"
          meta.kubernetes.io/version: "{{ deployment_git_current_short_hash | default('') }}"
  with_items:
    - "{{ kubernetes_config.namespaces }}"
  when:
    ( kubernetes_config.namespaces is defined and
      kubernetes_config.namespaces | default('') | length > 0 and
      kubernetes_config.cluster.prime.name == inventory_hostname )
  tags:
    - install
    - namespaces
14  tasks/main.yml  Normal file
@@ -0,0 +1,14 @@
---
- name: Firewall Rules
  ansible.builtin.include_role:
    name: nfc_firewall
  vars:
    nfc_firewall_enabled_kubernetes: "{{ nfc_kubernetes.enable_firewall | default(false) | bool }}"

- name: K8s Cluster
  ansible.builtin.include_tasks: k8s.yaml
  when: kubernetes_type == 'k8s'

- name: K3s Cluster
  ansible.builtin.include_tasks: k3s.yaml
  when: kubernetes_type == 'k3s'
146  tasks/prime.yaml  Normal file
@@ -0,0 +1,146 @@
---

- name: initialize Kubernetes cluster
  block:

    - name: Initializing Kubernetes Cluster
      #command: kubeadm init --pod-network-cidr "{{ KubernetesPodSubnet }}" --apiserver-advertise-address "{{ ansible_default_ipv4.address }}" --ignore-preflight-errors Mem --cri-socket=unix:///var/run/crio/crio.sock
      command: kubeadm init --pod-network-cidr "{{ KubernetesPodSubnet }}" --service-cidr "{{ KubernetesServiceSubnet }}" --apiserver-advertise-address "0.0.0.0" --ignore-preflight-errors Mem #--cri-socket=unix:///var/run/containerd/containerd.sock
      when:
        - not KubernetesInitialized.stat.exists

  rescue:

    - name: Reset Kubeadm
      ansible.builtin.shell: "{{ item }}"
      #register: kube_reset
      failed_when: item.rc != 0
      with_items:
        - kubeadm reset --force
        - rm -Rf /etc/cni/net.d


- name: Check if kubernetes has been Initialized
  stat:
    path: /etc/kubernetes/admin.conf
  register: KubernetesInitialized
  tags:
    - always


- name: fetch kubernetes health
  ansible.builtin.shell: "wget http://localhost:10248/healthz -q -O - || true"
  register: KubernetesHealth
  changed_when: true == false
  when: KubernetesInitialized.stat.exists
  tags:
    - always


- name: set kubernetes health fact
  set_fact:
    kube_health: "{{ KubernetesHealth.stdout | default(false) == 'ok' }}"
  changed_when: true == false
  tags:
    - always


- name: Create directory for kube config.
  become_method: sudo
  become: yes
  file:
    #path: /home/{{ ansible_user }}/.kube
    path: ~/.kube
    state: directory
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: 0700
  # when: Kubernetes_Master
  tags:
    - always


- name: Copy Kube config for local user
  copy:
    remote_src: yes
    src: /etc/kubernetes/admin.conf
    #dest: /home/{{ ansible_user }}/.kube/config
    dest: ~/.kube/config
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: 0700
  tags:
    - always


- name: Add calico networking.
  template:
    src: "calico.yaml.j2"
    dest: /etc/kubernetes/manifests/calico.yaml
    owner: root
    mode: 0744


- name: apply calico manifest
  command: kubectl apply -f /etc/kubernetes/manifests/calico.yaml
  tags:
    - install
    - manifest


- name: create remote workdir
  file:
    path: "{{ item }}"
    state: directory
    mode: 0700
  with_items:
    - /tmp/ansible/
  tags:
    - always


- name: Create local workdir
  file:
    path: "{{ item }}"
    state: directory
    mode: 0700
  delegate_to: localhost
  connection: local
  with_items:
    - /tmp/ansible/
  tags:
    - always


- name: get join command
  ansible.builtin.shell: kubeadm token create --print-join-command > /tmp/ansible/join_kubernetes.sh
  changed_when: true == false
  tags:
    - always


- name: download join command
  fetch:
    src: /tmp/ansible/join_kubernetes.sh
    dest: /tmp/ansible/
    flat: yes
  changed_when: true == false
  tags:
    - always


# always:

#   - name: remove remote workdir
#     file:
#       path: "{{ item }}"
#       state: absent
#     with_items:
#       - /tmp/ansible/join_kubernetes.sh
#     changed_when: true == false

#   when:
#     #- Kubernetes_Prime
#     #- KubernetesInit.stat.exists
#     - kubernetes_config.cluster.prime.name == inventory_hostname
46  tasks/workers.yaml  Normal file
@@ -0,0 +1,46 @@
---
# - name: configure non-prime nodes - check node health
#   shell: "curl http://localhost:10248/healthz || true"
#   register: health
#   changed_when: true == false

# - set_fact:
#     kube_joined: "{{ health.stdout == 'ok' }}"
#   changed_when: true == false
#   # when:
#   #   - not Kubernetes_Prime

- name: configure non-prime nodes - create remote workdir
  file:
    path: "{{ item }}"
    state: directory
    mode: 0700
  changed_when: true == false
  with_items:
    - /tmp/ansible/


- ansible.builtin.shell: "wget http://localhost:10248/healthz -q -O - || true"
  register: health
  changed_when: true == false


- set_fact:
    kube_joined: "{{ health.stdout == 'ok' }}"
  changed_when: true == false


- name: get join command from ansible controller
  copy:
    src: /tmp/ansible/join_kubernetes.sh
    dest: /tmp/ansible/join_kubernetes.sh
    mode: 0700
  changed_when: true == false
  when:
    - not kube_joined


- name: configure non-prime nodes - join node to kubernetes cluster
  command: sh /tmp/ansible/join_kubernetes.sh
  when:
    - not kube_joined
@@ -0,0 +1,51 @@
# ---
# apiVersion: kyverno.io/v1
# kind: ClusterPolicy
# metadata:
#   name: add-networkpolicy
#   labels:
#     <<: {{ kubernetes_config.defaults.labels.deployment_labels | from_yaml }}
#   annotations:
#     ansible.kubernetes.io/path: {{ item }}
#     policies.kyverno.io/title: Add Network Policy
#     policies.kyverno.io/category: Multi-Tenancy, EKS Best Practices
#     policies.kyverno.io/subject: NetworkPolicy
#     policies.kyverno.io/minversion: 1.6.0
#     policies.kyverno.io/description: >-
#       By default, Kubernetes allows communications across all Pods within a cluster.
#       The NetworkPolicy resource and a CNI plug-in that supports NetworkPolicy must be used to restrict
#       communications. A default NetworkPolicy should be configured for each Namespace to
#       default deny all ingress and egress traffic to the Pods in the Namespace. Application
#       teams can then configure additional NetworkPolicy resources to allow desired traffic
#       to application Pods from select sources. This policy will create a new NetworkPolicy resource
#       named `default-deny` which will deny all traffic anytime a new Namespace is created.
# spec:
#   rules:
#     - name: default-deny
#       match:
#         any:
#           - resources:
#               kinds:
#                 - Namespace
#       exclude:
#         any:
#           - resources:
#               namespaces:
#                 - kube-metrics
#                 - kube-policy
#                 - kube-system
#                 - default
#       generate:
#         apiVersion: networking.k8s.io/v1
#         kind: NetworkPolicy
#         name: default-deny
#         namespace: "{{'{{request.object.metadata.name}}'}}"
#         synchronize: true
#         data:
#           spec:
#             # select all pods in the namespace
#             podSelector: {}
#             # deny all traffic
#             policyTypes:
#               - Ingress
#               - Egress
@ -0,0 +1,60 @@
# ---
# apiVersion: kyverno.io/v1
# kind: ClusterPolicy
# metadata:
#   name: add-networkpolicy-dns
#   labels:
#     <<: {{ kubernetes_config.defaults.labels.deployment_labels | from_yaml }}
#   annotations:
#     ansible.kubernetes.io/path: {{ item }}
#     policies.kyverno.io/title: Add Network Policy for DNS
#     policies.kyverno.io/category: Multi-Tenancy, EKS Best Practices
#     policies.kyverno.io/subject: NetworkPolicy
#     kyverno.io/kyverno-version: 1.6.2
#     policies.kyverno.io/minversion: 1.6.0
#     kyverno.io/kubernetes-version: "1.23"
#     policies.kyverno.io/description: >-
#       By default, Kubernetes allows communications across all Pods within a cluster.
#       The NetworkPolicy resource and a CNI plug-in that supports NetworkPolicy must be used to restrict
#       communications. A default NetworkPolicy should be configured for each Namespace to
#       default deny all ingress and egress traffic to the Pods in the Namespace. Application
#       teams can then configure additional NetworkPolicy resources to allow desired traffic
#       to application Pods from select sources. This policy will create a new NetworkPolicy resource
#       named `allow-dns` which will allow all Pods in the Namespace to send egress traffic
#       on port 53 anytime a new Namespace is created.
# spec:
#   generateExistingOnPolicyUpdate: true
#   rules:
#   - name: add-netpol-dns
#     match:
#       any:
#       - resources:
#           kinds:
#           - Namespace
#     exclude:
#       any:
#       - resources:
#           namespaces:
#           - kube-metrics
#           - kube-policy
#           - kube-system
#           - default
#     generate:
#       apiVersion: networking.k8s.io/v1
#       kind: NetworkPolicy
#       name: allow-dns
#       namespace: "{{'{{request.object.metadata.name}}'}}"
#       synchronize: true
#       data:
#         spec:
#           podSelector:
#             matchLabels: {}
#           policyTypes:
#           - Egress
#           egress:
#           - to:
#             - namespaceSelector:
#                 matchLabels:
#                   name: kube-system
#             ports:
#             - protocol: UDP
#               port: 53
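For illustration only (not part of the role): were the commented policy above enabled, creating a Namespace called `my-app` (an assumed name) should cause Kyverno to generate approximately this resource:

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-dns
  namespace: my-app
spec:
  podSelector:
    matchLabels: {}
  policyTypes:
  - Egress
  egress:
  - to:
    - namespaceSelector:
        matchLabels:
          name: kube-system
    ports:
    - protocol: UDP
      port: 53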
@ -0,0 +1,48 @@
---
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-mutable-tag
  labels:
    <<: {{ kubernetes_config.defaults.labels.deployment_labels | from_yaml }}
  annotations:
    ansible.kubernetes.io/path: {{ item }}
    policies.kyverno.io/title: Disallow Mutable Tag
    policies.kyverno.io/category: Best Practices
    policies.kyverno.io/minversion: 1.6.0
    policies.kyverno.io/severity: medium
    policies.kyverno.io/subject: Pod
    policies.kyverno.io/description: >-
      The ':latest', ':master' and ':dev(elopment)' tags are mutable and can lead to unexpected errors if the
      image changes. A best practice is to use an immutable tag that maps to
      a specific version of an application Pod. This policy validates that the image
      specifies a tag and that it is not called `latest`, `master` or `dev(elopment)`.
spec:
  #failurePolicy: Fail
  validationFailureAction: Audit
  background: true
  rules:
  - name: require-image-tag
    match:
      any:
      - resources:
          kinds:
          - Pod
    validate:
      message: "An image tag is required."
      pattern:
        spec:
          containers:
          - image: "*:*"
  - name: validate-image-tag
    match:
      any:
      - resources:
          kinds:
          - Pod
    validate:
      message: "Using a mutable image tag e.g. 'latest', 'master' or 'dev[elopment]' is not allowed."
      pattern:
        spec:
          containers:
          - image: "!*:[latest|master|dev|development]"
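For reference, a minimal sketch of a Pod that passes both rules above; the name and image are assumed, the point is the pinned, immutable tag (an image such as nginx:latest or nginx:master would be reported in Audit mode):

apiVersion: v1
kind: Pod
metadata:
  name: web            # assumed name
  namespace: my-app
spec:
  containers:
  - name: web
    image: nginx:1.25.3   # explicit, immutable tag satisfies "*:*" and avoids the deny-list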
@ -0,0 +1,52 @@
---
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-default-namespace
  labels:
    <<: {{ kubernetes_config.defaults.labels.deployment_labels | from_yaml }}
  annotations:
    pod-policies.kyverno.io/autogen-controllers: none
    policies.kyverno.io/title: Disallow Default Namespace
    policies.kyverno.io/minversion: 1.6.0
    policies.kyverno.io/category: Multi-Tenancy
    policies.kyverno.io/severity: medium
    policies.kyverno.io/subject: Pod
    policies.kyverno.io/description: >-
      Kubernetes Namespaces are an optional feature that provide a way to segment and
      isolate cluster resources across multiple applications and users. As a best
      practice, workloads should be isolated with Namespaces. Namespaces should be required
      and the default (empty) Namespace should not be used. This policy validates that Pods
      specify a Namespace name other than `default`. Rule auto-generation is disabled here
      because Pod controllers need to specify the `namespace` field under the top-level `metadata`
      object and not at the Pod template level.
spec:
  #failurePolicy: Fail
  validationFailureAction: Audit
  background: true
  rules:
  - name: validate-namespace
    match:
      any:
      - resources:
          kinds:
          - Pod
    validate:
      message: "Using 'default' namespace is not allowed."
      pattern:
        metadata:
          namespace: "!default"
  - name: validate-podcontroller-namespace
    match:
      any:
      - resources:
          kinds:
          - DaemonSet
          - Deployment
          - Job
          - StatefulSet
    validate:
      message: "Using 'default' namespace is not allowed for pod controllers."
      pattern:
        metadata:
          namespace: "!default"
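For reference, workloads satisfy this policy simply by declaring an explicit namespace; a minimal sketch (names assumed):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: web          # assumed name
  namespace: my-app  # any value other than `default` passes validation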
@ -0,0 +1,48 @@
---
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: spread-pods
  labels:
    <<: {{ kubernetes_config.defaults.labels.deployment_labels | from_yaml }}
  annotations:
    policies.kyverno.io/title: Spread Pods Across Nodes
    policies.kyverno.io/category: Sample
    policies.kyverno.io/subject: Deployment, Pod
    policies.kyverno.io/minversion: 1.6.0
    policies.kyverno.io/description: >-
      Deployments to a Kubernetes cluster with multiple nodes often need to
      distribute replicas across those nodes to ensure node-level failures
      do not impact availability. This policy matches Deployments and StatefulSets
      with two or more replicas and mutates them to spread Pods across nodes.
spec:
  generateExistingOnPolicyUpdate: true
  background: true
  rules:
  - name: spread-pods-across-nodes
    # Matches any Deployment or StatefulSet with at least two replicas
    match:
      any:
      - resources:
          kinds:
          - Deployment
          - StatefulSet
    preconditions:
      all:
      - key: "{{ '{{ request.object.spec.replicas }}' }}"
        operator: GreaterThanOrEquals
        value: 2
    # Mutates the incoming resource.
    mutate:
      patchStrategicMerge:
        spec:
          template:
            spec:
              # Adds the topologySpreadConstraints field if non-existent in the request.
              +(topologySpreadConstraints):
              - maxSkew: 1
                topologyKey: kubernetes.io/hostname
                whenUnsatisfiable: ScheduleAnyway
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/name: "{% raw %} '{{ request.object.metadata.labels.\"app.kubernetes.io/name\" }}' {% endraw %}"
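For illustration, after mutation a hypothetical Deployment labelled app.kubernetes.io/name: web with two or more replicas would carry roughly this injected field:

spec:
  template:
    spec:
      topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: kubernetes.io/hostname
        whenUnsatisfiable: ScheduleAnyway
        labelSelector:
          matchLabels:
            app.kubernetes.io/name: "web"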
@ -0,0 +1,38 @@
# apiVersion: networking.k8s.io/v1
# kind: NetworkPolicy
# metadata:
#   name: kube-metrics
#   namespace: kube-metrics
#   labels:
#     app.kubernetes.io/name: kube-metrics
#     # app.kubernetes.io/instance: { .Release.Name }}
#     # app.kubernetes.io/version: { .Chart.Version | quote }}
#     # app.kubernetes.io/managed-by: { .Release.Service }}
#     app.kubernetes.io/component: loki
#     app.kubernetes.io/part-of: metrics

# spec:
#   egress:
#   - to:
#     #- podSelector:
#     - namespaceSelector:
#         matchLabels:
#           kubernetes.io/metadata.name: "default"
#     ports:
#     - port: 443
#       protocol: TCP
#   # ingress:
#   # - from:
#   #   #- podSelector:
#   #   - namespaceSelector:
#   #       matchLabels:
#   #         #app.kubernetes.io/name: prometheus
#   #         #app.kubernetes.io/instance: k8s
#   #         #app.kubernetes.io/managed-by: prometheus-operator
#   #         app.kubernetes.io/name: grafana-agent
#   #         #app.kubernetes.io/part-of: kube-prometheus

#   #         #app: grafana
#   policyTypes:
#   - Egress
#   #- Ingress
4983 templates/calico.yaml.j2 Normal file
File diff suppressed because it is too large
10 templates/containerd-registry-hosts.toml.j2 Normal file
@ -0,0 +1,10 @@
#
# {{ item.name }} Container Registry Configuration
# Managed by: Ansible
#

server = "{{ item.server }}"

[host."{{ item.url }}"]
  capabilities = {{ item.capabilities | from_yaml_all }}
  skip_verify = {{ item.skip_verify | default(false) | lower }}
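For illustration, with assumed values (name: docker.io, server: https://docker.io, url: https://registry-1.docker.io, capabilities of pull and resolve) the template should render to roughly the following; per containerd convention, and matching the config_path set in the containerd configuration below, the file would live at /etc/containerd/certs.d/docker.io/hosts.toml:

#
# docker.io Container Registry Configuration
# Managed by: Ansible
#

server = "https://docker.io"

[host."https://registry-1.docker.io"]
  capabilities = ["pull", "resolve"]
  skip_verify = false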
20 templates/etc_cni_net.d_100-crio-bridge.conf.j2 Normal file
@ -0,0 +1,20 @@
{
  "cniVersion": "0.3.1",
  "name": "crio",
  "type": "bridge",
  "bridge": "cni0",
  "isGateway": true,
  "ipMasq": true,
  "hairpinMode": true,
  "ipam": {
    "type": "host-local",
    "routes": [
      { "dst": "0.0.0.0/0" },
      { "dst": "1100:200::1/24" }
    ],
    "ranges": [
      [{ "subnet": "{{ KubernetesPodSubnet }}" }],
      [{ "subnet": "1100:200::/24" }]
    ]
  }
}
250 templates/etc_containerd_containerd.toml Normal file
@ -0,0 +1,250 @@
disabled_plugins = []
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/containerd"
state = "/run/containerd"
temp = ""
version = 2

[cgroup]
  path = ""

[debug]
  address = ""
  format = ""
  gid = 0
  level = ""
  uid = 0

[grpc]
  address = "/run/containerd/containerd.sock"
  gid = 0
  max_recv_message_size = 16777216
  max_send_message_size = 16777216
  tcp_address = ""
  tcp_tls_ca = ""
  tcp_tls_cert = ""
  tcp_tls_key = ""
  uid = 0

[metrics]
  address = ""
  grpc_histogram = false

[plugins]

  [plugins."io.containerd.gc.v1.scheduler"]
    deletion_threshold = 0
    mutation_threshold = 100
    pause_threshold = 0.02
    schedule_delay = "0s"
    startup_delay = "100ms"

  [plugins."io.containerd.grpc.v1.cri"]
    device_ownership_from_security_context = false
    disable_apparmor = false
    disable_cgroup = false
    disable_hugetlb_controller = true
    disable_proc_mount = false
    disable_tcp_service = true
    enable_selinux = false
    enable_tls_streaming = false
    enable_unprivileged_icmp = false
    enable_unprivileged_ports = false
    ignore_image_defined_volumes = false
    max_concurrent_downloads = 3
    max_container_log_line_size = 16384
    netns_mounts_under_state_dir = false
    restrict_oom_score_adj = false
    sandbox_image = "registry.k8s.io/pause:3.6"
    selinux_category_range = 1024
    stats_collect_period = 10
    stream_idle_timeout = "4h0m0s"
    stream_server_address = "127.0.0.1"
    stream_server_port = "0"
    systemd_cgroup = false
    tolerate_missing_hugetlb_controller = true
    unset_seccomp_profile = ""

    [plugins."io.containerd.grpc.v1.cri".cni]
      bin_dir = "/opt/cni/bin"
      conf_dir = "/etc/cni/net.d"
      conf_template = ""
      ip_pref = ""
      max_conf_num = 1

    [plugins."io.containerd.grpc.v1.cri".containerd]
      default_runtime_name = "runc"
      disable_snapshot_annotations = true
      discard_unpacked_layers = false
      ignore_rdt_not_enabled_errors = false
      no_pivot = false
      snapshotter = "overlayfs"

      [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
        base_runtime_spec = ""
        cni_conf_dir = ""
        cni_max_conf_num = 0
        container_annotations = []
        pod_annotations = []
        privileged_without_host_devices = false
        runtime_engine = ""
        runtime_path = ""
        runtime_root = ""
        runtime_type = ""

        [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]

      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]

        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
          base_runtime_spec = ""
          cni_conf_dir = ""
          cni_max_conf_num = 0
          container_annotations = []
          pod_annotations = []
          privileged_without_host_devices = false
          runtime_engine = ""
          runtime_path = ""
          runtime_root = ""
          runtime_type = "io.containerd.runc.v2"

          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
            BinaryName = ""
            CriuImagePath = ""
            CriuPath = ""
            CriuWorkPath = ""
            IoGid = 0
            IoUid = 0
            NoNewKeyring = false
            NoPivotRoot = false
            Root = ""
            ShimCgroup = ""
            SystemdCgroup = true

      [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
        base_runtime_spec = ""
        cni_conf_dir = ""
        cni_max_conf_num = 0
        container_annotations = []
        pod_annotations = []
        privileged_without_host_devices = false
        runtime_engine = ""
        runtime_path = ""
        runtime_root = ""
        runtime_type = ""

        [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]

    [plugins."io.containerd.grpc.v1.cri".image_decryption]
      key_model = "node"

    [plugins."io.containerd.grpc.v1.cri".registry]
      config_path = "/etc/containerd/certs.d"

      [plugins."io.containerd.grpc.v1.cri".registry.auths]

      [plugins."io.containerd.grpc.v1.cri".registry.configs]

      [plugins."io.containerd.grpc.v1.cri".registry.headers]

      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]

    [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
      tls_cert_file = ""
      tls_key_file = ""

  [plugins."io.containerd.internal.v1.opt"]
    path = "/opt/containerd"

  [plugins."io.containerd.internal.v1.restart"]
    interval = "10s"

  [plugins."io.containerd.internal.v1.tracing"]
    sampling_ratio = 1.0
    service_name = "containerd"

  [plugins."io.containerd.metadata.v1.bolt"]
    content_sharing_policy = "shared"

  [plugins."io.containerd.monitor.v1.cgroups"]
    no_prometheus = false

  [plugins."io.containerd.runtime.v1.linux"]
    no_shim = false
    runtime = "runc"
    runtime_root = ""
    shim = "containerd-shim"
    shim_debug = false

  [plugins."io.containerd.runtime.v2.task"]
    platforms = ["linux/amd64"]
    sched_core = false

  [plugins."io.containerd.service.v1.diff-service"]
    default = ["walking"]

  [plugins."io.containerd.service.v1.tasks-service"]
    rdt_config_file = ""

  [plugins."io.containerd.snapshotter.v1.aufs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.btrfs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.devmapper"]
    async_remove = false
    base_image_size = ""
    discard_blocks = false
    fs_options = ""
    fs_type = ""
    pool_name = ""
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.native"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.overlayfs"]
    root_path = ""
    upperdir_label = false

  [plugins."io.containerd.snapshotter.v1.zfs"]
    root_path = ""

  [plugins."io.containerd.tracing.processor.v1.otlp"]
    endpoint = ""
    insecure = false
    protocol = ""

[proxy_plugins]

[stream_processors]

  [stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
    accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
    args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
    env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
    path = "ctd-decoder"
    returns = "application/vnd.oci.image.layer.v1.tar"

  [stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
    accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
    args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
    env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
    path = "ctd-decoder"
    returns = "application/vnd.oci.image.layer.v1.tar+gzip"

[timeouts]
  "io.containerd.timeout.bolt.open" = "0s"
  "io.containerd.timeout.shim.cleanup" = "5s"
  "io.containerd.timeout.shim.load" = "5s"
  "io.containerd.timeout.shim.shutdown" = "3s"
  "io.containerd.timeout.task.state" = "2s"

[ttrpc]
  address = ""
  gid = 0
  uid = 0
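As a minimal sketch (the task name and handler are assumptions, not taken from this role), the file above could be deployed to the host with a standard template task:

- name: Deploy containerd configuration    # assumed task name
  ansible.builtin.template:
    src: etc_containerd_containerd.toml
    dest: /etc/containerd/config.toml
    owner: root
    group: root
    mode: '0644'
  notify: restart containerd               # assumed handler name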
2 templates/etc_module_containerd.conf Normal file
@ -0,0 +1,2 @@
overlay
br_netfilter
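As a minimal sketch (an assumed task, not taken from this role), the modules listed above could be loaded at runtime with community.general.modprobe, with the file itself deployed under /etc/modules-load.d/ for persistence across reboots:

- name: Load containerd kernel modules    # assumed task name
  community.general.modprobe:
    name: "{{ item }}"
    state: present
  loop:
    - overlay
    - br_netfilter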
263 templates/iptables-kubernetes.rules.j2 Normal file
@ -0,0 +1,263 @@
#
# IP Tables Firewall Rules for Kubernetes
#
# Managed By ansible/role/nfc_kubernetes
#
# Don't edit this file directly as it will be overwritten. To grant a host API access
# edit the cluster config, adding the hostname/ip to path kubernetes_config.cluster.access
#
# This file is periodically called by cron
#

{% set data = namespace(firewall_rules=[]) -%}

{%- if ansible_host is regex('^[a-z]') and ':' not in ansible_host -%} {#- Convert DNS name to IP Address -#}

    {%- if ipv6 | default(false) -%}
        {%- set ansible_host = query('community.dns.lookup', ansible_host + '.', type='AAAA' ) -%}
    {%- else -%}
        {%- set ansible_host = query('community.dns.lookup', ansible_host + '.', type='A' ) -%}
    {%- endif -%}

    {%- if ansible_host | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
        {%- set ansible_host = ansible_host | from_yaml_all | list -%}
        {%- set ansible_host = ansible_host[0] -%}
    {%- endif -%}

{%- endif -%}

{%- for kubernetes_host in groups[kubernetes_type] -%}

    {%- if kubernetes_host is regex('^[a-z]') and ':' not in kubernetes_host -%} {#- Convert DNS name to IP Address -#}

        {%- if ipv6 | default(false) -%}
            {%- set kubernetes_host = query('community.dns.lookup', kubernetes_host + '.', type='AAAA' ) -%}
        {%- else -%}
            {%- set kubernetes_host = query('community.dns.lookup', kubernetes_host + '.', type='A' ) -%}
        {%- endif -%}

        {%- if kubernetes_host | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
            {%- set kubernetes_host = kubernetes_host | from_yaml_all | list -%}
            {%- set kubernetes_host = kubernetes_host[0] -%}
        {%- endif -%}

    {%- endif -%}

    {%- for master_host in groups['kubernetes_master'] -%}

        {%- if master_host is regex('^[a-z]') and ':' not in master_host -%} {#- Convert DNS name to IP Address -#}

            {%- if ipv6 | default(false) -%}
                {%- set master_host = query('community.dns.lookup', master_host + '.', type='AAAA' ) -%}
            {%- else -%}
                {%- set master_host = query('community.dns.lookup', master_host + '.', type='A' ) -%}
            {%- endif -%}

            {%- if master_host | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
                {%- set master_host = master_host | from_yaml_all | list -%}
                {%- set master_host = master_host[0] -%}
            {%- endif -%}

        {%- endif -%}

        {%- if Kubernetes_Master | default(false) | bool -%}

            {%- if
                master_host == kubernetes_host
                and
                master_host != ansible_host
                and
                (
                    ( ipv6 | default(false) and ':' in master_host )
                    or
                    ( not ipv6 | default(false) and '.' in master_host )
                )
            -%}

                {#- master hosts only -#}
                {%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-embedded-etcd -s ' + master_host + ' -j ACCEPT'] -%}
                {# {%- set data.firewall_rules = data.firewall_rules + ['-I INPUT -s ' + master_host + ' -p tcp -m multiport --dports 2380 -j ACCEPT'] -%} #}
                {%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-api -s ' + master_host + ' -j ACCEPT'] -%}

            {%- endif -%}

        {%- endif -%}

    {%- endfor -%}

    {%- if
        ansible_host != kubernetes_host
        and
        (
            ( ipv6 | default(false) and ':' in kubernetes_host )
            or
            ( not ipv6 | default(false) and '.' in kubernetes_host )
        )
    -%}

        {#- All cluster Hosts -#}
        {%- set data.firewall_rules = data.firewall_rules + ['-I kubelet-metrics -s ' + kubernetes_host + ' -j ACCEPT'] -%}

    {%- endif -%}

{%- endfor -%}

{%- for api_client in kubernetes_config.cluster.access | default([]) -%}

    {%- if api_client is regex('^[a-z]') and ':' not in api_client -%} {#- Convert DNS name to IP Address -#}

        {%- set api_client_dns_name = api_client -%}

        {%- if ipv6 | default(false) -%}
            {%- set api_client = query('community.dns.lookup', api_client + '.', type='AAAA' ) -%}
        {%- else -%}
            {%- set api_client = query('community.dns.lookup', api_client + '.', type='A' ) -%}
        {%- endif -%}

        {%- if api_client | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
            {%- set api_client = api_client | from_yaml_all | list -%}
            {%- set api_client = api_client[0] -%}
        {%- endif -%}

    {%- endif -%}

    {%- if
        api_client != ansible_host
        and
        (
            ( ipv6 | default(false) and ':' in api_client )
            or
            ( not ipv6 | default(false) and '.' in api_client )
        )
    -%}

        {#- Hosts allowed to access API -#}
        {%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-api -s ' + api_client + ' -m comment --comment "host: ' + api_client_dns_name | default(api_client) + '" -j ACCEPT'] -%}

    {%- endif -%}

{%- endfor %}

*filter

{# -N kubernetes-embedded-etcd
-A kubernetes-embedded-etcd -j RETURN

-A INPUT -p tcp -m multiport --dports 2379,2380 -m comment --comment "etcd. Servers only" -j kubernetes-embedded-etcd


-N kubernetes-api
-A kubernetes-api -j RETURN

-A INPUT -p tcp --dport 6443 -m comment --comment "Kubernetes API access. All Cluster hosts and end users" -j kubernetes-api


-N kubernetes-flannel-vxlan
-A kubernetes-flannel-vxlan -j RETURN

-A INPUT -p udp --dport 8472 -m comment --comment "Flannel. All cluster hosts" -j kubernetes-flannel-vxlan


-N kubernetes-kubelet-metrics
-A kubernetes-kubelet-metrics -j RETURN

-A INPUT -p tcp --dport 10250 -m comment --comment "Kubernetes Metrics. All cluster hosts" -j kubernetes-kubelet-metrics


-N kubernetes-flannel-wg-four
-A kubernetes-flannel-wg-four -j RETURN

-A INPUT -p udp --dport 51820 -m comment --comment "Flannel WireGuard IPv4. All cluster hosts" -j kubernetes-flannel-wg-four


-N kubernetes-flannel-wg-six
-A kubernetes-flannel-wg-six -j RETURN

-A INPUT -p udp --dport 51821 -m comment --comment "Flannel WireGuard IPv6. All cluster hosts" -j kubernetes-flannel-wg-six #}


{% if data.firewall_rules | length | int > 0 -%}
{% for rule in data.firewall_rules -%}
{{ rule }}
{% endfor -%}
{% endif -%}

{#- #-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 6443 -j ACCEPT
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 179 -j ACCEPT
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 10250 -j ACCEPT

#-I INPUT -s 192.168.1.0/24 -p udp -m multiport --dports 4789 -j ACCEPT
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 2379 -j ACCEPT
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 2380 -j ACCEPT


-I INPUT -p tcp -m multiport --dports 6443 -j ACCEPT
-I INPUT -p tcp -m multiport --dports 179 -j ACCEPT
-I INPUT -p tcp -m multiport --dports 10250 -j ACCEPT

-I INPUT -p udp -m multiport --dports 4789 -j ACCEPT
-I INPUT -p tcp -m multiport --dports 2379 -j ACCEPT
-I INPUT -p tcp -m multiport --dports 2380 -j ACCEPT #}

COMMIT


{# iptables -I kubernetes-api -s nww-au1.networkedweb.com -j ACCEPT #}
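For illustration, on a control-plane node in a small cluster the rendered rules file would take roughly this shape (the addresses and hostname are assumed values):

*filter
-I kubernetes-embedded-etcd -s 192.168.1.11 -j ACCEPT
-I kubernetes-api -s 192.168.1.11 -j ACCEPT
-I kubelet-metrics -s 192.168.1.12 -j ACCEPT
-I kubernetes-api -s 192.168.1.50 -m comment --comment "host: desktop1.example.com" -j ACCEPT
COMMIT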
29 templates/k3s-config.yaml.j2 Normal file
@ -0,0 +1,29 @@
#
# K3s Configuration for running Kubernetes
#
# Managed By ansible/role/nfc_kubernetes
#
# Don't edit this file directly as it will be overwritten.
#

flannel-backend: none
cluster-cidr: "{{ KubernetesPodSubnet }}"
cluster-init: true
{% if not Kubernetes_Prime | default(false) | bool -%}server: https://{{ hostvars[kubernetes_config.cluster.prime.name].ansible_host }}:6443{% endif %}
service-cidr: "{{ KubernetesServiceSubnet }}"
disable-network-policy: true
disable:
  - traefik
kube-apiserver-arg:
  - audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log
  - audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml
  # - admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml
{% if kubernetes_oidc.enabled | default(false) | bool -%}
  - oidc-issuer-url={{ kubernetes_oidc.issuer_url }}
  - oidc-client-id={{ kubernetes_oidc.client_id }}
  - oidc-username-claim={{ kubernetes_oidc.username_claim }}
  {% if kubernetes_oidc.oidc_username_prefix | default('') != '' -%} - oidc-username-prefix={{ kubernetes_oidc.oidc_username_prefix }}{% endif %}
  - oidc-groups-claim={{ kubernetes_oidc.groups_claim }}
  {% if kubernetes_oidc.groups_prefix | default('') != '' %} - oidc-groups-prefix={{ kubernetes_oidc.groups_prefix }}{% endif %}
{% endif %}
{% if host_external_ip | default('') %} node-external-ip: "{{ host_external_ip }}"{% endif %}
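For illustration, the rendered config for a non-prime control-plane node might look like the following (addresses and subnets are assumed values, OIDC and external IP disabled):

flannel-backend: none
cluster-cidr: "10.85.0.0/16"
cluster-init: true
server: https://192.168.1.10:6443
service-cidr: "10.86.0.0/16"
disable-network-policy: true
disable:
  - traefik
kube-apiserver-arg:
  - audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log
  - audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml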
19 templates/k3s-registries.yaml.j2 Normal file
@ -0,0 +1,19 @@
#
# Private Container Registries for Kubernetes
#
# Managed By ansible/role/nfc_kubernetes
#
# Don't edit this file directly as it will be overwritten.
#

{% set registries = kubernetes_private_container_registry | default([]) -%}

{% if registries | length > 0 %}mirrors:
{% for entry in registries %}

  {{ entry.name }}:
    endpoint:
      - "{{ entry.url }}"

{%- endfor %}
{% endif %}
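For illustration, with one assumed registry entry (name: registry.example.com, url: https://registry.example.com) the template should render (comment header omitted) as:

mirrors:

  registry.example.com:
    endpoint:
      - "https://registry.example.com"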
240 templates/kubernetes-manifest-rbac.yaml.j2 Normal file
@ -0,0 +1,240 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    authorization/description: >-
      Provide full access to everything.

      Using this cluster role should be avoided; instead, create additional
      cluster roles scoped to the specific authorization requirements.
    authorization/target: cluster, namespace
  labels:
    app.kubernetes.io/part-of: nfc_kubernetes
    app.kubernetes.io/managed-by: ansible
    app.kubernetes.io/version: ''
  name: authorization:full
rules:
- apiGroups:
  - "*"
  resources:
  - "*"
  verbs:
  - "*"

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    authorization/description: |-
      Provide access for reading ALL non-secret items; this includes reading pod and node metrics.

      This role is designed for users who require access to audit/view/diagnose at either the
      cluster level `ClusterRoleBinding` or namespace level `RoleBinding`
    authorization/target: namespace
  labels:
    app.kubernetes.io/part-of: nfc_kubernetes
    app.kubernetes.io/managed-by: ansible
    app.kubernetes.io/version: ''
  name: authorization:namespace:read
rules:
- apiGroups: # Get Metrics
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups: # Read-only access to resources
  - "*"
  resources:
  - awx
  - cronjobs
  - daemonset
  - deployments
  - helmcharts
  - helmchartconfigs
  - ingress
  - jobs
  - namespaces
  - pods
  - pv
  - pvc
  - serviceaccount
  - services
  - statefulset
  - storageclasses
  - configmap
  verbs:
  - get
  - list
  - watch

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    authorization/description: |-
      Provide access for reading ALL items.

      This role is designed for namespace owners and is intended to be
      bound to a namespace using a `RoleBinding`
    authorization/target: namespace
  labels:
    app.kubernetes.io/part-of: nfc_kubernetes
    app.kubernetes.io/managed-by: ansible
    app.kubernetes.io/version: ''
  name: authorization:namespace:owner
rules:
- apiGroups: # Read-write access to resources
  - "*"
  resources:
  - awx
  - cronjobs
  - daemonset
  - deployments
  - helmcharts
  - helmchartconfigs
  - ingress
  - jobs
  - pods
  - pvc
  - roles
  - rolebindings
  - secrets
  - serviceaccount
  - services
  - statefulset
  - storageclasses
  - configmap
  verbs:
  - create
  - get
  - list
  - watch
  - delete

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: authorization:cluster:view-metrics
rules:
- apiGroups:
  - metrics.k8s.io
  - "" # Without this, metrics don't work. This also grants access to view nodes
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch


---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: authorization:read
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: authorization:namespace:read
subjects:
- kind: Group
  name: administrators
- kind: Group
  name: technician

- kind: Group
  name: NodeRED


---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: authorization:view-metrics
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: authorization:cluster:view-metrics
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: administrators
- kind: Group
  name: technician


# ---
# kind: ClusterRoleBinding
# apiVersion: rbac.authorization.k8s.io/v1
# metadata:
#   name: authorization:full
# roleRef:
#   apiGroup: rbac.authorization.k8s.io
#   kind: ClusterRole
#   name: authorization:full
# subjects:
# - kind: Group
#   name: administrators
# - kind: Group
#   name: technician


###################################################################################################################
# Namespace role binding


# ---
# apiVersion: rbac.authorization.k8s.io/v1
# kind: RoleBinding
# metadata:
#   # labels:

#   name: authorization:full
#   namespace: development
# roleRef:
#   apiGroup: rbac.authorization.k8s.io
#   kind: Role
#   name: authorization:full
# subjects:
# - kind: Group
#   name: administrators
#   namespace: development
# - kind: Group
#   name: technician

# - kind: Group
#   name: NodeRED


# ---

# - apiVersion: rbac.authorization.k8s.io/v1
#   kind: Role
#   metadata:
#     labels:
#       app.kubernetes.io/description: |-
#         provide full access to the testing namespace
#     name: authorization:full
#     namespace: development
#   rules:
#   - apiGroups:
#     - ""
#     resources:
#     - ""
#     verbs:
#     - add
#     - delete
#     - edit
#     - get
#     - list
#     - watch
1 website-template Submodule
Submodule website-template added at 992b54805b