Compare commits
271 Commits
Author | SHA1 | Date | |
---|---|---|---|
1a11014420 | |||
ad55d3e874 | |||
e0035d88df | |||
52c4ee12fa | |||
b4d5031b0a | |||
3cf2a2e169 | |||
358891e1cc | |||
9fa3b233a9 | |||
9ec1ba4c51 | |||
bb707149f6 | |||
f622228493 | |||
5efd9807f6 | |||
f09a71ef77 | |||
9d9cffb03a | |||
50c89c9f00 | |||
325b0e51d0 | |||
1068223abd | |||
241c737647 | |||
33a40d0ba9 | |||
0ce3ed1245 | |||
0097556730 | |||
6faee04b39 | |||
ef8255cca6 | |||
725e8dbfec | |||
c5b9420ed9 | |||
c5b4add4c7 | |||
aa3735f271 | |||
0ccb121955 | |||
98a9e6dcdf | |||
7271e28c76 | |||
70a350bf56 | |||
af10814791 | |||
f139827554 | |||
5980123e7a | |||
7ef739d063 | |||
4d44c01b32 | |||
c5371b8ff4 | |||
7c20146660 | |||
6c4616873e | |||
3243578951 | |||
0fd15f2195 | |||
03e48c7031 | |||
11756037a3 | |||
6498a48e82 | |||
053d1f17ec | |||
17ff472577 | |||
ec94414383 | |||
1faae0327e | |||
17e3318c3c | |||
89b5593abf | |||
10eae79a74 | |||
0be7080089 | |||
d3666c6825 | |||
4af31ff3ac | |||
74187c7023 | |||
47ac3095b6 | |||
dd4638bc93 | |||
3ed6fd0f4c | |||
beb1bd2006 | |||
4a83550530 | |||
7c54b19b64 | |||
173c840121 | |||
f0f5d686fa | |||
536c6e7b26 | |||
a23bc5e9ee | |||
5444f583e5 | |||
b4ad0a4e61 | |||
c9961973e1 | |||
622338e497 | |||
dec65ed57c | |||
71d1dd884e | |||
7a077dabe0 | |||
16add8a5b8 | |||
1bbbdd23c3 | |||
9552ed7703 | |||
05fc3455da | |||
8f81d10168 | |||
9cdab3446d | |||
d522559277 | |||
3e4a17437c | |||
447bb621cd | |||
32c3f7ab71 | |||
7e86574684 | |||
4d8f2c57d5 | |||
b063db8dc1 | |||
27eaff7547 | |||
d7e9f64161 | |||
826468fc42 | |||
164b59c100 | |||
29a9e696a9 | |||
6a10eb22cc | |||
43c6c940a1 | |||
9d5a078320 | |||
2ec8fe814c | |||
2b041c1cca | |||
af26559485 | |||
cb5a5697c1 | |||
c7a5c7c7e3 | |||
aca7e557a6 | |||
f1d20aac80 | |||
3b760db6e7 | |||
83ddfd4fbf | |||
967829d9e4 | |||
56ac6eb3b4 | |||
283568f72a | |||
c7a3e617f0 | |||
21d0dbefa9 | |||
9dad960208 | |||
96ff6ba860 | |||
edd4d2b434 | |||
9dcea39df6 | |||
c765efe99d | |||
916a3b475b | |||
dc53c7694a | |||
d4efa4c9b3 | |||
b4481d3f27 | |||
315ea4058e | |||
7019150433 | |||
3bd2b88ecb | |||
eabbe49ed9 | |||
5585c1eb0b | |||
37bf447779 | |||
41a59a80d9 | |||
79a64c670d | |||
672b0c03c0 | |||
60054a23ab | |||
88c54d5b59 | |||
7adeb7daee | |||
434a40be1e | |||
7ecd4e21fa | |||
ef90e653df | |||
abb7042cbd | |||
a45fe0c9f9 | |||
623d178196 | |||
b915b1e947 | |||
6c0c18dd7b | |||
e2a438ec8f | |||
d0388fb0fe | |||
b978e86db4 | |||
ff08e57793 | |||
1ef63026e1 | |||
dac14cedde | |||
f017801f7a | |||
18218cd4d1 | |||
45863ecff3 | |||
b43e1dbb80 | |||
efba1ff6c7 | |||
4d02c170e8 | |||
506385f3d8 | |||
ccf5c03a4c | |||
b350b2e188 | |||
2e136ee088 | |||
384ef924ca | |||
54f9ec0c95 | |||
bed1bf3095 | |||
7a017c4e29 | |||
3004f998bf | |||
6d974083cf | |||
9cdc89b1ec | |||
26c0ab1236 | |||
b2f9e5d3ca | |||
be1ddecc33 | |||
2d225fd44d | |||
79d89b3b3a | |||
5edfdf4faf | |||
12a42a3583 | |||
8d8ba0951e | |||
478e4ccfa5 | |||
8919486b6b | |||
5925a26c60 | |||
5ffbd78e2b | |||
988b91f85a | |||
f48f645468 | |||
7049c57bd0 | |||
c6ff60bb14 | |||
e135a8690d | |||
aa2d858ede | |||
e1220b0dac | |||
88d57588fc | |||
681b52b31a | |||
60f7c2d6b6 | |||
a54fbe26f3 | |||
8e3217d1bd | |||
c04b12a714 | |||
26120c3e98 | |||
f2c833893f | |||
0bdd5c66c2 | |||
74cc207947 | |||
440d25295d | |||
c28f0b8ee3 | |||
99badaf7f6 | |||
ea38ddf22b | |||
4a41f7e348 | |||
a31837c803 | |||
7369163195 | |||
59699afb44 | |||
077ce062ee | |||
56bb4557b5 | |||
7d81b897ff | |||
0fb5e27612 | |||
301ed9ad3f | |||
4ce5f37223 | |||
86af4606d7 | |||
4a51210677 | |||
58a95e6781 | |||
37a7718043 | |||
c41e12544b | |||
915cdf5e1e | |||
021e54f328 | |||
f0cf4cd00c | |||
ed1a1acf7e | |||
59a5e0aacf | |||
20dae6ba4d | |||
1b49969a99 | |||
fac3ace5f5 | |||
354fb8946d | |||
3198b5d2f9 | |||
1a0407a901 | |||
c7cd1da431 | |||
cdc06363aa | |||
34432433f3 | |||
21cef1f4c3 | |||
c6581267f6 | |||
2767b9629a | |||
59f50d53df | |||
f09737b21f | |||
6ab17bdc3c | |||
9936cd4499 | |||
0acc7a3cc2 | |||
5278a4996e | |||
d2081284d1 | |||
42ac18e057 | |||
ecc2afee68 | |||
65cb3b9102 | |||
9c4204751e | |||
4d9f9dcdff | |||
ba59dd3057 | |||
c7907bf585 | |||
4a9d98394e | |||
fd547a4c0f | |||
50f48ab5a1 | |||
89b6573247 | |||
4465bcd2c4 | |||
b77cc6a8e9 | |||
26f1f2efe6 | |||
db515d2c1d | |||
1b62a66632 | |||
1319325a4c | |||
76e48fd965 | |||
abc01ce48c | |||
d8ecc66035 | |||
76f3de5592 | |||
779be0200e | |||
9ac52ee165 | |||
8272b2507b | |||
c3843ddef0 | |||
60fd25df8e | |||
57d268ec3c | |||
93897ea7d5 | |||
b69d5b8a35 | |||
60392a565c | |||
0f4a02cadd | |||
bbfbbedd11 | |||
3e785d7db1 | |||
7abfb70320 | |||
4908775367 | |||
a60b1fcc8d | |||
b588b0383d | |||
55d5c5d694 | |||
6763fe6509 | |||
93b63308ef |
8
.cz.yaml
Normal file
8
.cz.yaml
Normal file
@ -0,0 +1,8 @@
|
||||
---
|
||||
commitizen:
|
||||
name: cz_conventional_commits
|
||||
prerelease_offset: 1
|
||||
tag_format: $version
|
||||
update_changelog_on_bump: false
|
||||
version: 1.8.1-a2
|
||||
version_scheme: semver
|
5
.gitignore
vendored
Normal file
5
.gitignore
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
artifacts/
|
||||
build/
|
||||
test_results/
|
||||
test_results.json
|
||||
*.tar.gz
|
111
.gitlab-ci.yml
Normal file
111
.gitlab-ci.yml
Normal file
@ -0,0 +1,111 @@
|
||||
---
|
||||
|
||||
variables:
|
||||
ANSIBLE_GALAXY_PACKAGE_NAME: kubernetes
|
||||
MY_PROJECT_ID: "51640029"
|
||||
GIT_SYNC_URL: "https://$GITHUB_USERNAME_ROBOT:$GITHUB_TOKEN_ROBOT@github.com/NoFussComputing/ansible_collection_kubernetes.git"
|
||||
PAGES_ENVIRONMENT_PATH: projects/ansible/collection/kubernetes/
|
||||
RELEASE_ADDITIONAL_ACTIONS_BUMP: ./.gitlab/additional_actions_bump.sh
|
||||
|
||||
|
||||
include:
|
||||
- local: .gitlab/integration_test.gitlab-ci.yml
|
||||
- project: nofusscomputing/projects/gitlab-ci
|
||||
ref: development
|
||||
file:
|
||||
- .gitlab-ci_common.yaml
|
||||
- conventional_commits/.gitlab-ci.yml
|
||||
- template/ansible-collection.gitlab-ci.yaml
|
||||
- template/mkdocs-documentation.gitlab-ci.yaml
|
||||
# ToDo: update gitlabCI jobs for collections workflow
|
||||
- git_push_mirror/.gitlab-ci.yml
|
||||
- automation/.gitlab-ci-ansible.yaml
|
||||
|
||||
|
||||
Build Collection:
|
||||
extends: .ansible_collection_build
|
||||
needs:
|
||||
- job: Ansible Lint
|
||||
optional: true
|
||||
- job: Ansible Lint (galaxy.yml)
|
||||
optional: true
|
||||
|
||||
rules:
|
||||
|
||||
- if: $CI_COMMIT_TAG
|
||||
when: always
|
||||
|
||||
# Needs to run, even by bot as the test results need to be available
|
||||
# - if: "$CI_COMMIT_AUTHOR =='nfc_bot <helpdesk@nofusscomputing.com>'"
|
||||
# when: never
|
||||
|
||||
- if: # Occur on merge
|
||||
$CI_COMMIT_BRANCH
|
||||
&&
|
||||
$CI_PIPELINE_SOURCE == "push"
|
||||
when: always
|
||||
|
||||
# - if:
|
||||
# $CI_COMMIT_BRANCH != "development"
|
||||
# &&
|
||||
# $CI_COMMIT_BRANCH != "master"
|
||||
# &&
|
||||
# $CI_PIPELINE_SOURCE == "push"
|
||||
# when: always
|
||||
|
||||
- when: never
|
||||
|
||||
|
||||
Update Git Submodules:
|
||||
extends: .ansible_playbook_git_submodule
|
||||
|
||||
|
||||
Github (Push --mirror):
|
||||
extends:
|
||||
- .git_push_mirror
|
||||
needs: []
|
||||
|
||||
|
||||
Gitlab Release:
|
||||
extends: .ansible_collection_release
|
||||
needs:
|
||||
- Stage Collection
|
||||
release:
|
||||
tag_name: $CI_COMMIT_TAG
|
||||
description: ./artifacts/release_notes.md
|
||||
name: $CI_COMMIT_TAG
|
||||
assets:
|
||||
links:
|
||||
- name: 'Ansible Galaxy'
|
||||
url: https://galaxy.ansible.com/ui/repo/published/${ANSIBLE_GALAXY_NAMESPACE}/${ANSIBLE_GALAXY_PACKAGE_NAME}/?version=${CI_COMMIT_TAG}
|
||||
|
||||
- name: ${ANSIBLE_GALAXY_NAMESPACE}-${ANSIBLE_GALAXY_PACKAGE_NAME}-${CI_COMMIT_TAG}.tar.gz
|
||||
url: https://galaxy.ansible.com/api/v3/plugin/ansible/content/published/collections/artifacts/${ANSIBLE_GALAXY_NAMESPACE}-${ANSIBLE_GALAXY_PACKAGE_NAME}-${CI_COMMIT_TAG}.tar.gz
|
||||
link_type: package
|
||||
|
||||
- name: Documentation
|
||||
url: https://nofusscomputing.com/${PAGES_ENVIRONMENT_PATH}
|
||||
milestones:
|
||||
- $CI_MERGE_REQUEST_MILESTONE
|
||||
|
||||
|
||||
Website.Submodule.Deploy:
|
||||
extends: .submodule_update_trigger
|
||||
variables:
|
||||
SUBMODULE_UPDATE_TRIGGER_PROJECT: nofusscomputing/infrastructure/website
|
||||
environment:
|
||||
url: https://nofusscomputing.com/$PAGES_ENVIRONMENT_PATH
|
||||
name: Documentation
|
||||
rules:
|
||||
- if: # condition_dev_branch_push
|
||||
$CI_COMMIT_BRANCH == "development" &&
|
||||
$CI_PIPELINE_SOURCE == "push"
|
||||
exists:
|
||||
- '{docs/**,pages/**}/*.md'
|
||||
changes:
|
||||
paths:
|
||||
- '{docs/**,pages/**}/*.md'
|
||||
compare_to: 'master'
|
||||
when: always
|
||||
|
||||
- when: never
|
3
.gitlab/additional_actions_bump.sh
Normal file
3
.gitlab/additional_actions_bump.sh
Normal file
@ -0,0 +1,3 @@
|
||||
#!/bin/sh
|
||||
|
||||
echo "Nothing to do here!!";
|
217
.gitlab/integration_test.gitlab-ci.yml
Normal file
217
.gitlab/integration_test.gitlab-ci.yml
Normal file
@ -0,0 +1,217 @@
|
||||
|
||||
.integration_test:
|
||||
|
||||
stage: test
|
||||
|
||||
needs:
|
||||
- "Build Collection"
|
||||
|
||||
image:
|
||||
name: nofusscomputing/docker-buildx-qemu:dev
|
||||
pull_policy: always
|
||||
|
||||
variables:
|
||||
DOCKER_HOST: tcp://docker:2375/
|
||||
DOCKER_DRIVER: overlay2
|
||||
# GIT_STRATEGY: none
|
||||
|
||||
services:
|
||||
- name: docker:23-dind
|
||||
entrypoint: ["env", "-u", "DOCKER_HOST"]
|
||||
command: ["dockerd-entrypoint.sh"]
|
||||
before_script:
|
||||
- | # start test container
|
||||
docker run -d \
|
||||
--privileged \
|
||||
-v ${PWD}:/workdir \
|
||||
-v ${PWD}/artifacts/galaxy:/collection \
|
||||
--workdir /workdir \
|
||||
--rm \
|
||||
--env "ANSIBLE_FORCE_COLOR=true" \
|
||||
--env "CI_COMMIT_SHA=${CI_COMMIT_SHA}" \
|
||||
--env "ANSIBLE_LOG_PATH=/workdir/ansible.log" \
|
||||
--env "PIP_BREAK_SYSTEM_PACKAGES=1" \
|
||||
--name test_image_${CI_JOB_ID} \
|
||||
nofusscomputing/ansible-docker-os:dev-${test_image}
|
||||
|
||||
- | # enter test container
|
||||
docker exec -i test_image_${CI_JOB_ID} ps aux
|
||||
- docker ps
|
||||
- docker exec -i test_image_${CI_JOB_ID} apt update
|
||||
- docker exec -i test_image_${CI_JOB_ID} apt install -y --no-install-recommends python3-pip net-tools dnsutils iptables
|
||||
- |
|
||||
if [ "${test_image}" == 'debian-12' ]; then
|
||||
|
||||
echo "Debian 12":
|
||||
|
||||
docker exec -i test_image_${CI_JOB_ID} pip install ansible-core --break-system-packages;
|
||||
|
||||
docker exec -i test_image_${CI_JOB_ID} mkdir -p /etc/iptables;
|
||||
|
||||
docker exec -i test_image_${CI_JOB_ID} touch /etc/iptables/rules.v6;
|
||||
|
||||
docker exec -i test_image_${CI_JOB_ID} update-alternatives --set iptables /usr/sbin/iptables-legacy;
|
||||
|
||||
else
|
||||
|
||||
echo " Not Debian 12":
|
||||
|
||||
docker exec -i test_image_${CI_JOB_ID} pip install ansible-core;
|
||||
|
||||
fi
|
||||
|
||||
- docker exec -i test_image_${CI_JOB_ID} cat /etc/hosts
|
||||
- docker exec -i test_image_${CI_JOB_ID} cat /etc/resolv.conf
|
||||
- | # check if DNS working
|
||||
docker exec -i test_image_${CI_JOB_ID} nslookup google.com
|
||||
script:
|
||||
- | # inside container?
|
||||
docker exec -i test_image_${CI_JOB_ID} ls -l /collection;
|
||||
docker exec -i test_image_${CI_JOB_ID} echo $PWD;
|
||||
|
||||
- | # Show Network Interfaces
|
||||
docker exec -i test_image_${CI_JOB_ID} ifconfig;
|
||||
|
||||
- | # Install the collection
|
||||
docker exec -i test_image_${CI_JOB_ID} bash -c 'ansible-galaxy collection install $(ls /collection/*.tar.gz)'
|
||||
|
||||
- | # output ansible vars
|
||||
docker exec -i test_image_${CI_JOB_ID} ansible -m setup localhost
|
||||
|
||||
- | # run the collection
|
||||
docker exec -i test_image_${CI_JOB_ID} \
|
||||
${test_command} \
|
||||
--extra-vars "nfc_role_firewall_policy_input=ACCEPT" \
|
||||
--extra-vars "nfc_role_firewall_policy_forward=ACCEPT" \
|
||||
-vv
|
||||
|
||||
- | # Create test.yaml
|
||||
mkdir -p test_results;
|
||||
cat <<EOF > test_results/${test_image}.json
|
||||
{
|
||||
"$( echo ${test_image} | sed -e 's/\./_/')": "Pass"
|
||||
}
|
||||
|
||||
EOF
|
||||
|
||||
after_script:
|
||||
- | # Create test.yaml if not exists
|
||||
if [ ! -f test_results/${test_image}.json ]; then
|
||||
|
||||
echo "[TRACE] Test has failed"
|
||||
|
||||
mkdir -p test_results;
|
||||
|
||||
cat <<EOF > test_results/${test_image}.json
|
||||
{
|
||||
"$( echo ${test_image} | sed -e 's/\./_/')": "Fail"
|
||||
}
|
||||
|
||||
EOF
|
||||
|
||||
fi
|
||||
|
||||
- | # Run trace script for debugging
|
||||
chmod +x ./.gitlab/integration_test_trace.sh;
|
||||
|
||||
./.gitlab/integration_test_trace.sh;
|
||||
|
||||
artifacts:
|
||||
untracked: false
|
||||
paths:
|
||||
- ansible.log
|
||||
- test_results/*
|
||||
when: always
|
||||
|
||||
rules:
|
||||
|
||||
- if: $CI_COMMIT_TAG
|
||||
allow_failure: true
|
||||
when: on_success
|
||||
|
||||
# Needs to run, even by bot as the test results need to be available
|
||||
# - if: "$CI_COMMIT_AUTHOR =='nfc_bot <helpdesk@nofusscomputing.com>'"
|
||||
# when: never
|
||||
|
||||
- if: # Occur on merge
|
||||
$CI_COMMIT_BRANCH
|
||||
&&
|
||||
$CI_PIPELINE_SOURCE == "push"
|
||||
allow_failure: true
|
||||
when: on_success
|
||||
|
||||
# - if:
|
||||
# $CI_COMMIT_BRANCH != "development"
|
||||
# &&
|
||||
# $CI_COMMIT_BRANCH != "master"
|
||||
# &&
|
||||
# $CI_PIPELINE_SOURCE == "push"
|
||||
# allow_failure: true
|
||||
# when: always
|
||||
|
||||
- when: never
|
||||
|
||||
|
||||
|
||||
Playbook - Install:
|
||||
extends: .integration_test
|
||||
parallel:
|
||||
matrix:
|
||||
- test_image: debian-11
|
||||
test_command: ansible-playbook nofusscomputing.kubernetes.install
|
||||
- test_image: debian-12
|
||||
test_command: ansible-playbook nofusscomputing.kubernetes.install
|
||||
- test_image: ubuntu-20.04
|
||||
test_command: ansible-playbook nofusscomputing.kubernetes.install
|
||||
- test_image: ubuntu-22.04
|
||||
test_command: ansible-playbook nofusscomputing.kubernetes.install
|
||||
|
||||
|
||||
|
||||
test_results:
|
||||
stage: test
|
||||
|
||||
extends: .ansible_playbook
|
||||
|
||||
variables:
|
||||
ansible_playbook: .gitlab/test_results.yaml
|
||||
ANSIBLE_PLAYBOOK_DIR: $CI_PROJECT_DIR
|
||||
|
||||
needs:
|
||||
- Playbook - Install
|
||||
|
||||
artifacts:
|
||||
untracked: false
|
||||
when: always
|
||||
access: all
|
||||
expire_in: "3 days"
|
||||
paths:
|
||||
- test_results.json
|
||||
|
||||
rules:
|
||||
|
||||
- if: $CI_COMMIT_TAG
|
||||
allow_failure: true
|
||||
when: on_success
|
||||
|
||||
# Needs to run, even by bot as the test results need to be available
|
||||
# - if: "$CI_COMMIT_AUTHOR =='nfc_bot <helpdesk@nofusscomputing.com>'"
|
||||
# when: never
|
||||
|
||||
- if: # Occur on merge
|
||||
$CI_COMMIT_BRANCH
|
||||
&&
|
||||
$CI_PIPELINE_SOURCE == "push"
|
||||
allow_failure: true
|
||||
when: on_success
|
||||
|
||||
# - if:
|
||||
# $CI_COMMIT_BRANCH != "development"
|
||||
# &&
|
||||
# $CI_COMMIT_BRANCH != "master"
|
||||
# &&
|
||||
# $CI_PIPELINE_SOURCE == "push"
|
||||
# allow_failure: true
|
||||
# when: always
|
||||
|
||||
- when: never
|
42
.gitlab/integration_test_trace.sh
Normal file
42
.gitlab/integration_test_trace.sh
Normal file
@ -0,0 +1,42 @@
|
||||
#!/bin/bash
|
||||
|
||||
# colour ref: https://stackoverflow.com/a/28938235
|
||||
|
||||
NC='\033[0m' # Text Reset
|
||||
|
||||
# Regular Colors
|
||||
Black='\033[0;30m' # Black
|
||||
Red='\033[0;31m' # Red
|
||||
Green='\033[0;32m' # Green
|
||||
Yellow='\033[0;33m' # Yellow
|
||||
Blue='\033[0;34m' # Blue
|
||||
Purple='\033[0;35m' # Purple
|
||||
Cyan='\033[0;36m' # Cyan
|
||||
|
||||
|
||||
cmd() {
|
||||
|
||||
echo -e "${Yellow}[TRACE] ${Green}executing ${Cyan}'$1'${NC}"
|
||||
|
||||
docker exec -i test_image_${CI_JOB_ID} $1 || true
|
||||
|
||||
}
|
||||
|
||||
|
||||
cmd "journalctl -xeu netfilter-persistent.service";
|
||||
|
||||
cmd "journalctl -xeu iptables.service"
|
||||
|
||||
cmd "journalctl -xeu k3s.service"
|
||||
|
||||
cmd "systemctl status netfilter-persistent.service"
|
||||
|
||||
cmd "systemctl status iptables.service"
|
||||
|
||||
cmd "systemctl status k3s.service"
|
||||
|
||||
cmd "kubectl get po -A -o wide"
|
||||
|
||||
cmd "kubectl get no -o wide"
|
||||
|
||||
cmd "iptables -nvL --line-numbers"
|
0
.gitlab/merge_request_templates/.gitkeep
Normal file
0
.gitlab/merge_request_templates/.gitkeep
Normal file
22
.gitlab/merge_request_templates/default.md
Normal file
22
.gitlab/merge_request_templates/default.md
Normal file
@ -0,0 +1,22 @@
|
||||
### :books: Summary
|
||||
<!-- your summary here emojis ref: https://github.com/yodamad/gitlab-emoji -->
|
||||
|
||||
|
||||
|
||||
### :link: Links / References
|
||||
<!-- using a list as any links to other references or links as required. if relevent, describe the link/reference -->
|
||||
|
||||
|
||||
### :construction_worker: Tasks
|
||||
|
||||
- [ ] Add your tasks here if required (delete)
|
||||
|
||||
<!-- dont remove tasks below strike through including the checkbox by enclosing in double tidle '~~' -->
|
||||
|
||||
- [ ] Playbook Update
|
||||
|
||||
This collection has a [corresponding playbook](https://gitlab.com/nofusscomputing/projects/ansible/ansible_playbooks/-/blob/development/role.yaml) that may need to be updated (Ansible Role), specifically [Role Validation](https://gitlab.com/nofusscomputing/projects/ansible/ansible_playbooks/-/blob/development/tasks/role/validation/nfc_kubernetes.yaml).
|
||||
|
||||
- [ ] NetBox Rendered Config Update
|
||||
|
||||
This Collection has a [NetBox Rendered Config template](https://gitlab.com/nofusscomputing/infrastructure/configuration-management/netbox/-/blob/development/templates/cluster.json.j2) that may need to be updated. Specifically Section `cluster.type == 'kubernetes'`
|
19
.gitlab/test_results.yaml
Normal file
19
.gitlab/test_results.yaml
Normal file
@ -0,0 +1,19 @@
|
||||
---
|
||||
|
||||
- name: Create Test Results File
|
||||
hosts: localhost
|
||||
gather_facts: false
|
||||
|
||||
|
||||
tasks:
|
||||
|
||||
|
||||
- name: Load Test Results
|
||||
ansible.builtin.include_vars:
|
||||
dir: ../test_results
|
||||
name: test_results
|
||||
|
||||
- name: Create Results file
|
||||
ansible.builtin.copy:
|
||||
content: "{{ (test_results) | to_nice_json }}"
|
||||
dest: ../test_results.json
|
8
.gitmodules
vendored
Normal file
8
.gitmodules
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
[submodule "gitlab-ci"]
|
||||
path = gitlab-ci
|
||||
url = https://gitlab.com/nofusscomputing/projects/gitlab-ci.git
|
||||
branch = development
|
||||
[submodule "website-template"]
|
||||
path = website-template
|
||||
url = https://gitlab.com/nofusscomputing/infrastructure/website-template.git
|
||||
branch = development
|
10
.nfc_automation.yaml
Normal file
10
.nfc_automation.yaml
Normal file
@ -0,0 +1,10 @@
|
||||
---
|
||||
|
||||
role_git_conf:
|
||||
gitlab:
|
||||
submodule_branch: "development"
|
||||
default_branch: development
|
||||
mr_labels: ~"type::automation" ~"impact::0" ~"priority::0"
|
||||
auto_merge: true
|
||||
merge_request:
|
||||
patch_labels: '~"code review::not started"'
|
15
.vscode/settings.json
vendored
Normal file
15
.vscode/settings.json
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
{
|
||||
"yaml.schemas": {
|
||||
"https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/ansible.json#/$defs/tasks": [
|
||||
"roles/nfc_kubernetes/tasks/*.yaml",
|
||||
"roles/nfc_kubernetes/tasks/*/*.yaml",
|
||||
"roles/nfc_kubernetes/tasks/*/*/*.yaml"
|
||||
],
|
||||
"https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/vars.json": [
|
||||
"roles/nfc_kubernetes/variables/**.yaml"
|
||||
],
|
||||
"https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/ansible.json#/$defs/playbook": ".gitlab/test_results.yaml"
|
||||
},
|
||||
"gitlab.aiAssistedCodeSuggestions.enabled": false,
|
||||
"gitlab.duoChat.enabled": false,
|
||||
}
|
91
CHANGELOG.md
Normal file
91
CHANGELOG.md
Normal file
@ -0,0 +1,91 @@
|
||||
## 1.8.1-a2 (2024-05-02)
|
||||
|
||||
### Fix
|
||||
|
||||
- **nfc_kubernetes**: cast url var as list
|
||||
|
||||
## 1.8.1-a1 (2024-05-02)
|
||||
|
||||
### Fix
|
||||
|
||||
- **nfc_kubernetes**: correct url build to loop through all cpu arch
|
||||
|
||||
## 1.8.0 (2024-05-02)
|
||||
|
||||
### Feat
|
||||
|
||||
- **nfc_kubernetes**: build url and on use cast as string
|
||||
|
||||
## 1.7.2 (2024-04-25)
|
||||
|
||||
### Fix
|
||||
|
||||
- **nfc_kubernetes**: adjust some tasks to run during checkmode
|
||||
|
||||
## 1.7.1 (2024-04-24)
|
||||
|
||||
### Fix
|
||||
|
||||
- add role readme
|
||||
|
||||
## 1.7.0 (2024-04-24)
|
||||
|
||||
### Feat
|
||||
|
||||
- **kubernetes_netbox**: custom field bug work around
|
||||
- **services**: add netbox service fields
|
||||
- **role**: New role kubernetes_netbox
|
||||
|
||||
### Fix
|
||||
|
||||
- **nfc_kubernetes**: ensure install tasks run when job_tags specified
|
||||
- **facts**: gather required facts if not already available
|
||||
- **install**: correct template installed var
|
||||
- **install**: as part of install check, confirm service
|
||||
|
||||
## 1.6.0 (2024-03-29)
|
||||
|
||||
### Feat
|
||||
|
||||
- **test**: add integration test. playbook install
|
||||
- add retry=3 delay=10 secs to all ansible url modules
|
||||
- **upgrade**: If upgrade occurs, dont run remaining tasks
|
||||
- support upgrading cluster
|
||||
|
||||
### Fix
|
||||
|
||||
- **docs**: use correct badge query url
|
||||
|
||||
### Refactor
|
||||
|
||||
- **galaxy**: for dependent collections prefix with `>=` so as to not cause version lock
|
||||
|
||||
## 1.5.0 (2024-03-21)
|
||||
|
||||
### Feat
|
||||
|
||||
- **collection**: nofusscomputing.firewall update 1.0.1 -> 1.1.0
|
||||
|
||||
## 1.4.0 (2024-03-20)
|
||||
|
||||
### Feat
|
||||
|
||||
- **install**: "ansible_check_mode=true" no hostname check
|
||||
|
||||
## 1.3.0 (2024-03-18)
|
||||
|
||||
### Fix
|
||||
|
||||
- **handler**: add missing 'reboot_host' handler
|
||||
- **firewall**: ensure slave nodes can access ALL masters API point
|
||||
- **firewall**: dont add rules for disabled features
|
||||
|
||||
## 1.2.0 (2024-03-16)
|
||||
|
||||
### Feat
|
||||
|
||||
- **firewall**: use collection nofusscomputing.firewall to configure kubernetes firewall
|
||||
|
||||
### Fix
|
||||
|
||||
- **config**: use correct var name when setting node name
|
24
CONTRIBUTING.md
Normal file
24
CONTRIBUTING.md
Normal file
@ -0,0 +1,24 @@
|
||||
# Contribution Guide
|
||||
|
||||
|
||||
|
||||
## Updating components with a remote source
|
||||
|
||||
Some components within this role are sourced from a remote source. To update them to the latest release use the following commands.
|
||||
|
||||
> Ensure that before committing the update remote files to the repository, that no features have been removed that were added.
|
||||
|
||||
|
||||
### Kubevirt
|
||||
|
||||
``` bash
|
||||
|
||||
export KUBEVIRT_RELEASE='<kubevirt release i.e. v1.2.0>'
|
||||
|
||||
# From within roles/nfc_kubernetes/templates directory
|
||||
wget https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_RELEASE}/kubevirt-operator.yaml -O kubevirt-operator.yaml.j2
|
||||
|
||||
# From within the roles/nfc_kubernetes/templates directory
|
||||
wget https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_RELEASE}/kubevirt-cr.yaml -O kubevirt-cr.yaml.j2
|
||||
|
||||
```
|
32
README.md
32
README.md
@ -1,34 +1,44 @@
|
||||
<div align="center" width="100%">
|
||||
<span style="text-align: center;">
|
||||
|
||||
|
||||
# No Fuss Computing - Ansible Role: nfc_kubernetes
|
||||
# No Fuss Computing - Ansible Collection Kubernetes
|
||||
|
||||
<br>
|
||||
|
||||

|
||||
|
||||
|
||||
[](https://galaxy.ansible.com/ui/repo/published/nofusscomputing/kubernetes/)
|
||||
|
||||
|
||||
----
|
||||
|
||||
<br>
|
||||
|
||||
  [](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/issues)
|
||||
  [](https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes/-/issues)
|
||||
|
||||
|
||||
|
||||
  
|
||||
  
|
||||
<br>
|
||||
|
||||
This project is hosted on [gitlab](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes) and has a read-only copy hosted on [Github](https://github.com/NofussComputing/ansible_role_nfc_kubernetes).
|
||||
This project is hosted on [gitlab](https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes) and has a read-only copy hosted on [Github](https://github.com/NofussComputing/ansible_collection_kubernetes).
|
||||
|
||||
----
|
||||
|
||||
**Stable Branch**
|
||||
|
||||
 
|
||||
 
|
||||
|
||||
   
|
||||
|
||||
|
||||
----
|
||||
|
||||
**Development Branch**
|
||||
|
||||
 
|
||||
 
|
||||
|
||||
   
|
||||
|
||||
----
|
||||
<br>
|
||||
@ -37,14 +47,14 @@ This project is hosted on [gitlab](https://gitlab.com/nofusscomputing/projects/a
|
||||
|
||||
links:
|
||||
|
||||
- [Issues](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/issues)
|
||||
- [Issues](https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes/-/issues)
|
||||
|
||||
- [Merge Requests (Pull Requests)](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/merge_requests)
|
||||
- [Merge Requests (Pull Requests)](https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes/-/merge_requests)
|
||||
|
||||
|
||||
|
||||
## Contributing
|
||||
All contributions for this project must conducted from [Gitlab](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes).
|
||||
All contributions for this project must conducted from [Gitlab](https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes).
|
||||
|
||||
For further details on contributing please refer to the [contribution guide](CONTRIBUTING.md).
|
||||
|
||||
|
0
docs/articles/index.md
Normal file
0
docs/articles/index.md
Normal file
0
docs/contact.md
Normal file
0
docs/contact.md
Normal file
0
docs/index.md
Normal file
0
docs/index.md
Normal file
0
docs/operations/index.md
Normal file
0
docs/operations/index.md
Normal file
1
docs/projects/ansible/collection/firewall/index.md
Normal file
1
docs/projects/ansible/collection/firewall/index.md
Normal file
@ -0,0 +1 @@
|
||||
linked to
|
0
docs/projects/ansible/collection/index.md
Normal file
0
docs/projects/ansible/collection/index.md
Normal file
83
docs/projects/ansible/collection/kubernetes/index.md
Normal file
83
docs/projects/ansible/collection/kubernetes/index.md
Normal file
@ -0,0 +1,83 @@
|
||||
---
|
||||
title: Kubernetes
|
||||
description: No Fuss Computings Ansible Collection Kubernetes
|
||||
date: 2024-03-13
|
||||
template: project.html
|
||||
about: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes
|
||||
---
|
||||
|
||||
<span style="text-align: center;">
|
||||
|
||||

|
||||
|
||||
|
||||
 
|
||||
|
||||
   
|
||||
|
||||
|
||||
[](https://galaxy.ansible.com/ui/repo/published/nofusscomputing/kubernetes/)
|
||||
|
||||
|
||||
</span>
|
||||
|
||||
This Ansible Collection is for installing a K3s Kubernetes cluster, both single and multi-node cluster deployments are supported. In addition to installing and configuring the firewall for the node. for further information on the firewall config please see the [firewall docs](../firewall/index.md)
|
||||
|
||||
|
||||
## Installation
|
||||
|
||||
To install this collection use `ansible-galaxy collection install nofusscomputing.kubernetes`
|
||||
|
||||
|
||||
## Features
|
||||
|
||||
- Install k3s cluster. Both Single and multi-node clusters
|
||||
|
||||
- Configure the cluster
|
||||
|
||||
- Upgrade a cluster
|
||||
|
||||
For a more detailed list of featured checkout the roles [documentation](roles/nfc_kubernetes/index.md).
|
||||
|
||||
|
||||
## Using this collection
|
||||
|
||||
This collection has been designed to be a complete and self-contained management tool for a K3s kubernetes cluster.
|
||||
|
||||
## Cluster Installation
|
||||
|
||||
By default the install playbook will install to localhost.
|
||||
|
||||
``` bash
|
||||
|
||||
ansible-playbook nofusscomputing.kubernetes.install
|
||||
|
||||
```
|
||||
|
||||
!!! danger
|
||||
By default when the install task is run, The firewall is also configured. The default sets the `FORWARD` and `INPUT` tables to have a policy of `DROP`. Failing to add any required additional rules before installing/configuring kubernetes will cause you to not have remote access to the machine.
|
||||
|
||||
You are encouraged to run `ansible-playbook nofusscomputing.firewall.install` with your rules configured within your inventory first. see the [firewall docs](../firewall/index.md) for more information.
|
||||
|
||||
The install playbook has a dynamic `hosts` key. This has been done to specifically support running the playbook from AWX and being able to populate the field from the survey feature. Order of precedence for the host variable is as follows:
|
||||
|
||||
- `nfc_pb_host` set to any valid value that a playbook `hosts` key can accept
|
||||
|
||||
- `nfc_pb_kubernetes_cluster_name` with the name of the cluster. This variable is appended to string `kubernetes_cluster_` to serve as a group name for the cluster to be installed. i.e. for a cluster called `prime`, the group name would be set to `kubernetes_cluster_prime`
|
||||
|
||||
- `--limit` specified at runtime
|
||||
|
||||
- `localhost`
|
||||
|
||||
For the available variables please view the [nfc_kubernetes role docs](roles/nfc_kubernetes/index.md#default-variables)
|
||||
|
||||
|
||||
## Cluster Upgrade
|
||||
|
||||
[In place cluster upgrades](https://docs.k3s.io/upgrades/manual#upgrade-k3s-using-the-binary) is the method used to conduct the cluster upgrades. The logic for the upgrades first confirms that K3s is installed and that the local binary and running k3s version are the desired versions. If they are not, they will be updated to the desired version. On completion of this the node has its `k3s` service restarted which completes the upgrade process.
|
||||
|
||||
!!! info
|
||||
If an upgrade occurs, no other task within the play will run. This is by design. if you have further tasks to be run in addition to the upgrade, run the play again.
|
||||
|
||||
!!! danger
|
||||
not following the [Kubernetes version skew policy](https://kubernetes.io/releases/version-skew-policy/) when upgrading your cluster may break your cluster.
|
@ -0,0 +1,46 @@
|
||||
---
|
||||
title: NetBox Kubernetes
|
||||
description: No Fuss Computings Ansible role kubernetes_netbox
|
||||
date: 2023-10-24
|
||||
template: project.html
|
||||
about: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes
|
||||
---
|
||||
|
||||
This Ansible role as part of our collection `nofusscomputing.kubernetes` is intended to be used to setup NetBox so that the settings for deploying a kubernetes cluster can be stored within NetBox.
|
||||
|
||||
|
||||
## Role Details
|
||||
|
||||
| Item| Value | Description |
|
||||
|:---|:---:|:---|
|
||||
| Dependent Roles | _None_ | |
|
||||
| Optional Roles | _None_ | |
|
||||
| Idempotent | _Yes_ | |
|
||||
| Stats Available | _Not Yet_ | |
|
||||
| Tags | _Nil_ | |
|
||||
| Requirements | _None_ | |
|
||||
|
||||
|
||||
## Features
|
||||
|
||||
- Adds custom fields to `cluster` object within NetBox that this collection can use to deploy a kubernetes cluster.
|
||||
|
||||
!!! info
|
||||
Due to a bug in ansible module `netbox.netbox.netbox_custom_field` The fields are not created as they should be. For example, the fields are supposed to be set to only display when not empty. for more information see [Github #1210](https://github.com/netbox-community/ansible_modules/issues/1210). We have [added a workaround](https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes/-/merge_requests/56#note_1876912267) so the fields are created.
|
||||
|
||||
Other than that, the fields are created as they should.
|
||||
|
||||
|
||||
## Usage
|
||||
|
||||
To configure NetBox, ensure that the NetBox Access variables are set and run playbook `nofusscomputing.netbox.kubernetes_netbox`. This will setup NetBox with the required fields that role [nfc_kubernetes](../nfc_kubernetes/index.md) uses.
|
||||
|
||||
|
||||
## Default Variables
|
||||
|
||||
|
||||
``` yaml title="defaults/main.yaml" linenums="1"
|
||||
|
||||
--8<-- "roles/kubernetes_netbox/defaults/main.yaml"
|
||||
|
||||
```
|
@ -0,0 +1,146 @@
|
||||
---
|
||||
title: Ansible
|
||||
description: No Fuss Computings Ansible role nfc_kubernetes Ansible docs
|
||||
date: 2023-10-24
|
||||
template: project.html
|
||||
about: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes
|
||||
---
|
||||
|
||||
This page intends to describe/explain the setup of ansible for this role.
|
||||
|
||||
## Inventory Setup
|
||||
|
||||
There are many ways to layout your inventory within Ansible. To take full advantage of this role the following could be used:
|
||||
|
||||
- A group containing all master nodes
|
||||
|
||||
- A group containing all worker nodes
|
||||
|
||||
- a group containing all nodes for a cluster
|
||||
|
||||
- All groups above made a subordinate of a master group
|
||||
|
||||
- variable `ansible_host`. _can be either DNS name, IPv4/IPv6 Address_
|
||||
|
||||
!!! info Info
|
||||
The nfc_kubernetes role uses this field for any configuration that requires a hostname. You are strongly encouraged to use a DNS name, and that DNS name must be resolvable by every host that accesses the host in question. Using a DNS host name is of paramount importance for a host that obtains its address via dynamic DHCP.
|
||||
|
||||
- variable `nfc_role_kubernetes_master` _boolean_ set for all host that are master nodes.
|
||||
|
||||
- hosts that require Kubernetes API access added to variable `kubernetes_config.cluster.access`
|
||||
|
||||
An example inventory file that would suffice.
|
||||
|
||||
``` yaml
|
||||
all:
|
||||
hosts:
|
||||
localhost:
|
||||
vars:
|
||||
ansible_connection: local
|
||||
children:
|
||||
|
||||
kubernetes:
|
||||
children:
|
||||
|
||||
k3s:
|
||||
hosts:
|
||||
|
||||
|
||||
k8s:
|
||||
hosts:
|
||||
|
||||
|
||||
kubernetes_cluster:
|
||||
children:
|
||||
|
||||
kubernetes_cluster_{cluster_name_here}:
|
||||
hosts:
|
||||
|
||||
|
||||
kubernetes_master:
|
||||
hosts:
|
||||
|
||||
|
||||
kubernetes_worker:
|
||||
hosts:
|
||||
|
||||
```
|
||||
|
||||
The reasoning for the layout above is:
|
||||
|
||||
- group `kubernetes` used as a selector within a playbook or as a limiter when running a playbook to cover all kubernetes hosts.
|
||||
|
||||
- groups `kubernetes`, `k3s`, `k8s` and `kubernetes_cluster_{cluster_name_here}` used for variable files (`inventory/group_vars/{group_name}.yaml`). with the latter containing all settings for the cluster in question.
|
||||
|
||||
- Hosts are added to ALL groups relevant to them.
|
||||
|
||||
|
||||
The following group variable files will also need to be created:
|
||||
|
||||
- `inventory/group_vars/all.yaml` Variables applicable to all hosts
|
||||
|
||||
- `inventory/group_vars/kubernetes.yaml` software versions for kubernetes
|
||||
|
||||
- `inventory/group_vars/kubernetes_cluster_{cluster_name_here}.yaml` cluster configuration
|
||||
|
||||
|
||||
## Playbooks Setup
|
||||
|
||||
Whilst there are many ways to skin a cat, using the inventory layout as defined above, with the creation of playbooks as detailed below, is a possible solution covering most bases of using this role.
|
||||
|
||||
playbooks/kubernetes.yaml
|
||||
|
||||
``` yaml
|
||||
---
|
||||
- name: Kubernetes Group and sub-groups
|
||||
hosts: "{{ groups.kubernetes }}"
|
||||
gather_facts: true
|
||||
|
||||
roles: []
|
||||
|
||||
- name: Kubernetes Master
|
||||
import_playbook: kubernetes/master.yaml
|
||||
|
||||
- name: Kubernetes Worker
|
||||
import_playbook: kubernetes/worker.yaml
|
||||
```
|
||||
|
||||
playbooks/kubernetes/master.yaml
|
||||
``` yaml
|
||||
---
|
||||
- name: Kubernetes Master Nodes
|
||||
hosts: "{{ kubernetes_master }}"
|
||||
gather_facts: true
|
||||
|
||||
roles:
|
||||
- name: Kubernetes Setup
|
||||
role: nfc_kubernetes
|
||||
|
||||
```
|
||||
|
||||
playbooks/kubernetes/worker.yaml
|
||||
``` yaml
|
||||
---
|
||||
- name: Kubernetes worker Nodes
|
||||
hosts: "{{ kubernetes_worker }}"
|
||||
gather_facts: true
|
||||
|
||||
roles:
|
||||
- name: Kubernetes Setup
|
||||
role: nfc_kubernetes
|
||||
|
||||
```
|
||||
|
||||
Running the above playbooks with the inventory setup allows the following and more:
|
||||
|
||||
- Setup Kubernetes on all applicable kubernetes hosts
|
||||
|
||||
> `ansible-playbook -i inventory/production playbooks/kubernetes.yaml`
|
||||
|
||||
- Setup kubernetes cluster `{cluster_name}`
|
||||
|
||||
> `ansible-playbook --limit kubernetes_cluster_{cluster_name_here} -i inventory/production playbooks/kubernetes.yaml`
|
||||
|
||||
- Setup all Kubernetes master nodes, regardless of cluster
|
||||
|
||||
> `ansible-playbook --limit kubernetes_master -i inventory/production playbooks/kubernetes.yaml`
|
@ -0,0 +1,72 @@
|
||||
---
|
||||
title: Firewall
|
||||
description: No Fuss Computings Ansible role nfc_kubernetes
|
||||
date: 2023-10-24
|
||||
template: project.html
|
||||
about: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes
|
||||
---
|
||||
|
||||
This role includes logic to generate firewall rules for iptables. Both IPv4 and IPv6 rules are generated. To survive reboots or network cable disconnects, a script is created and added to `if-up.d`. This ensures that each time the interface is brought up, the firewall rules are applied. For a list of the firewall rules applied see the [K3s documentation](https://docs.k3s.io/installation/requirements#inbound-rules-for-k3s-server-nodes)
|
||||
|
||||
Rules generation workflow:
|
||||
|
||||
- iterates over all kubernetes hosts
|
||||
|
||||
- adds rules if host is masters for worker access
|
||||
|
||||
- adds rules if worker for all node access
|
||||
|
||||
- adds rules for additional hosts to access kubernetes api
|
||||
|
||||
What you end up with:
|
||||
|
||||
- chains for each area of access to the cluster
|
||||
|
||||
- The input table contains the jump to each chain, based off of destination port and protocol
|
||||
|
||||
- each chain returns to INPUT table for further processing.
|
||||
|
||||
!!! danger Security
|
||||
The way the rules are created and applied, they all return to the `INPUT` table for further processing. If the `INPUT` table's default policy is `ACCEPT`, then regardless of the firewall rules in place, any host with network access to the kubernetes host can access the desired service without needing a rule to grant access.
|
||||
|
||||
**Recommendation:** Set the `INPUT` table's default policy to `DROP`
|
||||
|
||||
!!! info Info
|
||||
If a DNS name is used for any of the addresses, a DNS lookup is done for both IPv4 and IPv6, adding the first host found to the applicable chain.
|
||||
|
||||
|
||||
## Providing access to the cluster
|
||||
|
||||
No special skill is required apart from adding the host to grant access to the right list variable. i.e.
|
||||
|
||||
``` yaml
|
||||
kubernetes_config:
|
||||
cluster:
|
||||
access:
|
||||
- '192.168.1.1'
|
||||
- 'my.hostname.com'
|
||||
```
|
||||
|
||||
Any host that is added to the `access` list will be granted access to the Kubernetes API. Hosts in this list are intended to be the hosts your end users are on. If you join a new node to the cluster, the applicable firewall rules will automatically be generated and added to each host's firewall. It's important that when adding a new node to the cluster, the playbook is run against all nodes of the cluster, not just the new node. Failing to do so will have the existing nodes block access to the new node due to missing firewall rules.
|
||||
|
||||
|
||||
!!! tip Tip
|
||||
When manually adding a host use insert `-I` not append `-A` as the last rule must be `-j RETURN`
|
||||
|
||||
example: `-I {chain_name} -s {hostname/ipaddress} -j ACCEPT`
|
||||
|
||||
|
||||
|
||||
Protocol Port Source Destination Description
|
||||
TCP 2379-2380 Servers Servers Required only for HA with embedded etcd
|
||||
TCP 6443 Agents Servers K3s supervisor and Kubernetes API Server
|
||||
UDP 8472 All nodes All nodes Required only for Flannel VXLAN
|
||||
TCP 10250 All nodes All nodes Kubelet metrics
|
||||
UDP 51820 All nodes All nodes Required only for Flannel Wireguard with IPv4
|
||||
UDP 51821 All nodes All nodes Required only for Flannel Wireguard with IPv6
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
@ -0,0 +1,119 @@
|
||||
---
|
||||
title: Kubernetes
|
||||
description: No Fuss Computings Ansible role nfc_kubernetes
|
||||
date: 2023-10-24
|
||||
template: project.html
|
||||
about: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes
|
||||
---
|
||||
|
||||
This Ansible role is designed to deploy a K3s Kubernetes cluster. Without adding cluster configuration this role will install K3s as a single node cluster. To deploy a multi-node cluster, add your configuration and K3s will be installed on all nodes. On completion you will have a fully configured cluster in a state ready to use. This role can be used with [our playbooks](../../../../playbooks/index.md) or comes included, along with the playbook, within our [Ansible Execution Environment](../../../../execution_environment/index.md).
|
||||
|
||||
|
||||
## Role Details
|
||||
|
||||
| Item| Value | Description |
|
||||
|:---|:---:|:---|
|
||||
| Dependent Roles | _None_ | |
|
||||
| Optional Roles | _nfc_firewall_ | Used to setup the firewall for kubernetes. |
|
||||
| Idempotent | _Yes_ | |
|
||||
| Stats Available | _Not Yet_ | |
|
||||
| Tags | _Nil_ | |
|
||||
| Requirements | _Gather Facts_ | |
|
||||
| | _become_ | |
|
||||
|
||||
|
||||
## Features
|
||||
|
||||
- CNI Setup, calico including `calicoctl` plugin
|
||||
|
||||
> `kubectl calico ....` instead of `calicoctl ....`
|
||||
|
||||
- Configurable:
|
||||
|
||||
- Container Registries
|
||||
|
||||
    - etcd deployment
|
||||
|
||||
- etcd snapshot cron schedule
|
||||
|
||||
- etcd snapshot retention
|
||||
|
||||
- Cluster Domain
|
||||
|
||||
- Configure System reserved CPU, Storage and Memory.
|
||||
|
||||
- Node Labels
|
||||
|
||||
- Node Taints
|
||||
|
||||
- Service Load Balancer Namespace
|
||||
|
||||
- Encryption between nodes (Wireguard)
|
||||
|
||||
- [Firewall configured for kubernetes host](firewall.md)
|
||||
|
||||
- Multi-node Deployment
|
||||
|
||||
- OpenID Connect SSO Authentication
|
||||
|
||||
- [Basic RBAC `ClusterRoles` and Bindings](rbac.md)
|
||||
|
||||
- _[ToDo-#5](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/issues/5)_ Restore backup on fresh install of a cluster
|
||||
|
||||
- Installs OLM for operator subscriptions
|
||||
|
||||
- Install MetalLB
|
||||
|
||||
- Install KubeVirt including `virtctl` plugin
|
||||
|
||||
> `kubectl virt ....` instead of `virtctl ....`
|
||||
|
||||
- Install the Helm Binary
|
||||
|
||||
- Upgrade cluster
|
||||
|
||||
|
||||
## Role Workflow
|
||||
|
||||
For a more probable than not success this role first installs/configures prime master, other master(s) and worker nodes using the following simplified workflow:
|
||||
|
||||
1. Download both install script and k3s binary to ansible controller
|
||||
|
||||
1. copy install script and k3s binary to host
|
||||
|
||||
1. Create required config files needed for installation
|
||||
|
||||
1. _(kubernetes prime master only)_ Add install required config files
|
||||
|
||||
1. Install kubernetes
|
||||
|
||||
1. _(kubernetes prime master only)_ Wait for kubernetes to be ready. Playbook is paused until `true`
|
||||
|
||||
1. Configure Kubernetes
|
||||
|
||||
1. Install Kubevirt
|
||||
|
||||
If the playbook is setup as per [our recommendation](ansible.md) step 2 onwards is first done on master nodes then worker nodes.
|
||||
|
||||
!!! tip
|
||||
If you prefer to manually restart the kubernetes service the following variables can be set to prevent a restart of the kubernetes service
|
||||
|
||||
``` yaml
|
||||
nfc_kubernetes_no_restart: false
|
||||
nfc_kubernetes_no_restart_master: false
|
||||
nfc_kubernetes_no_restart_prime: false
|
||||
nfc_kubernetes_no_restart_slave: false
|
||||
```
|
||||
_See default variables below for explanation of each variable if it's not evident enough._
|
||||
|
||||
|
||||
## Default Variables
|
||||
|
||||
On viewing these variables you will notice there are single dictionary keys prefixed `nfc_role_kubernetes_` and a dictionary of dictionaries `kubernetes_config`. variables prefixed with `nfc_role_kubernetes_` are for single node installs with the `kubernetes_config` dictionary containing all of the information for an entire cluster. The `kubernetes_config` dictionary variables take precedence. Even if you are installing a cluster on multiple nodes, you are still advised to review the variables prefixed with `nfc_role_kubernetes_` as they may still be needed. i.e. setting a node type use keys `nfc_role_kubernetes_prime`, `nfc_role_kubernetes_master` and `nfc_role_kubernetes_worker`.
|
||||
|
||||
|
||||
``` yaml title="defaults/main.yaml" linenums="1"
|
||||
|
||||
--8<-- "roles/nfc_kubernetes/defaults/main.yml"
|
||||
|
||||
```
|
@ -0,0 +1,37 @@
|
||||
---
|
||||
title: RBAC
|
||||
description: No Fuss Computings Ansible role nfc_kubernetes RBAC documentation.
|
||||
date: 2023-10-29
|
||||
template: project.html
|
||||
about: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes
|
||||
---
|
||||
|
||||
As part of this role's workflow, a set of Cluster Roles and Cluster Role Bindings are deployed and ready to use. The intent of these roles is to create a default set of roles that only require the authorization system to provide the user's groups. As they have been defined as Cluster Roles you can bind to both cluster and/or namespace.
|
||||
A minimum-required-access principle has been adopted in the creation of these roles, with the roles designed for those who would access/use the cluster (an end user).
|
||||
|
||||
!!! tip
|
||||
All deployed `ClusterRole` objects include the labels `authorization/description` and `authorization/target`, explaining their intended purpose and where they are recommended for binding.
|
||||
|
||||
|
||||
Currently the following roles are deployed as part of this Ansible role:
|
||||
|
||||
- authorization:namespace:read
|
||||
|
||||
> Full read access to all objects except secrets
|
||||
|
||||
- authorization:full
|
||||
|
||||
> Full read/write access to all objects including secrets
|
||||
|
||||
- authorization:namespace:owner
|
||||
|
||||
> Full read/write access to all objects including secrets
|
||||
|
||||
- authorization:cluster:view-metrics
|
||||
|
||||
> View node and pod metrics
|
||||
|
||||
- **[ToDo-#6](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/issues/6)** authorization:cluster:admin
|
||||
|
||||
> Configure the cluster with this not including anything that can be deployed.
|
||||
|
@ -0,0 +1,30 @@
|
||||
---
|
||||
title: Release Notes
|
||||
description: No Fuss Computings Ansible role nfc_kubernetes
|
||||
date: 2024-01-31
|
||||
template: project.html
|
||||
about: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes
|
||||
---
|
||||
|
||||
This document details any changes that have occurred that may impact users of this role. It's a rolling document and will be amended from time to time.
|
||||
|
||||
|
||||
## Changes with an impact
|
||||
|
||||
- _**13 Mar 2024**_ Container Images now a dictionary. This role has two images `kubevirt_operator` and `tigera_operator`.
|
||||
|
||||
- All Images are stored in dictionary `nfc_role_kubernetes_container_images` with each image using its own dictionary with mandatory keys `registry`, `image` and `tag`. This change has been made to cater for those whom store their images within their inventory as a dict of dict. For instance to use your inventory image declare variable `nfc_role_kubernetes_container_images.kubevirt_operator: my_images.my_kubevirt_dict` as an example.
|
||||
|
||||
- A lot of variables have been updated. To view what has changed, please see `defaults/main.yaml` in [MR !35](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/merge_requests/35)
|
||||
|
||||
- _**31 Jan 2024**_ Calico CNI deployment has been migrated to use the calico operator.
|
||||
|
||||
- All new cluster installations will be deployed with the operator
|
||||
|
||||
- Existing deployments will be required to run a deployment with job tag `operator_migrate_calico` to migrate their deployment to the operator
|
||||
|
||||
- if an issue occurs with the migration it can be rolled back by `kubectl delete -f` for all manifests in the `/var/lib/rancher/k3s/ansible` directory and redeploying with job tag `calico_manifest`. This re-deploys calico using the current manifest.
|
||||
|
||||
- This tag will be removed in the future at no set date.
|
||||
|
||||
- `ServiceLB` / `klipperLB` no longer deploys by default and to deploy it variable `nfc_kubernetes_enable_servicelb` must be set `true`
|
0
docs/projects/ansible/index.md
Normal file
0
docs/projects/ansible/index.md
Normal file
0
docs/projects/ansible/playbooks/index.md
Normal file
0
docs/projects/ansible/playbooks/index.md
Normal file
0
docs/projects/index.md
Normal file
0
docs/projects/index.md
Normal file
0
docs/tags.md
Normal file
0
docs/tags.md
Normal file
87
galaxy.yml
Normal file
87
galaxy.yml
Normal file
@ -0,0 +1,87 @@
|
||||
### REQUIRED
|
||||
# The namespace of the collection. This can be a company/brand/organization or product namespace under which all
|
||||
# content lives. May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with
|
||||
# underscores or numbers and cannot contain consecutive underscores
|
||||
namespace: nofusscomputing
|
||||
|
||||
# The name of the collection. Has the same character restrictions as 'namespace'
|
||||
name: kubernetes
|
||||
|
||||
# The version of the collection. Must be compatible with semantic versioning
|
||||
version: 1.8.1-a2
|
||||
|
||||
# The path to the Markdown (.md) readme file. This path is relative to the root of the collection
|
||||
readme: README.md
|
||||
|
||||
# A list of the collection's content authors. Can be just the name or in the format 'Full Name <email> (url)
|
||||
# @nicks:irc/im.site#channel'
|
||||
authors:
|
||||
- No Fuss Computing
|
||||
|
||||
|
||||
### OPTIONAL but strongly recommended
|
||||
# A short summary description of the collection
|
||||
description: Install a K3s Kubernetes Cluster
|
||||
|
||||
# Either a single license or a list of licenses for content inside of a collection. Ansible Galaxy currently only
|
||||
# accepts L(SPDX,https://spdx.org/licenses/) licenses. This key is mutually exclusive with 'license_file'
|
||||
license:
|
||||
- MIT
|
||||
|
||||
# The path to the license file for the collection. This path is relative to the root of the collection. This key is
|
||||
# mutually exclusive with 'license'
|
||||
license_file: ''
|
||||
|
||||
# A list of tags you want to associate with the collection for indexing/searching. A tag name has the same character
|
||||
# requirements as 'namespace' and 'name'
|
||||
tags:
|
||||
- k3s
|
||||
- kubernetes
|
||||
- tools
|
||||
|
||||
# Collections that this collection requires to be installed for it to be usable. The key of the dict is the
|
||||
# collection label 'namespace.name'. The value is a version range
|
||||
# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version
|
||||
# range specifiers can be set and are separated by ','
|
||||
dependencies:
|
||||
ansible.posix: '>=1.5.4'
|
||||
kubernetes.core: '>=3.0.0'
|
||||
nofusscomputing.firewall: '>=1.1.0'
|
||||
netbox.netbox: '>=3.16.0'
|
||||
|
||||
|
||||
# The URL of the originating SCM repository
|
||||
repository: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes
|
||||
|
||||
# The URL to any online docs
|
||||
documentation: https://nofusscomputing.com/projects/ansible/collection/kubernetes/
|
||||
|
||||
# The URL to the homepage of the collection/project
|
||||
# homepage: https://example.com
|
||||
|
||||
# The URL to the collection issue tracker
|
||||
issues: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes/-/issues
|
||||
|
||||
# A list of file glob-like patterns used to filter any files or directories that should not be included in the build
|
||||
# artifact. A pattern is matched from the relative path of the file or directory of the collection directory. This
|
||||
# uses 'fnmatch' to match the files or directories. Some directories and files like 'galaxy.yml', '*.pyc', '*.retry',
|
||||
# and '.git' are always filtered. Mutually exclusive with 'manifest'
|
||||
build_ignore:
|
||||
- .vscode
|
||||
- artifacts
|
||||
- docs
|
||||
- .git*
|
||||
- gitlab-ci
|
||||
- website-template
|
||||
- .ansible-lint-ignore
|
||||
- .cz.yaml
|
||||
- .nfc_automation.yaml
|
||||
- dockerfile
|
||||
- mkdocs.yml
|
||||
|
||||
# A dict controlling use of manifest directives used in building the collection artifact. The key 'directives' is a
|
||||
# list of MANIFEST.in style
|
||||
# L(directives,https://packaging.python.org/en/latest/guides/using-manifest-in/#manifest-in-commands). The key
|
||||
# 'omit_default_directives' is a boolean that controls whether the default directives are used. Mutually exclusive
|
||||
# with 'build_ignore'
|
||||
# manifest: null
|
1
gitlab-ci
Submodule
1
gitlab-ci
Submodule
Submodule gitlab-ci added at a24f352ca3
52
meta/runtime.yml
Normal file
52
meta/runtime.yml
Normal file
@ -0,0 +1,52 @@
|
||||
---
|
||||
# Collections must specify a minimum required ansible version to upload
|
||||
# to galaxy
|
||||
requires_ansible: '>=2.14.0'
|
||||
|
||||
# Content that Ansible needs to load from another location or that has
|
||||
# been deprecated/removed
|
||||
# plugin_routing:
|
||||
# action:
|
||||
# redirected_plugin_name:
|
||||
# redirect: ns.col.new_location
|
||||
# deprecated_plugin_name:
|
||||
# deprecation:
|
||||
# removal_version: "4.0.0"
|
||||
# warning_text: |
|
||||
# See the porting guide on how to update your playbook to
|
||||
# use ns.col.another_plugin instead.
|
||||
# removed_plugin_name:
|
||||
# tombstone:
|
||||
# removal_version: "2.0.0"
|
||||
# warning_text: |
|
||||
# See the porting guide on how to update your playbook to
|
||||
# use ns.col.another_plugin instead.
|
||||
# become:
|
||||
# cache:
|
||||
# callback:
|
||||
# cliconf:
|
||||
# connection:
|
||||
# doc_fragments:
|
||||
# filter:
|
||||
# httpapi:
|
||||
# inventory:
|
||||
# lookup:
|
||||
# module_utils:
|
||||
# modules:
|
||||
# netconf:
|
||||
# shell:
|
||||
# strategy:
|
||||
# terminal:
|
||||
# test:
|
||||
# vars:
|
||||
|
||||
# Python import statements that Ansible needs to load from another location
|
||||
# import_redirection:
|
||||
# ansible_collections.ns.col.plugins.module_utils.old_location:
|
||||
# redirect: ansible_collections.ns.col.plugins.module_utils.new_location
|
||||
|
||||
# Groups of actions/modules that take a common set of options
|
||||
# action_groups:
|
||||
# group_name:
|
||||
# - module1
|
||||
# - module2
|
61
mkdocs.yml
Normal file
61
mkdocs.yml
Normal file
@ -0,0 +1,61 @@
|
||||
INHERIT: website-template/mkdocs.yml
|
||||
|
||||
docs_dir: 'docs'
|
||||
|
||||
repo_name: Kubernetes Ansible Collection
|
||||
repo_url: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes
|
||||
edit_uri: '/-/ide/project/nofusscomputing/projects/ansible/collections/kubernetes/edit/development/-/docs/'
|
||||
|
||||
nav:
|
||||
- Home: index.md
|
||||
|
||||
- Articles:
|
||||
|
||||
- articles/index.md
|
||||
|
||||
- Projects:
|
||||
|
||||
- projects/index.md
|
||||
|
||||
- Ansible:
|
||||
|
||||
- projects/ansible/index.md
|
||||
|
||||
- Execution Environment:
|
||||
|
||||
- projects/ansible/execution_environment/index.md
|
||||
|
||||
- Playbooks:
|
||||
|
||||
- projects/ansible/playbooks/index.md
|
||||
|
||||
- Collections:
|
||||
|
||||
- projects/ansible/collection/index.md
|
||||
|
||||
- Kubernetes:
|
||||
|
||||
- projects/ansible/collection/kubernetes/index.md
|
||||
|
||||
- Role nfc_kubernetes:
|
||||
|
||||
- projects/ansible/collection/kubernetes/roles/nfc_kubernetes/index.md
|
||||
|
||||
- projects/ansible/collection/kubernetes/roles/nfc_kubernetes/ansible.md
|
||||
|
||||
- projects/ansible/collection/kubernetes/roles/nfc_kubernetes/firewall.md
|
||||
|
||||
- projects/ansible/collection/kubernetes/roles/nfc_kubernetes/rbac.md
|
||||
|
||||
- projects/ansible/collection/kubernetes/roles/nfc_kubernetes/release_notes.md
|
||||
|
||||
- Role kubernetes_netbox:
|
||||
|
||||
- projects/ansible/collection/kubernetes/roles/kubernetes_netbox/index.md
|
||||
|
||||
- Operations:
|
||||
|
||||
- operations/index.md
|
||||
|
||||
- Contact Us: contact.md
|
||||
|
64
playbooks/install.yaml
Normal file
64
playbooks/install.yaml
Normal file
@ -0,0 +1,64 @@
|
||||
---
|
||||
- name: Install K3s Kubernetes
|
||||
hosts: |-
|
||||
{%- if nfc_pb_host is defined -%}
|
||||
|
||||
{{ nfc_pb_host }}
|
||||
|
||||
{%- elif nfc_pb_kubernetes_cluster_name is defined -%}
|
||||
|
||||
kubernetes_cluster_{{ nfc_pb_kubernetes_cluster_name | lower }}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- if ansible_limit is defined -%}
|
||||
|
||||
{{ ansible_limit }}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
localhost
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif %}
|
||||
become: true
|
||||
gather_facts: true
|
||||
|
||||
|
||||
tasks:
|
||||
|
||||
|
||||
- name: Install/Configure Kubernetes
|
||||
ansible.builtin.include_role:
|
||||
name: nfc_kubernetes
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
# vars:
|
||||
|
||||
#
|
||||
# Future feature, add playbook to import to awx
|
||||
#
|
||||
# nfc_pb_awx_tower_template:
|
||||
|
||||
|
||||
# - name: "Collection/NoFussComputing/Kubernetes/Install"
|
||||
# ask_credential_on_launch: true
|
||||
# ask_job_type_on_launch: true
|
||||
# ask_limit_on_launch: true
|
||||
# ask_tags_on_launch: true
|
||||
# ask_variables_on_launch: true
|
||||
# description: |
|
||||
# Playbook to Install/Configure Kubernetes using configuration
|
||||
# from code.
|
||||
# execution_environment: "No Fuss Computing EE"
|
||||
# job_type: "check"
|
||||
# labels:
|
||||
# - cluster
|
||||
# - k3s
|
||||
# - kubernetes
|
||||
# verbosity: 2
|
||||
# use_fact_cache: true
|
||||
# survey_enabled: false
|
64
playbooks/netbox.yaml
Normal file
64
playbooks/netbox.yaml
Normal file
@ -0,0 +1,64 @@
|
||||
---
|
||||
- name: Install K3s Kubernetes
|
||||
hosts: |-
|
||||
{%- if nfc_pb_host is defined -%}
|
||||
|
||||
{{ nfc_pb_host }}
|
||||
|
||||
{%- elif nfc_pb_kubernetes_cluster_name is defined -%}
|
||||
|
||||
kubernetes_cluster_{{ nfc_pb_kubernetes_cluster_name | lower }}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- if ansible_limit is defined -%}
|
||||
|
||||
{{ ansible_limit }}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
localhost
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif %}
|
||||
become: false
|
||||
gather_facts: false
|
||||
|
||||
|
||||
tasks:
|
||||
|
||||
|
||||
- name: Configure NetBox for Kubernetes Deployment(s)
|
||||
ansible.builtin.include_role:
|
||||
name: kubernetes_netbox
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
# vars:
|
||||
|
||||
#
|
||||
# Future feature, add playbook to import to awx
|
||||
#
|
||||
# nfc_pb_awx_tower_template:
|
||||
|
||||
|
||||
# - name: "Collection/NoFussComputing/Kubernetes/NetBox/Configure"
|
||||
# ask_credential_on_launch: true
|
||||
# ask_job_type_on_launch: true
|
||||
# ask_limit_on_launch: true
|
||||
# ask_tags_on_launch: true
|
||||
# ask_variables_on_launch: true
|
||||
# description: |
|
||||
# Playbook to Install/Configure Kubernetes using configuration
|
||||
# from code.
|
||||
# execution_environment: "No Fuss Computing EE"
|
||||
# job_type: "check"
|
||||
# labels:
|
||||
# - cluster
|
||||
# - k3s
|
||||
# - kubernetes
|
||||
# verbosity: 2
|
||||
# use_fact_cache: true
|
||||
# survey_enabled: false
|
0
plugins/.gitkeep
Normal file
0
plugins/.gitkeep
Normal file
2
requirements.txt
Normal file
2
requirements.txt
Normal file
@ -0,0 +1,2 @@
|
||||
pynetbox
|
||||
pytz
|
9
roles/defaults/main.yaml
Normal file
9
roles/defaults/main.yaml
Normal file
@ -0,0 +1,9 @@
|
||||
---
|
||||
|
||||
#
|
||||
# NetBox Access Variables. Required
|
||||
#
|
||||
|
||||
# nfc_pb_api_netbox_url: # ENV [NETBOX_API]
|
||||
# nfc_pb_api_netbox_token: # ENV [NETBOX_TOKEN]
|
||||
# nfc_pb_api_netbox_validate_cert: true # ENV [NETBOX_VALIDATE_CERT]
|
3
roles/kubernetes_netbox/README.md
Normal file
3
roles/kubernetes_netbox/README.md
Normal file
@ -0,0 +1,3 @@
|
||||
## No Fuss Computing - Ansible Role kubernetes_netbox
|
||||
|
||||
Nothing to see here
|
30
roles/kubernetes_netbox/meta/main.yaml
Normal file
30
roles/kubernetes_netbox/meta/main.yaml
Normal file
@ -0,0 +1,30 @@
|
||||
galaxy_info:
|
||||
|
||||
role_name: kubernetes_netbox
|
||||
|
||||
author: No Fuss Computing
|
||||
|
||||
description: Configure the required items within Netbox to support deploying kubernetes from Netbox configuration.
|
||||
|
||||
issue_tracker_url: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes
|
||||
|
||||
license: MIT
|
||||
|
||||
min_ansible_version: '2.15'
|
||||
|
||||
platforms:
|
||||
|
||||
- name: Debian
|
||||
versions:
|
||||
- bullseye
|
||||
- bookworm
|
||||
|
||||
- name: Ubuntu
|
||||
versions:
|
||||
- 21
|
||||
|
||||
galaxy_tags:
|
||||
- cluster
|
||||
- k3s
|
||||
- kubernetes
|
||||
- netbox
|
255
roles/kubernetes_netbox/tasks/cluster.yaml
Normal file
255
roles/kubernetes_netbox/tasks/cluster.yaml
Normal file
@ -0,0 +1,255 @@
|
||||
---
|
||||
|
||||
# add cluster type kubernetes
|
||||
|
||||
- name: Create Custom Field - Configure Firewall
|
||||
netbox.netbox.netbox_custom_field:
|
||||
netbox_url: "{{ lookup('env', 'NETBOX_API') | default(nfc_pb_api_netbox_url) }}"
|
||||
netbox_token: "{{ lookup('env', 'NETBOX_TOKEN') | default(nfc_pb_api_netbox_token) }}"
|
||||
data:
|
||||
content_types:
|
||||
- virtualization.cluster
|
||||
default: null
|
||||
group_name: Kubernetes
|
||||
label: Configure Firewall
|
||||
name: nfc_role_kubernetes_configure_firewall
|
||||
type: boolean
|
||||
ui_visibility: 'hidden-ifunset'
|
||||
# is_cloneable: false
|
||||
weight: 100
|
||||
state: present
|
||||
validate_certs: "{{ lookup('env', 'NETBOX_VALIDATE_CERT') | default(nfc_pb_api_netbox_validate_cert) | default(true) | bool }}"
|
||||
delegate_to: localhost
|
||||
failed_when: >
|
||||
custom_field_tmp.msg != 'ui_visibility does not exist on existing object. Check to make sure valid field.'
|
||||
and
|
||||
custom_field_tmp.diff is not defined
|
||||
register: custom_field_tmp
|
||||
|
||||
|
||||
- name: Create Custom Field - ETCD Enabled
|
||||
netbox.netbox.netbox_custom_field:
|
||||
netbox_url: "{{ lookup('env', 'NETBOX_API') | default(nfc_pb_api_netbox_url) }}"
|
||||
netbox_token: "{{ lookup('env', 'NETBOX_TOKEN') | default(nfc_pb_api_netbox_token) }}"
|
||||
data:
|
||||
content_types:
|
||||
- virtualization.cluster
|
||||
default: null
|
||||
group_name: Kubernetes
|
||||
label: ETCD Enabled
|
||||
name: nfc_role_kubernetes_etcd_enabled
|
||||
type: boolean
|
||||
ui_visibility: hidden-ifunset
|
||||
# is_cloneable: false
|
||||
weight: 100
|
||||
state: present
|
||||
validate_certs: "{{ lookup('env', 'NETBOX_VALIDATE_CERT') | default(nfc_pb_api_netbox_validate_cert) | default(true) | bool }}"
|
||||
delegate_to: localhost
|
||||
failed_when: >
|
||||
custom_field_tmp.msg != 'ui_visibility does not exist on existing object. Check to make sure valid field.'
|
||||
and
|
||||
custom_field_tmp.diff is not defined
|
||||
register: custom_field_tmp
|
||||
|
||||
|
||||
- name: Create Custom Field - Install OLM
|
||||
netbox.netbox.netbox_custom_field:
|
||||
netbox_url: "{{ lookup('env', 'NETBOX_API') | default(nfc_pb_api_netbox_url) }}"
|
||||
netbox_token: "{{ lookup('env', 'NETBOX_TOKEN') | default(nfc_pb_api_netbox_token) }}"
|
||||
data:
|
||||
content_types:
|
||||
- virtualization.cluster
|
||||
default: null
|
||||
group_name: Kubernetes
|
||||
label: Install OLM
|
||||
name: nfc_role_kubernetes_install_olm
|
||||
type: boolean
|
||||
ui_visibility: hidden-ifunset
|
||||
# is_cloneable: false
|
||||
weight: 100
|
||||
state: present
|
||||
validate_certs: "{{ lookup('env', 'NETBOX_VALIDATE_CERT') | default(nfc_pb_api_netbox_validate_cert) | default(true) | bool }}"
|
||||
delegate_to: localhost
|
||||
failed_when: >
|
||||
custom_field_tmp.msg != 'ui_visibility does not exist on existing object. Check to make sure valid field.'
|
||||
and
|
||||
custom_field_tmp.diff is not defined
|
||||
register: custom_field_tmp
|
||||
|
||||
|
||||
- name: Create Custom Field - Install Helm
|
||||
netbox.netbox.netbox_custom_field:
|
||||
netbox_url: "{{ lookup('env', 'NETBOX_API') | default(nfc_pb_api_netbox_url) }}"
|
||||
netbox_token: "{{ lookup('env', 'NETBOX_TOKEN') | default(nfc_pb_api_netbox_token) }}"
|
||||
data:
|
||||
content_types:
|
||||
- virtualization.cluster
|
||||
default: null
|
||||
group_name: Kubernetes
|
||||
label: Install Helm
|
||||
name: nfc_role_kubernetes_install_helm
|
||||
type: boolean
|
||||
ui_visibility: hidden-ifunset
|
||||
# is_cloneable: false
|
||||
weight: 100
|
||||
state: present
|
||||
validate_certs: "{{ lookup('env', 'NETBOX_VALIDATE_CERT') | default(nfc_pb_api_netbox_validate_cert) | default(true) | bool }}"
|
||||
delegate_to: localhost
|
||||
failed_when: >
|
||||
custom_field_tmp.msg != 'ui_visibility does not exist on existing object. Check to make sure valid field.'
|
||||
and
|
||||
custom_field_tmp.diff is not defined
|
||||
register: custom_field_tmp
|
||||
|
||||
|
||||
- name: Create Custom Field - Install KubeVirt
|
||||
netbox.netbox.netbox_custom_field:
|
||||
netbox_url: "{{ lookup('env', 'NETBOX_API') | default(nfc_pb_api_netbox_url) }}"
|
||||
netbox_token: "{{ lookup('env', 'NETBOX_TOKEN') | default(nfc_pb_api_netbox_token) }}"
|
||||
data:
|
||||
content_types:
|
||||
- virtualization.cluster
|
||||
default: null
|
||||
group_name: Kubernetes
|
||||
label: Install KubeVirt
|
||||
name: nfc_role_kubernetes_install_kubevirt
|
||||
type: boolean
|
||||
ui_visibility: hidden-ifunset
|
||||
# is_cloneable: false
|
||||
weight: 100
|
||||
state: present
|
||||
validate_certs: "{{ lookup('env', 'NETBOX_VALIDATE_CERT') | default(nfc_pb_api_netbox_validate_cert) | default(true) | bool }}"
|
||||
delegate_to: localhost
|
||||
failed_when: >
|
||||
custom_field_tmp.msg != 'ui_visibility does not exist on existing object. Check to make sure valid field.'
|
||||
and
|
||||
custom_field_tmp.diff is not defined
|
||||
register: custom_field_tmp
|
||||
|
||||
|
||||
- name: Create Custom Field - KubeVirt Operator Replicas
|
||||
netbox.netbox.netbox_custom_field:
|
||||
netbox_url: "{{ lookup('env', 'NETBOX_API') | default(nfc_pb_api_netbox_url) }}"
|
||||
netbox_token: "{{ lookup('env', 'NETBOX_TOKEN') | default(nfc_pb_api_netbox_token) }}"
|
||||
data:
|
||||
content_types:
|
||||
- virtualization.cluster
|
||||
default: null
|
||||
group_name: Kubernetes
|
||||
label: KubeVirt Operator Replicas
|
||||
name: nfc_role_kubernetes_kubevirt_operator_replicas
|
||||
type: integer
|
||||
ui_visibility: hidden-ifunset
|
||||
# is_cloneable: false
|
||||
validation_minimum: 1
|
||||
weight: 100
|
||||
state: present
|
||||
validate_certs: "{{ lookup('env', 'NETBOX_VALIDATE_CERT') | default(nfc_pb_api_netbox_validate_cert) | default(true) | bool }}"
|
||||
delegate_to: localhost
|
||||
failed_when: >
|
||||
custom_field_tmp.msg != 'ui_visibility does not exist on existing object. Check to make sure valid field.'
|
||||
and
|
||||
custom_field_tmp.diff is not defined
|
||||
register: custom_field_tmp
|
||||
|
||||
|
||||
- name: Create Custom Field - Enable MetalLB
|
||||
netbox.netbox.netbox_custom_field:
|
||||
netbox_url: "{{ lookup('env', 'NETBOX_API') | default(nfc_pb_api_netbox_url) }}"
|
||||
netbox_token: "{{ lookup('env', 'NETBOX_TOKEN') | default(nfc_pb_api_netbox_token) }}"
|
||||
data:
|
||||
content_types:
|
||||
- virtualization.cluster
|
||||
default: null
|
||||
group_name: Kubernetes
|
||||
label: Enable MetalLB
|
||||
name: nfc_kubernetes_enable_metallb
|
||||
type: boolean
|
||||
ui_visibility: hidden-ifunset
|
||||
# is_cloneable: false
|
||||
weight: 100
|
||||
state: present
|
||||
validate_certs: "{{ lookup('env', 'NETBOX_VALIDATE_CERT') | default(nfc_pb_api_netbox_validate_cert) | default(true) | bool }}"
|
||||
delegate_to: localhost
|
||||
failed_when: >
|
||||
custom_field_tmp.msg != 'ui_visibility does not exist on existing object. Check to make sure valid field.'
|
||||
and
|
||||
custom_field_tmp.diff is not defined
|
||||
register: custom_field_tmp
|
||||
|
||||
|
||||
- name: Create Custom Field - Enable ServiceLB (klipper)
|
||||
netbox.netbox.netbox_custom_field:
|
||||
netbox_url: "{{ lookup('env', 'NETBOX_API') | default(nfc_pb_api_netbox_url) }}"
|
||||
netbox_token: "{{ lookup('env', 'NETBOX_TOKEN') | default(nfc_pb_api_netbox_token) }}"
|
||||
data:
|
||||
content_types:
|
||||
- virtualization.cluster
|
||||
default: null
|
||||
group_name: Kubernetes
|
||||
label: Enable ServiceLB (klipper)
|
||||
name: nfc_kubernetes_enable_servicelb
|
||||
type: boolean
|
||||
ui_visibility: hidden-ifunset
|
||||
# is_cloneable: false
|
||||
weight: 100
|
||||
state: present
|
||||
validate_certs: "{{ lookup('env', 'NETBOX_VALIDATE_CERT') | default(nfc_pb_api_netbox_validate_cert) | default(true) | bool }}"
|
||||
delegate_to: localhost
|
||||
failed_when: >
|
||||
custom_field_tmp.msg != 'ui_visibility does not exist on existing object. Check to make sure valid field.'
|
||||
and
|
||||
custom_field_tmp.diff is not defined
|
||||
register: custom_field_tmp
|
||||
|
||||
|
||||
- name: Create Custom Field - Pod Subnet
|
||||
netbox.netbox.netbox_custom_field:
|
||||
netbox_url: "{{ lookup('env', 'NETBOX_API') | default(nfc_pb_api_netbox_url) }}"
|
||||
netbox_token: "{{ lookup('env', 'NETBOX_TOKEN') | default(nfc_pb_api_netbox_token) }}"
|
||||
data:
|
||||
content_types:
|
||||
- virtualization.cluster
|
||||
default: null
|
||||
group_name: Kubernetes
|
||||
label: Pod Subnet
|
||||
name: nfc_role_kubernetes_pod_subnet
|
||||
object_type: ipam.prefix
|
||||
type: object
|
||||
ui_visibility: hidden-ifunset
|
||||
# is_cloneable: false
|
||||
weight: 100
|
||||
state: present
|
||||
validate_certs: "{{ lookup('env', 'NETBOX_VALIDATE_CERT') | default(nfc_pb_api_netbox_validate_cert) | default(true) | bool }}"
|
||||
delegate_to: localhost
|
||||
failed_when: >
|
||||
custom_field_tmp.msg != 'ui_visibility does not exist on existing object. Check to make sure valid field.'
|
||||
and
|
||||
custom_field_tmp.diff is not defined
|
||||
register: custom_field_tmp
|
||||
|
||||
|
||||
- name: Create Custom Field - Service Subnet
|
||||
netbox.netbox.netbox_custom_field:
|
||||
netbox_url: "{{ lookup('env', 'NETBOX_API') | default(nfc_pb_api_netbox_url) }}"
|
||||
netbox_token: "{{ lookup('env', 'NETBOX_TOKEN') | default(nfc_pb_api_netbox_token) }}"
|
||||
data:
|
||||
content_types:
|
||||
- virtualization.cluster
|
||||
default: null
|
||||
group_name: Kubernetes
|
||||
label: Service Subnet
|
||||
name: nfc_role_kubernetes_service_subnet
|
||||
object_type: ipam.prefix
|
||||
type: object
|
||||
ui_visibility: hidden-ifunset
|
||||
# is_cloneable: false
|
||||
weight: 100
|
||||
state: present
|
||||
validate_certs: "{{ lookup('env', 'NETBOX_VALIDATE_CERT') | default(nfc_pb_api_netbox_validate_cert) | default(true) | bool }}"
|
||||
delegate_to: localhost
|
||||
failed_when: >
|
||||
custom_field_tmp.msg != 'ui_visibility does not exist on existing object. Check to make sure valid field.'
|
||||
and
|
||||
custom_field_tmp.diff is not defined
|
||||
register: custom_field_tmp
|
21
roles/kubernetes_netbox/tasks/main.yaml
Normal file
21
roles/kubernetes_netbox/tasks/main.yaml
Normal file
@ -0,0 +1,21 @@
|
||||
---
|
||||
|
||||
- name: Setup NetBox for Kubernetes Cluster Deployments
|
||||
ansible.builtin.include_tasks:
|
||||
file: cluster.yaml
|
||||
apply:
|
||||
tags:
|
||||
- always
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
- name: Setup NetBox for Kubernetes Service Deployments
|
||||
ansible.builtin.include_tasks:
|
||||
file: services.yaml
|
||||
apply:
|
||||
tags:
|
||||
- always
|
||||
tags:
|
||||
- never
|
||||
- services
|
50
roles/kubernetes_netbox/tasks/services.yaml
Normal file
50
roles/kubernetes_netbox/tasks/services.yaml
Normal file
@ -0,0 +1,50 @@
|
||||
---
|
||||
|
||||
- name: Create Custom Field - Instance
|
||||
netbox.netbox.netbox_custom_field:
|
||||
netbox_url: "{{ lookup('env', 'NETBOX_API') | default(nfc_pb_api_netbox_url) }}"
|
||||
netbox_token: "{{ lookup('env', 'NETBOX_TOKEN') | default(nfc_pb_api_netbox_token) }}"
|
||||
data:
|
||||
content_types:
|
||||
- ipam.service
|
||||
group_name: Kubernetes
|
||||
label: Instance Name
|
||||
description: "Name of the Instance to be deployed"
|
||||
name: service_kubernetes_instance
|
||||
type: text
|
||||
ui_visibility: hidden-ifunset
|
||||
# is_cloneable: true
|
||||
weight: 100
|
||||
state: present
|
||||
validate_certs: "{{ lookup('env', 'NETBOX_VALIDATE_CERT') | default(nfc_pb_api_netbox_validate_cert) | default(true) | bool }}"
|
||||
delegate_to: localhost
|
||||
failed_when: >
|
||||
custom_field_tmp.msg != 'ui_visibility does not exist on existing object. Check to make sure valid field.'
|
||||
and
|
||||
custom_field_tmp.diff is not defined
|
||||
register: custom_field_tmp
|
||||
|
||||
|
||||
- name: Create Custom Field - Namespace
|
||||
netbox.netbox.netbox_custom_field:
|
||||
netbox_url: "{{ lookup('env', 'NETBOX_API') | default(nfc_pb_api_netbox_url) }}"
|
||||
netbox_token: "{{ lookup('env', 'NETBOX_TOKEN') | default(nfc_pb_api_netbox_token) }}"
|
||||
data:
|
||||
content_types:
|
||||
- ipam.service
|
||||
group_name: Kubernetes
|
||||
label: Service Namespace
|
||||
description: "Deployment Namespace"
|
||||
name: service_kubernetes_namespace
|
||||
type: text
|
||||
ui_visibility: hidden-ifunset
|
||||
# is_cloneable: true
|
||||
weight: 100
|
||||
state: present
|
||||
validate_certs: "{{ lookup('env', 'NETBOX_VALIDATE_CERT') | default(nfc_pb_api_netbox_validate_cert) | default(true) | bool }}"
|
||||
delegate_to: localhost
|
||||
failed_when: >
|
||||
custom_field_tmp.msg != 'ui_visibility does not exist on existing object. Check to make sure valid field.'
|
||||
and
|
||||
custom_field_tmp.diff is not defined
|
||||
register: custom_field_tmp
|
3
roles/nfc_kubernetes/README.md
Normal file
3
roles/nfc_kubernetes/README.md
Normal file
@ -0,0 +1,3 @@
|
||||
## No Fuss Computing - Ansible Role nfc_kubernetes
|
||||
|
||||
Nothing to see here
|
196
roles/nfc_kubernetes/defaults/main.yml
Normal file
196
roles/nfc_kubernetes/defaults/main.yml
Normal file
@ -0,0 +1,196 @@
|
||||
|
||||
# Depreciated:
|
||||
# Calico is being migrated to use the calico operator.
|
||||
# in a near future release, this method of deploying calico
|
||||
# will be removed. use tag `operator_migrate_calico` to migrate
|
||||
calico_image_tag: v3.25.0 # Depreciated
|
||||
# EoF Depreciated
|
||||
# SoF New Variables
|
||||
nfc_role_kubernetes_calico_version: v3.27.0
|
||||
# nfc_kubernetes_tigera_operator_registry: quay.io
|
||||
# nfc_kubernetes_tigera_operator_image: tigera/operator
|
||||
# nfc_kubernetes_tigera_operator_tag: v1.32.3 # Calico v3.27.0
|
||||
# EoF New Variables, EEoF Depreciated
|
||||
|
||||
|
||||
nfc_kubernetes_enable_metallb: false
|
||||
nfc_kubernetes_enable_servicelb: false
|
||||
|
||||
|
||||
nfc_role_kubernetes_container_images:
|
||||
|
||||
kubevirt_operator:
|
||||
name: Kubevirt Operator
|
||||
registry: quay.io
|
||||
image: kubevirt/virt-operator
|
||||
tag: v1.2.0
|
||||
|
||||
tigera_operator:
|
||||
name: Tigera Operator
|
||||
registry: quay.io
|
||||
image: tigera/operator
|
||||
tag: v1.32.3 # Calico v3.27.0
|
||||
|
||||
|
||||
nfc_role_kubernetes_cluster_domain: cluster.local
|
||||
|
||||
nfc_role_kubernetes_configure_firewall: true
|
||||
|
||||
nfc_role_kubernetes_etcd_enabled: false
|
||||
|
||||
nfc_role_kubernetes_install_olm: false
|
||||
|
||||
nfc_role_kubernetes_install_helm: true
|
||||
|
||||
nfc_role_kubernetes_install_kubevirt: false
|
||||
|
||||
nfc_role_kubernetes_kubevirt_operator_replicas: 1
|
||||
|
||||
nfc_role_kubernetes_oidc_enabled: false
|
||||
|
||||
nfc_role_kubernetes_resolv_conf_file: /etc/resolv.conf
|
||||
|
||||
nfc_role_kubernetes_pod_subnet: 172.16.248.0/21
|
||||
nfc_role_kubernetes_service_subnet: 172.16.244.0/22
|
||||
|
||||
nfc_role_kubernetes_prime: true # Mandatory for a node designated as the prime master node
|
||||
nfc_role_kubernetes_master: true # Mandatory for a node designated as a master node and the prime master node
|
||||
nfc_role_kubernetes_worker: false # Mandatory for a node designated as a worker node
|
||||
|
||||
############################################################################################################
|
||||
#
|
||||
# Old Vars requiring refactoring
|
||||
#
|
||||
# ############################################################################################################
|
||||
|
||||
|
||||
KubernetesVersion: '1.26.12' # must match the repository release version
|
||||
kubernetes_version_olm: '0.27.0'
|
||||
|
||||
|
||||
|
||||
KubernetesVersion_k3s_prefix: '+k3s1'
|
||||
|
||||
kubernetes_private_container_registry: [] # Optional, Array. if none use `[]`
|
||||
|
||||
kubernetes_etcd_snapshot_cron_schedule: '0 */12 * * *'
|
||||
kubernetes_etcd_snapshot_retention: 5
|
||||
|
||||
# host_external_ip: '' # Optional, String. External IP Address for host.
|
||||
|
||||
kube_apiserver_arg_audit_log_maxage: 2
|
||||
|
||||
kubelet_arg_system_reserved_cpu: 450m
|
||||
kubelet_arg_system_reserved_memory: 512Mi
|
||||
kubelet_arg_system_reserved_storage: 8Gi
|
||||
|
||||
|
||||
nfc_kubernetes:
|
||||
enable_firewall: false # Optional, bool enable firewall rules from role 'nfc_firewall'
|
||||
|
||||
nfc_kubernetes_no_restart: false # Set to true to prevent role from restarting kubernetes on the host(s)
|
||||
nfc_kubernetes_no_restart_master: false # Set to true to prevent role from restarting kubernetes on master host(s)
|
||||
nfc_kubernetes_no_restart_prime: false # Set to true to prevent role from restarting kubernetes on prime host
|
||||
nfc_kubernetes_no_restart_slave: false # Set to true to prevent role from restarting kubernetes on slave host(s)
|
||||
|
||||
|
||||
k3s:
|
||||
files:
|
||||
|
||||
- name: audit.yaml
|
||||
path: /var/lib/rancher/k3s/server
|
||||
content: |
|
||||
apiVersion: audit.k8s.io/v1
|
||||
kind: Policy
|
||||
rules:
|
||||
- level: Request
|
||||
when: "{{ nfc_role_kubernetes_master }}"
|
||||
|
||||
- name: 90-kubelet.conf
|
||||
path: /etc/sysctl.d
|
||||
content: |
|
||||
vm.panic_on_oom=0
|
||||
vm.overcommit_memory=1
|
||||
kernel.panic=10
|
||||
kernel.panic_on_oops=1
|
||||
kernel.keys.root_maxbytes=25000000
|
||||
|
||||
- name: psa.yaml
|
||||
path: /var/lib/rancher/k3s/server
|
||||
content: ""
|
||||
# apiVersion: apiserver.conf0 */12 * * *ig.k8s.io/v1
|
||||
# kind: AdmissionConfiguration
|
||||
# plugins:
|
||||
# - name: PodSecurity
|
||||
# configuration:
|
||||
# apiVersion: pod-security.admission.config.k8s.io/v1beta1
|
||||
# kind: PodSecurityConfiguration
|
||||
# defaults:
|
||||
# enforce: "restricted"
|
||||
# enforce-version: "latest"
|
||||
# audit: "restricted"
|
||||
# audit-version: "latest"
|
||||
# warn: "restricted"
|
||||
# warn-version: "latest"
|
||||
# exemptions:
|
||||
# usernames: []
|
||||
# runtimeClasses: []
|
||||
# namespaces: [kube-system]
|
||||
when: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname }}"
|
||||
|
||||
|
||||
#############################################################################################
|
||||
# Cluster Config when stored in Inventory
|
||||
#
|
||||
# One required per cluster. recommend creating one ansible host group per cluster.
|
||||
#############################################################################################
|
||||
# kubernetes_config: # Dict. Cluster Config
|
||||
# cluster:
|
||||
# access: # Mandatory. List, DNS host name or IPv4/IPv6 Address.
|
||||
# # if none use '[]'
|
||||
# - 'my.dnshostname.com'
|
||||
# - '2001:4860:4860::8888'
|
||||
# - '192.168.1.1'
|
||||
# domain_name: earth # Mandatory, String. Cluster Domain Name
|
||||
# group_name: # Mandatory, String. name of the ansible inventory group containg all cluster hosts
|
||||
# prime:
|
||||
# name: k3s-prod # Mandatory, String. Ansible inventory_host that will
|
||||
# # act as the prime master node.
|
||||
# networking:
|
||||
# encrypt: true # Optional, Boolean. default `false`. Install wireguard for inter-node encryption
|
||||
# podSubnet: 172.16.70.0/24 # Mandatory, String. CIDR
|
||||
# ServiceSubnet: 172.16.72.0/24 # Mandatory, String. CIDR
|
||||
#
|
||||
#
|
||||
# helm:
|
||||
# enabled: true # Optional, Boolean. default=false. Install Helm Binary
|
||||
#
|
||||
#
|
||||
# kube_virt:
|
||||
# enabled: false # Optional, Boolean. default=false. Install KubeVirt
|
||||
#
|
||||
# nodes: [] # Optional, List of String. default=inventory_hostname. List of nodes to install kibevirt on.
|
||||
#
|
||||
# operator:
|
||||
# replicas: 2 # Optional, Integer. How many virt_operators to deploy.
|
||||
#
|
||||
#
|
||||
# oidc: # Used to configure Kubernetes with OIDC Authentication.
|
||||
# enabled: true # Mandatory, boolen. speaks for itself.
|
||||
# issuer_url: https://domainname.com/realms/realm-name # Mandatory, String. URL of OIDC Provider
|
||||
# client_id: kubernetes-test # Mandatory, string. OIDC Client ID
|
||||
# username_claim: preferred_username # Mandatory, String. Claim name containing username.
|
||||
# username_prefix: oidc # Optional, String. What to prefix to username
|
||||
# groups_claim: roles # Mandatory, String. Claim name containing groups
|
||||
# groups_prefix: '' # Optional, String. string to append to groups
|
||||
#
|
||||
# hosts:
|
||||
#
|
||||
# my-host-name:
|
||||
# labels:
|
||||
# mylabel: myvalue
|
||||
#
|
||||
# taints:
|
||||
# - effect: NoSchedule
|
||||
# key: taintkey
|
||||
# value: taintvalue
|
41
roles/nfc_kubernetes/handlers/main.yml
Normal file
41
roles/nfc_kubernetes/handlers/main.yml
Normal file
@ -0,0 +1,41 @@
|
||||
---
|
||||
|
||||
- name: Reboot Node
|
||||
ansible.builtin.reboot:
|
||||
reboot_timeout: 300
|
||||
listen: reboot_host
|
||||
when: ansible_connection == 'ssh'
|
||||
|
||||
|
||||
- name: Restart Kubernetes
|
||||
ansible.builtin.service:
|
||||
name: |-
|
||||
{%- if nfc_role_kubernetes_master | default(false) | bool -%}
|
||||
k3s
|
||||
{%- else -%}
|
||||
k3s-agent
|
||||
{%- endif -%}
|
||||
state: restarted
|
||||
listen: kubernetes_restart
|
||||
when: |-
|
||||
not (
|
||||
nfc_kubernetes_no_restart
|
||||
or
|
||||
(
|
||||
nfc_role_kubernetes_master
|
||||
and
|
||||
nfc_kubernetes_no_restart_master
|
||||
)
|
||||
or
|
||||
(
|
||||
inventory_hostname == kubernetes_config.cluster.prime.name | default(inventory_hostname)
|
||||
and
|
||||
nfc_kubernetes_no_restart_prime
|
||||
)
|
||||
or
|
||||
(
|
||||
nfc_role_kubernetes_worker
|
||||
and
|
||||
nfc_kubernetes_no_restart_slave
|
||||
)
|
||||
)
|
29
roles/nfc_kubernetes/meta/main.yml
Normal file
29
roles/nfc_kubernetes/meta/main.yml
Normal file
@ -0,0 +1,29 @@
|
||||
galaxy_info:
|
||||
|
||||
role_name: nfc_kubernetes
|
||||
|
||||
author: No Fuss Computing
|
||||
|
||||
description: Install and configure single and multi-node K3s Kubernetes cluster.
|
||||
|
||||
issue_tracker_url: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes
|
||||
|
||||
license: MIT
|
||||
|
||||
min_ansible_version: '2.15'
|
||||
|
||||
platforms:
|
||||
|
||||
- name: Debian
|
||||
versions:
|
||||
- bullseye
|
||||
- bookworm
|
||||
|
||||
- name: Ubuntu
|
||||
versions:
|
||||
- 21
|
||||
|
||||
galaxy_tags:
|
||||
- cluster
|
||||
- k3s
|
||||
- kubernetes
|
30
roles/nfc_kubernetes/tasks/helm/main.yaml
Normal file
30
roles/nfc_kubernetes/tasks/helm/main.yaml
Normal file
@ -0,0 +1,30 @@
|
||||
---
|
||||
|
||||
- name: Fetch Helm APT Key
|
||||
ansible.builtin.get_url:
|
||||
url: https://baltocdn.com/helm/signing.asc
|
||||
dest: /usr/share/keyrings/helm.asc
|
||||
mode: 740
|
||||
changed_when: not ansible_check_mode
|
||||
delay: 10
|
||||
retries: 3
|
||||
|
||||
|
||||
- name: Add Helm Repository
|
||||
ansible.builtin.apt_repository:
|
||||
repo: >-
|
||||
deb [arch={%- if ansible_architecture == 'aarch64' -%}
|
||||
arm64
|
||||
{%- else -%}
|
||||
amd64
|
||||
{%- endif %} signed-by=/usr/share/keyrings/helm.asc] http://baltocdn.com/helm/stable/{{
|
||||
ansible_os_family | lower }}/ all main
|
||||
state: present
|
||||
filename: helm
|
||||
|
||||
|
||||
- name: Install Helm
|
||||
ansible.builtin.apt:
|
||||
package:
|
||||
- helm
|
||||
state: present
|
147
roles/nfc_kubernetes/tasks/install.yaml
Normal file
147
roles/nfc_kubernetes/tasks/install.yaml
Normal file
@ -0,0 +1,147 @@
|
||||
---
|
||||
|
||||
- name: Get Hostname
|
||||
ansible.builtin.command:
|
||||
cmd: hostname
|
||||
changed_when: false
|
||||
check_mode: false
|
||||
register: hostname_to_check
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
- name: Hostname Check
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- hostname_to_check.stdout == inventory_hostname
|
||||
msg: The hostname must match the inventory_hostname
|
||||
tags:
|
||||
- always
|
||||
when: >
|
||||
inventory_hostname != 'localhost'
|
||||
|
||||
|
||||
- name: Testing Env Variables
|
||||
ansible.builtin.set_fact:
|
||||
ansible_default_ipv4: {
|
||||
"address": "127.0.0.1"
|
||||
}
|
||||
check_mode: false
|
||||
tags:
|
||||
- always
|
||||
when: >
|
||||
lookup('ansible.builtin.env', 'CI_COMMIT_SHA') | default('') != ''
|
||||
|
||||
|
||||
- name: Gather Facts required by role
|
||||
ansible.builtin.setup:
|
||||
gather_subset:
|
||||
- all_ipv4_addresses
|
||||
- os_family
|
||||
- processor
|
||||
tags:
|
||||
- always
|
||||
when: >
|
||||
ansible_architecture is not defined
|
||||
or
|
||||
ansible_default_ipv4 is not defined
|
||||
or
|
||||
ansible_os_family is not defined
|
||||
|
||||
|
||||
- name: Check Machine Architecture
|
||||
ansible.builtin.set_fact:
|
||||
nfc_kubernetes_install_architectures: "{{ nfc_kubernetes_install_architectures | default({}) | combine({ansible_architecture: ''}) }}"
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
- name: Configure Kubernetes Firewall Rules
|
||||
ansible.builtin.include_role:
|
||||
name: nofusscomputing.firewall.nfc_firewall
|
||||
vars:
|
||||
nfc_role_firewall_firewall_type: iptables
|
||||
nfc_role_firewall_additional_rules: "{{ ( lookup('template', 'vars/firewall_rules.yaml') | from_yaml ).kubernetes_chains }}"
|
||||
tags:
|
||||
- always
|
||||
when: >
|
||||
nfc_role_kubernetes_configure_firewall
|
||||
|
||||
|
||||
- name: Install required software
|
||||
ansible.builtin.apt:
|
||||
name: python3-pip
|
||||
install_recommends: false
|
||||
state: present
|
||||
when: >
|
||||
install_kubernetes | default(true) | bool
|
||||
and
|
||||
not kubernetes_installed | default(false) | bool
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
- name: K3s Install
|
||||
ansible.builtin.include_tasks:
|
||||
file: k3s/install.yaml
|
||||
apply:
|
||||
tags:
|
||||
- always
|
||||
when: >
|
||||
install_kubernetes | default(true) | bool
|
||||
and
|
||||
not kubernetes_installed | default(false) | bool
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
- name: K3s Configure
|
||||
ansible.builtin.include_tasks:
|
||||
file: k3s/configure.yaml
|
||||
apply:
|
||||
tags:
|
||||
- always
|
||||
when: >
|
||||
install_kubernetes | default(true) | bool
|
||||
and
|
||||
kubernetes_installed | default(false) | bool
|
||||
and
|
||||
not nfc_role_kubernetes_cluster_upgraded | default(false) | bool
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
- name: Kubevert
|
||||
ansible.builtin.include_tasks:
|
||||
file: kubevirt/main.yaml
|
||||
apply:
|
||||
tags:
|
||||
- always
|
||||
when: >
|
||||
kubernetes_installed | default(false) | bool
|
||||
and
|
||||
kubernetes_config.kube_virt.enabled | default(nfc_role_kubernetes_install_kubevirt)
|
||||
and
|
||||
inventory_hostname in kubernetes_config.kube_virt.nodes | default([ inventory_hostname ]) | list
|
||||
and
|
||||
not nfc_role_kubernetes_cluster_upgraded | default(false) | bool
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
- name: Helm
|
||||
ansible.builtin.include_tasks:
|
||||
file: helm/main.yaml
|
||||
apply:
|
||||
tags:
|
||||
- always
|
||||
when: >
|
||||
kubernetes_installed | default(false) | bool
|
||||
and
|
||||
kubernetes_config.helm.enabled | default(nfc_role_kubernetes_install_helm)
|
||||
and
|
||||
nfc_role_kubernetes_master
|
||||
and
|
||||
not nfc_role_kubernetes_cluster_upgraded | default(false) | bool
|
||||
tags:
|
||||
- always
|
78
roles/nfc_kubernetes/tasks/k3s/configure.yaml
Normal file
78
roles/nfc_kubernetes/tasks/k3s/configure.yaml
Normal file
@ -0,0 +1,78 @@
|
||||
---
|
||||
|
||||
- name: Additional config files
|
||||
ansible.builtin.copy:
|
||||
content: |
|
||||
{{ item.content }}
|
||||
dest: "{{ item.path }}/{{ item.name }}"
|
||||
mode: '740'
|
||||
owner: root
|
||||
group: root
|
||||
loop: "{{ k3s.files }}"
|
||||
when: item.when | default(false) | bool
|
||||
|
||||
|
||||
- name: Check if FW dir exists
|
||||
ansible.builtin.stat:
|
||||
name: /etc/iptables-reloader/rules.d
|
||||
register: firewall_rules_dir_metadata
|
||||
|
||||
|
||||
- name: Copy Templates
|
||||
ansible.builtin.template:
|
||||
src: "{{ item.src }}"
|
||||
dest: "{{ item.dest }}"
|
||||
owner: root
|
||||
mode: '700'
|
||||
force: true
|
||||
notify: "{{ item.notify | default(omit) }}"
|
||||
loop: "{{ templates_to_apply }}"
|
||||
when: >
|
||||
item.when | default(true) | bool
|
||||
vars:
|
||||
templates_to_apply:
|
||||
|
||||
- src: kubernetes-manifest-rbac.yaml.j2
|
||||
dest: /var/lib/rancher/k3s/server/manifests/rbac-authorization-common.yaml
|
||||
when: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname }}"
|
||||
|
||||
- src: iptables-kubernetes.rules.j2
|
||||
dest: "/etc/iptables-reloader/rules.d/iptables-kubernetes.rules"
|
||||
notify: firewall_reloader
|
||||
when: |-
|
||||
{%- if firewall_installed -%}
|
||||
|
||||
{{ firewall_rules_dir_metadata.stat.exists }}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
false
|
||||
|
||||
{%- endif %}
|
||||
|
||||
|
||||
- name: Add Kubernetes Node Labels
|
||||
ansible.builtin.copy:
|
||||
content: |-
|
||||
apiVersion: v1
|
||||
kind: Node
|
||||
metadata:
|
||||
name: "{{ inventory_hostname }}"
|
||||
{% if kubernetes_config.hosts[inventory_hostname].labels | default([]) | list | length > 0 -%}
|
||||
labels:
|
||||
{{ kubernetes_config.hosts[inventory_hostname].labels | to_nice_yaml | indent(4) }}
|
||||
{%- endif +%}
|
||||
{% if kubernetes_config.hosts[inventory_hostname].taints | default([]) | list | length > 0 -%}
|
||||
spec:
|
||||
taints:
|
||||
{{ kubernetes_config.hosts[inventory_hostname].taints | to_nice_yaml(indent=0) | indent(4) }}
|
||||
{% endif %}
|
||||
dest: /var/lib/rancher/k3s/server/manifests/node-manifest-{{ inventory_hostname }}.yaml
|
||||
owner: root
|
||||
group: root
|
||||
mode: '700'
|
||||
delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
|
||||
when:
|
||||
kubernetes_config.hosts[inventory_hostname].labels | default([]) | list | length > 0
|
||||
or
|
||||
kubernetes_config.hosts[inventory_hostname].taints | default([]) | list | length > 0
|
779
roles/nfc_kubernetes/tasks/k3s/install.yaml
Normal file
779
roles/nfc_kubernetes/tasks/k3s/install.yaml
Normal file
@ -0,0 +1,779 @@
|
||||
---
|
||||
|
||||
- name: Install required python modules
|
||||
ansible.builtin.pip:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
loop: "{{ pip_packages }}"
|
||||
vars:
|
||||
pip_packages:
|
||||
- kubernetes>=12.0.0
|
||||
- PyYAML>=3.11
|
||||
|
||||
|
||||
- name: Check for calico deployment manifest
|
||||
ansible.builtin.stat:
|
||||
name: /var/lib/rancher/k3s/server/manifests/calico.yaml
|
||||
register: file_calico_yaml_metadata
|
||||
|
||||
|
||||
- name: Check for calico Operator deployment manifest
|
||||
ansible.builtin.stat:
|
||||
name: /var/lib/rancher/k3s/ansible/deployment-manifest-calico_operator.yaml
|
||||
register: file_calico_operator_yaml_metadata
|
||||
|
||||
|
||||
- name: Install dependent packages
|
||||
ansible.builtin.apt:
|
||||
name: "{{ package }}"
|
||||
state: present
|
||||
loop: "{{ packages }}"
|
||||
loop_control:
|
||||
loop_var: package
|
||||
vars:
|
||||
packages:
|
||||
- wget
|
||||
- curl
|
||||
- iptables
|
||||
- jq
|
||||
- wireguard
|
||||
|
||||
|
||||
# Strip any swap entries out of /etc/fstab so swap stays off across reboots.
# Debian-family hosts only (ansible_lsb.codename = bullseye, major_release = 11).
- name: Remove swapfile from /etc/fstab
  ansible.posix.mount:
    name: "{{ mount_name }}"
    fstype: swap
    state: absent
  loop:
    # Quoted so YAML cannot mis-type the values ('none' is a null-lookalike).
    - 'swap'
    - 'none'
  loop_control:
    loop_var: mount_name
  when:
    - ansible_os_family == 'Debian'
  tags:
    - install
|
||||
|
||||
|
||||
# Best-effort swap disable. Inside CI (no swap / restricted container) the
# command may fail; the rescue asserts we really are in CI before accepting it.
- name: Testing Environment try/catch
  block:


    - name: Disable swap
      ansible.builtin.command:
        cmd: swapoff -a
      changed_when: false
      when:
        - ansible_os_family == 'Debian'
      tags:
        - install


  rescue:

    - name: Check if inside Gitlab CI
      ansible.builtin.assert:
        that:
          # CI_COMMIT_SHA is set by GitLab CI; empty/unset means a real host.
          - lookup('ansible.builtin.env', 'CI_COMMIT_SHA') | default('') != ''
        # Fix: typo "enviroment" -> "environment" in the operator-facing message.
        success_msg: "Inside testing environment, 'Disable swap' error OK"
        fail_msg: "You should figure out what went wrong"
|
||||
|
||||
|
||||
- name: Check an armbian os system
|
||||
ansible.builtin.stat:
|
||||
path: /etc/default/armbian-zram-config
|
||||
register: armbian_stat_result
|
||||
|
||||
|
||||
- name: Armbian Disable Swap
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
sed -i 's/\# SWAP=false/SWAP=false/g' /etc/default/armbian-zram-config;
|
||||
sed -i 's/ENABLED=true/ENABLED=false/g' /etc/default/armbian-zram-config;
|
||||
args:
|
||||
executable: bash
|
||||
changed_when: false
|
||||
when: armbian_stat_result.stat.exists
|
||||
|
||||
|
||||
# Create the k3s config/manifest/ansible state directories.
# Fix: mode was the bare integer 700, which Ansible treats as DECIMAL 700
# (0o1274), not permissions 0700. Octal modes must be quoted strings with a
# leading zero.
- name: Create Required directories
  ansible.builtin.file:
    name: "{{ item.name }}"
    state: "{{ item.state }}"
    mode: "{{ item.mode }}"
  loop: "{{ dirs }}"
  vars:
    dirs:
      - name: /etc/rancher/k3s
        state: directory
        mode: '0700'
      - name: /var/lib/rancher/k3s/server/logs
        state: directory
        mode: '0700'
      - name: /var/lib/rancher/k3s/server/manifests
        state: directory
        mode: '0700'
      - name: /var/lib/rancher/k3s/ansible
        state: directory
        mode: '0700'
|
||||
|
||||
|
||||
- name: Add sysctl net.ipv4.ip_forward
|
||||
ansible.posix.sysctl:
|
||||
name: "{{ item.name }}"
|
||||
value: "{{ item.value }}"
|
||||
sysctl_set: true
|
||||
state: present
|
||||
reload: true
|
||||
loop: "{{ settings }}"
|
||||
notify: reboot_host # On change reboot
|
||||
vars:
|
||||
settings:
|
||||
- name: net.ipv4.ip_forward
|
||||
value: '1'
|
||||
- name: fs.inotify.max_user_watches
|
||||
value: '524288'
|
||||
- name: fs.inotify.max_user_instances
|
||||
value: '512'
|
||||
- name: net.ipv6.conf.all.disable_ipv6
|
||||
value: '1'
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
|
||||
|
||||
- name: Check for Network Manager Directory
|
||||
ansible.builtin.stat:
|
||||
name: /etc/NetworkManager/conf.d
|
||||
register: directory_network_manager_metadata
|
||||
|
||||
|
||||
- name: Network Manager Setup
|
||||
ansible.builtin.copy:
|
||||
content: |-
|
||||
#
|
||||
# K3s Configuration for Network Manager
|
||||
#
|
||||
# Managed By ansible/role/nfc_kubernetes
|
||||
#
|
||||
# Dont edit this file directly as it will be overwritten.
|
||||
#
|
||||
[keyfile]
|
||||
unmanaged-devices=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico;interface-name:vxlan-v6.calico;interface-name:wireguard.cali;interface-name:wg-v6.cali
|
||||
dest: /etc/NetworkManager/conf.d/calico.conf
|
||||
mode: '770'
|
||||
owner: root
|
||||
group: root
|
||||
diff: true
|
||||
when: directory_network_manager_metadata.stat.exists
|
||||
|
||||
|
||||
- name: File Metadata - k3s binary
|
||||
ansible.builtin.stat:
|
||||
checksum_algorithm: sha256
|
||||
name: /usr/local/bin/k3s
|
||||
register: metadata_file_k3s_existing_binary
|
||||
|
||||
|
||||
- name: File Metadata - k3s[-agent].service
|
||||
ansible.builtin.stat:
|
||||
checksum_algorithm: sha256
|
||||
name: |-
|
||||
/etc/systemd/system/k3s
|
||||
{%- if not nfc_role_kubernetes_master | default(false) | bool -%}
|
||||
-agent
|
||||
{%- endif -%}
|
||||
.service
|
||||
register: metadata_file_k3s_service
|
||||
|
||||
|
||||
- name: Directory Metadata - /etc/rancher/k3s/k3s.yaml
|
||||
ansible.builtin.stat:
|
||||
name: /etc/rancher/k3s/k3s.yaml
|
||||
register: metadata_dir_etc_k3s
|
||||
|
||||
|
||||
- name: File Metadata - /var/lib/rancher/k3s/server/token
|
||||
ansible.builtin.stat:
|
||||
checksum_algorithm: sha256
|
||||
name: /var/lib/rancher/k3s/server/token
|
||||
register: metadata_file_var_k3s_token
|
||||
|
||||
|
||||
- name: Config Link
|
||||
ansible.builtin.shell:
|
||||
cmd: >
|
||||
ln -s /etc/rancher/k3s/k3s.yaml ~/.kube/config
|
||||
executable: bash
|
||||
creates: ~/.kube/config
|
||||
when: >
|
||||
nfc_role_kubernetes_master | default(false) | bool
|
||||
and
|
||||
metadata_dir_etc_k3s.stat.exists | default(false) | bool
|
||||
|
||||
|
||||
- name: Fetch Kubernetes Node Object
|
||||
kubernetes.core.k8s_info:
|
||||
kind: Node
|
||||
name: "{{ inventory_hostname }}"
|
||||
register: kubernetes_node
|
||||
when: >
|
||||
metadata_file_k3s_existing_binary.stat.exists | default(false) | bool
|
||||
and
|
||||
metadata_file_k3s_service.stat.exists | default(false) | bool
|
||||
and
|
||||
metadata_dir_etc_k3s.stat.exists | default(false) | bool
|
||||
and
|
||||
metadata_file_var_k3s_token.stat.exists | default(false) | bool
|
||||
|
||||
|
||||
- name: Fetch Installed K3s Metadata
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
export installed_version=$(k3s --version | grep k3s | awk '{print $3}');
|
||||
export installed="
|
||||
{%- if
|
||||
metadata_file_k3s_existing_binary.stat.exists | default(false) | bool
|
||||
and
|
||||
metadata_file_k3s_service.stat.exists | default(false) | bool
|
||||
and
|
||||
metadata_dir_etc_k3s.stat.exists | default(false) | bool
|
||||
and
|
||||
metadata_file_var_k3s_token.stat.exists | default(false) | bool
|
||||
-%}
|
||||
true
|
||||
{%- else -%}
|
||||
false
|
||||
{%- endif -%}";
|
||||
|
||||
if ! service k3s status > /dev/null; then
|
||||
|
||||
export installed='false';
|
||||
|
||||
fi
|
||||
|
||||
export running_version="{{ kubernetes_node.resources[0].status.nodeInfo.kubeletVersion | default('0') }}";
|
||||
|
||||
export correct_hash=$(wget -q https://github.com/k3s-io/k3s/releases/download/v
|
||||
{{-KubernetesVersion + KubernetesVersion_k3s_prefix | urlencode -}}
|
||||
/sha256sum-
|
||||
{%- if ansible_architecture | lower == 'x86_64' -%}
|
||||
amd64
|
||||
{%- elif ansible_architecture | lower == 'aarch64' -%}
|
||||
arm64
|
||||
{%- endif %}.txt -O - | grep -v 'images' | awk '{print $1}');
|
||||
|
||||
cat <<EOF
|
||||
{
|
||||
"current_hash": "{{ metadata_file_k3s_existing_binary.stat.checksum | default('') }}",
|
||||
"current_version": "${installed_version}",
|
||||
"desired_hash": "${correct_hash}",
|
||||
"desired_version": "v{{ KubernetesVersion + KubernetesVersion_k3s_prefix | default('') }}",
|
||||
"installed": ${installed},
|
||||
"running_version": "${running_version}"
|
||||
}
|
||||
|
||||
EOF
|
||||
|
||||
executable: /bin/bash
|
||||
changed_when: false
|
||||
check_mode: false
|
||||
failed_when: false
|
||||
register: k3s_metadata
|
||||
|
||||
|
||||
- name: K3s Metadata Fact
|
||||
ansible.builtin.set_fact:
|
||||
node_k3s: "{{ k3s_metadata.stdout | from_yaml }}"
|
||||
|
||||
|
||||
- name: Cached K3s Binary Details
|
||||
ansible.builtin.stat:
|
||||
path: "/tmp/k3s.{{ ansible_architecture }}"
|
||||
checksum_algorithm: sha256
|
||||
delegate_to: localhost
|
||||
register: file_cached_k3s_binary
|
||||
vars:
|
||||
ansible_connection: local
|
||||
|
||||
|
||||
- name: Remove Cached K3s Binaries
|
||||
ansible.builtin.file:
|
||||
path: "/tmp/k3s.{{ ansible_architecture }}"
|
||||
state: absent
|
||||
delegate_to: localhost
|
||||
vars:
|
||||
ansible_connection: local
|
||||
when: >
|
||||
file_cached_k3s_binary.stat.checksum | default('0') != node_k3s.desired_hash
|
||||
|
||||
# Workaround. See: https://github.com/ansible/awx/issues/15161
|
||||
- name: Build K3s Download URL
|
||||
ansible.builtin.set_fact:
|
||||
cacheable: false
|
||||
url_download_k3s: |-
|
||||
[
|
||||
{%- for key, value in nfc_kubernetes_install_architectures | dict2items -%}
|
||||
"https://github.com/k3s-io/k3s/releases/download/
|
||||
{{- node_k3s.desired_version | urlencode -}}
|
||||
/k3s
|
||||
{%- if key == 'aarch64' -%}
|
||||
-arm64
|
||||
{%- endif %}",
|
||||
{%- endfor -%}
|
||||
]
|
||||
changed_when: false
|
||||
check_mode: false
|
||||
delegate_to: localhost
|
||||
loop: "{{ nfc_kubernetes_install_architectures | dict2items }}"
|
||||
loop_control:
|
||||
loop_var: cpu_arch
|
||||
vars:
|
||||
ansible_connection: local
|
||||
|
||||
|
||||
# Download the k3s binary for every architecture in the cluster to the
# controller's /tmp cache (runs once, connection: local).
# Fix: the original failed_when contained a tautology —
#   (status != 200 or status != 304) is true for every status —
# so the status clause was dead and the task effectively failed only on a
# checksum mismatch. The condition below states that surviving behavior
# directly.
- name: Download K3s Binary
  ansible.builtin.uri:
    url: "{{ url | string }}"
    method: GET
    return_content: false
    status_code:
      - 200
      - 304
    dest: "/tmp/k3s.{{ ansible_architecture }}"
    mode: "744"
  changed_when: not ansible_check_mode
  check_mode: false
  delay: 10
  retries: 3
  register: k3s_download_files
  delegate_to: localhost
  # Fail only when the cached/downloaded file does not match the release hash.
  failed_when: >
    (lookup('ansible.builtin.file', '/tmp/k3s.' + ansible_architecture) | hash('sha256') | string) != node_k3s.desired_hash
  run_once: true
  when: ansible_os_family == 'Debian'
  loop: "{{ url_download_k3s | from_yaml }}"
  loop_control:
    loop_var: url
  vars:
    ansible_connection: local
|
||||
|
||||
|
||||
- name: Copy K3s binary to Host
|
||||
ansible.builtin.copy:
|
||||
src: "/tmp/k3s.{{ ansible_architecture }}"
|
||||
dest: "/usr/local/bin/k3s"
|
||||
mode: '741'
|
||||
owner: root
|
||||
group: root
|
||||
register: k3s_binary_copy
|
||||
when: >
|
||||
node_k3s.current_hash != node_k3s.desired_hash
|
||||
|
||||
|
||||
- name: K3s Binary Upgrade
|
||||
ansible.builtin.service:
|
||||
name: |-
|
||||
{%- if nfc_role_kubernetes_master | default(false) | bool -%}
|
||||
k3s
|
||||
{%- else -%}
|
||||
k3s-agent
|
||||
{%- endif %}
|
||||
state: restarted
|
||||
register: k3s_upgrade_service_restart
|
||||
when: >
|
||||
(
|
||||
k3s_binary_copy.changed | default(false) | bool
|
||||
and
|
||||
node_k3s.installed | default(false) | bool
|
||||
)
|
||||
or
|
||||
(
|
||||
node_k3s.running_version != node_k3s.desired_version
|
||||
and
|
||||
node_k3s.installed | default(false) | bool
|
||||
)
|
||||
|
||||
|
||||
- name: Create Fact - cluster_upgraded
|
||||
ansible.builtin.set_fact:
|
||||
nfc_role_kubernetes_cluster_upgraded: true
|
||||
cacheable: true
|
||||
when: >
|
||||
k3s_upgrade_service_restart.changed | default(false) | bool
|
||||
|
||||
|
||||
- name: Download Install Scripts
|
||||
ansible.builtin.uri:
|
||||
url: "{{ item.url }}"
|
||||
method: GET
|
||||
return_content: true
|
||||
status_code:
|
||||
- 200
|
||||
- 304
|
||||
dest: "{{ item.dest }}"
|
||||
mode: "744"
|
||||
check_mode: false
|
||||
changed_when: false
|
||||
delay: 10
|
||||
retries: 3
|
||||
register: k3s_download_script
|
||||
delegate_to: localhost
|
||||
run_once: true
|
||||
# no_log: true
|
||||
when: >
|
||||
ansible_os_family == 'Debian'
|
||||
and
|
||||
item.when | default(true) | bool
|
||||
and
|
||||
not nfc_role_kubernetes_cluster_upgraded | default(false) | bool
|
||||
loop: "{{ download_files }}"
|
||||
vars:
|
||||
ansible_connection: local
|
||||
download_files:
|
||||
- dest: /tmp/install.sh
|
||||
url: https://get.k3s.io
|
||||
- dest: /tmp/install_olm.sh
|
||||
url: https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/v{{ kubernetes_version_olm }}/scripts/install.sh
|
||||
when: "{{ nfc_role_kubernetes_install_olm }}"
|
||||
|
||||
|
||||
- name: Copy install scripts to Host
|
||||
ansible.builtin.copy:
|
||||
src: "{{ item.path }}"
|
||||
dest: "{{ item.path }}"
|
||||
mode: '755'
|
||||
owner: root
|
||||
group: root
|
||||
changed_when: false
|
||||
loop: "{{ install_scripts }}"
|
||||
vars:
|
||||
install_scripts:
|
||||
- path: "/tmp/install.sh"
|
||||
- path: "/tmp/install_olm.sh"
|
||||
when: "{{ nfc_role_kubernetes_install_olm }}"
|
||||
when: >
|
||||
item.when | default(true) | bool
|
||||
and
|
||||
not nfc_role_kubernetes_cluster_upgraded | default(false) | bool
|
||||
|
||||
|
||||
- name: Required Initial config files
|
||||
ansible.builtin.copy:
|
||||
content: |
|
||||
{{ item.content }}
|
||||
dest: "{{ item.path }}/{{ item.name }}"
|
||||
mode: '740'
|
||||
owner: root
|
||||
group: root
|
||||
loop: "{{ k3s.files }}"
|
||||
when: >
|
||||
item.when | default(true) | bool
|
||||
and
|
||||
not nfc_role_kubernetes_cluster_upgraded | default(false) | bool
|
||||
|
||||
|
||||
- name: Copy Intial required templates
|
||||
ansible.builtin.template:
|
||||
src: "{{ item.src }}"
|
||||
dest: "{{ item.dest }}"
|
||||
owner: root
|
||||
mode: '700'
|
||||
force: true
|
||||
notify: "{{ item.notify | default(omit) }}"
|
||||
loop: "{{ templates_to_apply }}"
|
||||
diff: true
|
||||
when: >
|
||||
item.when | default(true) | bool
|
||||
and
|
||||
not nfc_role_kubernetes_cluster_upgraded | default(false) | bool
|
||||
vars:
|
||||
templates_to_apply:
|
||||
- src: k3s-config.yaml.j2
|
||||
dest: /etc/rancher/k3s/config.yaml
|
||||
notify: kubernetes_restart
|
||||
- src: "calico.yaml.j2"
|
||||
dest: /var/lib/rancher/k3s/server/manifests/calico.yaml
|
||||
when: >
|
||||
{{
|
||||
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
and
|
||||
(
|
||||
(
|
||||
not file_calico_operator_yaml_metadata.stat.exists
|
||||
and
|
||||
file_calico_yaml_metadata.stat.exists
|
||||
and
|
||||
not node_k3s.installed | bool
|
||||
)
|
||||
or
|
||||
'calico_manifest' in ansible_run_tags
|
||||
)
|
||||
}}
|
||||
- src: k3s-registries.yaml.j2
|
||||
dest: /etc/rancher/k3s/registries.yaml
|
||||
notify: kubernetes_restart
|
||||
when: "{{ (kubernetes_private_container_registry | default([])) | from_yaml | list | length > 0 }}"
|
||||
|
||||
|
||||
# - name: Templates IPv6
|
||||
# ansible.builtin.template:
|
||||
# src: iptables-kubernetes.rules.j2
|
||||
# dest: "/etc/ip6tables.rules.d/ip6tables-kubernetes.rules"
|
||||
# owner: root
|
||||
# mode: '700'
|
||||
# force: true
|
||||
# vars:
|
||||
# ipv6: true
|
||||
|
||||
|
||||
- name: Set IPTables to legacy mode
|
||||
ansible.builtin.command:
|
||||
cmd: update-alternatives --set iptables /usr/sbin/iptables-legacy
|
||||
changed_when: false
|
||||
when: >
|
||||
not nfc_role_kubernetes_cluster_upgraded | default(false) | bool
|
||||
|
||||
|
||||
- name: Install K3s (prime master)
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
INSTALL_K3S_SKIP_DOWNLOAD=true \
|
||||
INSTALL_K3S_VERSION="{{ node_k3s.desired_version }}" \
|
||||
/tmp/install.sh {% if nfc_role_kubernetes_etcd_enabled %}--cluster-init{% endif %}
|
||||
changed_when: false
|
||||
when: >
|
||||
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
and
|
||||
not node_k3s.installed | bool
|
||||
and
|
||||
not nfc_role_kubernetes_cluster_upgraded | default(false) | bool
|
||||
|
||||
|
||||
- name: Install Calico Operator
|
||||
ansible.builtin.include_tasks:
|
||||
file: migrate_to_operator.yaml
|
||||
apply:
|
||||
tags:
|
||||
- always
|
||||
when: >-
|
||||
(
|
||||
(
|
||||
'operator_migrate_calico' in ansible_run_tags
|
||||
or
|
||||
'operator_calico' in ansible_run_tags
|
||||
)
|
||||
or
|
||||
not file_calico_yaml_metadata.stat.exists
|
||||
)
|
||||
and
|
||||
'calico_manifest' not in ansible_run_tags
|
||||
and
|
||||
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
and
|
||||
not nfc_role_kubernetes_cluster_upgraded | default(false) | bool
|
||||
|
||||
|
||||
- name: Install MetalLB Operator
|
||||
ansible.builtin.include_tasks:
|
||||
file: manifest_apply.yaml
|
||||
apply:
|
||||
tags:
|
||||
- always
|
||||
loop: "{{ manifests }}"
|
||||
loop_control:
|
||||
loop_var: manifest
|
||||
vars:
|
||||
manifests:
|
||||
- name: MetalLB Operator
|
||||
template: Deployment-manifest-MetalLB_Operator.yaml
|
||||
when: >-
|
||||
nfc_kubernetes_enable_metallb | default(false) | bool
|
||||
and
|
||||
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
and
|
||||
not nfc_role_kubernetes_cluster_upgraded | default(false) | bool
|
||||
|
||||
|
||||
- name: Wait for kubernetes prime to be ready
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
set -o pipefail
|
||||
if [ `which jq` ]; then
|
||||
echo $(kubectl get no $(hostname) -o json | jq .status.conditions[4].status | tr -d '"');
|
||||
else
|
||||
echo jq command not found;
|
||||
exit 127;
|
||||
fi
|
||||
executable: /bin/bash
|
||||
delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}"
|
||||
run_once: true
|
||||
register: kubernetes_ready_check
|
||||
retries: 30
|
||||
delay: 10
|
||||
until: >
|
||||
kubernetes_ready_check.stdout | default(false) | bool
|
||||
or
|
||||
kubernetes_ready_check.rc != 0
|
||||
changed_when: false
|
||||
failed_when: kubernetes_ready_check.rc != 0
|
||||
when: >
|
||||
not nfc_role_kubernetes_cluster_upgraded | default(false) | bool
|
||||
and
|
||||
not ansible_check_mode
|
||||
|
||||
|
||||
- name: Config Link
|
||||
ansible.builtin.shell:
|
||||
cmd: >
|
||||
ln -s /etc/rancher/k3s/k3s.yaml ~/.kube/config
|
||||
executable: bash
|
||||
creates: ~/.kube/config
|
||||
when: >
|
||||
nfc_role_kubernetes_master | default(false) | bool
|
||||
and
|
||||
not nfc_role_kubernetes_cluster_upgraded | default(false) | bool
|
||||
|
||||
|
||||
- name: Install olm
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
/tmp/install_olm.sh v{{ kubernetes_version_olm }}
|
||||
changed_when: false
|
||||
failed_when: >
|
||||
'already installed' not in install_olm.stdout
|
||||
and
|
||||
install_olm.rc == 1
|
||||
register: install_olm
|
||||
when: >
|
||||
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
and
|
||||
nfc_role_kubernetes_install_olm | default(false) | bool
|
||||
and
|
||||
not nfc_role_kubernetes_cluster_upgraded | default(false) | bool
|
||||
|
||||
|
||||
- name: Uninstall OLM
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
kubectl delete -n olm deployment packageserver;
|
||||
kubectl delete -n olm deployment catalog-operator;
|
||||
kubectl delete -n olm deployment olm-operator;
|
||||
|
||||
kubectl delete crd catalogsources.operators.coreos.com;
|
||||
kubectl delete crd clusterserviceversions.operators.coreos.com;
|
||||
kubectl delete crd installplans.operators.coreos.com;
|
||||
kubectl delete crd olmconfigs.operators.coreos.com;
|
||||
kubectl delete crd operatorconditions.operators.coreos.com;
|
||||
kubectl delete crd operatorgroups.operators.coreos.com;
|
||||
kubectl delete crd operators.operators.coreos.com;
|
||||
kubectl delete crd subscriptions.operators.coreos.com;
|
||||
|
||||
kubectl delete namespace operators --force;
|
||||
kubectl delete namespace olm --force;
|
||||
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
register: install_olm
|
||||
when: >
|
||||
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
and
|
||||
'olm_uninstall' in ansible_run_tags
|
||||
and
|
||||
not nfc_role_kubernetes_cluster_upgraded | default(false) | bool
|
||||
|
||||
|
||||
# Turn on Calico WireGuard encryption (v4 + v6) cluster-wide, prime node only.
# failed_when: false because a freshly-installed cluster may not have the
# felixconfiguration object yet.
# Fix: the tag guard used `or` between the two "not in ansible_run_tags"
# clauses, which is true unless BOTH operator tags are supplied — so the task
# ran even when one operator tag was given. The operator-install task treats
# the tags as alternatives, so this must be `and` (skip if EITHER operator
# tag is in use).
- name: Enable Cluster Encryption
  ansible.builtin.command:
    cmd: kubectl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabled":true,"wireguardEnabledV6":true}}'
  changed_when: false
  failed_when: false # New cluster will fail
  when: >
    kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
    and
    kubernetes_config.cluster.networking.encrypt | default(false) | bool
    and
    not nfc_role_kubernetes_cluster_upgraded | default(false) | bool
    and
    (
      'calico_manifest' in ansible_run_tags
      or
      (
        'operator_migrate_calico' not in ansible_run_tags
        and
        'operator_calico' not in ansible_run_tags
      )
    )
|
||||
|
||||
|
||||
- name: Fetch Join Token
|
||||
ansible.builtin.slurp:
|
||||
src: /var/lib/rancher/k3s/server/token
|
||||
delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}"
|
||||
run_once: true
|
||||
register: k3s_join_token
|
||||
no_log: true # Value is sensitive
|
||||
when: >
|
||||
not nfc_role_kubernetes_cluster_upgraded | default(false) | bool
|
||||
|
||||
|
||||
- name: Create Token fact
|
||||
ansible.builtin.set_fact:
|
||||
k3s_join_token: "{{ k3s_join_token.content | b64decode | replace('\n', '') }}"
|
||||
delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}"
|
||||
run_once: true
|
||||
no_log: true # Value is sensitive
|
||||
when: >
|
||||
not nfc_role_kubernetes_cluster_upgraded | default(false) | bool
|
||||
|
||||
|
||||
- name: Install K3s (master nodes)
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
INSTALL_K3S_EXEC="server" \
|
||||
INSTALL_K3S_SKIP_DOWNLOAD=true \
|
||||
INSTALL_K3S_VERSION="{{ node_k3s.desired_version }}" \
|
||||
K3S_TOKEN="{{ k3s_join_token }}" \
|
||||
/tmp/install.sh
|
||||
executable: /bin/bash
|
||||
changed_when: false
|
||||
when: >
|
||||
nfc_role_kubernetes_master | default(false) | bool
|
||||
and
|
||||
not kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
and
|
||||
not node_k3s.installed | bool
|
||||
and
|
||||
not nfc_role_kubernetes_cluster_upgraded | default(false) | bool
|
||||
|
||||
|
||||
# Join worker (agent) nodes to the cluster using the cached install script.
# Fix: INSTALL_K3S_VERSION was "v{{ node_k3s.desired_version }}", but
# node_k3s.desired_version is already built WITH the leading "v"
# (desired_version: "v{{ KubernetesVersion ... }}"), producing "vv1.x.y".
# The prime and master install tasks use the value unprefixed; match them.
- name: Install K3s (worker nodes)
  ansible.builtin.shell:
    cmd: |
      set -o pipefail
      INSTALL_K3S_EXEC="agent" \
      INSTALL_K3S_SKIP_DOWNLOAD=true \
      INSTALL_K3S_VERSION="{{ node_k3s.desired_version }}" \
      K3S_TOKEN="{{ k3s_join_token }}" \
      K3S_URL="https://{{ hostvars[kubernetes_config.cluster.prime.name | default(inventory_hostname)].ansible_host }}:6443" \
      /tmp/install.sh -
    executable: /bin/bash
  changed_when: false
  when: >
    not nfc_role_kubernetes_master | default(false) | bool
    and
    not kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
    and
    not node_k3s.installed | bool
    and
    not nfc_role_kubernetes_cluster_upgraded | default(false) | bool
|
||||
|
||||
|
||||
- name: Set Kubernetes Final Install Fact
|
||||
ansible.builtin.set_fact:
|
||||
kubernetes_installed: true
|
||||
# Clear Token as no llonger required and due to being a sensitive value
|
||||
k3s_join_token: null
|
49
roles/nfc_kubernetes/tasks/k3s/manifest_apply.yaml
Normal file
49
roles/nfc_kubernetes/tasks/k3s/manifest_apply.yaml
Normal file
@ -0,0 +1,49 @@
|
||||
---
|
||||
|
||||
# Save the manifests in a dir so that diff's can be shown for changes
|
||||
- name: Copy Manifest for addition - {{ manifest.name }}
|
||||
ansible.builtin.template:
|
||||
src: "{{ manifest.template }}"
|
||||
dest: "/var/lib/rancher/k3s/ansible/{{ manifest.template | lower | replace('.j2', '') }}"
|
||||
mode: '744'
|
||||
become: true
|
||||
diff: true
|
||||
|
||||
|
||||
- name: Try / Catch
|
||||
block:
|
||||
|
||||
# Try to create first, if fail use replace.
|
||||
- name: Apply Manifest Create - {{ manifest.name }}
|
||||
ansible.builtin.command:
|
||||
cmd: "kubectl create -f /var/lib/rancher/k3s/ansible/{{ manifest.template | lower | replace('.j2', '') }}"
|
||||
become: true
|
||||
changed_when: false
|
||||
failed_when: >
|
||||
'Error from server' in manifest_stdout.stderr
|
||||
register: manifest_stdout
|
||||
|
||||
|
||||
rescue:
|
||||
|
||||
|
||||
- name: TRACE - Manifest Create - {{ manifest.name }}
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ manifest_stdout }}"
|
||||
|
||||
|
||||
- name: Replace Manifests - "Rescue" - {{ manifest.name }}
|
||||
ansible.builtin.command:
|
||||
cmd: "kubectl replace -f /var/lib/rancher/k3s/ansible/{{ manifest.template | lower | replace('.j2', '') }}"
|
||||
become: true
|
||||
changed_when: false
|
||||
failed_when: >
|
||||
'Error from server' in manifest_stdout.stderr
|
||||
and
|
||||
'ensure CRDs are installed first' in manifest_stdout.stderr
|
||||
register: manifest_stdout
|
||||
|
||||
|
||||
- name: TRACE - Replace Manifest - "Rescue" - {{ manifest.name }}
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ manifest_stdout }}"
|
199
roles/nfc_kubernetes/tasks/k3s/migrate_to_operator.yaml
Normal file
199
roles/nfc_kubernetes/tasks/k3s/migrate_to_operator.yaml
Normal file
@ -0,0 +1,199 @@
|
||||
---
|
||||
|
||||
# Reference https://docs.tigera.io/calico/3.25/operations/operator-migration
|
||||
|
||||
# Script creation of imageset: https://docs.tigera.io/calico/latest/operations/image-options/imageset#create-an-imageset
|
||||
# above may pull sha for arch of machine who ran the script
|
||||
|
||||
- name: Try / Catch
|
||||
vars:
|
||||
operator_manifests:
|
||||
- Deployment-manifest-Calico_Operator.yaml.j2
|
||||
- Installation-manifest-Calico_Cluster.yaml.j2
|
||||
- FelixConfiguration-manifest-Calico_Cluster.yaml
|
||||
- IPPool-manifest-Calico_Cluster.yaml.j2
|
||||
- APIServer-manifest-Calico_Cluster.yaml
|
||||
- ConfigMap-manifest-Calico_Service_Endpoint.yaml.j2
|
||||
block:
|
||||
|
||||
|
||||
- name: Move Calico Manifest from addons directory
|
||||
ansible.builtin.command:
|
||||
cmd: mv /var/lib/rancher/k3s/server/manifests/calico.yaml /tmp/
|
||||
become: true
|
||||
changed_when: false
|
||||
when: file_calico_yaml_metadata.stat.exists
|
||||
|
||||
|
||||
- name: Remove addon from Kubernetes
|
||||
ansible.builtin.command:
|
||||
cmd: kubectl delete addon -n kube-system calico
|
||||
become: true
|
||||
changed_when: false
|
||||
when: file_calico_yaml_metadata.stat.exists
|
||||
|
||||
|
||||
- name: Uninstall Calico
|
||||
ansible.builtin.command:
|
||||
cmd: kubectl delete -f /tmp/calico.yaml
|
||||
become: true
|
||||
changed_when: false
|
||||
when: file_calico_yaml_metadata.stat.exists
|
||||
|
||||
|
||||
# Save the manifests in a dir so that diff's can be shown for changes
|
||||
- name: Copy Manifest for addition
|
||||
ansible.builtin.template:
|
||||
src: "{{ item }}"
|
||||
dest: "/var/lib/rancher/k3s/ansible/{{ item | lower | replace('.j2', '') }}"
|
||||
mode: '744'
|
||||
become: true
|
||||
diff: true
|
||||
loop: "{{ operator_manifests }}"
|
||||
|
||||
|
||||
- name: Try / Catch
|
||||
block:
|
||||
|
||||
|
||||
- name: Apply Operator Manifests
|
||||
ansible.builtin.command:
|
||||
cmd: "kubectl create -f /var/lib/rancher/k3s/ansible/{{ item | lower | replace('.j2', '') }}"
|
||||
become: true
|
||||
changed_when: false
|
||||
failed_when: >
|
||||
'Error from server' in operator_manifest_stdout.stderr
|
||||
loop: "{{ operator_manifests }}"
|
||||
register: operator_manifest_stdout
|
||||
|
||||
|
||||
rescue:
|
||||
|
||||
|
||||
- name: TRACE - Operator manifest apply
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ operator_manifest_stdout }}"
|
||||
|
||||
|
||||
- name: Apply Operator Manifests - "Rescue"
|
||||
ansible.builtin.command:
|
||||
cmd: "kubectl replace -f /var/lib/rancher/k3s/ansible/{{ item | lower | replace('.j2', '') }}"
|
||||
become: true
|
||||
changed_when: false
|
||||
failed_when: >
|
||||
'Error from server' in operator_manifest_stdout.stderr
|
||||
and
|
||||
'ensure CRDs are installed first' in operator_manifest_stdout.stderr
|
||||
loop: "{{ operator_manifests }}"
|
||||
register: operator_manifest_stdout
|
||||
|
||||
|
||||
- name: TRACE - Operator manifest apply. Rescued
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ operator_manifest_stdout }}"
|
||||
|
||||
|
||||
- name: Fetch Calico Kubectl Plugin
|
||||
ansible.builtin.uri:
|
||||
url: |-
|
||||
https://github.com/projectcalico/calico/releases/download/{{ nfc_role_kubernetes_calico_version }}/calicoctl-linux-
|
||||
{%- if cpu_arch.key == 'aarch64' -%}
|
||||
arm64
|
||||
{%- else -%}
|
||||
amd64
|
||||
{%- endif %}
|
||||
status_code:
|
||||
- 200
|
||||
- 304
|
||||
dest: "/tmp/kubectl-calico.{{ cpu_arch.key }}"
|
||||
mode: '777'
|
||||
owner: root
|
||||
group: 'root'
|
||||
changed_when: false
|
||||
check_mode: false
|
||||
become: true
|
||||
delegate_to: localhost
|
||||
loop: "{{ nfc_kubernetes_install_architectures | dict2items }}"
|
||||
loop_control:
|
||||
loop_var: cpu_arch
|
||||
vars:
|
||||
ansible_connection: local
|
||||
|
||||
|
||||
- name: Add calico Plugin
|
||||
ansible.builtin.copy:
|
||||
src: "/tmp/kubectl-calico.{{ ansible_architecture }}"
|
||||
dest: /usr/local/bin/kubectl-calico
|
||||
mode: '770'
|
||||
owner: root
|
||||
group: 'root'
|
||||
become: true
|
||||
when: nfc_role_kubernetes_master
|
||||
|
||||
|
||||
- name: Setup Automagic Host Endpoints
|
||||
ansible.builtin.shell:
|
||||
cmd: |-
|
||||
kubectl calico \
|
||||
patch kubecontrollersconfiguration \
|
||||
default --patch='{"spec": {"controllers": {"node": {"hostEndpoint": {"autoCreate": "Enabled"}}}}}'
|
||||
executable: bash
|
||||
become: true
|
||||
changed_when: false
|
||||
failed_when: false # fixme
|
||||
|
||||
|
||||
- name: Remove calico migration label
|
||||
ansible.builtin.shell:
|
||||
cmd: |-
|
||||
kubectl label \
|
||||
{{ inventory_hostname }} \
|
||||
projectcalico.org/operator-node-migration-
|
||||
executable: bash
|
||||
become: true
|
||||
delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}"
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
loop: "{{ groups[kubernetes_config.cluster.group_name | default('make_me_optional')] | default([]) }}"
|
||||
|
||||
# kubectl label node ip-10-229-92-202.eu-west-1.compute.internal projectcalico.org/operator-node-migration-
|
||||
# migration started
|
||||
|
||||
rescue:
|
||||
|
||||
|
||||
- name: Remove Operator Manifests
|
||||
ansible.builtin.command:
|
||||
cmd: "kubectl delete -f /var/lib/rancher/k3s/ansible/{{ item | lower | replace('.j2', '') }}"
|
||||
become: true
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
loop: "{{ operator_manifests }}"
|
||||
when: file_calico_yaml_metadata.stat.exists # Only rescue if it was a migration
|
||||
|
||||
|
||||
- name: Move Calico Manifest from addons directory
|
||||
ansible.builtin.command:
|
||||
cmd: mv /tmp/calico.yaml /var/lib/rancher/k3s/server/manifests/
|
||||
become: true
|
||||
changed_when: false
|
||||
when: file_calico_yaml_metadata.stat.exists
|
||||
|
||||
|
||||
- name: Re-install Calico
|
||||
ansible.builtin.command:
|
||||
cmd: kubectl apply -f /var/lib/rancher/k3s/server/manifests/calico.yaml
|
||||
become: true
|
||||
changed_when: false
|
||||
when: file_calico_yaml_metadata.stat.exists
|
||||
|
||||
|
||||
always:
|
||||
|
||||
|
||||
- name: Clean-up Temp File
|
||||
ansible.builtin.file:
|
||||
name: /tmp/calico.yaml
|
||||
state: absent
|
||||
become: true
|
||||
when: file_calico_yaml_metadata.stat.exists
|
72
roles/nfc_kubernetes/tasks/kubevirt/main.yaml
Normal file
72
roles/nfc_kubernetes/tasks/kubevirt/main.yaml
Normal file
@ -0,0 +1,72 @@
|
||||
---
|
||||
|
||||
- name: Validate Virtualization Support
|
||||
ansible.builtin.include_tasks:
|
||||
file: kubevirt/validate.yaml
|
||||
apply:
|
||||
tags:
|
||||
- always
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
- name: Deploy KubeVirt
|
||||
ansible.builtin.template:
|
||||
src: "{{ item }}"
|
||||
dest: "/var/lib/rancher/k3s/server/manifests/{{ item | replace('.j2', '') | lower }}"
|
||||
owner: root
|
||||
mode: '700'
|
||||
force: true
|
||||
notify: "{{ item.notify | default(omit) }}"
|
||||
loop: "{{ templates_to_apply }}"
|
||||
diff: true
|
||||
vars:
|
||||
templates_to_apply:
|
||||
- kubevirt-operator.yaml.j2
|
||||
- kubevirt-cr.yaml.j2
|
||||
|
||||
|
||||
- name: Fetch virtctl Kubectl Plugin
|
||||
ansible.builtin.uri:
|
||||
url: |-
|
||||
https://github.com/kubevirt/kubevirt/releases/download/{{
|
||||
nfc_role_kubernetes_container_images.kubevirt_operator.tag }}/virtctl-{{
|
||||
nfc_role_kubernetes_container_images.kubevirt_operator.tag }}-linux-
|
||||
{%- if cpu_arch.key == 'aarch64' -%}
|
||||
arm64
|
||||
{%- else -%}
|
||||
amd64
|
||||
{%- endif %}
|
||||
status_code:
|
||||
- 200
|
||||
- 304
|
||||
dest: "/tmp/kubectl-virtctl.{{ cpu_arch.key }}"
|
||||
mode: '777'
|
||||
owner: root
|
||||
group: 'root'
|
||||
changed_when: false
|
||||
become: true
|
||||
delegate_to: localhost
|
||||
loop: "{{ nfc_kubernetes_install_architectures | dict2items }}"
|
||||
loop_control:
|
||||
loop_var: cpu_arch
|
||||
vars:
|
||||
ansible_connection: local
|
||||
|
||||
|
||||
- name: Add virtctl Plugin
|
||||
ansible.builtin.copy:
|
||||
src: "/tmp/kubectl-virtctl.{{ ansible_architecture }}"
|
||||
dest: /usr/local/bin/kubectl-virt
|
||||
mode: '770'
|
||||
owner: root
|
||||
group: 'root'
|
||||
become: true
|
||||
when: nfc_role_kubernetes_master
|
||||
|
||||
|
||||
- name: Wait for KubeVirt to initialize
|
||||
ansible.builtin.command:
|
||||
cmd: kubectl -n kubevirt wait kv kubevirt --for condition=Available
|
||||
changed_when: false
|
||||
failed_when: false
|
25
roles/nfc_kubernetes/tasks/kubevirt/validate.yaml
Normal file
25
roles/nfc_kubernetes/tasks/kubevirt/validate.yaml
Normal file
@ -0,0 +1,25 @@
|
||||
---
|
||||
|
||||
- name: Install LibVirt-Clients
|
||||
ansible.builtin.apt:
|
||||
name: libvirt-clients
|
||||
state: present
|
||||
|
||||
|
||||
- name: Confirm Virtualization Support
|
||||
ansible.builtin.command:
|
||||
cmd: virt-host-validate qemu
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
register: virt_support_check_command
|
||||
|
||||
|
||||
- name: Confirm No QEMU failures
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- (": FAIL" | string) not in (item | string)
|
||||
- |
|
||||
(": PASS" | string) in (item | string)
|
||||
or
|
||||
(": WARN" | string) in (item | string)
|
||||
loop: "{{ virt_support_check_command.stdout_lines }}"
|
41
roles/nfc_kubernetes/tasks/main.yaml
Normal file
41
roles/nfc_kubernetes/tasks/main.yaml
Normal file
@ -0,0 +1,41 @@
|
||||
---
|
||||
|
||||
- name: Install/Configure Kubernetes Prime Master Node
|
||||
ansible.builtin.include_tasks:
|
||||
file: install.yaml
|
||||
tags:
|
||||
- always
|
||||
when:
|
||||
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
and
|
||||
nfc_role_kubernetes_prime | bool
|
||||
and
|
||||
not kubernetes_installed | default(false)
|
||||
|
||||
|
||||
- name: Install/Configure Kubernetes on remaining Master Nodes
|
||||
ansible.builtin.include_tasks:
|
||||
file: install.yaml
|
||||
tags:
|
||||
- always
|
||||
when:
|
||||
kubernetes_config.cluster.prime.name | default(inventory_hostname) != inventory_hostname
|
||||
and
|
||||
nfc_role_kubernetes_master | bool
|
||||
and
|
||||
not kubernetes_installed | default(false)
|
||||
|
||||
|
||||
- name: Install/Configure Kubernetes on Worker Nodes
|
||||
ansible.builtin.include_tasks:
|
||||
file: install.yaml
|
||||
tags:
|
||||
- always
|
||||
when: >
|
||||
nfc_role_kubernetes_worker | bool
|
||||
and
|
||||
not nfc_role_kubernetes_prime | bool
|
||||
and
|
||||
not nfc_role_kubernetes_master | bool
|
||||
and
|
||||
not kubernetes_installed | default(false)
|
@ -0,0 +1,51 @@
|
||||
# ---
|
||||
# apiVersion: kyverno.io/v1
|
||||
# kind: ClusterPolicy
|
||||
# metadata:
|
||||
# name: add-networkpolicy
|
||||
# labels:
|
||||
# <<: {{ kubernetes_config.defaults.labels.deployment_labels | from_yaml }}
|
||||
# annotations:
|
||||
# ansible.kubernetes.io/path: {{ item }}
|
||||
# policies.kyverno.io/title: Add Network Policy
|
||||
# policies.kyverno.io/category: Multi-Tenancy, EKS Best Practices
|
||||
# policies.kyverno.io/subject: NetworkPolicy
|
||||
# policies.kyverno.io/minversion: 1.6.0
|
||||
# policies.kyverno.io/description: >-
|
||||
# By default, Kubernetes allows communications across all Pods within a cluster.
|
||||
# The NetworkPolicy resource and a CNI plug-in that supports NetworkPolicy must be used to restrict
|
||||
# communications. A default NetworkPolicy should be configured for each Namespace to
|
||||
# default deny all ingress and egress traffic to the Pods in the Namespace. Application
|
||||
# teams can then configure additional NetworkPolicy resources to allow desired traffic
|
||||
# to application Pods from select sources. This policy will create a new NetworkPolicy resource
|
||||
# named `default-deny` which will deny all traffic anytime a new Namespace is created.
|
||||
# spec:
|
||||
# rules:
|
||||
# - name: default-deny
|
||||
# match:
|
||||
# any:
|
||||
# - resources:
|
||||
# kinds:
|
||||
# - Namespace
|
||||
# exclude:
|
||||
# any:
|
||||
# - resources:
|
||||
# namespaces:
|
||||
# - kube-metrics
|
||||
# - kube-policy
|
||||
# - kube-system
|
||||
# - default
|
||||
# generate:
|
||||
# apiVersion: networking.k8s.io/v1
|
||||
# kind: NetworkPolicy
|
||||
# name: default-deny
|
||||
# namespace: "{{'{{request.object.metadata.name}}'}}"
|
||||
# synchronize: true
|
||||
# data:
|
||||
# spec:
|
||||
# # select all pods in the namespace
|
||||
# podSelector: {}
|
||||
# # deny all traffic
|
||||
# policyTypes:
|
||||
# - Ingress
|
||||
# - Egress
|
@ -0,0 +1,60 @@
|
||||
# ---
|
||||
# apiVersion: kyverno.io/v1
|
||||
# kind: ClusterPolicy
|
||||
# metadata:
|
||||
# name: add-networkpolicy-dns
|
||||
# labels:
|
||||
# <<: {{ kubernetes_config.defaults.labels.deployment_labels | from_yaml }}
|
||||
# annotations:
|
||||
# ansible.kubernetes.io/path: {{ item }}
|
||||
# policies.kyverno.io/title: Add Network Policy for DNS
|
||||
# policies.kyverno.io/category: Multi-Tenancy, EKS Best Practices
|
||||
# policies.kyverno.io/subject: NetworkPolicy
|
||||
# kyverno.io/kyverno-version: 1.6.2
|
||||
# policies.kyverno.io/minversion: 1.6.0
|
||||
# kyverno.io/kubernetes-version: "1.23"
|
||||
# policies.kyverno.io/description: >-
|
||||
# By default, Kubernetes allows communications across all Pods within a cluster.
|
||||
# The NetworkPolicy resource and a CNI plug-in that supports NetworkPolicy must be used to restrict
|
||||
# communications. A default NetworkPolicy should be configured for each Namespace to
|
||||
# default deny all ingress and egress traffic to the Pods in the Namespace. Application
|
||||
# teams can then configure additional NetworkPolicy resources to allow desired traffic
|
||||
# to application Pods from select sources. This policy will create a new NetworkPolicy resource
|
||||
# named `default-deny` which will deny all traffic anytime a new Namespace is created.
|
||||
# spec:
|
||||
# generateExistingOnPolicyUpdate: true
|
||||
# rules:
|
||||
# - name: add-netpol-dns
|
||||
# match:
|
||||
# any:
|
||||
# - resources:
|
||||
# kinds:
|
||||
# - Namespace
|
||||
# exclude:
|
||||
# any:
|
||||
# - resources:
|
||||
# namespaces:
|
||||
# - kube-metrics
|
||||
# - kube-policy
|
||||
# - kube-system
|
||||
# - default
|
||||
# generate:
|
||||
# apiVersion: networking.k8s.io/v1
|
||||
# kind: NetworkPolicy
|
||||
# name: allow-dns
|
||||
# namespace: "{{'{{request.object.metadata.name}}'}}"
|
||||
# synchronize: true
|
||||
# data:
|
||||
# spec:
|
||||
# podSelector:
|
||||
# matchLabels: {}
|
||||
# policyTypes:
|
||||
# - Egress
|
||||
# egress:
|
||||
# - to:
|
||||
# - namespaceSelector:
|
||||
# matchLabels:
|
||||
# name: kube-system
|
||||
# ports:
|
||||
# - protocol: UDP
|
||||
# port: 53
|
@ -0,0 +1,48 @@
|
||||
---
|
||||
apiVersion: kyverno.io/v1
|
||||
kind: ClusterPolicy
|
||||
metadata:
|
||||
name: disallow-mutable-tag
|
||||
labels:
|
||||
<<: {{ kubernetes_config.defaults.labels.deployment_labels | from_yaml }}
|
||||
annotations:
|
||||
ansible.kubernetes.io/path: {{ item }}
|
||||
policies.kyverno.io/title: Disallow mutable Tag
|
||||
policies.kyverno.io/category: Best Practices
|
||||
policies.kyverno.io/minversion: 1.6.0
|
||||
policies.kyverno.io/severity: medium
|
||||
policies.kyverno.io/subject: Pod
|
||||
policies.kyverno.io/description: >-
|
||||
The ':latest', ':master' and ':dev(elopment)' tags are mutable and can lead to unexpected errors if the
|
||||
image changes. A best practice is to use an immutable tag that maps to
|
||||
a specific version of an application Pod. This policy validates that the image
|
||||
specifies a tag and that it is not called `latest` `master` or`dev(elopment)`.
|
||||
spec:
|
||||
#failurePolicy: Fail
|
||||
validationFailureAction: Audit
|
||||
background: true
|
||||
rules:
|
||||
- name: require-image-tag
|
||||
match:
|
||||
any:
|
||||
- resources:
|
||||
kinds:
|
||||
- Pod
|
||||
validate:
|
||||
message: "An image tag is required."
|
||||
pattern:
|
||||
spec:
|
||||
containers:
|
||||
- image: "*:*"
|
||||
- name: validate-image-tag
|
||||
match:
|
||||
any:
|
||||
- resources:
|
||||
kinds:
|
||||
- Pod
|
||||
validate:
|
||||
message: "Using a mutable image tag e.g. 'latest', 'master' or 'dev[elopment]' is not allowed."
|
||||
pattern:
|
||||
spec:
|
||||
containers:
|
||||
- image: "!*:[latest|master|dev|development]"
|
@ -0,0 +1,52 @@
|
||||
---
|
||||
apiVersion: kyverno.io/v1
|
||||
kind: ClusterPolicy
|
||||
metadata:
|
||||
name: disallow-default-namespace
|
||||
labels:
|
||||
<<: {{ kubernetes_config.defaults.labels.deployment_labels | from_yaml }}
|
||||
annotations:
|
||||
pod-policies.kyverno.io/autogen-controllers: none
|
||||
policies.kyverno.io/title: Disallow Default Namespace
|
||||
policies.kyverno.io/minversion: 1.6.0
|
||||
policies.kyverno.io/category: Multi-Tenancy
|
||||
policies.kyverno.io/severity: medium
|
||||
policies.kyverno.io/subject: Pod
|
||||
policies.kyverno.io/description: >-
|
||||
Kubernetes Namespaces are an optional feature that provide a way to segment and
|
||||
isolate cluster resources across multiple applications and users. As a best
|
||||
practice, workloads should be isolated with Namespaces. Namespaces should be required
|
||||
and the default (empty) Namespace should not be used. This policy validates that Pods
|
||||
specify a Namespace name other than `default`. Rule auto-generation is disabled here
|
||||
due to Pod controllers need to specify the `namespace` field under the top-level `metadata`
|
||||
object and not at the Pod template level.
|
||||
spec:
|
||||
#failurePolicy: Fail
|
||||
validationFailureAction: Audit
|
||||
background: true
|
||||
rules:
|
||||
- name: validate-namespace
|
||||
match:
|
||||
any:
|
||||
- resources:
|
||||
kinds:
|
||||
- Pod
|
||||
validate:
|
||||
message: "Using 'default' namespace is not allowed."
|
||||
pattern:
|
||||
metadata:
|
||||
namespace: "!default"
|
||||
- name: validate-podcontroller-namespace
|
||||
match:
|
||||
any:
|
||||
- resources:
|
||||
kinds:
|
||||
- DaemonSet
|
||||
- Deployment
|
||||
- Job
|
||||
- StatefulSet
|
||||
validate:
|
||||
message: "Using 'default' namespace is not allowed for pod controllers."
|
||||
pattern:
|
||||
metadata:
|
||||
namespace: "!default"
|
@ -0,0 +1,48 @@
|
||||
---
|
||||
apiVersion: kyverno.io/v1
|
||||
kind: ClusterPolicy
|
||||
metadata:
|
||||
name: spread-pods
|
||||
labels:
|
||||
<<: {{ kubernetes_config.defaults.labels.deployment_labels | from_yaml }}
|
||||
annotations:
|
||||
policies.kyverno.io/title: Spread Pods Across Nodes
|
||||
policies.kyverno.io/category: Sample
|
||||
policies.kyverno.io/subject: Deployment, Pod
|
||||
policies.kyverno.io/minversion: 1.6.0
|
||||
policies.kyverno.io/description: >-
|
||||
Deployments to a Kubernetes cluster with multiple availability zones often need to
|
||||
distribute those replicas to align with those zones to ensure site-level failures
|
||||
do not impact availability. This policy matches Deployments with the label
|
||||
`distributed=required` and mutates them to spread Pods across zones.
|
||||
spec:
|
||||
generateExistingOnPolicyUpdate: true
|
||||
background: true
|
||||
rules:
|
||||
- name: spread-pods-across-nodes
|
||||
# Matches any Deployment with the label `distributed=required`
|
||||
match:
|
||||
any:
|
||||
- resources:
|
||||
kinds:
|
||||
- Deployment
|
||||
- StatefulSet
|
||||
preconditions:
|
||||
all:
|
||||
- key: "{{ '{{ request.object.spec.replicas }}' }}"
|
||||
operator: GreaterThanOrEquals
|
||||
value: 2
|
||||
# Mutates the incoming Deployment.
|
||||
mutate:
|
||||
patchStrategicMerge:
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
# Adds the topologySpreadConstraints field if non-existent in the request.
|
||||
+(topologySpreadConstraints):
|
||||
- maxSkew: 1
|
||||
topologyKey: kubernetes.io/hostname
|
||||
whenUnsatisfiable: ScheduleAnyway
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: "{% raw %} '{{ request.object.metadata.labels.\"app.kubernetes.io/name\" }}' {% endraw %}"
|
@ -0,0 +1,38 @@
|
||||
# apiVersion: networking.k8s.io/v1
|
||||
# kind: NetworkPolicy
|
||||
# metadata:
|
||||
# name: kube-metrics
|
||||
# namespace: kube-metrics
|
||||
# labels:
|
||||
# app.kubernetes.io/name: kube-metrics
|
||||
# # app.kubernetes.io/instance: { .Release.Name }}
|
||||
# # app.kubernetes.io/version: { .Chart.Version | quote }}
|
||||
# # app.kubernetes.io/managed-by: { .Release.Service }}
|
||||
# app.kubernetes.io/component: loki
|
||||
# app.kubernetes.io/part-of: metrics
|
||||
|
||||
# spec:
|
||||
# egress:
|
||||
# - to:
|
||||
# #- podSelector:
|
||||
# - namespaceSelector:
|
||||
# matchLabels:
|
||||
# kubernetes.io/metadata.name: "default"
|
||||
# ports:
|
||||
# - port: 443
|
||||
# protocol: TCP
|
||||
# # ingress:
|
||||
# # - from:
|
||||
# # #- podSelector:
|
||||
# # - namespaceSelector:
|
||||
# # matchLabels:
|
||||
# # #app.kubernetes.io/name: prometheus
|
||||
# # #app.kubernetes.io/instance: k8s
|
||||
# # #app.kubernetes.io/managed-by: prometheus-operator
|
||||
# # app.kubernetes.io/name: grafana-agent
|
||||
# # #app.kubernetes.io/part-of: kube-prometheus
|
||||
|
||||
# # #app: grafana
|
||||
# policyTypes:
|
||||
# - Egress
|
||||
# #- Ingress
|
@ -0,0 +1,6 @@
|
||||
---
|
||||
apiVersion: operator.tigera.io/v1
|
||||
kind: APIServer
|
||||
metadata:
|
||||
name: default
|
||||
spec: {}
|
@ -0,0 +1,11 @@
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kubernetes-services-endpoint
|
||||
namespace: tigera-operator
|
||||
data:
|
||||
KUBERNETES_SERVICE_HOST: "
|
||||
{%- set octet = kubernetes_config.cluster.networking.ServiceSubnet | default(nfc_role_kubernetes_service_subnet) | split('.') -%}
|
||||
{{- octet[0] }}.{{- octet[1] }}.{{- octet[2] }}.1"
|
||||
KUBERNETES_SERVICE_PORT: '443'
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,17 @@
|
||||
---
|
||||
|
||||
apiVersion: crd.projectcalico.org/v1
|
||||
kind: FelixConfiguration
|
||||
metadata:
|
||||
name: default
|
||||
spec:
|
||||
# bpfConnectTimeLoadBalancing: TCP
|
||||
# bpfExternalServiceMode: DSR
|
||||
# bpfHostNetworkedNATWithoutCTLB: Enabled
|
||||
bpfLogLevel: ""
|
||||
floatingIPs: Disabled
|
||||
healthPort: 9099
|
||||
logSeverityScreen: Info
|
||||
reportingInterval: 0s
|
||||
wireguardEnabled: true
|
||||
wireguardEnabledV6: true
|
@ -0,0 +1,16 @@
|
||||
---
|
||||
|
||||
apiVersion: crd.projectcalico.org/v1
|
||||
kind: IPPool
|
||||
metadata:
|
||||
name: default-ipv4-ippool
|
||||
spec:
|
||||
allowedUses:
|
||||
- Workload
|
||||
- Tunnel
|
||||
blockSize: 26
|
||||
cidr: {{ kubernetes_config.cluster.networking.podSubnet | default(nfc_role_kubernetes_pod_subnet) }}
|
||||
ipipMode: Never
|
||||
natOutgoing: true
|
||||
nodeSelector: all()
|
||||
vxlanMode: Always
|
@ -0,0 +1,53 @@
|
||||
---
|
||||
|
||||
apiVersion: operator.tigera.io/v1
|
||||
kind: Installation
|
||||
metadata:
|
||||
name: default
|
||||
spec:
|
||||
calicoNetwork:
|
||||
bgp: Disabled
|
||||
containerIPForwarding: Enabled
|
||||
hostPorts: Enabled
|
||||
ipPools:
|
||||
- blockSize: 26
|
||||
cidr: {{ kubernetes_config.cluster.networking.podSubnet | default(nfc_role_kubernetes_pod_subnet) }}
|
||||
disableBGPExport: false
|
||||
encapsulation: VXLAN
|
||||
natOutgoing: Enabled
|
||||
nodeSelector: all()
|
||||
# linuxDataplane: Iptables
|
||||
linuxDataplane: BPF
|
||||
mtu: 0
|
||||
multiInterfaceMode: None
|
||||
nodeAddressAutodetectionV4:
|
||||
kubernetes: NodeInternalIP
|
||||
cni:
|
||||
ipam:
|
||||
type: Calico
|
||||
type: Calico
|
||||
componentResources:
|
||||
- componentName: Node
|
||||
resourceRequirements:
|
||||
requests:
|
||||
cpu: 250m
|
||||
controlPlaneReplicas: 3
|
||||
flexVolumePath: None
|
||||
kubeletVolumePluginPath: None
|
||||
nodeUpdateStrategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 0
|
||||
maxUnavailable: 1
|
||||
type: RollingUpdate
|
||||
nonPrivileged: Disabled
|
||||
serviceCIDRs:
|
||||
- {{ kubernetes_config.cluster.networking.ServiceSubnet | default(nfc_role_kubernetes_service_subnet) }}
|
||||
typhaDeployment:
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
tolerations:
|
||||
- effect: NoExecute
|
||||
key: CriticalAddonsOnly
|
||||
value: "true"
|
||||
variant: Calico
|
4992
roles/nfc_kubernetes/templates/calico.yaml.j2
Normal file
4992
roles/nfc_kubernetes/templates/calico.yaml.j2
Normal file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,20 @@
|
||||
{
|
||||
"cniVersion": "0.3.1",
|
||||
"name": "crio",
|
||||
"type": "bridge",
|
||||
"bridge": "cni0",
|
||||
"isGateway": true,
|
||||
"ipMasq": true,
|
||||
"hairpinMode": true,
|
||||
"ipam": {
|
||||
"type": "host-local",
|
||||
"routes": [
|
||||
{ "dst": "0.0.0.0/0" },
|
||||
{ "dst": "1100:200::1/24" }
|
||||
],
|
||||
"ranges": [
|
||||
[{ "subnet": "{{ KubernetesPodSubnet }}" }],
|
||||
[{ "subnet": "1100:200::/24" }]
|
||||
]
|
||||
}
|
||||
}
|
328
roles/nfc_kubernetes/templates/iptables-kubernetes.rules.j2
Normal file
328
roles/nfc_kubernetes/templates/iptables-kubernetes.rules.j2
Normal file
@ -0,0 +1,328 @@
|
||||
#
|
||||
# IP Tables Firewall Rules for Kubernetes
|
||||
#
|
||||
# Managed By ansible/collection/kubernetes
|
||||
#
|
||||
# Dont edit this file directly as it will be overwritten. To grant a host API access
|
||||
# edit the cluster config, adding the hostname/ip to path kubernetes_config.cluster.access
|
||||
#
|
||||
# This file is periodicly called by cron
|
||||
#
|
||||
|
||||
{% set data = namespace(firewall_rules=[]) -%}
|
||||
|
||||
{%- if ansible_host is regex('^[a-z]') and ':' not in ansible_host -%} {#- Convert DNs name to IP Address -#}
|
||||
|
||||
{%- if ipv6 | default(false) -%}
|
||||
|
||||
{%- set ansible_host = query('community.dns.lookup', ansible_host + '.', type='AAAA' ) -%}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- set ansible_host = query('community.dns.lookup', ansible_host + '.', type='A' ) -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if ansible_host | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
|
||||
{%- set ansible_host = ansible_host | from_yaml_all | list -%}
|
||||
|
||||
{%- set ansible_host = ansible_host[0] -%}
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- for kubernetes_host in groups[kubernetes_config.cluster.group_name | default('me_is_optional')] | default([]) -%}
|
||||
|
||||
{%- set kubernetes_host = hostvars[kubernetes_host].ansible_host -%}
|
||||
|
||||
{%- if kubernetes_host is regex('^[a-z]') and ':' not in kubernetes_host -%} {#- Convert DNs name to IP Address -#}
|
||||
|
||||
{%- if ipv6 | default(false) -%}
|
||||
|
||||
{%- set kubernetes_host = query('community.dns.lookup', kubernetes_host + '.', type='AAAA' ) -%}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- set kubernetes_host = query('community.dns.lookup', kubernetes_host + '.', type='A' ) -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if
|
||||
kubernetes_host is iterable
|
||||
and
|
||||
kubernetes_host is not string
|
||||
-%} {#- Convert dns lookup to list, and select the first item -#}
|
||||
{%- set kubernetes_host = kubernetes_host | from_yaml_all | list -%}
|
||||
|
||||
{%- set kubernetes_host = kubernetes_host[0] | default('') -%}
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if kubernetes_host != '' -%}
|
||||
|
||||
{%- for master_host in groups['kubernetes_master'] | default([]) -%}
|
||||
|
||||
{%- if master_host in groups[kubernetes_config.cluster.group_name | default('me_is_optional')] | default([]) -%}
|
||||
|
||||
{%- set master_host = hostvars[master_host].ansible_host -%}
|
||||
|
||||
{%- if master_host is regex('^[a-z]') and ':' not in master_host -%} {#- Convert DNs name to IP Address -#}
|
||||
|
||||
{%- if ipv6 | default(false) -%}
|
||||
|
||||
{%- set master_host = query('community.dns.lookup', master_host + '.', type='AAAA' ) -%}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- set master_host = query('community.dns.lookup', master_host + '.', type='A' ) -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if master_host | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
|
||||
{%- set master_host = master_host | from_yaml_all | list -%}
|
||||
|
||||
{%- set master_host = master_host[0] -%}
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
|
||||
{%- if nfc_role_kubernetes_master | default(false) | bool -%}
|
||||
|
||||
{%- if
|
||||
master_host == kubernetes_host
|
||||
and
|
||||
master_host != ansible_host
|
||||
and
|
||||
(
|
||||
(
|
||||
ipv6 | default(false)
|
||||
and
|
||||
':' in master_host
|
||||
)
|
||||
or
|
||||
(
|
||||
not ipv6 | default(false)
|
||||
and
|
||||
'.' in master_host
|
||||
)
|
||||
)
|
||||
-%}
|
||||
|
||||
{#- master hosts only -#}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-embedded-etcd -s ' + master_host + ' -j ACCEPT'] -%}
|
||||
{# {%- set data.firewall_rules = data.firewall_rules + ['-I INPUT -s ' + master_host + ' -p tcp -m multiport --dports 2380 -j ACCEPT'] -%} #}
|
||||
|
||||
{%- if '-I kubernetes-api -s ' + master_host + ' -j ACCEPT' not in data.firewall_rules -%}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-api -s ' + master_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endfor -%}
|
||||
|
||||
{%- if
|
||||
ansible_host != kubernetes_host
|
||||
and
|
||||
(
|
||||
(
|
||||
ipv6 | default(false)
|
||||
and
|
||||
':' in kubernetes_host
|
||||
)
|
||||
or
|
||||
(
|
||||
not ipv6 | default(false)
|
||||
and
|
||||
'.' in kubernetes_host
|
||||
)
|
||||
)
|
||||
-%}
|
||||
|
||||
{#- All cluster Hosts -#}
|
||||
|
||||
{%- if
|
||||
nfc_role_kubernetes_master | default(false) | bool
|
||||
and
|
||||
kubernetes_host not in groups['kubernetes_master']
|
||||
and
|
||||
'-I kubernetes-api -s ' + kubernetes_host + ' -j ACCEPT' not in data.firewall_rules
|
||||
-%}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-api -s ' + kubernetes_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-flannel-vxlan -s ' + kubernetes_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-kubelet-metrics -s ' + kubernetes_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-flannel-wg-four -s ' + kubernetes_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- if false -%}{# see IPv6 is disabled #}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-flannel-wg-six -s ' + kubernetes_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if false -%}{# see Installation-manifest-Calico_Cluster.yaml.j2 bgp is disabled #}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-calico-bgp -s ' + kubernetes_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-calico-typha -s ' + kubernetes_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- if nfc_kubernetes_enable_metallb | default(false) -%}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I metallb-l2-tcp -s ' + kubernetes_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I metallb-l2-udp -s ' + kubernetes_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endfor -%}
|
||||
|
||||
{%- if nfc_role_kubernetes_master | default(false) | bool -%}
|
||||
|
||||
{%- if host_external_ip is defined -%}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-api -s ' + host_external_ip + ' -m comment --comment "hosts configured external IP" -j ACCEPT'] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- for api_client in kubernetes_config.cluster.access | default([]) -%}
|
||||
|
||||
{%- if api_client is regex('^[a-z]') and ':' not in api_client -%} {#- Convert DNs name to IP Address -#}
|
||||
|
||||
{%- set api_client_dns_name = api_client -%}
|
||||
|
||||
{%- if ipv6 | default(false) -%}
|
||||
|
||||
{%- set api_client = query('community.dns.lookup', api_client + '.', type='AAAA' ) -%}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- set api_client = query('community.dns.lookup', api_client + '.', type='A' ) -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if api_client | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
|
||||
|
||||
{%- set api_client = api_client | from_yaml_all | list -%}
|
||||
|
||||
{%- set api_client = api_client[0] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
|
||||
{%- if
|
||||
api_client != ansible_host
|
||||
and
|
||||
(
|
||||
(
|
||||
ipv6 | default(false)
|
||||
and
|
||||
':' in api_client
|
||||
)
|
||||
or
|
||||
(
|
||||
not ipv6 | default(false)
|
||||
and
|
||||
'.' in api_client
|
||||
)
|
||||
)
|
||||
-%}
|
||||
|
||||
{#- Hosts allowed to access API -#}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-api -s ' + api_client + ' -m comment --comment "host: ' + api_client_dns_name | default(api_client) + '" -j ACCEPT'] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endfor %}
|
||||
|
||||
{%- endif %}
|
||||
|
||||
*filter
|
||||
|
||||
{# -N kubernetes-embedded-etcd
|
||||
-A kubernetes-embedded-etcd -j RETURN
|
||||
|
||||
-A INPUT -p tcp -m multiport --dports 2379,2380 -m comment --comment "etcd. Servers only" -j kubernetes-embedded-etcd
|
||||
|
||||
|
||||
-N kubernetes-api
|
||||
-A kubernetes-api -j RETURN
|
||||
|
||||
-A INPUT -p tcp --dport 6443 -m comment --comment "Kubernetes API access. All Cluster hosts and end users" -j kubernetes-api
|
||||
|
||||
|
||||
-N kubernetes-flannel-vxlan
|
||||
-A kubernetes-flannel-vxlan -j RETURN
|
||||
|
||||
-A INPUT -p udp --dport 8472 -m comment --comment "Flannel. All cluster hosts" -j kubernetes-flannel-vxlan
|
||||
|
||||
|
||||
-N kubernetes-kubelet-metrics
|
||||
-A kubernetes-kubelet-metrics -j RETURN
|
||||
|
||||
-A INPUT -p tcp --dport 10250 -m comment --comment "Kubernetes Metrics. All cluster hosts" -j kubernetes-kubelet-metrics
|
||||
|
||||
|
||||
-N kubernetes-flannel-wg-four
|
||||
-A kubernetes-flannel-wg-four -j RETURN
|
||||
|
||||
-A INPUT -p udp --dport 51820 -m comment --comment "Flannel Wiregaurd IPv4. All cluster hosts" -j kubernetes-flannel-wg-four
|
||||
|
||||
|
||||
-N kubernetes-flannel-wg-six
|
||||
-A kubernetes-flannel-wg-six -j RETURN
|
||||
|
||||
-A INPUT -p udp --dport 51821 -m comment --comment "Flannel Wiregaurd IPv6. All cluster hosts" -j kubernetes-flannel-wg-six #}
|
||||
|
||||
|
||||
{% if data.firewall_rules | length | int > 0 -%}
|
||||
{% for rule in data.firewall_rules -%}
|
||||
{{ rule }}
|
||||
{% endfor -%}
|
||||
{% endif -%}
|
||||
|
||||
{#- #-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 6443 -j ACCEPT
|
||||
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 179 -j ACCEPT
|
||||
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 10250 -j ACCEPT
|
||||
|
||||
#-I INPUT -s 192.168.1.0/24 -p udp -m multiport --dports 4789 -j ACCEPT
|
||||
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 2379 -j ACCEPT
|
||||
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 2380 -j ACCEPT
|
||||
|
||||
|
||||
-I INPUT -p tcp -m multiport --dports 6443 -j ACCEPT
|
||||
-I INPUT -p tcp -m multiport --dports 179 -j ACCEPT
|
||||
-I INPUT -p tcp -m multiport --dports 10250 -j ACCEPT
|
||||
|
||||
-I INPUT -p udp -m multiport --dports 4789 -j ACCEPT
|
||||
-I INPUT -p tcp -m multiport --dports 2379 -j ACCEPT
|
||||
-I INPUT -p tcp -m multiport --dports 2380 -j ACCEPT #}
|
||||
|
||||
COMMIT
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
{# iptables -I kubernetes-api -s nww-au1.networkedweb.com -j ACCEPT #}
|
255
roles/nfc_kubernetes/templates/k3s-config.yaml.j2
Normal file
255
roles/nfc_kubernetes/templates/k3s-config.yaml.j2
Normal file
@ -0,0 +1,255 @@
|
||||
#
|
||||
# K3s Configuration for running Kubernetes
|
||||
#
|
||||
# Managed By ansible/role/nfc_kubernetes
|
||||
#
|
||||
# Don't edit this file directly as it will be overwritten.
|
||||
#
|
||||
|
||||
{%- if
|
||||
nfc_role_kubernetes_master
|
||||
or
|
||||
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
-%}
|
||||
|
||||
{%
|
||||
|
||||
set kube_apiserver_arg = [
|
||||
"audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log",
|
||||
"audit-log-maxage=" + kube_apiserver_arg_audit_log_maxage | string,
|
||||
"audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml",
|
||||
]
|
||||
|
||||
|
||||
-%}
|
||||
{%
|
||||
set servers_config = {
|
||||
"cluster-cidr": nfc_role_kubernetes_pod_subnet,
|
||||
"disable": [
|
||||
"traefik"
|
||||
],
|
||||
"disable-network-policy": true,
|
||||
"flannel-backend": "none",
|
||||
"service-cidr": nfc_role_kubernetes_service_subnet
|
||||
}
|
||||
-%}
|
||||
|
||||
{%- if nfc_role_kubernetes_etcd_enabled -%}
|
||||
|
||||
{%- set servers_config = servers_config | combine({
|
||||
"etcd-snapshot-retention": kubernetes_etcd_snapshot_retention | int,
|
||||
"etcd-snapshot-schedule-cron": kubernetes_etcd_snapshot_cron_schedule | string,
|
||||
}) -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if
|
||||
kubernetes_config.cluster.domain_name | default(nfc_role_kubernetes_cluster_domain) is defined
|
||||
and
|
||||
kubernetes_config.cluster.domain_name | default(nfc_role_kubernetes_cluster_domain) != ''
|
||||
-%}
|
||||
|
||||
{%- set servers_config = servers_config | combine({
|
||||
"cluster-domain": kubernetes_config.cluster.domain_name | default(nfc_role_kubernetes_cluster_domain)
|
||||
}) -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if kubernetes_config.cluster.oidc.enabled | default(nfc_role_kubernetes_oidc_enabled) | default(false) | bool -%}
|
||||
|
||||
{%-
|
||||
set kube_apiserver_arg = kube_apiserver_arg + [
|
||||
"oidc-client-id=" + kubernetes_config.cluster.oidc.client_id,
|
||||
"oidc-groups-claim=" + kubernetes_config.cluster.oidc.groups_claim,
|
||||
"oidc-issuer-url=" + kubernetes_config.cluster.oidc.issuer_url,
|
||||
"oidc-username-claim=" + kubernetes_config.cluster.oidc.username_claim
|
||||
] -%}
|
||||
|
||||
{%- if kubernetes_config.cluster.oidc.oidc_username_prefix | default('') != '' -%}
|
||||
|
||||
{%- set kube_apiserver_arg = kube_apiserver_arg + [
|
||||
"oidc-username-prefix=" + kubernetes_config.cluster.oidc.oidc_username_prefix
|
||||
] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if kubernetes_config.cluster.oidc.groups_prefix | default('') != '' -%}
|
||||
|
||||
{%- set kube_apiserver_arg = kube_apiserver_arg + [
|
||||
"oidc-groups-prefix=" + kubernetes_config.cluster.oidc.groups_prefix
|
||||
]
|
||||
-%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
|
||||
{%- if (
|
||||
nfc_kubernetes_enable_metallb | default(false)
|
||||
or
|
||||
not nfc_kubernetes_enable_servicelb | default(false)
|
||||
) -%}
|
||||
|
||||
{%- set disable = servers_config.disable + [ "servicelb" ] -%}
|
||||
|
||||
{%
|
||||
set servers_config = servers_config | combine({
|
||||
"disable": disable
|
||||
})
|
||||
-%}
|
||||
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if (
|
||||
not nfc_kubernetes_enable_metallb | default(false)
|
||||
and
|
||||
nfc_kubernetes_enable_servicelb | default(false)
|
||||
) -%}
|
||||
|
||||
{%- set servers_config = servers_config | combine({
|
||||
"servicelb-namespace": kubernetes_config.cluster.networking.service_load_balancer_namespace | default('kube-system')
|
||||
}) -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
|
||||
{# Combine Remaining Server Objects #}
|
||||
|
||||
{%
|
||||
set servers_config = servers_config | combine({
|
||||
"kube-apiserver-arg": kube_apiserver_arg
|
||||
})
|
||||
-%}
|
||||
|
||||
{%- endif -%}
|
||||
{# Eof Server Nodes #}
|
||||
|
||||
{# SoF All Nodes #}
|
||||
|
||||
{%- if inventory_hostname == 'localhost' -%}
|
||||
|
||||
{%- set node_name = hostname_to_check.stdout -%}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- set node_name = inventory_hostname -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%
|
||||
|
||||
set all_nodes_config = {
|
||||
"kubelet-arg": [
|
||||
"system-reserved=cpu=" + kubelet_arg_system_reserved_cpu + ",memory=" + kubelet_arg_system_reserved_memory +
|
||||
",ephemeral-storage=" + kubelet_arg_system_reserved_storage
|
||||
],
|
||||
"node-name": node_name,
|
||||
"resolv-conf": nfc_role_kubernetes_resolv_conf_file,
|
||||
}
|
||||
|
||||
-%}
|
||||
|
||||
|
||||
{%- if groups[kubernetes_config.cluster.group_name | default('make_me_optional')] | default([]) | list | length > 0 -%}
|
||||
|
||||
{%- if node_k3s.installed -%}
|
||||
|
||||
{%- set ns = namespace(server=[]) -%}
|
||||
|
||||
{%- for cluster_node in groups[kubernetes_config.cluster.group_name] -%}
|
||||
|
||||
{%- if cluster_node in groups['kubernetes_master'] | default([]) -%}
|
||||
|
||||
{%- if hostvars[cluster_node].host_external_ip is defined -%}
|
||||
|
||||
{%- if
|
||||
hostvars[cluster_node].host_external_ip != ansible_default_ipv4.address
|
||||
and
|
||||
cluster_node == inventory_hostname
|
||||
-%} {# Server self, use internal ip if external ip exists #}
|
||||
|
||||
{%- set server_node = ansible_default_ipv4.address -%}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- set server_node = hostvars[cluster_node].host_external_ip -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- set server_node = hostvars[cluster_node].ansible_host -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- set ns.server = (ns.server | default([])) + [
|
||||
"https://" + server_node + ":6443"
|
||||
] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endfor -%}
|
||||
|
||||
{%- set all_nodes_config = all_nodes_config | combine({
|
||||
"server": ns.server,
|
||||
}) -%}
|
||||
|
||||
{%- elif
|
||||
kubernetes_config.cluster.prime.name != inventory_hostname
|
||||
and
|
||||
not node_k3s.installed
|
||||
-%}
|
||||
|
||||
{%- set server = (server | default([])) + [
|
||||
"https://" + hostvars[kubernetes_config.cluster.prime.name].ansible_host + ":6443"
|
||||
] -%}
|
||||
|
||||
{%- set all_nodes_config = all_nodes_config | combine({
|
||||
"server": server,
|
||||
}) -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
|
||||
|
||||
{%- if
|
||||
host_external_ip is defined
|
||||
and
|
||||
ansible_default_ipv4.address != host_external_ip
|
||||
-%}
|
||||
|
||||
{%- set all_nodes_config = all_nodes_config | combine({
|
||||
"node-external-ip": host_external_ip
|
||||
}) -%}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- set all_nodes_config = all_nodes_config | combine({
|
||||
"node-ip": ansible_default_ipv4.address
|
||||
}) -%}
|
||||
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{# EoF All Nodes #}
|
||||
|
||||
|
||||
{%- if
|
||||
nfc_role_kubernetes_master
|
||||
or
|
||||
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
-%}
|
||||
|
||||
{%- set servers_config = servers_config | combine( all_nodes_config ) -%}
|
||||
|
||||
{{ servers_config | to_nice_yaml(indent=2) }}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{{ all_nodes_config | to_nice_yaml(indent=2) }}
|
||||
|
||||
{%- endif -%}
|
19
roles/nfc_kubernetes/templates/k3s-registries.yaml.j2
Normal file
19
roles/nfc_kubernetes/templates/k3s-registries.yaml.j2
Normal file
@ -0,0 +1,19 @@
|
||||
#
|
||||
# Private Container Registries for Kubernetes
|
||||
#
|
||||
# Managed By ansible/role/nfc_kubernetes
|
||||
#
|
||||
# Don't edit this file directly as it will be overwritten.
|
||||
#
|
||||
|
||||
{% set registries = kubernetes_private_container_registry | default([]) -%}
|
||||
|
||||
{% if registries | length > 0 %}mirrors:
|
||||
{% for entry in registries %}
|
||||
|
||||
{{ entry.name }}:
|
||||
endpoint:
|
||||
- "{{ entry.url }}"
|
||||
|
||||
{%- endfor %}
|
||||
{% endif %}
|
294
roles/nfc_kubernetes/templates/kubernetes-manifest-rbac.yaml.j2
Normal file
294
roles/nfc_kubernetes/templates/kubernetes-manifest-rbac.yaml.j2
Normal file
@ -0,0 +1,294 @@
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
annotations:
|
||||
authorization/description: >-
|
||||
provide full access to everything.
|
||||
|
||||
Using this Cluster role should be avoided with additional cluster roles
|
||||
created to meet the additional authorization requirements.
|
||||
authorization/target: cluster, namespace
|
||||
labels:
|
||||
app.kubernetes.io/part-of: nfc_kubernetes
|
||||
app.kubernetes.io/managed-by: ansible
|
||||
app.kubernetes.io/version: ''
|
||||
name: authorization:full
|
||||
rules:
|
||||
- apiGroups:
|
||||
- "*"
|
||||
resources:
|
||||
- "*"
|
||||
verbs:
|
||||
- "*"
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
annotations:
|
||||
authorization/description: |-
|
||||
Provide Access for reading ALL non-secret items, this includes reading pod and node metrics.
|
||||
|
||||
This role is designed for users who require access to audit/view/diagnose at either the
|
||||
cluster level `ClusterRoleBinding` or namespace level `RoleBinding`
|
||||
authorization/target: namespace
|
||||
labels:
|
||||
app.kubernetes.io/part-of: nfc_kubernetes
|
||||
app.kubernetes.io/managed-by: ansible
|
||||
app.kubernetes.io/version: ''
|
||||
name: authorization:namespace:read
|
||||
rules:
|
||||
- apiGroups: # Get Metrics
|
||||
- metrics.k8s.io
|
||||
resources:
|
||||
- pods
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups: # Read-only access to resources
|
||||
- "*"
|
||||
resources:
|
||||
- awx
|
||||
- cronjobs
|
||||
- daemonset
|
||||
- deployments
|
||||
- helmcharts
|
||||
- helmchartconfigs
|
||||
- ingress
|
||||
- jobs
|
||||
- namespaces
|
||||
- pods
|
||||
- pv
|
||||
- pvc
|
||||
- serviceaccount
|
||||
- services
|
||||
- statefuleset
|
||||
- storageclasses
|
||||
- configmap
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
annotations:
|
||||
authorization/description: |-
|
||||
Provide access for reading ALL items.
|
||||
|
||||
This role is designed for namespace owners and is intended to be
|
||||
bound to a namespace using a `RoleBinding`
|
||||
authorization/target: namespace
|
||||
labels:
|
||||
app.kubernetes.io/part-of: nfc_kubernetes
|
||||
app.kubernetes.io/managed-by: ansible
|
||||
app.kubernetes.io/version: ''
|
||||
name: authorization:namespace:owner
|
||||
rules:
|
||||
- apiGroups: # Read-Write access to resources
|
||||
- "*"
|
||||
resources:
|
||||
- cronjobs
|
||||
- daemonset
|
||||
- deployments
|
||||
- helmcharts
|
||||
- helmchartconfigs
|
||||
- jobs
|
||||
- pods
|
||||
- pvc
|
||||
- roles
|
||||
- rolebindings
|
||||
- secrets
|
||||
- serviceaccount
|
||||
- services
|
||||
- statefuleset
|
||||
- configmap
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- delete
|
||||
- apiGroups: # Read-Remove access
|
||||
- "*"
|
||||
resources:
|
||||
- ingress
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- delete
|
||||
- apiGroups: # Read access
|
||||
- "*"
|
||||
resources:
|
||||
- storageclasses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
annotations:
|
||||
authorization/description: |-
|
||||
Provide access for adding/editing/removing Ingresses.
|
||||
|
||||
This role is designed for a user who is responsible for the
|
||||
cluster ingress.
|
||||
authorization/target: namespace
|
||||
name: authorization:cluster:ingress-admin
|
||||
rules:
|
||||
- apiGroups:
|
||||
- "*"
|
||||
resources:
|
||||
- pods
|
||||
- nodes
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- delete
|
||||
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: authorization:cluster:view-metrics
|
||||
rules:
|
||||
- apiGroups:
|
||||
- metrics.k8s.io
|
||||
- "" # Without this metrics don't work. this also grants access to view nodes
|
||||
resources:
|
||||
- pods
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
|
||||
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: authorization:read
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: authorization:namespace:read
|
||||
subjects:
|
||||
- kind: Group
|
||||
name: administrators
|
||||
- kind: Group
|
||||
name: technician
|
||||
|
||||
- kind: Group
|
||||
name: NodeRED
|
||||
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: authorization:view-metrics
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: authorization:cluster:view-metrics
|
||||
subjects:
|
||||
- apiGroup: rbac.authorization.k8s.io
|
||||
kind: Group
|
||||
name: administrators
|
||||
- kind: Group
|
||||
name: technician
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: authorization:ingress-admin
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: authorization:cluster:ingress-admin
|
||||
subjects:
|
||||
- apiGroup: rbac.authorization.k8s.io
|
||||
kind: Group
|
||||
name: administrators
|
||||
|
||||
|
||||
# ---
|
||||
# kind: ClusterRoleBinding
|
||||
# apiVersion: rbac.authorization.k8s.io/v1
|
||||
# metadata:
|
||||
# name: authorization:full
|
||||
# roleRef:
|
||||
# apiGroup: rbac.authorization.k8s.io
|
||||
# kind: ClusterRole
|
||||
# name: authorization:full
|
||||
# subjects:
|
||||
# - kind: Group
|
||||
# name: administrators
|
||||
# - kind: Group
|
||||
# name: technician
|
||||
|
||||
|
||||
###################################################################################################################
|
||||
# Namespace role binding
|
||||
|
||||
|
||||
# ---
|
||||
# apiVersion: rbac.authorization.k8s.io/v1
|
||||
# kind: RoleBinding
|
||||
# metadata:
|
||||
# # labels:
|
||||
|
||||
# name: authorization:full
|
||||
# namespace: development
|
||||
# roleRef:
|
||||
# apiGroup: rbac.authorization.k8s.io
|
||||
# kind: Role
|
||||
# name: authorization:full
|
||||
# subjects:
|
||||
# - kind: Group
|
||||
# name: administrators
|
||||
# namespace: development
|
||||
# - kind: Group
|
||||
# name: technician
|
||||
|
||||
# - kind: Group
|
||||
# name: NodeRED
|
||||
|
||||
|
||||
# ---
|
||||
|
||||
# - apiVersion: rbac.authorization.k8s.io/v1
|
||||
# kind: Role
|
||||
# metadata:
|
||||
# labels:
|
||||
# app.kubernetes.io/description: |-
|
||||
# provide full access to the testing namespace
|
||||
# name: authorization:full
|
||||
# namespace: development
|
||||
# rules:
|
||||
# - apiGroups:
|
||||
# - ""
|
||||
# resources:
|
||||
# - ""
|
||||
# verbs:
|
||||
# - add
|
||||
# - delete
|
||||
# - edit
|
||||
# - get
|
||||
# - list
|
||||
# - watch
|
||||
|
16
roles/nfc_kubernetes/templates/kubevirt-cr.yaml.j2
Normal file
16
roles/nfc_kubernetes/templates/kubevirt-cr.yaml.j2
Normal file
@ -0,0 +1,16 @@
|
||||
---
|
||||
apiVersion: kubevirt.io/v1
|
||||
kind: KubeVirt
|
||||
metadata:
|
||||
name: kubevirt
|
||||
namespace: kubevirt
|
||||
spec:
|
||||
certificateRotateStrategy: {}
|
||||
configuration:
|
||||
developerConfiguration:
|
||||
featureGates: []
|
||||
customizeComponents: {}
|
||||
imagePullPolicy: IfNotPresent
|
||||
workloadUpdateStrategy:
|
||||
workloadUpdateMethods:
|
||||
- LiveMigrate
|
7572
roles/nfc_kubernetes/templates/kubevirt-operator.yaml.j2
Normal file
7572
roles/nfc_kubernetes/templates/kubevirt-operator.yaml.j2
Normal file
File diff suppressed because it is too large
Load Diff
90
roles/nfc_kubernetes/vars/firewall_rules.yaml
Normal file
90
roles/nfc_kubernetes/vars/firewall_rules.yaml
Normal file
@ -0,0 +1,90 @@
|
||||
---
|
||||
|
||||
kubernetes_chains:
|
||||
|
||||
- name: kubernetes-embedded-etcd
|
||||
chain: true
|
||||
table: INPUT
|
||||
protocol: tcp
|
||||
dest:
|
||||
port:
|
||||
- '2379'
|
||||
- '2380'
|
||||
comment: etcd. Servers only
|
||||
when: "{{ nfc_role_kubernetes_etcd_enabled }}"
|
||||
|
||||
- name: kubernetes-api
|
||||
chain: true
|
||||
table: INPUT
|
||||
protocol: tcp
|
||||
dest:
|
||||
port: '6443'
|
||||
comment: Kubernetes API access. All Cluster hosts and end users
|
||||
|
||||
- name: kubernetes-calico-bgp
|
||||
chain: true
|
||||
table: INPUT
|
||||
protocol: tcp
|
||||
dest:
|
||||
port: '179'
|
||||
comment: Kubernetes Calico BGP. All Cluster hosts and end users
|
||||
when: false # currently hard set to false. see Installation-manifest-Calico_Cluster.yaml.j2
|
||||
|
||||
- name: kubernetes-flannel-vxlan
|
||||
chain: true
|
||||
table: INPUT
|
||||
protocol: udp
|
||||
dest:
|
||||
port: '4789'
|
||||
comment: Flannel. All cluster hosts
|
||||
|
||||
- name: kubernetes-kubelet-metrics
|
||||
chain: true
|
||||
table: INPUT
|
||||
protocol: tcp
|
||||
dest:
|
||||
port: '10250'
|
||||
comment: Kubernetes Metrics. All cluster hosts
|
||||
|
||||
- name: kubernetes-flannel-wg-four
|
||||
chain: true
|
||||
table: INPUT
|
||||
protocol: udp
|
||||
dest:
|
||||
port: '51820'
|
||||
comment: Flannel Wireguard IPv4. All cluster hosts
|
||||
|
||||
- name: kubernetes-flannel-wg-six
|
||||
chain: true
|
||||
table: INPUT
|
||||
protocol: udp
|
||||
dest:
|
||||
port: '51821'
|
||||
comment: Flannel Wireguard IPv6. All cluster hosts
|
||||
when: false # ipv6 is disabled. see install.yaml sysctrl
|
||||
|
||||
- name: kubernetes-calico-typha
|
||||
chain: true
|
||||
table: INPUT
|
||||
protocol: tcp
|
||||
dest:
|
||||
port: '5473'
|
||||
comment: Calico networking with Typha enabled. Typha agent hosts.
|
||||
|
||||
- name: metallb-l2-tcp
|
||||
chain: true
|
||||
table: INPUT
|
||||
protocol: tcp
|
||||
dest:
|
||||
port: '7946'
|
||||
comment: MetalLB Gossip
|
||||
when: "{{ nfc_kubernetes_enable_metallb }}"
|
||||
|
||||
- name: metallb-l2-udp
|
||||
chain: true
|
||||
table: INPUT
|
||||
protocol: udp
|
||||
dest:
|
||||
port: '7946'
|
||||
comment: MetalLB Gossip
|
||||
when: "{{ nfc_kubernetes_enable_metallb }}"
|
1
website-template
Submodule
1
website-template
Submodule
Submodule website-template added at f5a82d3604
Reference in New Issue
Block a user