Compare commits
206 Commits
Author | SHA1 | Date | |
---|---|---|---|
053d1f17ec | |||
17ff472577 | |||
ec94414383 | |||
1faae0327e | |||
17e3318c3c | |||
89b5593abf | |||
10eae79a74 | |||
0be7080089 | |||
d3666c6825 | |||
4af31ff3ac | |||
74187c7023 | |||
47ac3095b6 | |||
dd4638bc93 | |||
3ed6fd0f4c | |||
beb1bd2006 | |||
4a83550530 | |||
7c54b19b64 | |||
173c840121 | |||
f0f5d686fa | |||
536c6e7b26 | |||
a23bc5e9ee | |||
5444f583e5 | |||
b4ad0a4e61 | |||
c9961973e1 | |||
622338e497 | |||
dec65ed57c | |||
71d1dd884e | |||
7a077dabe0 | |||
16add8a5b8 | |||
1bbbdd23c3 | |||
9552ed7703 | |||
05fc3455da | |||
8f81d10168 | |||
9cdab3446d | |||
d522559277 | |||
3e4a17437c | |||
447bb621cd | |||
32c3f7ab71 | |||
7e86574684 | |||
4d8f2c57d5 | |||
b063db8dc1 | |||
27eaff7547 | |||
d7e9f64161 | |||
826468fc42 | |||
164b59c100 | |||
29a9e696a9 | |||
6a10eb22cc | |||
43c6c940a1 | |||
9d5a078320 | |||
2ec8fe814c | |||
2b041c1cca | |||
af26559485 | |||
cb5a5697c1 | |||
c7a5c7c7e3 | |||
aca7e557a6 | |||
f1d20aac80 | |||
3b760db6e7 | |||
83ddfd4fbf | |||
967829d9e4 | |||
56ac6eb3b4 | |||
283568f72a | |||
c7a3e617f0 | |||
21d0dbefa9 | |||
9dad960208 | |||
96ff6ba860 | |||
edd4d2b434 | |||
9dcea39df6 | |||
c765efe99d | |||
916a3b475b | |||
dc53c7694a | |||
d4efa4c9b3 | |||
b4481d3f27 | |||
315ea4058e | |||
7019150433 | |||
3bd2b88ecb | |||
eabbe49ed9 | |||
5585c1eb0b | |||
37bf447779 | |||
41a59a80d9 | |||
79a64c670d | |||
672b0c03c0 | |||
60054a23ab | |||
88c54d5b59 | |||
7adeb7daee | |||
434a40be1e | |||
7ecd4e21fa | |||
ef90e653df | |||
abb7042cbd | |||
a45fe0c9f9 | |||
623d178196 | |||
b915b1e947 | |||
6c0c18dd7b | |||
e2a438ec8f | |||
d0388fb0fe | |||
b978e86db4 | |||
ff08e57793 | |||
1ef63026e1 | |||
dac14cedde | |||
f017801f7a | |||
18218cd4d1 | |||
45863ecff3 | |||
b43e1dbb80 | |||
efba1ff6c7 | |||
4d02c170e8 | |||
506385f3d8 | |||
ccf5c03a4c | |||
b350b2e188 | |||
2e136ee088 | |||
384ef924ca | |||
54f9ec0c95 | |||
bed1bf3095 | |||
7a017c4e29 | |||
3004f998bf | |||
6d974083cf | |||
9cdc89b1ec | |||
26c0ab1236 | |||
b2f9e5d3ca | |||
be1ddecc33 | |||
2d225fd44d | |||
79d89b3b3a | |||
5edfdf4faf | |||
12a42a3583 | |||
8d8ba0951e | |||
478e4ccfa5 | |||
8919486b6b | |||
5925a26c60 | |||
5ffbd78e2b | |||
988b91f85a | |||
f48f645468 | |||
7049c57bd0 | |||
c6ff60bb14 | |||
e135a8690d | |||
aa2d858ede | |||
e1220b0dac | |||
88d57588fc | |||
681b52b31a | |||
60f7c2d6b6 | |||
a54fbe26f3 | |||
8e3217d1bd | |||
c04b12a714 | |||
26120c3e98 | |||
f2c833893f | |||
0bdd5c66c2 | |||
74cc207947 | |||
440d25295d | |||
c28f0b8ee3 | |||
99badaf7f6 | |||
ea38ddf22b | |||
4a41f7e348 | |||
a31837c803 | |||
7369163195 | |||
59699afb44 | |||
077ce062ee | |||
56bb4557b5 | |||
7d81b897ff | |||
0fb5e27612 | |||
301ed9ad3f | |||
4ce5f37223 | |||
86af4606d7 | |||
4a51210677 | |||
58a95e6781 | |||
37a7718043 | |||
c41e12544b | |||
915cdf5e1e | |||
021e54f328 | |||
f0cf4cd00c | |||
ed1a1acf7e | |||
59a5e0aacf | |||
20dae6ba4d | |||
1b49969a99 | |||
fac3ace5f5 | |||
354fb8946d | |||
3198b5d2f9 | |||
1a0407a901 | |||
c7cd1da431 | |||
cdc06363aa | |||
34432433f3 | |||
21cef1f4c3 | |||
c6581267f6 | |||
2767b9629a | |||
59f50d53df | |||
f09737b21f | |||
6ab17bdc3c | |||
9936cd4499 | |||
0acc7a3cc2 | |||
5278a4996e | |||
d2081284d1 | |||
42ac18e057 | |||
ecc2afee68 | |||
65cb3b9102 | |||
9c4204751e | |||
4d9f9dcdff | |||
ba59dd3057 | |||
c7907bf585 | |||
4a9d98394e | |||
fd547a4c0f | |||
50f48ab5a1 | |||
89b6573247 | |||
4465bcd2c4 | |||
b77cc6a8e9 | |||
26f1f2efe6 | |||
db515d2c1d | |||
1b62a66632 | |||
1319325a4c | |||
76e48fd965 | |||
abc01ce48c |
11
.cz.yaml
11
.cz.yaml
@ -1,7 +1,8 @@
|
||||
---
|
||||
commitizen:
|
||||
bump_message: "build(version): bump version $current_version \u2192 $new_version"
|
||||
changelog_incremental: false
|
||||
name: cz_conventional_commits
|
||||
tag_format: $major.$minor.$patch$prerelease
|
||||
update_changelog_on_bump: true
|
||||
version: 0.1.0
|
||||
prerelease_offset: 1
|
||||
tag_format: $version
|
||||
update_changelog_on_bump: false
|
||||
version: 1.3.0
|
||||
version_scheme: semver
|
||||
|
3
.gitignore
vendored
Normal file
3
.gitignore
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
artifacts/
|
||||
build/
|
||||
*.tar.gz
|
@ -1,21 +1,76 @@
|
||||
---
|
||||
|
||||
variables:
|
||||
ANSIBLE_GALAXY_PACKAGE_NAME: kubernetes
|
||||
MY_PROJECT_ID: "51640029"
|
||||
GIT_SYNC_URL: "https://$GITHUB_USERNAME_ROBOT:$GITHUB_TOKEN_ROBOT@github.com/NoFussComputing/ansible_collection_kubernetes.git"
|
||||
PAGES_ENVIRONMENT_PATH: projects/ansible/collection/kubernetes/
|
||||
RELEASE_ADDITIONAL_ACTIONS_BUMP: ./.gitlab/additional_actions_bump.sh
|
||||
|
||||
|
||||
include:
|
||||
- project: nofusscomputing/projects/gitlab-ci
|
||||
ref: development
|
||||
file:
|
||||
- .gitlab-ci_common.yaml
|
||||
- template/automagic.gitlab-ci.yaml
|
||||
|
||||
variables:
|
||||
MY_PROJECT_ID: "51640029"
|
||||
GIT_SYNC_URL: "https://$GITHUB_USERNAME_ROBOT:$GITHUB_TOKEN_ROBOT@github.com/NoFussComputing/ansible_role_nfc_kubernetes.git"
|
||||
PAGES_ENVIRONMENT_PATH: projects/ansible/roles/kubernetes/
|
||||
- conventional_commits/.gitlab-ci.yml
|
||||
- template/ansible-collection.gitlab-ci.yaml
|
||||
- template/mkdocs-documentation.gitlab-ci.yaml
|
||||
# ToDo: update gitlabCI jobs for collections workflow
|
||||
- git_push_mirror/.gitlab-ci.yml
|
||||
- automation/.gitlab-ci-ansible.yaml
|
||||
|
||||
|
||||
Ansible-roles.Submodule.Deploy:
|
||||
Update Git Submodules:
|
||||
extends: .ansible_playbook_git_submodule
|
||||
|
||||
|
||||
Github (Push --mirror):
|
||||
extends:
|
||||
- .git_push_mirror
|
||||
needs: []
|
||||
|
||||
|
||||
Gitlab Release:
|
||||
extends: .ansible_collection_release
|
||||
needs:
|
||||
- Stage Collection
|
||||
release:
|
||||
tag_name: $CI_COMMIT_TAG
|
||||
description: ./artifacts/release_notes.md
|
||||
name: $CI_COMMIT_TAG
|
||||
assets:
|
||||
links:
|
||||
- name: 'Ansible Galaxy'
|
||||
url: https://galaxy.ansible.com/ui/repo/published/${ANSIBLE_GALAXY_NAMESPACE}/${ANSIBLE_GALAXY_PACKAGE_NAME}/?version=${CI_COMMIT_TAG}
|
||||
|
||||
- name: ${ANSIBLE_GALAXY_NAMESPACE}-${ANSIBLE_GALAXY_PACKAGE_NAME}-${CI_COMMIT_TAG}.tar.gz
|
||||
url: https://galaxy.ansible.com/api/v3/plugin/ansible/content/published/collections/artifacts/${ANSIBLE_GALAXY_NAMESPACE}-${ANSIBLE_GALAXY_PACKAGE_NAME}-${CI_COMMIT_TAG}.tar.gz
|
||||
link_type: package
|
||||
|
||||
- name: Documentation
|
||||
url: https://nofusscomputing.com/${PAGES_ENVIRONMENT_PATH}
|
||||
milestones:
|
||||
- $CI_MERGE_REQUEST_MILESTONE
|
||||
|
||||
|
||||
Website.Submodule.Deploy:
|
||||
extends: .submodule_update_trigger
|
||||
variables:
|
||||
SUBMODULE_UPDATE_TRIGGER_PROJECT: nofusscomputing/projects/ansible/ansible-roles
|
||||
GIT_COMMIT_TYPE: feat
|
||||
GIT_COMMIT_TYPE_CATEGORY: $CI_PROJECT_NAME
|
||||
GIT_CONFIG_SUBMODULE_NAME: nfc_kubernetes
|
||||
SUBMODULE_UPDATE_TRIGGER_PROJECT: nofusscomputing/infrastructure/website
|
||||
environment:
|
||||
url: https://nofusscomputing.com/$PAGES_ENVIRONMENT_PATH
|
||||
name: Documentation
|
||||
rules:
|
||||
- if: # condition_dev_branch_push
|
||||
$CI_COMMIT_BRANCH == "development" &&
|
||||
$CI_PIPELINE_SOURCE == "push"
|
||||
exists:
|
||||
- '{docs/**,pages/**}/*.md'
|
||||
changes:
|
||||
paths:
|
||||
- '{docs/**,pages/**}/*.md'
|
||||
compare_to: 'master'
|
||||
when: always
|
||||
|
||||
- when: never
|
||||
|
3
.gitlab/additional_actions_bump.sh
Normal file
3
.gitlab/additional_actions_bump.sh
Normal file
@ -0,0 +1,3 @@
|
||||
#!/bin/sh
|
||||
|
||||
echo "Nothing to do here!!";
|
14
.vscode/settings.json
vendored
Normal file
14
.vscode/settings.json
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
{
|
||||
"yaml.schemas": {
|
||||
"https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/ansible.json#/$defs/tasks": [
|
||||
"roles/nfc_firewall/tasks/*.yaml",
|
||||
"roles/nfc_firewall/tasks/*/*.yaml",
|
||||
"roles/nfc_firewall/tasks/*/*/*.yaml"
|
||||
],
|
||||
"https://raw.githubusercontent.com/ansible/ansible-lint/main/src/ansiblelint/schemas/vars.json": [
|
||||
"roles/nfc_kubernetes/variables/**.yaml"
|
||||
]
|
||||
},
|
||||
"gitlab.aiAssistedCodeSuggestions.enabled": false,
|
||||
"gitlab.duoChat.enabled": false,
|
||||
}
|
88
CHANGELOG.md
88
CHANGELOG.md
@ -1,34 +1,76 @@
|
||||
## 0.1.0 (2023-10-29)
|
||||
## 1.3.0 (2024-03-18)
|
||||
|
||||
### Bug Fixes
|
||||
### Feat
|
||||
|
||||
- **k3s**: [9ac52ee1](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/commit/9ac52ee165fd364c7091ab3f1e14df365270f532) - use correct variables in conditional clauses [ [!2](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/merge_requests/2) ]
|
||||
- [8272b250](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/commit/8272b2507b298ccec05e6dbaa2a526b5136b8d2d) - uncommented hash tasks as they are required [ [!2](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/merge_requests/2) ]
|
||||
- **install**: [57d268ec](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/commit/57d268ec3cd990ea21979cbafe7421a0af04ea91) - config files only required for prime master [ [!2](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/merge_requests/2) ]
|
||||
- **install**: [0f4a02ca](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/commit/0f4a02cadd24dc1890e57bba5266f17dd44e9766) - restructure and uncommented install steps [ [!2](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/merge_requests/2) ]
|
||||
- [49087753](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/commit/4908775367a657867878111ad7e8a75e5203e492) - dont flush handlers [ [!2](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/merge_requests/2) ]
|
||||
- dont attempt to install if already installed
|
||||
|
||||
### Code Refactor
|
||||
### Fix
|
||||
|
||||
- [93897ea7](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/commit/93897ea7d5d8e11725aa1c285fac64388215d00b) - moved config file deploy to be part of install tasks [ [!2](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/merge_requests/2) ]
|
||||
- **handler**: add missing 'reboot_host' handler
|
||||
- **firewall**: ensure slave nodes can access ALL masters API point
|
||||
- **firewall**: dont add rules for disabled features
|
||||
|
||||
### Continious Integration
|
||||
## 1.2.0 (2024-03-16)
|
||||
|
||||
- [55d5c5d6](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/commit/55d5c5d6943a0794bd73f8701667e85dd653c5ea) - add initial jobs [ [!1](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/merge_requests/1) ]
|
||||
### Feat
|
||||
|
||||
### Documentaton / Guides
|
||||
- **firewall**: use collection nofusscomputing.firewall to configure kubernetes firewall
|
||||
|
||||
- [779be020](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/commit/779be0200e71956a3125332d57ac6e0dc7a4914a) - add to feature list openid [ [!2](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/merge_requests/2) ]
|
||||
- [c3843dde](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/commit/c3843ddef0a6d4f885a989675b79ac5861e21138) - role workflow [ [!2](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/merge_requests/2) [#4](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/issues/4) ]
|
||||
- [b69d5b8a](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/commit/b69d5b8a358e6b024b0afda819af0082c0b87a48) - feature list [ [!2](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/merge_requests/2) [#4](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/issues/4) ]
|
||||
- [60392a56](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/commit/60392a565c53010faca6c6eda15d2c386133a8f7) - restructure for seperate role index and ansible setup [ [!2](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/merge_requests/2) [#4](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/issues/4) ]
|
||||
- [bbfbbedd](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/commit/bbfbbedd11ea5b1fde199899b70cd87119e3a989) - initial docs for rbac [ [!2](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/merge_requests/2) [#4](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/issues/4) ]
|
||||
- [3e785d7d](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/commit/3e785d7db158e41744ad19c4fcab1c11aa23823f) - added other projects to blurb [ [!2](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/merge_requests/2) [#4](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/issues/4) ]
|
||||
- [7abfb703](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/commit/7abfb70320419ab1e98666a16453bb1b0a48426e) - Ansible setup [ [!2](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/merge_requests/2) [#4](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/issues/4) ]
|
||||
- [b588b038](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/commit/b588b0383d3e353a8e487d06b787aed2e28de2d8) - added docs layout [ [!1](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/merge_requests/1) ]
|
||||
### Fix
|
||||
|
||||
### Features
|
||||
- **config**: use correct var name when setting node name
|
||||
|
||||
- **networking**: [60fd25df](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/commit/60fd25df8ec897e74c164d9cc0e49ed07d002d0e) - install and configure wireguard encryption [ [!2](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/merge_requests/2) [#3](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/issues/3) ]
|
||||
## 1.1.2 (2024-03-13)
|
||||
|
||||
## 0.0.1 (2023-10-28)
|
||||
### Fix
|
||||
|
||||
- **readme**: update gitlab links to new loc
|
||||
- **configure**: dont attempt to configure firewall if install=false
|
||||
- **handler**: remove old k8s code causing handler to fail
|
||||
- **handler**: kubernetes restart handler now using updated node type vars
|
||||
- **config**: if hostname=localhost use hostname command to fetch hostname
|
||||
- limit the use of master group
|
||||
- add missing dependency ansible.posix
|
||||
- **install**: use correct var type for packages
|
||||
|
||||
## 1.1.1 (2024-03-13)
|
||||
|
||||
### Fix
|
||||
|
||||
- don't check hostname for localhost
|
||||
|
||||
## 1.1.0 (2024-03-13)
|
||||
|
||||
### Feat
|
||||
|
||||
- add role readme and fix gitlab release job
|
||||
|
||||
## 1.0.1 (2024-03-13)
|
||||
|
||||
### Fix
|
||||
|
||||
- **ci**: ensure correct package name is used
|
||||
|
||||
## 1.0.0 (2024-03-13)
|
||||
|
||||
### Feat
|
||||
|
||||
- **playbook**: add the install playbook
|
||||
|
||||
### Refactor
|
||||
|
||||
- **nfc_kubernetes**: update meta file
|
||||
- remove dependency on role nfc_common
|
||||
- **nfc_kubernetes**: layout role ingress to install prime -> master -> worker nodes as separate groups
|
||||
- **docs**: restructure docs
|
||||
|
||||
## 0.3.0 (2024-03-13)
|
||||
|
||||
### Feat
|
||||
|
||||
- remove old var and update kube version
|
||||
- install helm binary
|
||||
|
||||
### Refactor
|
||||
|
||||
- image var update for calico
|
||||
|
24
CONTRIBUTING.md
Normal file
24
CONTRIBUTING.md
Normal file
@ -0,0 +1,24 @@
|
||||
# Contribution Guide
|
||||
|
||||
|
||||
|
||||
## Updating components with a remote source
|
||||
|
||||
Some components within this role are sourced from a remote source. To update them to the latest release use the following commands.
|
||||
|
||||
> Ensure that before committing the update remote files to the repository, that no features have been removed that were added.
|
||||
|
||||
|
||||
### Kubevirt
|
||||
|
||||
``` bash
|
||||
|
||||
export KUBEVIRT_RELEASE='<kubevirt release i.e. v1.2.0>'
|
||||
|
||||
# From within roles/nfc_kubernetes/templates directory
|
||||
wget https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_RELEASE}/kubevirt-operator.yaml -O kubevirt-operator.yaml.j2
|
||||
|
||||
# From within the roles/nfc_kubernetes/templates directory
|
||||
wget https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_RELEASE}/kubevirt-cr.yaml -O kubevirt-cr.yaml.j2
|
||||
|
||||
```
|
@ -1,39 +0,0 @@
|
||||
# Kubernetes Ansible Playbook
|
||||
|
||||
|
||||
## Additional changes
|
||||
|
||||
- `SystemdCgroup = false` -> `SystemdCgroup = true` [See this comment](https://github.com/kubernetes/kubernetes/issues/110177#issuecomment-1161647736)
|
||||
|
||||
## Tags
|
||||
|
||||
This role has been setup to take advantage of Ansible tags. The use of these tags enables finer control over what tasks are run. By design, when you set a task, only what is required for the tag is run.
|
||||
|
||||
available tags are as follows
|
||||
|
||||
- `containerregistry` apply container/docker registry settings
|
||||
- `firewall` apply firewall settings (firewall name/type independent)
|
||||
- `install` Run every task within the role. this is the same as omitting `--tags`
|
||||
- `iptables` apply iptables settings
|
||||
- `manifest` Apply/remove kubernetes manifests
|
||||
- `namespace` Apply/remove kubernetes namespaces
|
||||
- `nodelabels` Apply/remove kubernetes node labels
|
||||
- `taints` Apply/remove kubernetes taints
|
||||
|
||||
!!! tip
|
||||
if you intend on running the `install` tag, you can omit the `--tags` flag from the ansible tag all togther
|
||||
|
||||
!!! alert
|
||||
the first time this playbook is run if cli switch `--extra-vars "init=true"` is used with `init` either a bool true/false, the manifests will not be applied. this is to enable the kubernetes to be fully setup prior to applying manifests that may prevent successful completion of the play.
|
||||
|
||||
## command Cheatsheet
|
||||
|
||||
- `crictl --runtime-endpoint unix:///run/containerd/containerd.sock images` list all container images on the host
|
||||
|
||||
## Links / References
|
||||
|
||||
- ContainerD Configuration
|
||||
|
||||
- [Registry Configuration](https://github.com/containerd/containerd/blob/7cd72cce99c8d3b938c1b763c2744a0b699028ab/docs/cri/config.md#registry-configuration)
|
||||
|
||||
- [Configuring ContainerD registries](https://github.com/containerd/containerd/blob/7cd72cce99c8d3b938c1b763c2744a0b699028ab/docs/hosts.md#cri)
|
27
README.md
27
README.md
@ -1,34 +1,39 @@
|
||||
<div align="center" width="100%">
|
||||
<span style="text-align: center;">
|
||||
|
||||
|
||||
# No Fuss Computing - Ansible Role: nfc_kubernetes
|
||||
# No Fuss Computing - Ansible Collection Kubernetes
|
||||
|
||||
<br>
|
||||
|
||||

|
||||
|
||||
|
||||
[](https://galaxy.ansible.com/ui/repo/published/nofusscomputing/kubernetes/)
|
||||
|
||||
|
||||
----
|
||||
|
||||
<br>
|
||||
|
||||
  [](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/issues)
|
||||
  [](https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes/-/issues)
|
||||
|
||||
|
||||
|
||||
  
|
||||
  
|
||||
<br>
|
||||
|
||||
This project is hosted on [gitlab](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes) and has a read-only copy hosted on [Github](https://github.com/NofussComputing/ansible_role_nfc_kubernetes).
|
||||
This project is hosted on [gitlab](https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes) and has a read-only copy hosted on [Github](https://github.com/NofussComputing/ansible_collection_kubernetes).
|
||||
|
||||
----
|
||||
|
||||
**Stable Branch**
|
||||
|
||||
 
|
||||
 
|
||||
|
||||
----
|
||||
|
||||
**Development Branch**
|
||||
|
||||
 
|
||||
 
|
||||
|
||||
----
|
||||
<br>
|
||||
@ -37,14 +42,14 @@ This project is hosted on [gitlab](https://gitlab.com/nofusscomputing/projects/a
|
||||
|
||||
links:
|
||||
|
||||
- [Issues](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/issues)
|
||||
- [Issues](https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes/-/issues)
|
||||
|
||||
- [Merge Requests (Pull Requests)](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/merge_requests)
|
||||
- [Merge Requests (Pull Requests)](https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes/-/merge_requests)
|
||||
|
||||
|
||||
|
||||
## Contributing
|
||||
All contributions for this project must conducted from [Gitlab](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes).
|
||||
All contributions for this project must conducted from [Gitlab](https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes).
|
||||
|
||||
For further details on contributing please refer to the [contribution guide](CONTRIBUTING.md).
|
||||
|
||||
|
@ -1,126 +0,0 @@
|
||||
KubernetesPodSubnet: 10.85.0.0/16
|
||||
KubernetesServiceSubnet: 10.86.0.0/16
|
||||
|
||||
|
||||
Kubernetes_Prime: false # Optional, Boolean. Is the current host the Prime master?
|
||||
Kubernetes_Master: false # Optional, Boolean. Is the current host a master host?
|
||||
|
||||
ContainerDioVersion: 1.6.20-1
|
||||
KubernetesVersion: '1.26.2' # must match the repository release version
|
||||
|
||||
KubernetesVersion_k8s_prefix: '-00'
|
||||
KubernetesVersion_k3s_prefix: '+k3s1'
|
||||
|
||||
kubernetes_private_container_registry: [] # Optional, Array. if none use `[]`
|
||||
|
||||
# host_external_ip: '' # Optional, String. External IP Address for host.
|
||||
|
||||
# Optional, Dict. Used to configure Kubernetes with OIDC Authentication.
|
||||
# kubernetes_oidc:
|
||||
# enabled: true # Mandatory, boolen. speaks for itself.
|
||||
# issuer_url: https://domainname.com/realms/realm-name # Mandatory, String. URL of OIDC Provider
|
||||
# client_id: kubernetes-test # Mandatory, string. OIDC Client ID
|
||||
# username_claim: preferred_username # Mandatory, String. Claim name containing username.
|
||||
# username_prefix: oidc # Optional, String. What to prefix to username
|
||||
# groups_claim: roles # Mandatory, String. Claim name containing groups
|
||||
# groups_prefix: '' # Optional, String. string to append to groups
|
||||
|
||||
kubernetes_type: k8s # Mandatory, String. choice K8s | k3s
|
||||
|
||||
|
||||
nfc_kubernetes:
|
||||
enable_firewall: true # Optional, bool enable firewall rules from role 'nfc_firewall'
|
||||
|
||||
|
||||
k3s:
|
||||
files:
|
||||
# - name: config.yaml
|
||||
# path: /etc/rancher/k3s
|
||||
# content: |
|
||||
# flannel-backend: none
|
||||
# cluster-cidr: "{{ KubernetesPodSubnet }}"
|
||||
# cluster-init: true
|
||||
# {% if not Kubernetes_Prime | default(false) | bool -%}server: https://{{ hostvars[kubernetes_config.cluster.prime.name].ansible_host }}:6443{% endif %}
|
||||
# service-cidr: "{{ KubernetesServiceSubnet }}"
|
||||
# disable-network-policy: true
|
||||
# disable:
|
||||
# - traefik
|
||||
# kube-apiserver-arg:
|
||||
# - audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log
|
||||
# - audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml
|
||||
# # - admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml
|
||||
# {% if kubernetes_oidc.enabled | default(false) | bool -%}
|
||||
# - oidc-issuer-url={{ kubernetes_oidc.issuer_url }}
|
||||
# - oidc-client-id={{ kubernetes_oidc.client_id }}
|
||||
# - oidc-username-claim={{ kubernetes_oidc.username_claim }}
|
||||
# - {% if kubernetes_oidc.oidc_username_prefix | default('') != '' %}oidc-username-prefix={{ kubernetes_oidc.oidc_username_prefix }}{% endif %}
|
||||
# - oidc-groups-claim={{ kubernetes_oidc.groups_claim }}
|
||||
# {% if kubernetes_oidc.groups_prefix | default('') != '' %}- oidc-groups-prefix={{ kubernetes_oidc.groups_prefix }}{% endif %}
|
||||
# {% endif %}
|
||||
# node-external-ip: "{{ host_external_ip }}"
|
||||
|
||||
- name: audit.yaml
|
||||
path: /var/lib/rancher/k3s/server
|
||||
content: |
|
||||
apiVersion: audit.k8s.io/v1
|
||||
kind: Policy
|
||||
rules:
|
||||
- level: Request
|
||||
|
||||
- name: 90-kubelet.conf
|
||||
path: /etc/sysctl.d
|
||||
content: |
|
||||
vm.panic_on_oom=0
|
||||
vm.overcommit_memory=1
|
||||
kernel.panic=10
|
||||
kernel.panic_on_oops=1
|
||||
kernel.keys.root_maxbytes=25000000
|
||||
|
||||
- name: psa.yaml
|
||||
path: /var/lib/rancher/k3s/server
|
||||
content: ""
|
||||
# apiVersion: apiserver.config.k8s.io/v1
|
||||
# kind: AdmissionConfiguration
|
||||
# plugins:
|
||||
# - name: PodSecurity
|
||||
# configuration:
|
||||
# apiVersion: pod-security.admission.config.k8s.io/v1beta1
|
||||
# kind: PodSecurityConfiguration
|
||||
# defaults:
|
||||
# enforce: "restricted"
|
||||
# enforce-version: "latest"
|
||||
# audit: "restricted"
|
||||
# audit-version: "latest"
|
||||
# warn: "restricted"
|
||||
# warn-version: "latest"
|
||||
# exemptions:
|
||||
# usernames: []
|
||||
# runtimeClasses: []
|
||||
# namespaces: [kube-system]
|
||||
|
||||
|
||||
|
||||
#############################################################################################
|
||||
# Cluster Config when stored in Inventory
|
||||
#
|
||||
# One required per cluster. recommend creating one ansible host group per cluster.
|
||||
#############################################################################################
|
||||
# kubernetes_config: # Dict. Cluster Config
|
||||
# cluster:
|
||||
# access: # Mandatory. List, DNS host name or IPv4/IPv6 Address.
|
||||
# # if none use '[]'
|
||||
# - 'my.dnshostname.com'
|
||||
# - '2001:4860:4860::8888'
|
||||
# - '192.168.1.1'
|
||||
# Name: earth # Mandatory, String. Cluster Name
|
||||
# prime:
|
||||
# name: k3s-prod # Mandatory, String. Ansible inventory_host that will
|
||||
# # act as the prime master node.
|
||||
# networking:
|
||||
# encrypt: true # Optional, Boolean. default `false`. Install wireguard for inter-node encryption
|
||||
# podSubnet: 172.16.70.0/24 # Mandatory, String. CIDR
|
||||
# ServiceSubnet: 172.16.72.0/24 # Mandatory, String. CIDR
|
||||
# # Mandatory, String. Token to join nodes to the cluster
|
||||
# node_token: !vault |
|
||||
# $ANSIBLE_VAULT;1.2;AES256;kubernetes/cluster/production
|
||||
# {rest_of encrypted key}
|
1
docs/projects/ansible/collection/firewall/index.md
Normal file
1
docs/projects/ansible/collection/firewall/index.md
Normal file
@ -0,0 +1 @@
|
||||
linked to
|
67
docs/projects/ansible/collection/kubernetes/index.md
Normal file
67
docs/projects/ansible/collection/kubernetes/index.md
Normal file
@ -0,0 +1,67 @@
|
||||
---
|
||||
title: Kubernetes
|
||||
description: No Fuss Computings Ansible Collection Kubernetes
|
||||
date: 2024-03-13
|
||||
template: project.html
|
||||
about: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes
|
||||
---
|
||||
|
||||
<span style="text-align: center;">
|
||||
|
||||

|
||||
|
||||
|
||||
 
|
||||
|
||||
|
||||
[](https://galaxy.ansible.com/ui/repo/published/nofusscomputing/kubernetes/)
|
||||
|
||||
|
||||
</span>
|
||||
|
||||
This Ansible Collection is for installing a K3s Kubernetes cluster, both single and multi-node cluster deployments are supported. In addition to installing and configuring the firewall for the node. for further information on the firewall config please see the [firewall docs](../firewall/index.md)
|
||||
|
||||
|
||||
## Installation
|
||||
|
||||
To install this collection use `ansible-galaxy collection install nofusscomputing.kubernetes`
|
||||
|
||||
|
||||
## Features
|
||||
|
||||
Most of the features of this collection are from the included role `nfc_kubernetes`, please [view its page for feature details](roles/nfc_kubernetes/index.md).
|
||||
|
||||
|
||||
## Using this collection
|
||||
|
||||
This collection has been designed to be a complete and self-contained management tool for a K3s kubernetes cluster.
|
||||
|
||||
## K3s Kubernetes Installation
|
||||
|
||||
By default the install playbook will install to localhost.
|
||||
|
||||
``` bash
|
||||
|
||||
ansible-playbook nofusscomputing.kubernetes.install
|
||||
|
||||
```
|
||||
|
||||
!!! danger
|
||||
By default when the install task is run, The firewall is also configured. The default sets the `FORWARD` and `INPUT` tables to have a policy of `DROP`. Failing to add any required additional rules before installing/configuring kubernetes will cause you to not have remote access to the machine.
|
||||
|
||||
You are encouraged to run `ansible-playbook nofusscomputing.firewall.install` with your rules configured within your inventory first. see the [firewall docs](../firewall/index.md) for more information.
|
||||
|
||||
The install playbook has a dynamic `hosts` key. This has been done to specifically support running the playbook from AWX and being able to populate the field from the survey feature. Order of precedence for the host variable is as follows:
|
||||
|
||||
- `nfc_pb_host` set to any valid value that a playbook `hosts` key can accept
|
||||
|
||||
- `nfc_pb_kubernetes_cluster_name` with the name of the cluster. This variable is appended to string `kubernetes_cluster_` to serve as a group name for the cluster to be installed. i.e. for a cluster called `prime`, the group name would be set to `kubernetes_cluster_prime`
|
||||
|
||||
- `--limit` specified at runtime
|
||||
|
||||
- `localhost`
|
||||
|
||||
For the available variables please view the [nfc_kubernetes role docs](roles/nfc_kubernetes/index.md#default-variables)
|
||||
|
||||
|
||||
|
@ -3,7 +3,7 @@ title: Ansible
|
||||
description: No Fuss Computings Ansible role nfc_kubernetes Ansible docs
|
||||
date: 2023-10-24
|
||||
template: project.html
|
||||
about: https://gitlab.com/nofusscomputing/projects/ansible/roles/kubernetes
|
||||
about: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes
|
||||
---
|
||||
|
||||
This page intends to describe/explain the setup of ansible for this role.
|
||||
@ -25,7 +25,7 @@ There are many ways to layout your inventory within Ansible. To take full advant
|
||||
!!! info Info
|
||||
The nfc_kubernetes role uses this field for any configuration that requires a hostname. You are strongly encouraged to use DNS name and the DNS name be resolveable for each host accessing to the host in question. Using DNS host name is of paramount importance for a host with dynamic DHCP being used.
|
||||
|
||||
- variable `Kubernetes_Master` _boolean_ set for all host that are master nodes.
|
||||
- variable `nfc_role_kubernetes_master` _boolean_ set for all host that are master nodes.
|
||||
|
||||
- hosts that require Kubernetes API access added to variable `kubernetes_config.cluster.access`
|
||||
|
@ -3,14 +3,14 @@ title: Firewall
|
||||
description: No Fuss Computings Ansible role nfc_kubernetes
|
||||
date: 2023-10-24
|
||||
template: project.html
|
||||
about: https://gitlab.com/nofusscomputing/projects/ansible/roles/kubernetes
|
||||
about: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes
|
||||
---
|
||||
|
||||
This role include logic to generate firewall rules for iptables. Both IPv4 and IPv6 rules are generated. to survive reboots or network cable disconects, a script is created and added to the `if-up.d.` This enables that each time the interface is brought up, the firewall rules are applied. For a list of the firewall rules applied see the [K3s documentation](https://docs.k3s.io/installation/requirements#inbound-rules-for-k3s-server-nodes)
|
||||
|
||||
Rules generation workflow:
|
||||
|
||||
- itertes over all kubernetes hosts
|
||||
- iterates over all kubernetes hosts
|
||||
|
||||
- adds rules if host is masters for worker access
|
||||
|
@ -0,0 +1,117 @@
|
||||
---
|
||||
title: Kubernetes
|
||||
description: No Fuss Computings Ansible role nfc_kubernetes
|
||||
date: 2023-10-24
|
||||
template: project.html
|
||||
about: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes
|
||||
---
|
||||
|
||||
This Ansible role is designed to deploy a K3s Kubernetes cluster. Without adding cluster configuration this role will install K3s as a single node cluster. To deploy a multi-node cluster add your configuration, K3s will be installed on all nodes. On completion you will have fully configured cluster in a state ready to use. This role can be used with our [our playbooks](../../../../playbooks/index.md) or comes included, along with the playbook within our [Ansible Execution Environment](../../../../execution_environment/index.md).
|
||||
|
||||
|
||||
## Role Details
|
||||
|
||||
| Item| Value | Description |
|
||||
|:---|:---:|:---|
|
||||
| Dependent Roles | _None_ | |
|
||||
| Optional Roles | _nfc_firewall_ | Used to setup the firewall for kubernetes. |
|
||||
| Idempotent | _Yes_ | |
|
||||
| Stats Available | _Not Yet_ | |
|
||||
| Tags | _Nil_ | |
|
||||
| Requirements | _Gather Facts_ | |
|
||||
| | _become_ | |
|
||||
|
||||
|
||||
## Features
|
||||
|
||||
- CNI Setup, calico including `calicoctl` plugin
|
||||
|
||||
> `kubectl calico ....` instead of `calicoctl ....`
|
||||
|
||||
- Configurable:
|
||||
|
||||
- Container Registries
|
||||
|
||||
- etcd deployment
|
||||
|
||||
- etcd snapshot cron schedule
|
||||
|
||||
- etcd snapshot retention
|
||||
|
||||
- Cluster Domain
|
||||
|
||||
- Configure System reserved CPU, Storage and Memory.
|
||||
|
||||
- Node Labels
|
||||
|
||||
- Node Taints
|
||||
|
||||
- Service Load Balancer Namespace
|
||||
|
||||
- Encryption between nodes (Wireguard)
|
||||
|
||||
- [Firewall configured for kubernetes host](firewall.md)
|
||||
|
||||
- Multi-node Deployment
|
||||
|
||||
- OpenID Connect SSO Authentication
|
||||
|
||||
- [Basic RBAC `ClusterRoles` and Bindings](rbac.md)
|
||||
|
||||
- _[ToDo-#5](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/issues/5)_ Restore backup on fresh install of a cluster
|
||||
|
||||
- Installs OLM for operator subscriptions
|
||||
|
||||
- Install MetalLB
|
||||
|
||||
- Install KubeVirt including `virtctl` plugin
|
||||
|
||||
> `kubectl virt ....` instead of `virtctl ....`
|
||||
|
||||
- Install the Helm Binary
|
||||
|
||||
|
||||
## Role Workflow
|
||||
|
||||
To maximise the chance of a successful deployment, this role first installs/configures the prime master, then the other master(s) and worker nodes, using the following simplified workflow:
|
||||
|
||||
1. Download both install script and k3s binary to ansible controller
|
||||
|
||||
1. copy install script and k3s binary to host
|
||||
|
||||
1. Create required config files needed for installation
|
||||
|
||||
1. _(kubernetes prime master only)_ Add install required config files
|
||||
|
||||
1. Install kubernetes
|
||||
|
||||
1. _(kubernetes prime master only)_ Wait for kubernetes to be ready. Playbook is paused until `true`
|
||||
|
||||
1. Configure Kubernetes
|
||||
|
||||
1. Install Kubevirt
|
||||
|
||||
If the playbook is setup as per [our recommendation](ansible.md) step 2 onwards is first done on master nodes then worker nodes.
|
||||
|
||||
!!! tip
|
||||
If you prefer to manually restart the kubernetes service the following variables can be set to prevent a restart of the kubernetes service
|
||||
|
||||
``` yaml
|
||||
nfc_kubernetes_no_restart: false
|
||||
nfc_kubernetes_no_restart_master: false
|
||||
nfc_kubernetes_no_restart_prime: false
|
||||
nfc_kubernetes_no_restart_slave: false
|
||||
```
|
||||
_See default variables below for explanation of each variable if it's not evident enough._
|
||||
|
||||
|
||||
## Default Variables
|
||||
|
||||
On viewing these variables you will notice there are single dictionary keys prefixed `nfc_role_kubernetes_` and a dictionary of dictionaries `kubernetes_config`. Variables prefixed with `nfc_role_kubernetes_` are for single-node installs, with the `kubernetes_config` dictionary containing all of the information for an entire cluster. The `kubernetes_config` dictionary variables take precedence. Even if you are installing a cluster on multiple nodes, you are still advised to review the variables prefixed with `nfc_role_kubernetes_` as they may still be needed, i.e. setting a node type uses keys `nfc_role_kubernetes_prime`, `nfc_role_kubernetes_master` and `nfc_role_kubernetes_worker`.
|
||||
|
||||
|
||||
``` yaml title="defaults/main.yaml" linenums="1"
|
||||
|
||||
--8<-- "roles/nfc_kubernetes/defaults/main.yml"
|
||||
|
||||
```
|
@ -3,7 +3,7 @@ title: RBAC
|
||||
description: No Fuss Computings Ansible role nfc_kubernetes RBAC documentation.
|
||||
date: 2023-10-29
|
||||
template: project.html
|
||||
about: https://gitlab.com/nofusscomputing/projects/ansible/roles/kubernetes
|
||||
about: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes
|
||||
---
|
||||
|
||||
As part of this role's workflow, a set of Cluster Roles and Cluster Role Bindings are deployed and ready to use. The intent of these roles is to create a default set of roles that only require the authorization system to provide the user's groups. As they have been defined as Cluster Roles you can bind to both cluster and/or namespace.
|
@ -0,0 +1,30 @@
|
||||
---
|
||||
title: Release Notes
|
||||
description: No Fuss Computings Ansible role nfc_kubernetes
|
||||
date: 2024-01-31
|
||||
template: project.html
|
||||
about: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes
|
||||
---
|
||||
|
||||
This document details any changes that have occurred that may impact users of this role. It's a rolling document and will be amended from time to time.
|
||||
|
||||
|
||||
## Changes with an impact
|
||||
|
||||
- _**13 Mar 2024**_ Container Images now a dictionary. This role has two images `kubevirt_operator` and `tigera_operator`.
|
||||
|
||||
- All Images are stored in dictionary `nfc_role_kubernetes_container_images` with each image using its own dictionary with mandatory keys `registry`, `image` and `tag`. This change has been made to cater for those who store their images within their inventory as a dict of dicts. For instance, to use your inventory image declare variable `nfc_role_kubernetes_container_images.kubevirt_operator: my_images.my_kubevirt_dict` as an example.
|
||||
|
||||
- A lot of variables have been updated. To view what has changed, please see `defaults/main.yaml` in [MR !35](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/merge_requests/35)
|
||||
|
||||
- _**31 Jan 2024**_ Calico CNI deployment has been migrated to use the calico operator.
|
||||
|
||||
- All new cluster installations will be deployed with the operator
|
||||
|
||||
- Existing deployments will be required to run a deployment with job tag `operator_migrate_calico` to migrate their deployment to the operator
|
||||
|
||||
- if an issue occurs with the migration it can be rolled back by `kubectl delete -f` for all manifests in the `/var/lib/rancher/k3s/ansible` directory and redeploying with job tag `calico_manifest`. This re-deploys calico using the current manifest.
|
||||
|
||||
- This tag will be removed in the future at no set date.
|
||||
|
||||
- `ServiceLB` / `klipperLB` no longer deploys by default and to deploy it variable `nfc_kubernetes_enable_servicelb` must be set `true`
|
@ -1,59 +0,0 @@
|
||||
---
|
||||
title: Kubernetes
|
||||
description: No Fuss Computings Ansible role nfc_kubernetes
|
||||
date: 2023-10-24
|
||||
template: project.html
|
||||
about: https://gitlab.com/nofusscomputing/projects/ansible/roles/kubernetes
|
||||
---
|
||||
|
||||
This Ansible roles purpose is to install and configure Kubernetes with configuration from code. You can also use [our playbooks](../../playbooks/index.md) to deploy using this role. this is especially useful if you are also using [our Ansible Execution Environment](../../execution_environment/index.md)
|
||||
|
||||
|
||||
## Features
|
||||
|
||||
This role deploys a K3s cluster. In addition it has the following features:
|
||||
|
||||
- CNI Setup
|
||||
|
||||
- Configurable Container Registries
|
||||
|
||||
- _[ToDo-#3](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/issues/3)_ Encryption between nodes (Wireguard)
|
||||
|
||||
- [Firewall configured for kubernetes host](firewall.md)
|
||||
|
||||
- _[ToDo-#2](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/issues/2)_ Multi-node Deployment
|
||||
|
||||
- OpenID Connect SSO Authentication
|
||||
|
||||
- [Basic RBAC `ClusterRoles` and Bindings](rbac.md)
|
||||
|
||||
- _[ToDo-#5](https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/issues/5)_ Restore backup on fresh install of a cluster
|
||||
|
||||
|
||||
## Role Workflow
|
||||
|
||||
The roles workflow is as follows
|
||||
|
||||
1. Download both install script and k3s binary to ansible controller
|
||||
|
||||
1. copy install script and k3s binary to host
|
||||
|
||||
1. Create required config files needed for installation
|
||||
|
||||
1. _(kubernetes prime only)_ Add install required config files
|
||||
|
||||
1. Install kubernetes
|
||||
|
||||
1. Configure Kubernetes
|
||||
|
||||
If the playbook is setup as per [our recommendation](ansible.md) step 2 onwards is first done on master nodes then worker nodes.
|
||||
|
||||
|
||||
## Default Variables
|
||||
|
||||
|
||||
``` yaml title="defaults/main.yaml" linenums="1"
|
||||
|
||||
--8<-- "defaults/main.yaml"
|
||||
|
||||
```
|
86
galaxy.yml
Normal file
86
galaxy.yml
Normal file
@ -0,0 +1,86 @@
|
||||
### REQUIRED
|
||||
# The namespace of the collection. This can be a company/brand/organization or product namespace under which all
|
||||
# content lives. May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with
|
||||
# underscores or numbers and cannot contain consecutive underscores
|
||||
namespace: nofusscomputing
|
||||
|
||||
# The name of the collection. Has the same character restrictions as 'namespace'
|
||||
name: kubernetes
|
||||
|
||||
# The version of the collection. Must be compatible with semantic versioning
|
||||
version: 1.3.0
|
||||
|
||||
# The path to the Markdown (.md) readme file. This path is relative to the root of the collection
|
||||
readme: README.md
|
||||
|
||||
# A list of the collection's content authors. Can be just the name or in the format 'Full Name <email> (url)
|
||||
# @nicks:irc/im.site#channel'
|
||||
authors:
|
||||
- No Fuss Computing
|
||||
|
||||
|
||||
### OPTIONAL but strongly recommended
|
||||
# A short summary description of the collection
|
||||
description: Install a K3s Kubernetes Cluster
|
||||
|
||||
# Either a single license or a list of licenses for content inside of a collection. Ansible Galaxy currently only
|
||||
# accepts L(SPDX,https://spdx.org/licenses/) licenses. This key is mutually exclusive with 'license_file'
|
||||
license:
|
||||
- MIT
|
||||
|
||||
# The path to the license file for the collection. This path is relative to the root of the collection. This key is
|
||||
# mutually exclusive with 'license'
|
||||
license_file: ''
|
||||
|
||||
# A list of tags you want to associate with the collection for indexing/searching. A tag name has the same character
|
||||
# requirements as 'namespace' and 'name'
|
||||
tags:
|
||||
- k3s
|
||||
- kubernetes
|
||||
- tools
|
||||
|
||||
# Collections that this collection requires to be installed for it to be usable. The key of the dict is the
|
||||
# collection label 'namespace.name'. The value is a version range
|
||||
# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version
|
||||
# range specifiers can be set and are separated by ','
|
||||
dependencies:
|
||||
ansible.posix: '1.5.4'
|
||||
kubernetes.core: '3.0.0'
|
||||
nofusscomputing.firewall: '1.0.1'
|
||||
|
||||
|
||||
# The URL of the originating SCM repository
|
||||
repository: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes
|
||||
|
||||
# The URL to any online docs
|
||||
documentation: https://nofusscomputing.com/projects/ansible/collection/kubernetes/
|
||||
|
||||
# The URL to the homepage of the collection/project
|
||||
# homepage: https://example.com
|
||||
|
||||
# The URL to the collection issue tracker
|
||||
issues: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes/-/issues
|
||||
|
||||
# A list of file glob-like patterns used to filter any files or directories that should not be included in the build
|
||||
# artifact. A pattern is matched from the relative path of the file or directory of the collection directory. This
|
||||
# uses 'fnmatch' to match the files or directories. Some directories and files like 'galaxy.yml', '*.pyc', '*.retry',
|
||||
# and '.git' are always filtered. Mutually exclusive with 'manifest'
|
||||
build_ignore:
|
||||
- .vscode
|
||||
- artifacts
|
||||
- docs
|
||||
- .git*
|
||||
- gitlab-ci
|
||||
- website-template
|
||||
- .ansible-lint-ignore
|
||||
- .cz.yaml
|
||||
- .nfc_automation.yaml
|
||||
- dockerfile
|
||||
- mkdocs.yml
|
||||
|
||||
# A dict controlling use of manifest directives used in building the collection artifact. The key 'directives' is a
|
||||
# list of MANIFEST.in style
|
||||
# L(directives,https://packaging.python.org/en/latest/guides/using-manifest-in/#manifest-in-commands). The key
|
||||
# 'omit_default_directives' is a boolean that controls whether the default directives are used. Mutually exclusive
|
||||
# with 'build_ignore'
|
||||
# manifest: null
|
Submodule gitlab-ci updated: 52f4ebda54...a24f352ca3
@ -1,21 +0,0 @@
|
||||
---
|
||||
- name: "restart ContainerD"
|
||||
service:
|
||||
name: containerd
|
||||
state: restarted
|
||||
when: >
|
||||
containerd_config.changed | default(false) | bool
|
||||
and
|
||||
containerd_installed.rc | default(1) | int == 0
|
||||
and
|
||||
kubernetes_type == 'k8s'
|
||||
tags:
|
||||
- configure
|
||||
- install
|
||||
|
||||
|
||||
- name: Restart Kubernetes
|
||||
ansible.builtin.service:
|
||||
name: "{% if kubernetes_type == 'k3s' %}k3s{% else %}kubelet{% endif %}"
|
||||
state: restarted
|
||||
listen: kubernetes_restart
|
52
meta/runtime.yml
Normal file
52
meta/runtime.yml
Normal file
@ -0,0 +1,52 @@
|
||||
---
|
||||
# Collections must specify a minimum required ansible version to upload
|
||||
# to galaxy
|
||||
requires_ansible: '>=2.14.0'
|
||||
|
||||
# Content that Ansible needs to load from another location or that has
|
||||
# been deprecated/removed
|
||||
# plugin_routing:
|
||||
# action:
|
||||
# redirected_plugin_name:
|
||||
# redirect: ns.col.new_location
|
||||
# deprecated_plugin_name:
|
||||
# deprecation:
|
||||
# removal_version: "4.0.0"
|
||||
# warning_text: |
|
||||
# See the porting guide on how to update your playbook to
|
||||
# use ns.col.another_plugin instead.
|
||||
# removed_plugin_name:
|
||||
# tombstone:
|
||||
# removal_version: "2.0.0"
|
||||
# warning_text: |
|
||||
# See the porting guide on how to update your playbook to
|
||||
# use ns.col.another_plugin instead.
|
||||
# become:
|
||||
# cache:
|
||||
# callback:
|
||||
# cliconf:
|
||||
# connection:
|
||||
# doc_fragments:
|
||||
# filter:
|
||||
# httpapi:
|
||||
# inventory:
|
||||
# lookup:
|
||||
# module_utils:
|
||||
# modules:
|
||||
# netconf:
|
||||
# shell:
|
||||
# strategy:
|
||||
# terminal:
|
||||
# test:
|
||||
# vars:
|
||||
|
||||
# Python import statements that Ansible needs to load from another location
|
||||
# import_redirection:
|
||||
# ansible_collections.ns.col.plugins.module_utils.old_location:
|
||||
# redirect: ansible_collections.ns.col.plugins.module_utils.new_location
|
||||
|
||||
# Groups of actions/modules that take a common set of options
|
||||
# action_groups:
|
||||
# group_name:
|
||||
# - module1
|
||||
# - module2
|
24
mkdocs.yml
24
mkdocs.yml
@ -2,9 +2,9 @@ INHERIT: website-template/mkdocs.yml
|
||||
|
||||
docs_dir: 'docs'
|
||||
|
||||
repo_name: Kubernetes Ansible Role
|
||||
repo_url: https://gitlab.com/nofusscomputing/projects/ansible/kubernetes
|
||||
edit_uri: '/-/ide/project/nofusscomputing/projects/ansible/kubernetes/edit/development/-/docs/'
|
||||
repo_name: Kubernetes Ansible Collection
|
||||
repo_url: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes
|
||||
edit_uri: '/-/ide/project/nofusscomputing/projects/ansible/collections/kubernetes/edit/development/-/docs/'
|
||||
|
||||
nav:
|
||||
- Home: index.md
|
||||
@ -29,19 +29,25 @@ nav:
|
||||
|
||||
- projects/ansible/playbooks/index.md
|
||||
|
||||
- Roles:
|
||||
- Collections:
|
||||
|
||||
- projects/ansible/roles/index.md
|
||||
- projects/ansible/collection/index.md
|
||||
|
||||
- Kubernetes:
|
||||
|
||||
- projects/ansible/roles/kubernetes/index.md
|
||||
- projects/ansible/collection/kubernetes/index.md
|
||||
|
||||
- projects/ansible/roles/kubernetes/ansible.md
|
||||
- Role nfc_kubernetes:
|
||||
|
||||
- projects/ansible/roles/kubernetes/firewall.md
|
||||
- projects/ansible/collection/kubernetes/roles/nfc_kubernetes/index.md
|
||||
|
||||
- projects/ansible/roles/kubernetes/rbac.md
|
||||
- projects/ansible/collection/kubernetes/roles/nfc_kubernetes/ansible.md
|
||||
|
||||
- projects/ansible/collection/kubernetes/roles/nfc_kubernetes/firewall.md
|
||||
|
||||
- projects/ansible/collection/kubernetes/roles/nfc_kubernetes/rbac.md
|
||||
|
||||
- projects/ansible/collection/kubernetes/roles/nfc_kubernetes/release_notes.md
|
||||
|
||||
|
||||
- Operations:
|
||||
|
64
playbooks/install.yaml
Normal file
64
playbooks/install.yaml
Normal file
@ -0,0 +1,64 @@
|
||||
---
|
||||
- name: Install K3s Kubernetes
|
||||
hosts: |-
|
||||
{%- if nfc_pb_host is defined -%}
|
||||
|
||||
{{ nfc_pb_host }}
|
||||
|
||||
{%- elif nfc_pb_kubernetes_cluster_name is defined -%}
|
||||
|
||||
kubernetes_cluster_{{ nfc_pb_kubernetes_cluster_name | lower }}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- if ansible_limit is defined -%}
|
||||
|
||||
{{ ansible_limit }}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
localhost
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif %}
|
||||
become: true
|
||||
gather_facts: true
|
||||
|
||||
|
||||
tasks:
|
||||
|
||||
|
||||
- name: Install/Configure Kubernetes
|
||||
ansible.builtin.include_role:
|
||||
name: nfc_kubernetes
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
# vars:
|
||||
|
||||
#
|
||||
# Future feature, add playbook to import to awx
|
||||
#
|
||||
# nfc_pb_awx_tower_template:
|
||||
|
||||
|
||||
# - name: "Collection/NoFussComputing/Kubernetes/Install"
|
||||
# ask_credential_on_launch: true
|
||||
# ask_job_type_on_launch: true
|
||||
# ask_limit_on_launch: true
|
||||
# ask_tags_on_launch: true
|
||||
# ask_variables_on_launch: true
|
||||
# description: |
|
||||
# Playbook to Install/Configure Kubernetes using configuration
|
||||
# from code.
|
||||
# execution_environment: "No Fuss Computing EE"
|
||||
# job_type: "check"
|
||||
# labels:
|
||||
# - cluster
|
||||
# - k3s
|
||||
# - kubernetes
|
||||
# verbosity: 2
|
||||
# use_fact_cache: true
|
||||
# survey_enabled: false
|
0
plugins/.gitkeep
Normal file
0
plugins/.gitkeep
Normal file
3
roles/nfc_kubernetes/README.md
Normal file
3
roles/nfc_kubernetes/README.md
Normal file
@ -0,0 +1,3 @@
|
||||
## No Fuss Computing - Ansible Role nfc_kubernetes
|
||||
|
||||
Nothing to see here
|
194
roles/nfc_kubernetes/defaults/main.yml
Normal file
194
roles/nfc_kubernetes/defaults/main.yml
Normal file
@ -0,0 +1,194 @@
|
||||
|
||||
# Deprecated:
|
||||
# Calico is being migrated to use the calico operator.
|
||||
# in a near future release, this method of deploying calico
|
||||
# will be removed. use tag `operator_migrate_calico` to migrate
|
||||
calico_image_tag: v3.25.0 # Deprecated
|
||||
# EoF Deprecated
|
||||
# SoF New Variables
|
||||
nfc_role_kubernetes_calico_version: v3.27.0
|
||||
# nfc_kubernetes_tigera_operator_registry: quay.io
|
||||
# nfc_kubernetes_tigera_operator_image: tigera/operator
|
||||
# nfc_kubernetes_tigera_operator_tag: v1.32.3 # Calico v3.27.0
|
||||
# EoF New Variables, EoF Deprecated
|
||||
|
||||
|
||||
nfc_kubernetes_enable_metallb: false
|
||||
nfc_kubernetes_enable_servicelb: false
|
||||
|
||||
|
||||
nfc_role_kubernetes_container_images:
|
||||
|
||||
kubevirt_operator:
|
||||
name: Kubevirt Operator
|
||||
registry: quay.io
|
||||
image: kubevirt/virt-operator
|
||||
tag: v1.2.0
|
||||
|
||||
tigera_operator:
|
||||
name: Tigera Operator
|
||||
registry: quay.io
|
||||
image: tigera/operator
|
||||
tag: v1.32.3 # Calico v3.27.0
|
||||
|
||||
|
||||
nfc_role_kubernetes_cluster_domain: cluster.local
|
||||
|
||||
nfc_role_kubernetes_configure_firewall: true
|
||||
|
||||
nfc_role_kubernetes_etcd_enabled: false
|
||||
|
||||
nfc_role_kubernetes_install_olm: false
|
||||
|
||||
nfc_role_kubernetes_install_helm: true
|
||||
|
||||
nfc_role_kubernetes_install_kubevirt: false
|
||||
|
||||
nfc_role_kubernetes_kubevirt_operator_replicas: 1
|
||||
|
||||
nfc_role_kubernetes_oidc_enabled: false
|
||||
|
||||
nfc_role_kubernetes_pod_subnet: 172.16.248.0/21
|
||||
nfc_role_kubernetes_service_subnet: 172.16.244.0/22
|
||||
|
||||
nfc_role_kubernetes_prime: true # Mandatory for a node designated as the prime master node
|
||||
nfc_role_kubernetes_master: true # Mandatory for a node designated as a master node and the prime master node
|
||||
nfc_role_kubernetes_worker: false # Mandatory for a node designated as a worker node
|
||||
|
||||
############################################################################################################
|
||||
#
|
||||
# Old Vars requiring refactoring
|
||||
#
|
||||
# ############################################################################################################
|
||||
|
||||
|
||||
KubernetesVersion: '1.26.12' # must match the repository release version
|
||||
kubernetes_version_olm: '0.27.0'
|
||||
|
||||
|
||||
|
||||
KubernetesVersion_k3s_prefix: '+k3s1'
|
||||
|
||||
kubernetes_private_container_registry: [] # Optional, Array. if none use `[]`
|
||||
|
||||
kubernetes_etcd_snapshot_cron_schedule: '0 */12 * * *'
|
||||
kubernetes_etcd_snapshot_retention: 5
|
||||
|
||||
# host_external_ip: '' # Optional, String. External IP Address for host.
|
||||
|
||||
kube_apiserver_arg_audit_log_maxage: 2
|
||||
|
||||
kubelet_arg_system_reserved_cpu: 450m
|
||||
kubelet_arg_system_reserved_memory: 512Mi
|
||||
kubelet_arg_system_reserved_storage: 8Gi
|
||||
|
||||
|
||||
nfc_kubernetes:
|
||||
enable_firewall: false # Optional, bool enable firewall rules from role 'nfc_firewall'
|
||||
|
||||
nfc_kubernetes_no_restart: false # Set to true to prevent role from restarting kubernetes on the host(s)
|
||||
nfc_kubernetes_no_restart_master: false # Set to true to prevent role from restarting kubernetes on master host(s)
|
||||
nfc_kubernetes_no_restart_prime: false # Set to true to prevent role from restarting kubernetes on prime host
|
||||
nfc_kubernetes_no_restart_slave: false # Set to true to prevent role from restarting kubernetes on slave host(s)
|
||||
|
||||
|
||||
k3s:
|
||||
files:
|
||||
|
||||
- name: audit.yaml
|
||||
path: /var/lib/rancher/k3s/server
|
||||
content: |
|
||||
apiVersion: audit.k8s.io/v1
|
||||
kind: Policy
|
||||
rules:
|
||||
- level: Request
|
||||
when: "{{ nfc_role_kubernetes_master }}"
|
||||
|
||||
- name: 90-kubelet.conf
|
||||
path: /etc/sysctl.d
|
||||
content: |
|
||||
vm.panic_on_oom=0
|
||||
vm.overcommit_memory=1
|
||||
kernel.panic=10
|
||||
kernel.panic_on_oops=1
|
||||
kernel.keys.root_maxbytes=25000000
|
||||
|
||||
- name: psa.yaml
|
||||
path: /var/lib/rancher/k3s/server
|
||||
content: ""
|
||||
# apiVersion: apiserver.config.k8s.io/v1
|
||||
# kind: AdmissionConfiguration
|
||||
# plugins:
|
||||
# - name: PodSecurity
|
||||
# configuration:
|
||||
# apiVersion: pod-security.admission.config.k8s.io/v1beta1
|
||||
# kind: PodSecurityConfiguration
|
||||
# defaults:
|
||||
# enforce: "restricted"
|
||||
# enforce-version: "latest"
|
||||
# audit: "restricted"
|
||||
# audit-version: "latest"
|
||||
# warn: "restricted"
|
||||
# warn-version: "latest"
|
||||
# exemptions:
|
||||
# usernames: []
|
||||
# runtimeClasses: []
|
||||
# namespaces: [kube-system]
|
||||
when: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname }}"
|
||||
|
||||
|
||||
#############################################################################################
|
||||
# Cluster Config when stored in Inventory
|
||||
#
|
||||
# One required per cluster. Recommend creating one ansible host group per cluster.
|
||||
#############################################################################################
|
||||
# kubernetes_config: # Dict. Cluster Config
|
||||
# cluster:
|
||||
# access: # Mandatory. List, DNS host name or IPv4/IPv6 Address.
|
||||
# # if none use '[]'
|
||||
# - 'my.dnshostname.com'
|
||||
# - '2001:4860:4860::8888'
|
||||
# - '192.168.1.1'
|
||||
# domain_name: earth # Mandatory, String. Cluster Domain Name
|
||||
#     group_name:                            # Mandatory, String. name of the ansible inventory group containing all cluster hosts
|
||||
# prime:
|
||||
# name: k3s-prod # Mandatory, String. Ansible inventory_host that will
|
||||
# # act as the prime master node.
|
||||
# networking:
|
||||
# encrypt: true # Optional, Boolean. default `false`. Install wireguard for inter-node encryption
|
||||
# podSubnet: 172.16.70.0/24 # Mandatory, String. CIDR
|
||||
# ServiceSubnet: 172.16.72.0/24 # Mandatory, String. CIDR
|
||||
#
|
||||
#
|
||||
# helm:
|
||||
# enabled: true # Optional, Boolean. default=false. Install Helm Binary
|
||||
#
|
||||
#
|
||||
# kube_virt:
|
||||
# enabled: false # Optional, Boolean. default=false. Install KubeVirt
|
||||
#
|
||||
#     nodes: []                             # Optional, List of String. default=inventory_hostname. List of nodes to install kubevirt on.
|
||||
#
|
||||
# operator:
|
||||
# replicas: 2 # Optional, Integer. How many virt_operators to deploy.
|
||||
#
|
||||
#
|
||||
# oidc: # Used to configure Kubernetes with OIDC Authentication.
|
||||
#     enabled: true                          # Mandatory, boolean. speaks for itself.
|
||||
# issuer_url: https://domainname.com/realms/realm-name # Mandatory, String. URL of OIDC Provider
|
||||
# client_id: kubernetes-test # Mandatory, string. OIDC Client ID
|
||||
# username_claim: preferred_username # Mandatory, String. Claim name containing username.
|
||||
# username_prefix: oidc # Optional, String. What to prefix to username
|
||||
# groups_claim: roles # Mandatory, String. Claim name containing groups
|
||||
# groups_prefix: '' # Optional, String. string to append to groups
|
||||
#
|
||||
# hosts:
|
||||
#
|
||||
# my-host-name:
|
||||
# labels:
|
||||
# mylabel: myvalue
|
||||
#
|
||||
# taints:
|
||||
# - effect: NoSchedule
|
||||
# key: taintkey
|
||||
# value: taintvalue
|
41
roles/nfc_kubernetes/handlers/main.yml
Normal file
41
roles/nfc_kubernetes/handlers/main.yml
Normal file
@ -0,0 +1,41 @@
|
||||
---
|
||||
|
||||
- name: Reboot Node
|
||||
ansible.builtin.reboot:
|
||||
reboot_timeout: 300
|
||||
listen: reboot_host
|
||||
when: ansible_connection == 'ssh'
|
||||
|
||||
|
||||
- name: Restart Kubernetes
|
||||
ansible.builtin.service:
|
||||
name: |-
|
||||
{%- if nfc_role_kubernetes_master | default(false) | bool -%}
|
||||
k3s
|
||||
{%- else -%}
|
||||
k3s-agent
|
||||
{%- endif -%}
|
||||
state: restarted
|
||||
listen: kubernetes_restart
|
||||
when: |-
|
||||
not (
|
||||
nfc_kubernetes_no_restart
|
||||
or
|
||||
(
|
||||
nfc_role_kubernetes_master
|
||||
and
|
||||
nfc_kubernetes_no_restart_master
|
||||
)
|
||||
or
|
||||
(
|
||||
inventory_hostname == kubernetes_config.cluster.prime.name | default(inventory_hostname)
|
||||
and
|
||||
nfc_kubernetes_no_restart_prime
|
||||
)
|
||||
or
|
||||
(
|
||||
nfc_role_kubernetes_worker
|
||||
and
|
||||
nfc_kubernetes_no_restart_slave
|
||||
)
|
||||
)
|
@ -1,24 +1,29 @@
|
||||
galaxy_info:
|
||||
|
||||
role_name: nfc_kubernetes
|
||||
|
||||
author: No Fuss Computing
|
||||
description: template role to install and configure Kubernetes on a host
|
||||
|
||||
issue_tracker_url: https://gitlab.com/nofusscomputing/projects/ansible/kubernetes
|
||||
description: Install and configure single and multi-node K3s Kubernetes cluster.
|
||||
|
||||
license: https://gitlab.com/nofusscomputing/projects/ansible/kubernetes/-/blob/master/LICENSE
|
||||
issue_tracker_url: https://gitlab.com/nofusscomputing/projects/ansible/collections/kubernetes
|
||||
|
||||
license: MIT
|
||||
|
||||
min_ansible_version: '2.15'
|
||||
|
||||
platforms:
|
||||
|
||||
- name: Debian
|
||||
versions:
|
||||
- bullseye
|
||||
- bookworm
|
||||
|
||||
- name: Ubuntu
|
||||
versions:
|
||||
- 21
|
||||
|
||||
galaxy_tags:
|
||||
- cluster
|
||||
- k3s
|
||||
- k8s
|
||||
- kubernetes
|
||||
- container
|
27
roles/nfc_kubernetes/tasks/helm/main.yaml
Normal file
27
roles/nfc_kubernetes/tasks/helm/main.yaml
Normal file
@ -0,0 +1,27 @@
|
||||
---
|
||||
|
||||
- name: Fetch Helm APT Key
|
||||
ansible.builtin.get_url:
|
||||
url: https://baltocdn.com/helm/signing.asc
|
||||
dest: /usr/share/keyrings/helm.asc
|
||||
mode: 740
|
||||
|
||||
|
||||
- name: Add Helm Repository
|
||||
ansible.builtin.apt_repository:
|
||||
repo: >-
|
||||
deb [arch={%- if ansible_architecture == 'aarch64' -%}
|
||||
arm64
|
||||
{%- else -%}
|
||||
amd64
|
||||
{%- endif %} signed-by=/usr/share/keyrings/helm.asc] http://baltocdn.com/helm/stable/{{
|
||||
ansible_os_family | lower }}/ all main
|
||||
state: present
|
||||
filename: helm
|
||||
|
||||
|
||||
- name: Install Helm
|
||||
ansible.builtin.apt:
|
||||
package:
|
||||
- helm
|
||||
state: present
|
93
roles/nfc_kubernetes/tasks/install.yaml
Normal file
93
roles/nfc_kubernetes/tasks/install.yaml
Normal file
@ -0,0 +1,93 @@
|
||||
---
|
||||
|
||||
- name: Get Hostname
|
||||
ansible.builtin.command:
|
||||
cmd: hostname
|
||||
changed_when: false
|
||||
register: hostname_to_check
|
||||
|
||||
|
||||
- name: Hostname Check
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- hostname_to_check.stdout == inventory_hostname
|
||||
msg: The hostname must match the inventory_hostname
|
||||
when: >
|
||||
inventory_hostname != 'localhost'
|
||||
|
||||
|
||||
- name: Check Machine Architecture
|
||||
ansible.builtin.set_fact:
|
||||
nfc_kubernetes_install_architectures: "{{ nfc_kubernetes_install_architectures | default({}) | combine({ansible_architecture: ''}) }}"
|
||||
|
||||
|
||||
- name: Configure Kubernetes Firewall Rules
|
||||
ansible.builtin.include_role:
|
||||
name: nofusscomputing.firewall.nfc_firewall
|
||||
vars:
|
||||
nfc_role_firewall_firewall_type: iptables
|
||||
nfc_role_firewall_additional_rules: "{{ ( lookup('template', 'vars/firewall_rules.yaml') | from_yaml ).kubernetes_chains }}"
|
||||
tags:
|
||||
- always
|
||||
when: >
|
||||
nfc_role_kubernetes_configure_firewall
|
||||
|
||||
|
||||
- name: K3s Install
|
||||
ansible.builtin.include_tasks:
|
||||
file: k3s/install.yaml
|
||||
apply:
|
||||
tags:
|
||||
- always
|
||||
when: >
|
||||
install_kubernetes | default(true) | bool
|
||||
and
|
||||
not kubernetes_installed | default(false) | bool
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
- name: K3s Configure
|
||||
ansible.builtin.include_tasks:
|
||||
file: k3s/configure.yaml
|
||||
apply:
|
||||
tags:
|
||||
- always
|
||||
when: >
|
||||
install_kubernetes | default(true) | bool
|
||||
and
|
||||
kubernetes_installed | default(false) | bool
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
- name: Kubevert
|
||||
ansible.builtin.include_tasks:
|
||||
file: kubevirt/main.yaml
|
||||
apply:
|
||||
tags:
|
||||
- always
|
||||
when: >
|
||||
kubernetes_installed | default(false) | bool
|
||||
and
|
||||
kubernetes_config.kube_virt.enabled | default(nfc_role_kubernetes_install_kubevirt)
|
||||
and
|
||||
inventory_hostname in kubernetes_config.kube_virt.nodes | default([ inventory_hostname ]) | list
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
- name: Helm
|
||||
ansible.builtin.include_tasks:
|
||||
file: helm/main.yaml
|
||||
apply:
|
||||
tags:
|
||||
- always
|
||||
when: >
|
||||
kubernetes_installed | default(false) | bool
|
||||
and
|
||||
kubernetes_config.helm.enabled | default(nfc_role_kubernetes_install_helm)
|
||||
and
|
||||
nfc_role_kubernetes_master
|
||||
tags:
|
||||
- always
|
78
roles/nfc_kubernetes/tasks/k3s/configure.yaml
Normal file
78
roles/nfc_kubernetes/tasks/k3s/configure.yaml
Normal file
@ -0,0 +1,78 @@
|
||||
---
|
||||
|
||||
- name: Additional config files
|
||||
ansible.builtin.copy:
|
||||
content: |
|
||||
{{ item.content }}
|
||||
dest: "{{ item.path }}/{{ item.name }}"
|
||||
mode: '740'
|
||||
owner: root
|
||||
group: root
|
||||
loop: "{{ k3s.files }}"
|
||||
when: item.when | default(false) | bool
|
||||
|
||||
|
||||
- name: Check if FW dir exists
|
||||
ansible.builtin.stat:
|
||||
name: /etc/iptables-reloader/rules.d
|
||||
register: firewall_rules_dir_metadata
|
||||
|
||||
|
||||
- name: Copy Templates
|
||||
ansible.builtin.template:
|
||||
src: "{{ item.src }}"
|
||||
dest: "{{ item.dest }}"
|
||||
owner: root
|
||||
mode: '700'
|
||||
force: true
|
||||
notify: "{{ item.notify | default(omit) }}"
|
||||
loop: "{{ templates_to_apply }}"
|
||||
when: >
|
||||
item.when | default(true) | bool
|
||||
vars:
|
||||
templates_to_apply:
|
||||
|
||||
- src: kubernetes-manifest-rbac.yaml.j2
|
||||
dest: /var/lib/rancher/k3s/server/manifests/rbac-authorization-common.yaml
|
||||
when: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname }}"
|
||||
|
||||
- src: iptables-kubernetes.rules.j2
|
||||
dest: "/etc/iptables-reloader/rules.d/iptables-kubernetes.rules"
|
||||
notify: firewall_reloader
|
||||
when: |-
|
||||
{%- if firewall_installed -%}
|
||||
|
||||
{{ firewall_rules_dir_metadata.stat.exists }}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
false
|
||||
|
||||
{%- endif %}
|
||||
|
||||
|
||||
- name: Add Kubernetes Node Labels
|
||||
ansible.builtin.copy:
|
||||
content: |-
|
||||
apiVersion: v1
|
||||
kind: Node
|
||||
metadata:
|
||||
name: "{{ inventory_hostname }}"
|
||||
{% if kubernetes_config.hosts[inventory_hostname].labels | default([]) | list | length > 0 -%}
|
||||
labels:
|
||||
{{ kubernetes_config.hosts[inventory_hostname].labels | to_nice_yaml | indent(4) }}
|
||||
{%- endif +%}
|
||||
{% if kubernetes_config.hosts[inventory_hostname].taints | default([]) | list | length > 0 -%}
|
||||
spec:
|
||||
taints:
|
||||
{{ kubernetes_config.hosts[inventory_hostname].taints | to_nice_yaml(indent=0) | indent(4) }}
|
||||
{% endif %}
|
||||
dest: /var/lib/rancher/k3s/server/manifests/node-manifest-{{ inventory_hostname }}.yaml
|
||||
owner: root
|
||||
group: root
|
||||
mode: '700'
|
||||
delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
|
||||
when:
|
||||
kubernetes_config.hosts[inventory_hostname].labels | default([]) | list | length > 0
|
||||
or
|
||||
kubernetes_config.hosts[inventory_hostname].taints | default([]) | list | length > 0
|
542
roles/nfc_kubernetes/tasks/k3s/install.yaml
Normal file
542
roles/nfc_kubernetes/tasks/k3s/install.yaml
Normal file
@ -0,0 +1,542 @@
|
||||
---
|
||||
|
||||
- name: Check for calico deployment manifest
|
||||
ansible.builtin.stat:
|
||||
name: /var/lib/rancher/k3s/server/manifests/calico.yaml
|
||||
register: file_calico_yaml_metadata
|
||||
|
||||
|
||||
- name: Check for calico Operator deployment manifest
|
||||
ansible.builtin.stat:
|
||||
name: /var/lib/rancher/k3s/ansible/deployment-manifest-calico_operator.yaml
|
||||
register: file_calico_operator_yaml_metadata
|
||||
|
||||
|
||||
- name: Install dependent packages
|
||||
ansible.builtin.apt:
|
||||
name: "{{ package }}"
|
||||
state: present
|
||||
loop: "{{ packages }}"
|
||||
loop_control:
|
||||
loop_var: package
|
||||
vars:
|
||||
packages:
|
||||
- curl
|
||||
- iptables
|
||||
- jq
|
||||
- wireguard
|
||||
|
||||
|
||||
- name: Remove swapfile from /etc/fstab
|
||||
ansible.posix.mount:
|
||||
name: "{{ item }}"
|
||||
fstype: swap
|
||||
state: absent
|
||||
with_items:
|
||||
- swap
|
||||
- none
|
||||
when:
|
||||
- ansible_os_family == 'Debian' # ansible_lsb.codename = bullseye, ansible_lsb.major_release = 11
|
||||
tags:
|
||||
- install
|
||||
|
||||
|
||||
- name: Disable swap
|
||||
ansible.builtin.command:
|
||||
cmd: swapoff -a
|
||||
changed_when: false
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
tags:
|
||||
- install
|
||||
|
||||
- name: Check an armbian os system
|
||||
ansible.builtin.stat:
|
||||
path: /etc/default/armbian-zram-config
|
||||
register: armbian_stat_result
|
||||
|
||||
|
||||
- name: Armbian Disable Swap
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
sed -i 's/\# SWAP=false/SWAP=false/g' /etc/default/armbian-zram-config;
|
||||
sed -i 's/ENABLED=true/ENABLED=false/g' /etc/default/armbian-zram-config;
|
||||
args:
|
||||
executable: bash
|
||||
changed_when: false
|
||||
when: armbian_stat_result.stat.exists
|
||||
|
||||
|
||||
# Create the directories K3s (and this role's manifest staging) require.
# Fix: 'mode' values were unquoted YAML integers (700). Ansible interprets
# an integer mode as DECIMAL, so 700 becomes octal 1274 — not the intended
# rwx------. File modes must be quoted octal strings ('0700').
- name: Create Required directories
  ansible.builtin.file:
    name: "{{ item.name }}"
    state: "{{ item.state }}"
    mode: "{{ item.mode }}"
  loop: "{{ dirs }}"
  vars:
    dirs:
      - name: /etc/rancher/k3s
        state: directory
        mode: '0700'
      - name: /var/lib/rancher/k3s/server/logs
        state: directory
        mode: '0700'
      - name: /var/lib/rancher/k3s/server/manifests
        state: directory
        mode: '0700'
      - name: /var/lib/rancher/k3s/ansible
        state: directory
        mode: '0700'
|
||||
|
||||
|
||||
- name: Add sysctl net.ipv4.ip_forward
|
||||
ansible.posix.sysctl:
|
||||
name: "{{ item.name }}"
|
||||
value: "{{ item.value }}"
|
||||
sysctl_set: true
|
||||
state: present
|
||||
reload: true
|
||||
loop: "{{ settings }}"
|
||||
notify: reboot_host # On change reboot
|
||||
vars:
|
||||
settings:
|
||||
- name: net.ipv4.ip_forward
|
||||
value: '1'
|
||||
- name: fs.inotify.max_user_watches
|
||||
value: '524288'
|
||||
- name: fs.inotify.max_user_instances
|
||||
value: '512'
|
||||
- name: net.ipv6.conf.all.disable_ipv6
|
||||
value: '1'
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
|
||||
|
||||
- name: Check for Network Manager Directory
|
||||
ansible.builtin.stat:
|
||||
name: /etc/NetworkManager/conf.d
|
||||
register: directory_network_manager_metadata
|
||||
|
||||
|
||||
- name: Network Manager Setup
|
||||
ansible.builtin.copy:
|
||||
content: |-
|
||||
#
|
||||
# K3s Configuration for Network Manager
|
||||
#
|
||||
# Managed By ansible/role/nfc_kubernetes
|
||||
#
|
||||
# Dont edit this file directly as it will be overwritten.
|
||||
#
|
||||
[keyfile]
|
||||
unmanaged-devices=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico;interface-name:vxlan-v6.calico;interface-name:wireguard.cali;interface-name:wg-v6.cali
|
||||
dest: /etc/NetworkManager/conf.d/calico.conf
|
||||
mode: '770'
|
||||
owner: root
|
||||
group: root
|
||||
diff: true
|
||||
when: directory_network_manager_metadata.stat.exists
|
||||
|
||||
|
||||
- name: Check if K3s Installed
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
if [[ $(service k3s status) ]]; then exit 0; else exit 1; fi
|
||||
executable: /bin/bash
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
register: k3s_installed
|
||||
when: >
|
||||
nfc_role_kubernetes_master | default(false) | bool
|
||||
|
||||
|
||||
# Probe for an existing k3s AGENT service and record the result.
# Fix: the original condition 'not nfc_role_kubernetes_worker' was inverted —
# it skipped every worker node and ran the agent probe everywhere else,
# leaving k3s_installed unset on exactly the hosts that run k3s-agent.
# The agent service exists on non-master nodes (masters run the 'k3s'
# server service, covered by the preceding probe task).
- name: Check if K3s Installed
  ansible.builtin.shell:
    cmd: |
      if [[ $(service k3s-agent status) ]]; then exit 0; else exit 1; fi
    executable: /bin/bash
  changed_when: false   # read-only probe
  failed_when: false    # a missing service is expected on fresh hosts; rc inspected later
  register: k3s_installed
  when: >
    not nfc_role_kubernetes_master | default(false) | bool
|
||||
|
||||
|
||||
- name: Download Install Scripts
|
||||
ansible.builtin.uri:
|
||||
url: "{{ item.url }}"
|
||||
method: GET
|
||||
return_content: true
|
||||
status_code:
|
||||
- 200
|
||||
- 304
|
||||
dest: "{{ item.dest }}"
|
||||
mode: "744"
|
||||
changed_when: false
|
||||
register: k3s_download_script
|
||||
delegate_to: localhost
|
||||
run_once: true
|
||||
# no_log: true
|
||||
when: >
|
||||
ansible_os_family == 'Debian'
|
||||
and
|
||||
item.when | default(true) | bool
|
||||
loop: "{{ download_files }}"
|
||||
vars:
|
||||
ansible_connection: local
|
||||
download_files:
|
||||
- dest: /tmp/install.sh
|
||||
url: https://get.k3s.io
|
||||
- dest: /tmp/install_olm.sh
|
||||
url: https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/v{{ kubernetes_version_olm }}/scripts/install.sh
|
||||
when: "{{ nfc_role_kubernetes_install_olm }}"
|
||||
|
||||
|
||||
- name: Download K3s Binary
|
||||
ansible.builtin.uri:
|
||||
url: |-
|
||||
https://github.com/k3s-io/k3s/releases/download/v
|
||||
{{- KubernetesVersion + KubernetesVersion_k3s_prefix | urlencode -}}
|
||||
/k3s
|
||||
{%- if cpu_arch.key == 'aarch64' -%}
|
||||
-arm64
|
||||
{%- endif %}
|
||||
method: GET
|
||||
return_content: false
|
||||
status_code:
|
||||
- 200
|
||||
- 304
|
||||
dest: "/tmp/k3s.{{ cpu_arch.key }}"
|
||||
mode: "744"
|
||||
changed_when: false
|
||||
register: k3s_download_files
|
||||
delegate_to: localhost
|
||||
run_once: true
|
||||
# no_log: true
|
||||
when: ansible_os_family == 'Debian'
|
||||
loop: "{{ nfc_kubernetes_install_architectures | dict2items }}"
|
||||
loop_control:
|
||||
loop_var: cpu_arch
|
||||
vars:
|
||||
ansible_connection: local
|
||||
|
||||
|
||||
- name: "[TRACE] Downloaded File SHA256"
|
||||
ansible.builtin.set_fact:
|
||||
hash_sha256_k3s_downloaded_binary: "{{ lookup('ansible.builtin.file', '/tmp/k3s.' + cpu_arch.key) | hash('sha256') | string }}"
|
||||
delegate_to: localhost
|
||||
loop: "{{ nfc_kubernetes_install_architectures | dict2items }}"
|
||||
loop_control:
|
||||
loop_var: cpu_arch
|
||||
|
||||
|
||||
- name: Existing k3s File hash
|
||||
ansible.builtin.stat:
|
||||
checksum_algorithm: sha256
|
||||
name: /usr/local/bin/k3s
|
||||
register: hash_sha256_k3s_existing_binary
|
||||
|
||||
|
||||
- name: Copy K3s binary to Host
|
||||
ansible.builtin.copy:
|
||||
src: "/tmp/k3s.{{ ansible_architecture }}"
|
||||
dest: "/usr/local/bin/k3s"
|
||||
mode: '741'
|
||||
owner: root
|
||||
group: root
|
||||
when: hash_sha256_k3s_existing_binary.stat.checksum | default('0') != hash_sha256_k3s_downloaded_binary
|
||||
|
||||
|
||||
- name: Copy install scripts to Host
|
||||
ansible.builtin.copy:
|
||||
src: "{{ item.path }}"
|
||||
dest: "{{ item.path }}"
|
||||
mode: '755'
|
||||
owner: root
|
||||
group: root
|
||||
changed_when: false
|
||||
loop: "{{ install_scripts }}"
|
||||
vars:
|
||||
install_scripts:
|
||||
- path: "/tmp/install.sh"
|
||||
- path: "/tmp/install_olm.sh"
|
||||
when: "{{ nfc_role_kubernetes_install_olm }}"
|
||||
when: >
|
||||
item.when | default(true) | bool
|
||||
|
||||
|
||||
- name: Required Initial config files
|
||||
ansible.builtin.copy:
|
||||
content: |
|
||||
{{ item.content }}
|
||||
dest: "{{ item.path }}/{{ item.name }}"
|
||||
mode: '740'
|
||||
owner: root
|
||||
group: root
|
||||
loop: "{{ k3s.files }}"
|
||||
when: >
|
||||
item.when | default(true) | bool
|
||||
# kubernetes_config.cluster.prime.name == inventory_hostname
|
||||
|
||||
|
||||
- name: Copy Intial required templates
|
||||
ansible.builtin.template:
|
||||
src: "{{ item.src }}"
|
||||
dest: "{{ item.dest }}"
|
||||
owner: root
|
||||
mode: '700'
|
||||
force: true
|
||||
notify: "{{ item.notify | default(omit) }}"
|
||||
loop: "{{ templates_to_apply }}"
|
||||
diff: true
|
||||
when: >
|
||||
item.when | default(true) | bool
|
||||
vars:
|
||||
templates_to_apply:
|
||||
- src: k3s-config.yaml.j2
|
||||
dest: /etc/rancher/k3s/config.yaml
|
||||
notify: kubernetes_restart
|
||||
- src: "calico.yaml.j2"
|
||||
dest: /var/lib/rancher/k3s/server/manifests/calico.yaml
|
||||
when: >
|
||||
{{
|
||||
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
and
|
||||
(
|
||||
(
|
||||
not file_calico_operator_yaml_metadata.stat.exists
|
||||
and
|
||||
file_calico_yaml_metadata.stat.exists
|
||||
and
|
||||
k3s_installed.rc == 0
|
||||
)
|
||||
or
|
||||
'calico_manifest' in ansible_run_tags
|
||||
)
|
||||
}}
|
||||
- src: k3s-registries.yaml.j2
|
||||
dest: /etc/rancher/k3s/registries.yaml
|
||||
notify: kubernetes_restart
|
||||
when: "{{ (kubernetes_private_container_registry | default([])) | from_yaml | list | length > 0 }}"
|
||||
|
||||
|
||||
# - name: Templates IPv6
|
||||
# ansible.builtin.template:
|
||||
# src: iptables-kubernetes.rules.j2
|
||||
# dest: "/etc/ip6tables.rules.d/ip6tables-kubernetes.rules"
|
||||
# owner: root
|
||||
# mode: '700'
|
||||
# force: true
|
||||
# vars:
|
||||
# ipv6: true
|
||||
|
||||
|
||||
- name: Set IPTables to legacy mode
|
||||
ansible.builtin.command:
|
||||
cmd: update-alternatives --set iptables /usr/sbin/iptables-legacy
|
||||
changed_when: false
|
||||
|
||||
|
||||
- name: Install K3s (prime master)
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
INSTALL_K3S_SKIP_DOWNLOAD=true \
|
||||
INSTALL_K3S_VERSION="v{{ KubernetesVersion }}{{ KubernetesVersion_k3s_prefix }}" \
|
||||
/tmp/install.sh {% if nfc_role_kubernetes_etcd_enabled %}--cluster-init{% endif %}
|
||||
changed_when: false
|
||||
when: >
|
||||
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
and
|
||||
k3s_installed.rc == 1
|
||||
|
||||
|
||||
- name: Install Calico Operator
|
||||
ansible.builtin.include_tasks:
|
||||
file: migrate_to_operator.yaml
|
||||
apply:
|
||||
tags:
|
||||
- always
|
||||
when: >-
|
||||
(
|
||||
(
|
||||
'operator_migrate_calico' in ansible_run_tags
|
||||
or
|
||||
'operator_calico' in ansible_run_tags
|
||||
)
|
||||
or
|
||||
not file_calico_yaml_metadata.stat.exists
|
||||
)
|
||||
and
|
||||
'calico_manifest' not in ansible_run_tags
|
||||
and
|
||||
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
|
||||
|
||||
- name: Install MetalLB Operator
|
||||
ansible.builtin.include_tasks:
|
||||
file: manifest_apply.yaml
|
||||
apply:
|
||||
tags:
|
||||
- always
|
||||
loop: "{{ manifests }}"
|
||||
loop_control:
|
||||
loop_var: manifest
|
||||
vars:
|
||||
manifests:
|
||||
- name: MetalLB Operator
|
||||
template: Deployment-manifest-MetalLB_Operator.yaml
|
||||
when: >-
|
||||
nfc_kubernetes_enable_metallb | default(false) | bool
|
||||
and
|
||||
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
|
||||
|
||||
- name: Wait for kubernetes prime to be ready
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
set -o pipefail
|
||||
if [ `which jq` ]; then
|
||||
echo $(kubectl get no $(hostname) -o json | jq .status.conditions[4].status | tr -d '"');
|
||||
else
|
||||
echo jq command not found;
|
||||
exit 127;
|
||||
fi
|
||||
executable: /bin/bash
|
||||
delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}"
|
||||
run_once: true
|
||||
register: kubernetes_ready_check
|
||||
retries: 30
|
||||
delay: 10
|
||||
until: >
|
||||
kubernetes_ready_check.stdout | default(false) | bool
|
||||
or
|
||||
kubernetes_ready_check.rc != 0
|
||||
changed_when: false
|
||||
failed_when: kubernetes_ready_check.rc != 0
|
||||
|
||||
|
||||
- name: Install olm
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
/tmp/install_olm.sh v{{ kubernetes_version_olm }}
|
||||
changed_when: false
|
||||
failed_when: >
|
||||
'already installed' not in install_olm.stdout
|
||||
and
|
||||
install_olm.rc == 1
|
||||
register: install_olm
|
||||
when: >
|
||||
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
and
|
||||
nfc_role_kubernetes_install_olm | default(false) | bool
|
||||
|
||||
|
||||
- name: Uninstall OLM
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
kubectl delete -n olm deployment packageserver;
|
||||
kubectl delete -n olm deployment catalog-operator;
|
||||
kubectl delete -n olm deployment olm-operator;
|
||||
|
||||
kubectl delete crd catalogsources.operators.coreos.com;
|
||||
kubectl delete crd clusterserviceversions.operators.coreos.com;
|
||||
kubectl delete crd installplans.operators.coreos.com;
|
||||
kubectl delete crd olmconfigs.operators.coreos.com;
|
||||
kubectl delete crd operatorconditions.operators.coreos.com;
|
||||
kubectl delete crd operatorgroups.operators.coreos.com;
|
||||
kubectl delete crd operators.operators.coreos.com;
|
||||
kubectl delete crd subscriptions.operators.coreos.com;
|
||||
|
||||
kubectl delete namespace operators --force;
|
||||
kubectl delete namespace olm --force;
|
||||
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
register: install_olm
|
||||
when: >
|
||||
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
and
|
||||
'olm_uninstall' in ansible_run_tags
|
||||
|
||||
|
||||
- name: Enable Cluster Encryption
|
||||
ansible.builtin.command:
|
||||
cmd: kubectl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabled":true,"wireguardEnabledV6":true}}'
|
||||
changed_when: false
|
||||
failed_when: false # New cluster will fail
|
||||
when: >
|
||||
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
and
|
||||
kubernetes_config.cluster.networking.encrypt | default(false) | bool
|
||||
and
|
||||
(
|
||||
'calico_manifest' in ansible_run_tags
|
||||
or
|
||||
(
|
||||
'operator_migrate_calico' not in ansible_run_tags
|
||||
or
|
||||
'operator_calico' not in ansible_run_tags
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
- name: Fetch Join Token
|
||||
ansible.builtin.slurp:
|
||||
src: /var/lib/rancher/k3s/server/token
|
||||
delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}"
|
||||
run_once: true
|
||||
register: k3s_join_token
|
||||
no_log: true # Value is sensitive
|
||||
|
||||
|
||||
- name: Create Token fact
|
||||
ansible.builtin.set_fact:
|
||||
k3s_join_token: "{{ k3s_join_token.content | b64decode | replace('\n', '') }}"
|
||||
delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}"
|
||||
run_once: true
|
||||
no_log: true # Value is sensitive
|
||||
|
||||
|
||||
- name: Install K3s (master nodes)
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
INSTALL_K3S_EXEC="server" \
|
||||
INSTALL_K3S_SKIP_DOWNLOAD=true \
|
||||
INSTALL_K3S_VERSION="v{{ KubernetesVersion }}{{ KubernetesVersion_k3s_prefix }}" \
|
||||
K3S_TOKEN="{{ k3s_join_token }}" \
|
||||
/tmp/install.sh
|
||||
executable: /bin/bash
|
||||
changed_when: false
|
||||
when: >
|
||||
nfc_role_kubernetes_master | default(false) | bool
|
||||
and
|
||||
not kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
and
|
||||
k3s_installed.rc == 1
|
||||
|
||||
|
||||
- name: Install K3s (worker nodes)
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
set -o pipefail
|
||||
INSTALL_K3S_EXEC="agent" \
|
||||
INSTALL_K3S_SKIP_DOWNLOAD=true \
|
||||
INSTALL_K3S_VERSION="v{{ KubernetesVersion }}{{ KubernetesVersion_k3s_prefix }}" \
|
||||
K3S_TOKEN="{{ k3s_join_token }}" \
|
||||
K3S_URL="https://{{ hostvars[kubernetes_config.cluster.prime.name | default(inventory_hostname)].ansible_host }}:6443" \
|
||||
/tmp/install.sh -
|
||||
executable: /bin/bash
|
||||
changed_when: false
|
||||
when: >
|
||||
not nfc_role_kubernetes_master | default(false) | bool
|
||||
and
|
||||
not kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
and
|
||||
k3s_installed.rc == 1
|
||||
|
||||
|
||||
# Mark the install as complete for the remainder of the play.
- name: Set Kubernetes Final Install Fact
  ansible.builtin.set_fact:
    kubernetes_installed: true
    # Clear the join token: it is sensitive and no longer required.
    k3s_join_token: null
|
49
roles/nfc_kubernetes/tasks/k3s/manifest_apply.yaml
Normal file
49
roles/nfc_kubernetes/tasks/k3s/manifest_apply.yaml
Normal file
@ -0,0 +1,49 @@
|
||||
---
|
||||
|
||||
# Save the manifests in a dir so that diff's can be shown for changes
|
||||
- name: Copy Manifest for addition - {{ manifest.name }}
|
||||
ansible.builtin.template:
|
||||
src: "{{ manifest.template }}"
|
||||
dest: "/var/lib/rancher/k3s/ansible/{{ manifest.template | lower | replace('.j2', '') }}"
|
||||
mode: '744'
|
||||
become: true
|
||||
diff: true
|
||||
|
||||
|
||||
- name: Try / Catch
|
||||
block:
|
||||
|
||||
# Try to create first, if fail use replace.
|
||||
- name: Apply Manifest Create - {{ manifest.name }}
|
||||
ansible.builtin.command:
|
||||
cmd: "kubectl create -f /var/lib/rancher/k3s/ansible/{{ manifest.template | lower | replace('.j2', '') }}"
|
||||
become: true
|
||||
changed_when: false
|
||||
failed_when: >
|
||||
'Error from server' in manifest_stdout.stderr
|
||||
register: manifest_stdout
|
||||
|
||||
|
||||
rescue:
|
||||
|
||||
|
||||
- name: TRACE - Manifest Create - {{ manifest.name }}
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ manifest_stdout }}"
|
||||
|
||||
|
||||
- name: Replace Manifests - "Rescue" - {{ manifest.name }}
|
||||
ansible.builtin.command:
|
||||
cmd: "kubectl replace -f /var/lib/rancher/k3s/ansible/{{ manifest.template | lower | replace('.j2', '') }}"
|
||||
become: true
|
||||
changed_when: false
|
||||
failed_when: >
|
||||
'Error from server' in manifest_stdout.stderr
|
||||
and
|
||||
'ensure CRDs are installed first' in manifest_stdout.stderr
|
||||
register: manifest_stdout
|
||||
|
||||
|
||||
- name: TRACE - Replace Manifest - "Rescue" - {{ manifest.name }}
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ manifest_stdout }}"
|
198
roles/nfc_kubernetes/tasks/k3s/migrate_to_operator.yaml
Normal file
198
roles/nfc_kubernetes/tasks/k3s/migrate_to_operator.yaml
Normal file
@ -0,0 +1,198 @@
|
||||
---
|
||||
|
||||
# Reference https://docs.tigera.io/calico/3.25/operations/operator-migration
|
||||
|
||||
# Script creation of imageset: https://docs.tigera.io/calico/latest/operations/image-options/imageset#create-an-imageset
|
||||
# above may pull sha for arch of machine who ran the script
|
||||
|
||||
- name: Try / Catch
|
||||
vars:
|
||||
operator_manifests:
|
||||
- Deployment-manifest-Calico_Operator.yaml.j2
|
||||
- Installation-manifest-Calico_Cluster.yaml.j2
|
||||
- FelixConfiguration-manifest-Calico_Cluster.yaml
|
||||
- IPPool-manifest-Calico_Cluster.yaml.j2
|
||||
- APIServer-manifest-Calico_Cluster.yaml
|
||||
- ConfigMap-manifest-Calico_Service_Endpoint.yaml.j2
|
||||
block:
|
||||
|
||||
|
||||
- name: Move Calico Manifest from addons directory
|
||||
ansible.builtin.command:
|
||||
cmd: mv /var/lib/rancher/k3s/server/manifests/calico.yaml /tmp/
|
||||
become: true
|
||||
changed_when: false
|
||||
when: file_calico_yaml_metadata.stat.exists
|
||||
|
||||
|
||||
- name: Remove addon from Kubernetes
|
||||
ansible.builtin.command:
|
||||
cmd: kubectl delete addon -n kube-system calico
|
||||
become: true
|
||||
changed_when: false
|
||||
when: file_calico_yaml_metadata.stat.exists
|
||||
|
||||
|
||||
- name: Uninstall Calico
|
||||
ansible.builtin.command:
|
||||
cmd: kubectl delete -f /tmp/calico.yaml
|
||||
become: true
|
||||
changed_when: false
|
||||
when: file_calico_yaml_metadata.stat.exists
|
||||
|
||||
|
||||
# Save the manifests in a dir so that diff's can be shown for changes
|
||||
- name: Copy Manifest for addition
|
||||
ansible.builtin.template:
|
||||
src: "{{ item }}"
|
||||
dest: "/var/lib/rancher/k3s/ansible/{{ item | lower | replace('.j2', '') }}"
|
||||
mode: '744'
|
||||
become: true
|
||||
diff: true
|
||||
loop: "{{ operator_manifests }}"
|
||||
|
||||
|
||||
- name: Try / Catch
|
||||
block:
|
||||
|
||||
|
||||
- name: Apply Operator Manifests
|
||||
ansible.builtin.command:
|
||||
cmd: "kubectl create -f /var/lib/rancher/k3s/ansible/{{ item | lower | replace('.j2', '') }}"
|
||||
become: true
|
||||
changed_when: false
|
||||
failed_when: >
|
||||
'Error from server' in operator_manifest_stdout.stderr
|
||||
loop: "{{ operator_manifests }}"
|
||||
register: operator_manifest_stdout
|
||||
|
||||
|
||||
rescue:
|
||||
|
||||
|
||||
- name: TRACE - Operator manifest apply
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ operator_manifest_stdout }}"
|
||||
|
||||
|
||||
- name: Apply Operator Manifests - "Rescue"
|
||||
ansible.builtin.command:
|
||||
cmd: "kubectl replace -f /var/lib/rancher/k3s/ansible/{{ item | lower | replace('.j2', '') }}"
|
||||
become: true
|
||||
changed_when: false
|
||||
failed_when: >
|
||||
'Error from server' in operator_manifest_stdout.stderr
|
||||
and
|
||||
'ensure CRDs are installed first' in operator_manifest_stdout.stderr
|
||||
loop: "{{ operator_manifests }}"
|
||||
register: operator_manifest_stdout
|
||||
|
||||
|
||||
- name: TRACE - Operator manifest apply. Rescued
|
||||
ansible.builtin.debug:
|
||||
msg: "{{ operator_manifest_stdout }}"
|
||||
|
||||
|
||||
- name: Fetch Calico Kubectl Plugin
|
||||
ansible.builtin.uri:
|
||||
url: |-
|
||||
https://github.com/projectcalico/calico/releases/download/{{ nfc_role_kubernetes_calico_version }}/calicoctl-linux-
|
||||
{%- if cpu_arch.key == 'aarch64' -%}
|
||||
arm64
|
||||
{%- else -%}
|
||||
amd64
|
||||
{%- endif %}
|
||||
status_code:
|
||||
- 200
|
||||
- 304
|
||||
dest: "/tmp/kubectl-calico.{{ cpu_arch.key }}"
|
||||
mode: '777'
|
||||
owner: root
|
||||
group: 'root'
|
||||
changed_when: false
|
||||
become: true
|
||||
delegate_to: localhost
|
||||
loop: "{{ nfc_kubernetes_install_architectures | dict2items }}"
|
||||
loop_control:
|
||||
loop_var: cpu_arch
|
||||
vars:
|
||||
ansible_connection: local
|
||||
|
||||
|
||||
- name: Add calico Plugin
|
||||
ansible.builtin.copy:
|
||||
src: "/tmp/kubectl-calico.{{ ansible_architecture }}"
|
||||
dest: /usr/local/bin/kubectl-calico
|
||||
mode: '770'
|
||||
owner: root
|
||||
group: 'root'
|
||||
become: true
|
||||
when: nfc_role_kubernetes_master
|
||||
|
||||
|
||||
- name: Setup Automagic Host Endpoints
|
||||
ansible.builtin.shell:
|
||||
cmd: |-
|
||||
kubectl calico \
|
||||
patch kubecontrollersconfiguration \
|
||||
default --patch='{"spec": {"controllers": {"node": {"hostEndpoint": {"autoCreate": "Enabled"}}}}}'
|
||||
executable: bash
|
||||
become: true
|
||||
changed_when: false
|
||||
failed_when: false # fixme
|
||||
|
||||
|
||||
# Remove the per-node calico operator-migration label from every node in the
# cluster group, once migration to the operator has completed.
# Fixes two defects in the original command:
#   * the 'node' resource type was missing ('kubectl label <name> ...' without
#     a resource type is not a valid kubectl invocation);
#   * the loop variable was unused — '{{ inventory_hostname }}' was labelled
#     on every iteration instead of the looped host '{{ item }}'.
- name: Remove calico migration label
  ansible.builtin.shell:
    cmd: |-
      kubectl label \
        node {{ item }} \
        projectcalico.org/operator-node-migration-
    executable: bash
  become: true
  delegate_to: "{{ kubernetes_config.cluster.prime.name | default(inventory_hostname) }}"
  changed_when: false   # cleanup step; idempotent
  failed_when: false    # label may already be absent
  loop: "{{ groups[kubernetes_config.cluster.group_name | default('make_me_optional')] | default([]) }}"
|
||||
|
||||
# kubectl label node ip-10-229-92-202.eu-west-1.compute.internal projectcalico.org/operator-node-migration-
|
||||
# migration started
|
||||
|
||||
rescue:
|
||||
|
||||
|
||||
- name: Remove Operator Manifests
|
||||
ansible.builtin.command:
|
||||
cmd: "kubectl delete -f /var/lib/rancher/k3s/ansible/{{ item | lower | replace('.j2', '') }}"
|
||||
become: true
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
loop: "{{ operator_manifests }}"
|
||||
when: file_calico_yaml_metadata.stat.exists # Only rescue if it was a migration
|
||||
|
||||
|
||||
- name: Move Calico Manifest from addons directory
|
||||
ansible.builtin.command:
|
||||
cmd: mv /tmp/calico.yaml /var/lib/rancher/k3s/server/manifests/
|
||||
become: true
|
||||
changed_when: false
|
||||
when: file_calico_yaml_metadata.stat.exists
|
||||
|
||||
|
||||
- name: Re-install Calico
|
||||
ansible.builtin.command:
|
||||
cmd: kubectl apply -f /var/lib/rancher/k3s/server/manifests/calico.yaml
|
||||
become: true
|
||||
changed_when: false
|
||||
when: file_calico_yaml_metadata.stat.exists
|
||||
|
||||
|
||||
always:
|
||||
|
||||
|
||||
- name: Clean-up Temp File
|
||||
ansible.builtin.file:
|
||||
name: /tmp/calico.yaml
|
||||
state: absent
|
||||
become: true
|
||||
when: file_calico_yaml_metadata.stat.exists
|
72
roles/nfc_kubernetes/tasks/kubevirt/main.yaml
Normal file
72
roles/nfc_kubernetes/tasks/kubevirt/main.yaml
Normal file
@ -0,0 +1,72 @@
|
||||
---
|
||||
|
||||
- name: Validate Virtualization Support
|
||||
ansible.builtin.include_tasks:
|
||||
file: kubevirt/validate.yaml
|
||||
apply:
|
||||
tags:
|
||||
- always
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
- name: Deploy KubeVirt
|
||||
ansible.builtin.template:
|
||||
src: "{{ item }}"
|
||||
dest: "/var/lib/rancher/k3s/server/manifests/{{ item | replace('.j2', '') | lower }}"
|
||||
owner: root
|
||||
mode: '700'
|
||||
force: true
|
||||
notify: "{{ item.notify | default(omit) }}"
|
||||
loop: "{{ templates_to_apply }}"
|
||||
diff: true
|
||||
vars:
|
||||
templates_to_apply:
|
||||
- kubevirt-operator.yaml.j2
|
||||
- kubevirt-cr.yaml.j2
|
||||
|
||||
|
||||
- name: Fetch virtctl Kubectl Plugin
|
||||
ansible.builtin.uri:
|
||||
url: |-
|
||||
https://github.com/kubevirt/kubevirt/releases/download/{{
|
||||
nfc_role_kubernetes_container_images.kubevirt_operator.tag }}/virtctl-{{
|
||||
nfc_role_kubernetes_container_images.kubevirt_operator.tag }}-linux-
|
||||
{%- if cpu_arch.key == 'aarch64' -%}
|
||||
arm64
|
||||
{%- else -%}
|
||||
amd64
|
||||
{%- endif %}
|
||||
status_code:
|
||||
- 200
|
||||
- 304
|
||||
dest: "/tmp/kubectl-virtctl.{{ cpu_arch.key }}"
|
||||
mode: '777'
|
||||
owner: root
|
||||
group: 'root'
|
||||
changed_when: false
|
||||
become: true
|
||||
delegate_to: localhost
|
||||
loop: "{{ nfc_kubernetes_install_architectures | dict2items }}"
|
||||
loop_control:
|
||||
loop_var: cpu_arch
|
||||
vars:
|
||||
ansible_connection: local
|
||||
|
||||
|
||||
- name: Add virtctl Plugin
|
||||
ansible.builtin.copy:
|
||||
src: "/tmp/kubectl-virtctl.{{ ansible_architecture }}"
|
||||
dest: /usr/local/bin/kubectl-virt
|
||||
mode: '770'
|
||||
owner: root
|
||||
group: 'root'
|
||||
become: true
|
||||
when: nfc_role_kubernetes_master
|
||||
|
||||
|
||||
- name: Wait for KubeVirt to initialize
|
||||
ansible.builtin.command:
|
||||
cmd: kubectl -n kubevirt wait kv kubevirt --for condition=Available
|
||||
changed_when: false
|
||||
failed_when: false
|
25
roles/nfc_kubernetes/tasks/kubevirt/validate.yaml
Normal file
25
roles/nfc_kubernetes/tasks/kubevirt/validate.yaml
Normal file
@ -0,0 +1,25 @@
|
||||
---
|
||||
|
||||
- name: Install LibVirt-Clients
|
||||
ansible.builtin.apt:
|
||||
name: libvirt-clients
|
||||
state: present
|
||||
|
||||
|
||||
- name: Confirm Virtualization Support
|
||||
ansible.builtin.command:
|
||||
cmd: virt-host-validate qemu
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
register: virt_support_check_command
|
||||
|
||||
|
||||
- name: Confirm No QEMU failures
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- (": FAIL" | string) not in (item | string)
|
||||
- |
|
||||
(": PASS" | string) in (item | string)
|
||||
or
|
||||
(": WARN" | string) in (item | string)
|
||||
loop: "{{ virt_support_check_command.stdout_lines }}"
|
41
roles/nfc_kubernetes/tasks/main.yaml
Normal file
41
roles/nfc_kubernetes/tasks/main.yaml
Normal file
@ -0,0 +1,41 @@
|
||||
---
|
||||
|
||||
- name: Install/Configure Kubernetes Prime Master Node
|
||||
ansible.builtin.include_tasks:
|
||||
file: install.yaml
|
||||
tags:
|
||||
- always
|
||||
when:
|
||||
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
and
|
||||
nfc_role_kubernetes_prime | bool
|
||||
and
|
||||
not kubernetes_installed | default(false)
|
||||
|
||||
|
||||
- name: Install/Configure Kubernetes on remaining Master Nodes
|
||||
ansible.builtin.include_tasks:
|
||||
file: install.yaml
|
||||
tags:
|
||||
- always
|
||||
when:
|
||||
kubernetes_config.cluster.prime.name | default(inventory_hostname) != inventory_hostname
|
||||
and
|
||||
nfc_role_kubernetes_master | bool
|
||||
and
|
||||
not kubernetes_installed | default(false)
|
||||
|
||||
|
||||
- name: Install/Configure Kubernetes on Worker Nodes
|
||||
ansible.builtin.include_tasks:
|
||||
file: install.yaml
|
||||
tags:
|
||||
- always
|
||||
when: >
|
||||
nfc_role_kubernetes_worker | bool
|
||||
and
|
||||
not nfc_role_kubernetes_prime | bool
|
||||
and
|
||||
not nfc_role_kubernetes_master | bool
|
||||
and
|
||||
not kubernetes_installed | default(false)
|
@ -0,0 +1,6 @@
|
||||
---
|
||||
apiVersion: operator.tigera.io/v1
|
||||
kind: APIServer
|
||||
metadata:
|
||||
name: default
|
||||
spec: {}
|
@ -0,0 +1,11 @@
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kubernetes-services-endpoint
|
||||
namespace: tigera-operator
|
||||
data:
|
||||
KUBERNETES_SERVICE_HOST: "
|
||||
{%- set octet = kubernetes_config.cluster.networking.ServiceSubnet | default(nfc_role_kubernetes_service_subnet) | split('.') -%}
|
||||
{{- octet[0] }}.{{- octet[1] }}.{{- octet[2] }}.1"
|
||||
KUBERNETES_SERVICE_PORT: '443'
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,17 @@
|
||||
---
|
||||
|
||||
apiVersion: crd.projectcalico.org/v1
|
||||
kind: FelixConfiguration
|
||||
metadata:
|
||||
name: default
|
||||
spec:
|
||||
# bpfConnectTimeLoadBalancing: TCP
|
||||
# bpfExternalServiceMode: DSR
|
||||
# bpfHostNetworkedNATWithoutCTLB: Enabled
|
||||
bpfLogLevel: ""
|
||||
floatingIPs: Disabled
|
||||
healthPort: 9099
|
||||
logSeverityScreen: Info
|
||||
reportingInterval: 0s
|
||||
wireguardEnabled: true
|
||||
wireguardEnabledV6: true
|
@ -0,0 +1,16 @@
|
||||
---
|
||||
|
||||
apiVersion: crd.projectcalico.org/v1
|
||||
kind: IPPool
|
||||
metadata:
|
||||
name: default-ipv4-ippool
|
||||
spec:
|
||||
allowedUses:
|
||||
- Workload
|
||||
- Tunnel
|
||||
blockSize: 26
|
||||
cidr: {{ kubernetes_config.cluster.networking.podSubnet | default(nfc_role_kubernetes_pod_subnet) }}
|
||||
ipipMode: Never
|
||||
natOutgoing: true
|
||||
nodeSelector: all()
|
||||
vxlanMode: Always
|
@ -0,0 +1,53 @@
|
||||
---
|
||||
|
||||
apiVersion: operator.tigera.io/v1
|
||||
kind: Installation
|
||||
metadata:
|
||||
name: default
|
||||
spec:
|
||||
calicoNetwork:
|
||||
bgp: Disabled
|
||||
containerIPForwarding: Enabled
|
||||
hostPorts: Enabled
|
||||
ipPools:
|
||||
- blockSize: 26
|
||||
cidr: {{ kubernetes_config.cluster.networking.podSubnet | default(nfc_role_kubernetes_pod_subnet) }}
|
||||
disableBGPExport: false
|
||||
encapsulation: VXLAN
|
||||
natOutgoing: Enabled
|
||||
nodeSelector: all()
|
||||
# linuxDataplane: Iptables
|
||||
linuxDataplane: BPF
|
||||
mtu: 0
|
||||
multiInterfaceMode: None
|
||||
nodeAddressAutodetectionV4:
|
||||
kubernetes: NodeInternalIP
|
||||
cni:
|
||||
ipam:
|
||||
type: Calico
|
||||
type: Calico
|
||||
componentResources:
|
||||
- componentName: Node
|
||||
resourceRequirements:
|
||||
requests:
|
||||
cpu: 250m
|
||||
controlPlaneReplicas: 3
|
||||
flexVolumePath: None
|
||||
kubeletVolumePluginPath: None
|
||||
nodeUpdateStrategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 0
|
||||
maxUnavailable: 1
|
||||
type: RollingUpdate
|
||||
nonPrivileged: Disabled
|
||||
serviceCIDRs:
|
||||
- {{ kubernetes_config.cluster.networking.ServiceSubnet | default(nfc_role_kubernetes_service_subnet) }}
|
||||
typhaDeployment:
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
tolerations:
|
||||
- effect: NoExecute
|
||||
key: CriticalAddonsOnly
|
||||
value: "true"
|
||||
variant: Calico
|
@ -1,4 +1,11 @@
|
||||
---
|
||||
# Depreciated:
|
||||
# Calico is being migrated to use the calico operator.
|
||||
# in a near future release, this method of deploying calico
|
||||
# will be removed. use tag `operator_migrate_calico` to migrate
|
||||
# and tag `operator_calico` to keep.
|
||||
#
|
||||
#
|
||||
# URL: https://github.com/projectcalico/calico/blob/8f2548a71ddc4fbe2497a0c20a3b24fc7a165851/manifests/calico.yaml
|
||||
# Source: calico/templates/calico-kube-controllers.yaml
|
||||
# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict
|
||||
@ -4774,13 +4781,13 @@ spec:
|
||||
value: "autodetect"
|
||||
# Enable IPIP
|
||||
- name: CALICO_IPV4POOL_IPIP
|
||||
value: "Always"
|
||||
value: "Never"
|
||||
# Enable or Disable VXLAN on the default IP pool.
|
||||
- name: CALICO_IPV4POOL_VXLAN
|
||||
value: "Never"
|
||||
value: "Always"
|
||||
# Enable or Disable VXLAN on the default IPv6 IP pool.
|
||||
- name: CALICO_IPV6POOL_VXLAN
|
||||
value: "Never"
|
||||
value: "Always"
|
||||
# Set MTU for tunnel device used if ipip is enabled
|
||||
- name: FELIX_IPINIPMTU
|
||||
valueFrom:
|
||||
@ -4803,7 +4810,7 @@ spec:
|
||||
# chosen from this range. Changing this value after installation will have
|
||||
# no effect. This should fall within `--cluster-cidr`.
|
||||
- name: CALICO_IPV4POOL_CIDR
|
||||
value: "{{ KubernetesPodSubnet }}"
|
||||
value: "{{ nfc_role_kubernetes_pod_subnet }}"
|
||||
# Disable file logging so `kubectl logs` works.
|
||||
- name: CALICO_DISABLE_FILE_LOGGING
|
||||
value: "true"
|
||||
@ -4815,6 +4822,8 @@ spec:
|
||||
value: "false"
|
||||
- name: FELIX_HEALTHENABLED
|
||||
value: "true"
|
||||
- name: IP_AUTODETECTION_METHOD
|
||||
value: kubernetes-internal-ip
|
||||
securityContext:
|
||||
privileged: true
|
||||
resources:
|
||||
@ -4831,7 +4840,7 @@ spec:
|
||||
command:
|
||||
- /bin/calico-node
|
||||
- -felix-live
|
||||
- -bird-live
|
||||
#- -bird-live
|
||||
periodSeconds: 10
|
||||
initialDelaySeconds: 10
|
||||
failureThreshold: 6
|
||||
@ -4841,7 +4850,7 @@ spec:
|
||||
command:
|
||||
- /bin/calico-node
|
||||
- -felix-ready
|
||||
- -bird-ready
|
||||
#- -bird-ready
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 10
|
||||
volumeMounts:
|
328
roles/nfc_kubernetes/templates/iptables-kubernetes.rules.j2
Normal file
328
roles/nfc_kubernetes/templates/iptables-kubernetes.rules.j2
Normal file
@ -0,0 +1,328 @@
|
||||
#
|
||||
# IP Tables Firewall Rules for Kubernetes
|
||||
#
|
||||
# Managed By ansible/collection/kubernetes
|
||||
#
|
||||
# Dont edit this file directly as it will be overwritten. To grant a host API access
|
||||
# edit the cluster config, adding the hostname/ip to path kubernetes_config.cluster.access
|
||||
#
|
||||
# This file is periodicly called by cron
|
||||
#
|
||||
|
||||
{% set data = namespace(firewall_rules=[]) -%}
|
||||
|
||||
{%- if ansible_host is regex('^[a-z]') and ':' not in ansible_host -%} {#- Convert DNs name to IP Address -#}
|
||||
|
||||
{%- if ipv6 | default(false) -%}
|
||||
|
||||
{%- set ansible_host = query('community.dns.lookup', ansible_host + '.', type='AAAA' ) -%}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- set ansible_host = query('community.dns.lookup', ansible_host + '.', type='A' ) -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if ansible_host | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
|
||||
{%- set ansible_host = ansible_host | from_yaml_all | list -%}
|
||||
|
||||
{%- set ansible_host = ansible_host[0] -%}
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- for kubernetes_host in groups[kubernetes_config.cluster.group_name | default('me_is_optional')] | default([]) -%}
|
||||
|
||||
{%- set kubernetes_host = hostvars[kubernetes_host].ansible_host -%}
|
||||
|
||||
{%- if kubernetes_host is regex('^[a-z]') and ':' not in kubernetes_host -%} {#- Convert DNs name to IP Address -#}
|
||||
|
||||
{%- if ipv6 | default(false) -%}
|
||||
|
||||
{%- set kubernetes_host = query('community.dns.lookup', kubernetes_host + '.', type='AAAA' ) -%}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- set kubernetes_host = query('community.dns.lookup', kubernetes_host + '.', type='A' ) -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if
|
||||
kubernetes_host is iterable
|
||||
and
|
||||
kubernetes_host is not string
|
||||
-%} {#- Convert dns lookup to list, and select the first item -#}
|
||||
{%- set kubernetes_host = kubernetes_host | from_yaml_all | list -%}
|
||||
|
||||
{%- set kubernetes_host = kubernetes_host[0] | default('') -%}
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if kubernetes_host != '' -%}
|
||||
|
||||
{%- for master_host in groups['kubernetes_master'] | default([]) -%}
|
||||
|
||||
{%- if master_host in groups[kubernetes_config.cluster.group_name | default('me_is_optional')] | default([]) -%}
|
||||
|
||||
{%- set master_host = hostvars[master_host].ansible_host -%}
|
||||
|
||||
{%- if master_host is regex('^[a-z]') and ':' not in master_host -%} {#- Convert DNs name to IP Address -#}
|
||||
|
||||
{%- if ipv6 | default(false) -%}
|
||||
|
||||
{%- set master_host = query('community.dns.lookup', master_host + '.', type='AAAA' ) -%}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- set master_host = query('community.dns.lookup', master_host + '.', type='A' ) -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if master_host | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
|
||||
{%- set master_host = master_host | from_yaml_all | list -%}
|
||||
|
||||
{%- set master_host = master_host[0] -%}
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
|
||||
{%- if nfc_role_kubernetes_master | default(false) | bool -%}
|
||||
|
||||
{%- if
|
||||
master_host == kubernetes_host
|
||||
and
|
||||
master_host != ansible_host
|
||||
and
|
||||
(
|
||||
(
|
||||
ipv6 | default(false)
|
||||
and
|
||||
':' in master_host
|
||||
)
|
||||
or
|
||||
(
|
||||
not ipv6 | default(false)
|
||||
and
|
||||
'.' in master_host
|
||||
)
|
||||
)
|
||||
-%}
|
||||
|
||||
{#- master hosts only -#}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-embedded-etcd -s ' + master_host + ' -j ACCEPT'] -%}
|
||||
{# {%- set data.firewall_rules = data.firewall_rules + ['-I INPUT -s ' + master_host + ' -p tcp -m multiport --dports 2380 -j ACCEPT'] -%} #}
|
||||
|
||||
{%- if '-I kubernetes-api -s ' + master_host + ' -j ACCEPT' not in data.firewall_rules -%}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-api -s ' + master_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endfor -%}
|
||||
|
||||
{%- if
|
||||
ansible_host != kubernetes_host
|
||||
and
|
||||
(
|
||||
(
|
||||
ipv6 | default(false)
|
||||
and
|
||||
':' in kubernetes_host
|
||||
)
|
||||
or
|
||||
(
|
||||
not ipv6 | default(false)
|
||||
and
|
||||
'.' in kubernetes_host
|
||||
)
|
||||
)
|
||||
-%}
|
||||
|
||||
{#- All cluster Hosts -#}
|
||||
|
||||
{%- if
|
||||
nfc_role_kubernetes_master | default(false) | bool
|
||||
and
|
||||
kubernetes_host not in groups['kubernetes_master']
|
||||
and
|
||||
'-I kubernetes-api -s ' + kubernetes_host + ' -j ACCEPT' not in data.firewall_rules
|
||||
-%}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-api -s ' + kubernetes_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-flannel-vxlan -s ' + kubernetes_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-kubelet-metrics -s ' + kubernetes_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-flannel-wg-four -s ' + kubernetes_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- if false -%}{# see IPv6 is disabled #}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-flannel-wg-six -s ' + kubernetes_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if false -%}{# see Installation-manifest-Calico_Cluster.yaml.j2 bgp is disabled #}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-calico-bgp -s ' + kubernetes_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-calico-typha -s ' + kubernetes_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- if nfc_kubernetes_enable_metallb | default(false) -%}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I metallb-l2-tcp -s ' + kubernetes_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I metallb-l2-udp -s ' + kubernetes_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endfor -%}
|
||||
|
||||
{%- if nfc_role_kubernetes_master | default(false) | bool -%}
|
||||
|
||||
{%- if host_external_ip is defined -%}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-api -s ' + host_external_ip + ' -m comment --comment "hosts configured external IP" -j ACCEPT'] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- for api_client in kubernetes_config.cluster.access | default([]) -%}
|
||||
|
||||
{%- if api_client is regex('^[a-z]') and ':' not in api_client -%} {#- Convert DNs name to IP Address -#}
|
||||
|
||||
{%- set api_client_dns_name = api_client -%}
|
||||
|
||||
{%- if ipv6 | default(false) -%}
|
||||
|
||||
{%- set api_client = query('community.dns.lookup', api_client + '.', type='AAAA' ) -%}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- set api_client = query('community.dns.lookup', api_client + '.', type='A' ) -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if api_client | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
|
||||
|
||||
{%- set api_client = api_client | from_yaml_all | list -%}
|
||||
|
||||
{%- set api_client = api_client[0] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
|
||||
{%- if
|
||||
api_client != ansible_host
|
||||
and
|
||||
(
|
||||
(
|
||||
ipv6 | default(false)
|
||||
and
|
||||
':' in api_client
|
||||
)
|
||||
or
|
||||
(
|
||||
not ipv6 | default(false)
|
||||
and
|
||||
'.' in api_client
|
||||
)
|
||||
)
|
||||
-%}
|
||||
|
||||
{#- Hosts allowed to access API -#}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-api -s ' + api_client + ' -m comment --comment "host: ' + api_client_dns_name | default(api_client) + '" -j ACCEPT'] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endfor %}
|
||||
|
||||
{%- endif %}
|
||||
|
||||
*filter
|
||||
|
||||
{# -N kubernetes-embedded-etcd
|
||||
-A kubernetes-embedded-etcd -j RETURN
|
||||
|
||||
-A INPUT -p tcp -m multiport --dports 2379,2380 -m comment --comment "etcd. Servers only" -j kubernetes-embedded-etcd
|
||||
|
||||
|
||||
-N kubernetes-api
|
||||
-A kubernetes-api -j RETURN
|
||||
|
||||
-A INPUT -p tcp --dport 6443 -m comment --comment "Kubernetes API access. All Cluster hosts and end users" -j kubernetes-api
|
||||
|
||||
|
||||
-N kubernetes-flannel-vxlan
|
||||
-A kubernetes-flannel-vxlan -j RETURN
|
||||
|
||||
-A INPUT -p udp --dport 8472 -m comment --comment "Flannel. All cluster hosts" -j kubernetes-flannel-vxlan
|
||||
|
||||
|
||||
-N kubernetes-kubelet-metrics
|
||||
-A kubernetes-kubelet-metrics -j RETURN
|
||||
|
||||
-A INPUT -p tcp --dport 10250 -m comment --comment "Kubernetes Metrics. All cluster hosts" -j kubernetes-kubelet-metrics
|
||||
|
||||
|
||||
-N kubernetes-flannel-wg-four
|
||||
-A kubernetes-flannel-wg-four -j RETURN
|
||||
|
||||
-A INPUT -p udp --dport 51820 -m comment --comment "Flannel Wiregaurd IPv4. All cluster hosts" -j kubernetes-flannel-wg-four
|
||||
|
||||
|
||||
-N kubernetes-flannel-wg-six
|
||||
-A kubernetes-flannel-wg-six -j RETURN
|
||||
|
||||
-A INPUT -p udp --dport 51821 -m comment --comment "Flannel Wiregaurd IPv6. All cluster hosts" -j kubernetes-flannel-wg-six #}
|
||||
|
||||
|
||||
{% if data.firewall_rules | length | int > 0 -%}
|
||||
{% for rule in data.firewall_rules -%}
|
||||
{{ rule }}
|
||||
{% endfor -%}
|
||||
{% endif -%}
|
||||
|
||||
{#- #-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 6443 -j ACCEPT
|
||||
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 179 -j ACCEPT
|
||||
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 10250 -j ACCEPT
|
||||
|
||||
#-I INPUT -s 192.168.1.0/24 -p udp -m multiport --dports 4789 -j ACCEPT
|
||||
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 2379 -j ACCEPT
|
||||
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 2380 -j ACCEPT
|
||||
|
||||
|
||||
-I INPUT -p tcp -m multiport --dports 6443 -j ACCEPT
|
||||
-I INPUT -p tcp -m multiport --dports 179 -j ACCEPT
|
||||
-I INPUT -p tcp -m multiport --dports 10250 -j ACCEPT
|
||||
|
||||
-I INPUT -p udp -m multiport --dports 4789 -j ACCEPT
|
||||
-I INPUT -p tcp -m multiport --dports 2379 -j ACCEPT
|
||||
-I INPUT -p tcp -m multiport --dports 2380 -j ACCEPT #}
|
||||
|
||||
COMMIT
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
{# iptables -I kubernetes-api -s nww-au1.networkedweb.com -j ACCEPT #}
|
254
roles/nfc_kubernetes/templates/k3s-config.yaml.j2
Normal file
254
roles/nfc_kubernetes/templates/k3s-config.yaml.j2
Normal file
@ -0,0 +1,254 @@
|
||||
#
|
||||
# K3s Configuration for running Kubernetes
|
||||
#
|
||||
# Managed By ansible/role/nfc_kubernetes
|
||||
#
|
||||
# Dont edit this file directly as it will be overwritten.
|
||||
#
|
||||
|
||||
{%- if
|
||||
nfc_role_kubernetes_master
|
||||
or
|
||||
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
-%}
|
||||
|
||||
{%
|
||||
|
||||
set kube_apiserver_arg = [
|
||||
"audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log",
|
||||
"audit-log-maxage=" + kube_apiserver_arg_audit_log_maxage | string,
|
||||
"audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml",
|
||||
]
|
||||
|
||||
|
||||
-%}
|
||||
{%
|
||||
set servers_config = {
|
||||
"cluster-cidr": nfc_role_kubernetes_pod_subnet,
|
||||
"disable": [
|
||||
"traefik"
|
||||
],
|
||||
"disable-network-policy": true,
|
||||
"flannel-backend": "none",
|
||||
"service-cidr": nfc_role_kubernetes_service_subnet
|
||||
}
|
||||
-%}
|
||||
|
||||
{%- if nfc_role_kubernetes_etcd_enabled -%}
|
||||
|
||||
{%- set servers_config = servers_config | combine({
|
||||
"etcd-snapshot-retention": kubernetes_etcd_snapshot_retention | int,
|
||||
"etcd-snapshot-schedule-cron": kubernetes_etcd_snapshot_cron_schedule | string,
|
||||
}) -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if
|
||||
kubernetes_config.cluster.domain_name | default(nfc_role_kubernetes_cluster_domain) is defined
|
||||
and
|
||||
kubernetes_config.cluster.domain_name | default(nfc_role_kubernetes_cluster_domain) != ''
|
||||
-%}
|
||||
|
||||
{%- set servers_config = servers_config | combine({
|
||||
"cluster-domain": kubernetes_config.cluster.domain_name | default(nfc_role_kubernetes_cluster_domain)
|
||||
}) -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if kubernetes_config.cluster.oidc.enabled | default(nfc_role_kubernetes_oidc_enabled) | default(false) | bool -%}
|
||||
|
||||
{%-
|
||||
set kube_apiserver_arg = kube_apiserver_arg + [
|
||||
"oidc-client-id=" + kubernetes_config.cluster.oidc.client_id,
|
||||
"oidc-groups-claim=" + kubernetes_config.cluster.oidc.groups_claim,
|
||||
"oidc-issuer-url=" + kubernetes_config.cluster.oidc.issuer_url,
|
||||
"oidc-username-claim=" + kubernetes_config.cluster.oidc.username_claim
|
||||
] -%}
|
||||
|
||||
{%- if kubernetes_config.cluster.oidc.oidc_username_prefix | default('') != '' -%}
|
||||
|
||||
{%- set kube_apiserver_arg = kube_apiserver_arg + [
|
||||
"oidc-username-prefix=" + kubernetes_config.cluster.oidc.oidc_username_prefix
|
||||
] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if kubernetes_config.cluster.oidc.groups_prefix | default('') != '' -%}
|
||||
|
||||
{%- set kube_apiserver_arg = kube_apiserver_arg + [
|
||||
"oidc-groups-prefix=" + kubernetes_config.cluster.oidc.groups_prefix
|
||||
]
|
||||
-%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
|
||||
{%- if (
|
||||
nfc_kubernetes_enable_metallb | default(false)
|
||||
or
|
||||
not nfc_kubernetes_enable_servicelb | default(false)
|
||||
) -%}
|
||||
|
||||
{%- set disable = servers_config.disable + [ "servicelb" ] -%}
|
||||
|
||||
{%
|
||||
set servers_config = servers_config | combine({
|
||||
"disable": disable
|
||||
})
|
||||
-%}
|
||||
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if (
|
||||
not nfc_kubernetes_enable_metallb | default(false)
|
||||
and
|
||||
nfc_kubernetes_enable_servicelb | default(false)
|
||||
) -%}
|
||||
|
||||
{%- set servers_config = servers_config | combine({
|
||||
"servicelb-namespace": kubernetes_config.cluster.networking.service_load_balancer_namespace | default('kube-system')
|
||||
}) -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
|
||||
{# Combine Remaining Server Objects #}
|
||||
|
||||
{%
|
||||
set servers_config = servers_config | combine({
|
||||
"kube-apiserver-arg": kube_apiserver_arg
|
||||
})
|
||||
-%}
|
||||
|
||||
{%- endif -%}
|
||||
{# Eof Server Nodes #}
|
||||
|
||||
{# SoF All Nodes #}
|
||||
|
||||
{%- if inventory_hostname == 'localhost' -%}
|
||||
|
||||
{%- set node_name = hostname_to_check.stdout -%}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- set node_name = inventory_hostname -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%
|
||||
|
||||
set all_nodes_config = {
|
||||
"kubelet-arg": [
|
||||
"system-reserved=cpu=" + kubelet_arg_system_reserved_cpu + ",memory=" + kubelet_arg_system_reserved_memory +
|
||||
",ephemeral-storage=" + kubelet_arg_system_reserved_storage
|
||||
],
|
||||
"node-name": node_name,
|
||||
}
|
||||
|
||||
-%}
|
||||
|
||||
|
||||
{%- if groups[kubernetes_config.cluster.group_name | default('make_me_optional')] | default([]) | list | length > 0 -%}
|
||||
|
||||
{%- if k3s_installed.rc == 0 -%}
|
||||
|
||||
{%- set ns = namespace(server=[]) -%}
|
||||
|
||||
{%- for cluster_node in groups[kubernetes_config.cluster.group_name] -%}
|
||||
|
||||
{%- if cluster_node in groups['kubernetes_master'] | default([]) -%}
|
||||
|
||||
{%- if hostvars[cluster_node].host_external_ip is defined -%}
|
||||
|
||||
{%- if
|
||||
hostvars[cluster_node].host_external_ip != ansible_default_ipv4.address
|
||||
and
|
||||
cluster_node == inventory_hostname
|
||||
-%} {# Server self, use internal ip if external ip exists #}
|
||||
|
||||
{%- set server_node = ansible_default_ipv4.address -%}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- set server_node = hostvars[cluster_node].host_external_ip -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- set server_node = hostvars[cluster_node].ansible_host -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- set ns.server = (ns.server | default([])) + [
|
||||
"https://" + server_node + ":6443"
|
||||
] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endfor -%}
|
||||
|
||||
{%- set all_nodes_config = all_nodes_config | combine({
|
||||
"server": ns.server,
|
||||
}) -%}
|
||||
|
||||
{%- elif
|
||||
kubernetes_config.cluster.prime.name != inventory_hostname
|
||||
and
|
||||
k3s_installed.rc == 1
|
||||
-%}
|
||||
|
||||
{%- set server = (server | default([])) + [
|
||||
"https://" + hostvars[kubernetes_config.cluster.prime.name].ansible_host + ":6443"
|
||||
] -%}
|
||||
|
||||
{%- set all_nodes_config = all_nodes_config | combine({
|
||||
"server": server,
|
||||
}) -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
|
||||
|
||||
{%- if
|
||||
host_external_ip is defined
|
||||
and
|
||||
ansible_default_ipv4.address != host_external_ip
|
||||
-%}
|
||||
|
||||
{%- set all_nodes_config = all_nodes_config | combine({
|
||||
"node-external-ip": host_external_ip
|
||||
}) -%}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- set all_nodes_config = all_nodes_config | combine({
|
||||
"node-ip": ansible_default_ipv4.address
|
||||
}) -%}
|
||||
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{# EoF All Nodes #}
|
||||
|
||||
|
||||
{%- if
|
||||
nfc_role_kubernetes_master
|
||||
or
|
||||
kubernetes_config.cluster.prime.name | default(inventory_hostname) == inventory_hostname
|
||||
-%}
|
||||
|
||||
{%- set servers_config = servers_config | combine( all_nodes_config ) -%}
|
||||
|
||||
{{ servers_config | to_nice_yaml(indent=2) }}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{{ all_nodes_config | to_nice_yaml(indent=2) }}
|
||||
|
||||
{%- endif -%}
|
@ -90,16 +90,14 @@ metadata:
|
||||
app.kubernetes.io/version: ''
|
||||
name: authorization:namespace:owner
|
||||
rules:
|
||||
- apiGroups: # Read-only access to resrouces
|
||||
- apiGroups: # Read-Write access to resrouces
|
||||
- "*"
|
||||
resources:
|
||||
- awx
|
||||
- cronjobs
|
||||
- daemonset
|
||||
- deployments
|
||||
- helmcharts
|
||||
- helmchartconfigs
|
||||
- ingress
|
||||
- jobs
|
||||
- pods
|
||||
- pvc
|
||||
@ -109,7 +107,6 @@ rules:
|
||||
- serviceaccount
|
||||
- services
|
||||
- statefuleset
|
||||
- storageclasses
|
||||
- configmap
|
||||
verbs:
|
||||
- create
|
||||
@ -117,6 +114,49 @@ rules:
|
||||
- list
|
||||
- watch
|
||||
- delete
|
||||
- apiGroups: # Read-Remove access
|
||||
- "*"
|
||||
resources:
|
||||
- ingress
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- delete
|
||||
- apiGroups: # Read access
|
||||
- "*"
|
||||
resources:
|
||||
- storageclasses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
annotations:
|
||||
authorization/description: |-
|
||||
Provide access for adding/editing/removing Ingress'.
|
||||
|
||||
This role is designed for a user who is responsible for the
|
||||
cluster ingress.
|
||||
authorization/target: namespace
|
||||
name: authorization:cluster:ingress-admin
|
||||
rules:
|
||||
- apiGroups:
|
||||
- "*"
|
||||
resources:
|
||||
- pods
|
||||
- nodes
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- delete
|
||||
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
@ -171,6 +211,20 @@ subjects:
|
||||
- kind: Group
|
||||
name: technician
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: authorization:ingress-admin
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: authorization:cluster:ingress-admin
|
||||
subjects:
|
||||
- apiGroup: rbac.authorization.k8s.io
|
||||
kind: Group
|
||||
name: administrators
|
||||
|
||||
|
||||
# ---
|
||||
# kind: ClusterRoleBinding
|
16
roles/nfc_kubernetes/templates/kubevirt-cr.yaml.j2
Normal file
16
roles/nfc_kubernetes/templates/kubevirt-cr.yaml.j2
Normal file
@ -0,0 +1,16 @@
|
||||
---
|
||||
apiVersion: kubevirt.io/v1
|
||||
kind: KubeVirt
|
||||
metadata:
|
||||
name: kubevirt
|
||||
namespace: kubevirt
|
||||
spec:
|
||||
certificateRotateStrategy: {}
|
||||
configuration:
|
||||
developerConfiguration:
|
||||
featureGates: []
|
||||
customizeComponents: {}
|
||||
imagePullPolicy: IfNotPresent
|
||||
workloadUpdateStrategy:
|
||||
workloadUpdateMethods:
|
||||
- LiveMigrate
|
7572
roles/nfc_kubernetes/templates/kubevirt-operator.yaml.j2
Normal file
7572
roles/nfc_kubernetes/templates/kubevirt-operator.yaml.j2
Normal file
File diff suppressed because it is too large
Load Diff
90
roles/nfc_kubernetes/vars/firewall_rules.yaml
Normal file
90
roles/nfc_kubernetes/vars/firewall_rules.yaml
Normal file
@ -0,0 +1,90 @@
|
||||
---
|
||||
|
||||
kubernetes_chains:
|
||||
|
||||
- name: kubernetes-embedded-etcd
|
||||
chain: true
|
||||
table: INPUT
|
||||
protocol: tcp
|
||||
dest:
|
||||
port:
|
||||
- '2379'
|
||||
- '2380'
|
||||
comment: etcd. Servers only
|
||||
when: "{{ nfc_role_kubernetes_etcd_enabled }}"
|
||||
|
||||
- name: kubernetes-api
|
||||
chain: true
|
||||
table: INPUT
|
||||
protocol: tcp
|
||||
dest:
|
||||
port: '6443'
|
||||
comment: Kubernetes API access. All Cluster hosts and end users
|
||||
|
||||
- name: kubernetes-calico-bgp
|
||||
chain: true
|
||||
table: INPUT
|
||||
protocol: tcp
|
||||
dest:
|
||||
port: '179'
|
||||
comment: Kubernetes Calico BGP. All Cluster hosts and end users
|
||||
when: false # currently hard set to false. see Installation-manifest-Calico_Cluster.yaml.j2
|
||||
|
||||
- name: kubernetes-flannel-vxlan
|
||||
chain: true
|
||||
table: INPUT
|
||||
protocol: udp
|
||||
dest:
|
||||
port: '4789'
|
||||
comment: Flannel. All cluster hosts
|
||||
|
||||
- name: kubernetes-kubelet-metrics
|
||||
chain: true
|
||||
table: INPUT
|
||||
protocol: tcp
|
||||
dest:
|
||||
port: '10250'
|
||||
comment: Kubernetes Metrics. All cluster hosts
|
||||
|
||||
- name: kubernetes-flannel-wg-four
|
||||
chain: true
|
||||
table: INPUT
|
||||
protocol: udp
|
||||
dest:
|
||||
port: '51820'
|
||||
comment: Flannel Wiregaurd IPv4. All cluster hosts
|
||||
|
||||
- name: kubernetes-flannel-wg-six
|
||||
chain: true
|
||||
table: INPUT
|
||||
protocol: udp
|
||||
dest:
|
||||
port: '51821'
|
||||
comment: Flannel Wiregaurd IPv6. All cluster hosts
|
||||
when: false # ipv6 is disabled. see install.yaml sysctrl
|
||||
|
||||
- name: kubernetes-calico-typha
|
||||
chain: true
|
||||
table: INPUT
|
||||
protocol: tcp
|
||||
dest:
|
||||
port: '5473'
|
||||
comment: Calico networking with Typha enabled. Typha agent hosts.
|
||||
|
||||
- name: metallb-l2-tcp
|
||||
chain: true
|
||||
table: INPUT
|
||||
protocol: tcp
|
||||
dest:
|
||||
port: '7946'
|
||||
comment: MetalLB Gossip
|
||||
when: "{{ nfc_kubernetes_enable_metallb }}"
|
||||
|
||||
- name: metallb-l2-udp
|
||||
chain: true
|
||||
table: INPUT
|
||||
protocol: udp
|
||||
dest:
|
||||
port: '7946'
|
||||
comment: MetalLB Gossip
|
||||
when: "{{ nfc_kubernetes_enable_metallb }}"
|
@ -1,303 +0,0 @@
|
||||
---
|
||||
|
||||
- name: "{{ role_name }} Install Software"
|
||||
include_role:
|
||||
name: nfc_common
|
||||
vars:
|
||||
common_gather_facts: false
|
||||
aptSigningKeys:
|
||||
- name: docker
|
||||
url: https://download.docker.com/linux/debian/gpg
|
||||
save_directory: /usr/share/keyrings
|
||||
file_extension: asc
|
||||
|
||||
- name: kubernetes
|
||||
url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
|
||||
save_directory: /usr/share/keyrings
|
||||
file_extension: asc
|
||||
|
||||
aptRepositories:
|
||||
- name: docker
|
||||
repo: deb [arch={{ dynamic_processor_architecture }} signed-by=/usr/share/keyrings/docker.asc] http://download.docker.com/linux/{{ ansible_os_family | lower }} {{ ansible_lsb.codename | lower }} stable
|
||||
- name: kubernetes
|
||||
repo: deb [signed-by=/usr/share/keyrings/kubernetes.asc] http://apt.kubernetes.io/ kubernetes-xenial main
|
||||
|
||||
aptInstall:
|
||||
- name: gnupg2
|
||||
- name: apt-transport-https
|
||||
- name: software-properties-common
|
||||
- name: ca-certificates
|
||||
- name: iptables
|
||||
- name: python3-pip
|
||||
- name: python3-virtualenv
|
||||
|
||||
- name: containerd.io
|
||||
version: "{{ ContainerDioVersion }}"
|
||||
|
||||
- name: kubectl
|
||||
version: "{{ KubernetesVersion }}"
|
||||
- name: kubelet
|
||||
version: "{{ KubernetesVersion }}"
|
||||
- name: kubeadm
|
||||
version: "{{ KubernetesVersion }}"
|
||||
tags:
|
||||
- install
|
||||
|
||||
# containerd.io=1.6.22-1 kubectl=1.26.9-00 kubelet=1.26.9-00 kubeadm=1.26.9-00
|
||||
|
||||
- name: Remove swapfile from /etc/fstab
|
||||
mount:
|
||||
name: "{{ item }}"
|
||||
fstype: swap
|
||||
state: absent
|
||||
with_items:
|
||||
- swap
|
||||
- none
|
||||
when:
|
||||
- ansible_os_family == 'Debian' # ansible_lsb.codename = bullseye, ansible_lsb.major_release = 11
|
||||
tags:
|
||||
- install
|
||||
|
||||
|
||||
- name: Disable swap
|
||||
command: swapoff -a
|
||||
changed_when: true == false
|
||||
when:
|
||||
#- ansible_swaptotal_mb > 0
|
||||
- ansible_os_family == 'Debian'
|
||||
tags:
|
||||
- install
|
||||
|
||||
- name: Check an armbian os system
|
||||
stat:
|
||||
path: /etc/default/armbian-zram-config
|
||||
register: armbian_stat_result
|
||||
|
||||
|
||||
- name: Armbian Disable Swap
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
sed -i 's/\# SWAP=false/SWAP=false/g' /etc/default/armbian-zram-config;
|
||||
sed -i 's/ENABLED=true/ENABLED=false/g' /etc/default/armbian-zram-config;
|
||||
args:
|
||||
executable: bash
|
||||
changed_when: false
|
||||
# failed_when: false
|
||||
#notify: RebootHost # doesnt need to reboot as swapoff -a covers the deployment
|
||||
when: armbian_stat_result.stat.exists
|
||||
|
||||
|
||||
- name: Add the overlay module
|
||||
community.general.modprobe:
|
||||
name: overlay
|
||||
state: present
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
tags:
|
||||
- install
|
||||
|
||||
|
||||
- name: Add the br_netfilter module
|
||||
community.general.modprobe:
|
||||
name: br_netfilter
|
||||
state: present
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
tags:
|
||||
- install
|
||||
|
||||
- name: check if containerd installed
|
||||
ansible.builtin.shell:
|
||||
cmd: which containerd
|
||||
failed_when: false
|
||||
changed_when: false
|
||||
register: containerd_installed
|
||||
|
||||
|
||||
- name: "Containerd.io Started?"
|
||||
service:
|
||||
name: containerd
|
||||
state: started
|
||||
tags:
|
||||
- configure
|
||||
- install
|
||||
when: >
|
||||
ansible_os_family == 'Debian'
|
||||
and
|
||||
containerd_installed.rc | default(1) | int == 0
|
||||
|
||||
|
||||
- name: containerd load modules config
|
||||
template:
|
||||
src: "etc_module_containerd.conf"
|
||||
dest: /etc/modules-load.d/containerd.conf
|
||||
owner: root
|
||||
mode: 0700
|
||||
notify: "restart ContainerD"
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
tags:
|
||||
- install
|
||||
|
||||
|
||||
- name: Create containerD host directories.
|
||||
become_method: sudo
|
||||
become: yes
|
||||
file:
|
||||
path: /etc/containerd/certs.d/{{ item.name }}
|
||||
state: directory
|
||||
owner: "{{ ansible_user }}"
|
||||
group: "{{ ansible_user }}"
|
||||
mode: 0700
|
||||
with_items: "{{ containerd.repositories }}"
|
||||
tags:
|
||||
- install
|
||||
- containerRegistry
|
||||
|
||||
|
||||
- name: containerD registry host
|
||||
template:
|
||||
src: "containerd-registry-hosts.toml.j2"
|
||||
dest: /etc/containerd/certs.d/{{ item.name }}/hosts.toml
|
||||
owner: root
|
||||
mode: 0700
|
||||
notify: "restart ContainerD"
|
||||
with_items: "{{ containerd.repositories }}"
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
tags:
|
||||
- install
|
||||
- containerRegistry
|
||||
|
||||
|
||||
- name: containerD default config
|
||||
template:
|
||||
src: "etc_containerd_containerd.toml"
|
||||
dest: /etc/containerd/config.toml
|
||||
owner: root
|
||||
mode: 0700
|
||||
notify: "restart ContainerD"
|
||||
register: containerd_config
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
tags:
|
||||
- install
|
||||
- containerRegistry
|
||||
|
||||
|
||||
- name: Install required python modules
|
||||
ansible.builtin.pip:
|
||||
name: kubernetes
|
||||
state: forcereinstall
|
||||
#virtualenv: /tmp/venv_ansible
|
||||
when: inventory_hostname != 'op1'
|
||||
tags:
|
||||
- install
|
||||
|
||||
|
||||
- name: sysctl net.bridge.bridge-nf-call-ip6tables
|
||||
sysctl:
|
||||
name: net.bridge.bridge-nf-call-ip6tables
|
||||
value: '1'
|
||||
sysctl_set: yes
|
||||
state: present
|
||||
reload: yes
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
tags:
|
||||
- install
|
||||
|
||||
|
||||
- name: sysctl net.bridge.bridge-nf-call-iptables
|
||||
sysctl:
|
||||
name: net.bridge.bridge-nf-call-iptables
|
||||
value: '1'
|
||||
sysctl_set: yes
|
||||
state: present
|
||||
reload: yes
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
tags:
|
||||
- install
|
||||
|
||||
|
||||
- name: sysctl net.ipv4.ip_forward
|
||||
sysctl:
|
||||
name: net.ipv4.ip_forward
|
||||
value: '1'
|
||||
sysctl_set: yes
|
||||
state: present
|
||||
reload: yes
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
tags:
|
||||
- install
|
||||
|
||||
|
||||
# - name: Check if kubernetes has been Initialized
|
||||
# stat:
|
||||
# path: /etc/kubernetes/admin.conf
|
||||
# register: KubernetesInit
|
||||
# when:
|
||||
# - kubernetes_config.cluster.prime.name == inventory_hostname
|
||||
|
||||
|
||||
- name: check if iptables is installed
|
||||
ansible.builtin.shell: |-
|
||||
dpkg -s iptables &> /dev/null
|
||||
changed_when: true == false
|
||||
register: iptables_installed
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
tags:
|
||||
- install
|
||||
- iptables
|
||||
- firewall
|
||||
|
||||
|
||||
- name: Add kubernetes Firewall Rules - '/etc/iptables-kubernetes.rules'
|
||||
template:
|
||||
src: iptables-kubernetes.rules.j2
|
||||
dest: "/etc/iptables-kubernetes.rules"
|
||||
owner: root
|
||||
mode: 0700
|
||||
force: yes
|
||||
notify: "Apply Firewall Rules"
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
- iptables_installed.rc == 0
|
||||
tags:
|
||||
- install
|
||||
- iptables
|
||||
- firewall
|
||||
|
||||
|
||||
- name: File - '/etc/network/if-pre-up.d/firewall-kubernetes'
|
||||
template:
|
||||
src: firewall-kubernetes.j2
|
||||
dest: "/etc/network/if-pre-up.d/firewall-kubernetes"
|
||||
owner: root
|
||||
mode: 0700
|
||||
force: yes
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
- iptables_installed.rc == 0
|
||||
tags:
|
||||
- install
|
||||
- iptables
|
||||
- firewall
|
||||
|
||||
|
||||
- name: Create local workdir
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
mode: 0700
|
||||
delegate_to: localhost
|
||||
connection: local
|
||||
run_once: true
|
||||
changed_when: true == false
|
||||
with_items:
|
||||
- /tmp/ansible/
|
||||
tags:
|
||||
- always
|
@ -1,32 +0,0 @@
|
||||
---
|
||||
# kubernetes_installed
|
||||
|
||||
- name: K3s Install
|
||||
ansible.builtin.include_tasks:
|
||||
file: k3s/install.yaml
|
||||
when: >
|
||||
install_kubernetes | default(true) | bool
|
||||
and
|
||||
not kubernetes_installed | default(false) | bool
|
||||
|
||||
|
||||
- name: K3s Configure
|
||||
ansible.builtin.include_tasks:
|
||||
file: k3s/configure.yaml
|
||||
when: >
|
||||
install_kubernetes | default(true) | bool
|
||||
and
|
||||
kubernetes_installed | default(false) | bool
|
||||
|
||||
|
||||
- name: Wireguard Cluster Encryption
|
||||
ansible.builtin.include_tasks:
|
||||
file: k3s/wireguard.yaml
|
||||
when: >
|
||||
install_kubernetes | default(true) | bool
|
||||
and
|
||||
kubernetes_installed | default(false) | bool
|
||||
and
|
||||
not kubernetes_installed_encryption | default(false) | bool
|
||||
and
|
||||
kubernetes_config.cluster.networking.encrypt | default(false) | bool
|
@ -1,74 +0,0 @@
|
||||
---
|
||||
|
||||
- name: Local Container Registry
|
||||
ansible.builtin.copy:
|
||||
content: |
|
||||
#
|
||||
# Private Container Registries for Kubernetes
|
||||
#
|
||||
# Managed By ansible/role/nfc_kubernetes
|
||||
#
|
||||
# Dont edit this file directly as it will be overwritten.
|
||||
#
|
||||
|
||||
{% set registries = kubernetes_private_container_registry | default([]) -%}
|
||||
|
||||
{% if registries | length > 0 %}mirrors:
|
||||
{% for entry in registries %}
|
||||
|
||||
{{ entry.name }}:
|
||||
endpoint:
|
||||
- "{{ entry.url }}"
|
||||
|
||||
{%- endfor %}
|
||||
{% endif %}
|
||||
dest: /etc/rancher/k3s/registries.yaml
|
||||
owner: root
|
||||
mode: '700'
|
||||
# notify: "restart ContainerD"
|
||||
# with_items: "{{ containerd.repositories }}"
|
||||
# when:
|
||||
# ansible_os_family == 'Debian'
|
||||
# and
|
||||
# Kubernetes_private_container_registry | default([]) | length > 0
|
||||
|
||||
|
||||
- name: Additional config files
|
||||
ansible.builtin.copy:
|
||||
content: |
|
||||
{{ item.content }}
|
||||
dest: "{{ item.path }}/{{ item.name }}"
|
||||
mode: '740'
|
||||
owner: root
|
||||
group: root
|
||||
loop: "{{ k3s.files }}"
|
||||
|
||||
|
||||
- name: Copy Templates
|
||||
ansible.builtin.template:
|
||||
src: "{{ item.src }}"
|
||||
dest: "{{ item.dest }}"
|
||||
owner: root
|
||||
mode: '700'
|
||||
force: true
|
||||
notify: "{{ item.notify | default(omit) }}"
|
||||
loop: "{{ templates_to_apply }}"
|
||||
vars:
|
||||
templates_to_apply:
|
||||
- src: "calico.yaml.j2"
|
||||
dest: /var/lib/rancher/k3s/server/manifests/calico.yaml
|
||||
|
||||
- src: kubernetes-manifest-rbac.yaml.j2
|
||||
dest: /var/lib/rancher/k3s/server/manifests/rbac-authorization-common.yaml
|
||||
|
||||
- src: iptables-kubernetes.rules.j2
|
||||
dest: "/etc/iptables.rules.d/iptables-kubernetes.rules"
|
||||
notify: firewall_reloader
|
||||
|
||||
- src: k3s-registries.yaml.j2
|
||||
dest: /etc/rancher/k3s/registries.yaml
|
||||
notify: kubernetes_restart
|
||||
|
||||
- src: k3s-config.yaml.j2
|
||||
dest: /etc/rancher/k3s/config.yaml
|
||||
notify: kubernetes_restart
|
@ -1,188 +0,0 @@
|
||||
---
|
||||
- name: Install Software
|
||||
ansible.builtin.include_role:
|
||||
name: nfc_common
|
||||
vars:
|
||||
common_gather_facts: false
|
||||
aptInstall:
|
||||
- name: curl
|
||||
- name: iptables
|
||||
|
||||
|
||||
- name: Create Required directories
|
||||
ansible.builtin.file:
|
||||
name: "{{ item.name }}"
|
||||
state: "{{ item.state }}"
|
||||
mode: "{{ item.mode }}"
|
||||
loop: "{{ dirs }}"
|
||||
vars:
|
||||
dirs:
|
||||
- name: /etc/rancher/k3s
|
||||
state: directory
|
||||
mode: 700
|
||||
- name: /var/lib/rancher/k3s/server/logs
|
||||
state: directory
|
||||
mode: 700
|
||||
- name: /var/lib/rancher/k3s/server/manifests
|
||||
state: directory
|
||||
mode: 700
|
||||
|
||||
|
||||
- name: Add sysctl net.ipv4.ip_forward
|
||||
ansible.posix.sysctl:
|
||||
name: net.ipv4.ip_forward
|
||||
value: '1'
|
||||
sysctl_set: true
|
||||
state: present
|
||||
reload: true
|
||||
notify: reboot_host
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
# On change reboot
|
||||
|
||||
|
||||
- name: Check if K3s Installed
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
if [[ $(service k3s status) ]]; then exit 0; else exit 1; fi
|
||||
executable: /bin/bash
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
register: k3s_installed
|
||||
|
||||
|
||||
- name: Download K3s Binary
|
||||
ansible.builtin.uri:
|
||||
url: "{{ item.url }}"
|
||||
method: GET
|
||||
return_content: true
|
||||
status_code:
|
||||
- 200
|
||||
- 304
|
||||
dest: "{{ item.dest }}"
|
||||
mode: "744"
|
||||
register: k3s_download_files
|
||||
delegate_to: localhost
|
||||
# no_log: true
|
||||
when: ansible_os_family == 'Debian'
|
||||
loop: "{{ download_files }}"
|
||||
vars:
|
||||
ansible_connection: local
|
||||
download_files:
|
||||
- dest: /tmp/install.sh
|
||||
url: https://get.k3s.io
|
||||
- dest: "/tmp/k3s"
|
||||
url: "https://github.com/k3s-io/k3s/releases/download/v{{ KubernetesVersion + KubernetesVersion_k3s_prefix | urlencode }}/k3s"
|
||||
|
||||
|
||||
- name: "[TRACE] Downloaded File SHA256"
|
||||
ansible.builtin.set_fact:
|
||||
hash_sha256_k3s_downloaded_binary: "{{ lookup('ansible.builtin.file', '/tmp/k3s') | hash('sha256') | string }}"
|
||||
delegate_to: localhost
|
||||
|
||||
|
||||
- name: Existing k3s File hash
|
||||
ansible.builtin.stat:
|
||||
checksum_algorithm: sha256
|
||||
name: /usr/local/bin/k3s
|
||||
register: hash_sha256_k3s_existing_binary
|
||||
|
||||
|
||||
- name: Copy K3s binary to Host
|
||||
ansible.builtin.copy:
|
||||
src: "/tmp/k3s"
|
||||
dest: "/usr/local/bin/k3s"
|
||||
mode: '740'
|
||||
owner: root
|
||||
group: root
|
||||
when: hash_sha256_k3s_existing_binary.stat.checksum | default('0') != hash_sha256_k3s_downloaded_binary
|
||||
|
||||
- name: Copy install script to Host
|
||||
ansible.builtin.copy:
|
||||
src: "/tmp/install.sh"
|
||||
dest: "/tmp/install.sh"
|
||||
mode: '755'
|
||||
owner: root
|
||||
group: root
|
||||
# when: hash_sha256_k3s_existing_binary.stat.checksum | default('0') != hash_sha256_k3s_downloaded_binary
|
||||
|
||||
- name: Required Initial config files
|
||||
ansible.builtin.copy:
|
||||
content: |
|
||||
{{ item.content }}
|
||||
dest: "{{ item.path }}/{{ item.name }}"
|
||||
mode: '740'
|
||||
owner: root
|
||||
group: root
|
||||
loop: "{{ k3s.files }}"
|
||||
when: >
|
||||
kubernetes_config.cluster.prime.name == inventory_hostname
|
||||
|
||||
|
||||
- name: Copy Intial required templates
|
||||
ansible.builtin.template:
|
||||
src: "{{ item.src }}"
|
||||
dest: "{{ item.dest }}"
|
||||
owner: root
|
||||
mode: '700'
|
||||
force: true
|
||||
notify: "{{ item.notify | default(omit) }}"
|
||||
loop: "{{ templates_to_apply }}"
|
||||
vars:
|
||||
templates_to_apply:
|
||||
- src: k3s-config.yaml.j2
|
||||
dest: /etc/rancher/k3s/config.yaml
|
||||
notify: kubernetes_restart
|
||||
when: >
|
||||
kubernetes_config.cluster.prime.name == inventory_hostname
|
||||
|
||||
# - name: Templates IPv6
|
||||
# ansible.builtin.template:
|
||||
# src: iptables-kubernetes.rules.j2
|
||||
# dest: "/etc/ip6tables.rules.d/ip6tables-kubernetes.rules"
|
||||
# owner: root
|
||||
# mode: '700'
|
||||
# force: true
|
||||
# vars:
|
||||
# ipv6: true
|
||||
|
||||
|
||||
- name: Set IPTables to legacy mode
|
||||
ansible.builtin.command:
|
||||
cmd: update-alternatives --set iptables /usr/sbin/iptables-legacy
|
||||
changed_when: false
|
||||
|
||||
|
||||
- name: Server install K3s
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
# INSTALL_K3S_SKIP_DOWNLOAD=true \
|
||||
# INSTALL_K3S_VERSION="v{{ KubernetesVersion }}{{ KubernetesVersion_k3s_prefix }}" \
|
||||
# /tmp/install.sh
|
||||
curl -sfL https://get.k3s.io | \
|
||||
INSTALL_K3S_VERSION="v1.26.9+k3s1" \
|
||||
sh -
|
||||
failed_when: false
|
||||
# when: >
|
||||
# k3s_installed.rc | int == 1
|
||||
# and
|
||||
# Kubernetes_Master | default(false)
|
||||
when: Kubernetes_Master | default(false) | bool
|
||||
|
||||
|
||||
- name: Agent install K3s
|
||||
ansible.builtin.shell:
|
||||
cmd: |
|
||||
INSTALL_K3S_SKIP_DOWNLOAD=true \
|
||||
INSTALL_K3S_VERSION="v{{ KubernetesVersion }}{{ KubernetesVersion_k3s_prefix }}" \
|
||||
K3S_URL=https://{{ hostvars[kubernetes_config.cluster.prime.name].ansible_host }}:6443 \
|
||||
K3S_TOKEN={{ node_token }} \
|
||||
/tmp/install.sh
|
||||
when: >
|
||||
k3s_installed.rc | int == 1
|
||||
and
|
||||
not Kubernetes_Master | default(false) | bool
|
||||
|
||||
- name: Set Kubernetes Final Install Fact
|
||||
ansible.builtin.set_fact:
|
||||
kubernetes_installed: true
|
@ -1,22 +0,0 @@
|
||||
---
|
||||
- name: Install Wireguard
|
||||
ansible.builtin.apt:
|
||||
name:
|
||||
- wireguard
|
||||
update_cache: false
|
||||
when: >
|
||||
ansible_os_family == 'Debian'
|
||||
# and
|
||||
# kubernetes.networking.encrypt | default(false) | bool
|
||||
|
||||
|
||||
- name: Enable Cluster Encryption
|
||||
ansible.builtin.command:
|
||||
cmd: kubectl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabled":true,"wireguardEnabledV6":true}}'
|
||||
changed_when: false
|
||||
when: >
|
||||
kubernetes_config.cluster.prime.name == inventory_hostname
|
||||
|
||||
- name: Set Kubernetes Encryption Final Install Fact
|
||||
ansible.builtin.set_fact:
|
||||
kubernetes_installed_encryption: true
|
103
tasks/k8s.yaml
103
tasks/k8s.yaml
@ -1,103 +0,0 @@
|
||||
---
|
||||
- name: Common Tasks
|
||||
include_tasks: common.yaml
|
||||
# tags:
|
||||
# - install
|
||||
|
||||
- name: Check if kubernetes has been Initialized
|
||||
stat:
|
||||
path: /etc/kubernetes/admin.conf
|
||||
register: KubernetesInitialized
|
||||
tags:
|
||||
- always
|
||||
|
||||
- name: kubernetes prime
|
||||
include_tasks: prime.yaml
|
||||
when: kubernetes_config.cluster.prime.name == inventory_hostname
|
||||
|
||||
|
||||
- name: kubernetes workers
|
||||
include_tasks: workers.yaml
|
||||
when: kubernetes_config.cluster.prime.name != inventory_hostname
|
||||
|
||||
|
||||
- name: Add Kubernetes Node Labels
|
||||
kubernetes.core.k8s:
|
||||
definition:
|
||||
apiVersion: v1
|
||||
kind: Node
|
||||
metadata:
|
||||
name: "{{ inventory_hostname }}"
|
||||
labels:
|
||||
"{{ item | from_yaml_all }}"
|
||||
delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
|
||||
with_items:
|
||||
- "{{ kubernetes_config.hosts[inventory_hostname].labels }}"
|
||||
when:
|
||||
- ( kubernetes_config.hosts[inventory_hostname].labels is defined and
|
||||
kubernetes_config.hosts[inventory_hostname].labels|default('')|length > 0 )
|
||||
tags:
|
||||
- install
|
||||
- nodelabels
|
||||
|
||||
|
||||
- name: Add Node Taints
|
||||
kubernetes.core.k8s_taint:
|
||||
state: "present"
|
||||
name: "{{ inventory_hostname }}"
|
||||
taints:
|
||||
- "{{ item | from_yaml_all }}"
|
||||
delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
|
||||
with_items:
|
||||
- "{{ kubernetes_config.hosts[inventory_hostname].taints.present }}"
|
||||
when:
|
||||
- (kubernetes_config.hosts[inventory_hostname].taints.present is defined and
|
||||
kubernetes_config.hosts[inventory_hostname].taints.present|default('')|length > 0 )
|
||||
tags:
|
||||
- install
|
||||
- taints
|
||||
|
||||
|
||||
- name: Remove Node Taints
|
||||
kubernetes.core.k8s_taint:
|
||||
state: "absent"
|
||||
name: "{{ inventory_hostname }}"
|
||||
taints:
|
||||
- "{{ item | from_yaml_all }}"
|
||||
delegate_to: "{{ kubernetes_config.cluster.prime.name }}"
|
||||
with_items:
|
||||
- "{{ kubernetes_config.hosts[inventory_hostname].taints.absent }}"
|
||||
when:
|
||||
- ( kubernetes_config.hosts[inventory_hostname].taints.absent is defined and
|
||||
kubernetes_config.hosts[inventory_hostname].taints.absent|default('')|length > 0 )
|
||||
tags:
|
||||
- install
|
||||
- taints
|
||||
|
||||
|
||||
|
||||
|
||||
- name: Create Cluster Namespaces
|
||||
kubernetes.core.k8s:
|
||||
state: present
|
||||
definition:
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: "{{ item.name }}"
|
||||
labels:
|
||||
#app.kubernetes.io/version: # App version
|
||||
#app.kubernetes.io/component:
|
||||
#app.kubernetes.io/part-of:
|
||||
app.kubernetes.io/managed-by: Ansible
|
||||
#meta.kubernetes.io/description: "{{ item.description | default('') }}"
|
||||
meta.kubernetes.io/version: "{{ deployment_git_current_short_hash | default('') }}"
|
||||
with_items:
|
||||
- "{{ kubernetes_config.namespaces }}"
|
||||
when:
|
||||
( kubernetes_config.namespaces is defined and
|
||||
kubernetes_config.namespaces | default('') | length > 0 and
|
||||
kubernetes_config.cluster.prime.name == inventory_hostname )
|
||||
tags:
|
||||
- install
|
||||
- namespaces
|
@ -1,14 +0,0 @@
|
||||
---
|
||||
- name: Firewall Rules
|
||||
ansible.builtin.include_role:
|
||||
name: nfc_firewall
|
||||
vars:
|
||||
nfc_firewall_enabled_kubernetes: "{{ nfc_kubernetes.enable_firewall | default(false) | bool }}"
|
||||
|
||||
- name: K8s Cluster
|
||||
ansible.builtin.include_tasks: k8s.yaml
|
||||
when: kubernetes_type == 'k8s'
|
||||
|
||||
- name: K3s Cluster
|
||||
ansible.builtin.include_tasks: k3s.yaml
|
||||
when: kubernetes_type == 'k3s'
|
146
tasks/prime.yaml
146
tasks/prime.yaml
@ -1,146 +0,0 @@
|
||||
---
|
||||
|
||||
- name: initialize Kubernetes cluster
|
||||
block:
|
||||
- name: Intilizing Kubernetes Cluster
|
||||
#command: kubeadm init --pod-network-cidr "{{ KubernetesPodSubnet }}" --apiserver-advertise-address "{{ ansible_default_ipv4.address }}" --ignore-preflight-errors Mem --cri-socket=unix:///var/run/crio/crio.sock
|
||||
command: kubeadm init --pod-network-cidr "{{ KubernetesPodSubnet }}" --service-cidr "{{ KubernetesServiceSubnet }}" --apiserver-advertise-address "0.0.0.0" --ignore-preflight-errors Mem #--cri-socket=unix:///var/run/containerd/containerd.sock
|
||||
when:
|
||||
- not KubernetesInitialized.stat.exists
|
||||
|
||||
rescue:
|
||||
- name: Reset Kubeadmn
|
||||
ansible.builtin.shell: "{{ item }}"
|
||||
#register: kube_reset
|
||||
failed_when: item.rc != 0
|
||||
with_items:
|
||||
- kubeadm reset --force
|
||||
- rm -Rf /etc/cni/net.d
|
||||
|
||||
|
||||
- name: Check if kubernetes has been Initialized
|
||||
stat:
|
||||
path: /etc/kubernetes/admin.conf
|
||||
register: KubernetesInitialized
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
- name: fetch kubernetes health
|
||||
ansible.builtin.shell: " wget http://localhost:10248/healthz -q -O - || true"
|
||||
register: KubernetesHealth
|
||||
changed_when: true == false
|
||||
when: KubernetesInitialized.stat.exists
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
- name: set kubeernetes health fact
|
||||
set_fact:
|
||||
kube_health: "{{ KubernetesHealth.stdout | default(false) == 'ok' }}"
|
||||
changed_when: true == false
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
- name: Create directory for kube config.
|
||||
become_method: sudo
|
||||
become: yes
|
||||
file:
|
||||
#path: /home/{{ ansible_user }}/.kube
|
||||
path: ~/.kube
|
||||
state: directory
|
||||
owner: "{{ ansible_user }}"
|
||||
group: "{{ ansible_user }}"
|
||||
mode: 0700
|
||||
# when: Kubernetes_Master
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
- name: Copy Kube config for local user
|
||||
copy:
|
||||
remote_src: yes
|
||||
src: /etc/kubernetes/admin.conf
|
||||
#dest: /home/{{ ansible_user }}/.kube/config
|
||||
dest: ~/.kube/config
|
||||
owner: "{{ ansible_user }}"
|
||||
group: "{{ ansible_user }}"
|
||||
mode: 0700
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
- name: Add calico networking.
|
||||
template:
|
||||
src: "calico.yaml.j2"
|
||||
dest: /etc/kubernetes/manifests/calico.yaml
|
||||
owner: root
|
||||
mode: 0744
|
||||
|
||||
|
||||
- name: apply calico manifest
|
||||
command: kubectl apply -f /etc/kubernetes/manifests/calico.yaml
|
||||
tags:
|
||||
- install
|
||||
- manifest
|
||||
|
||||
|
||||
- name: create remote workdir
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
mode: 0700
|
||||
with_items:
|
||||
- /tmp/ansible/
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
- name: Create local workdir
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
mode: 0700
|
||||
delegate_to: localhost
|
||||
connection: local
|
||||
with_items:
|
||||
- /tmp/ansible/
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
- name: get join command
|
||||
ansible.builtin.shell: kubeadm token create --print-join-command > /tmp/ansible/join_kubernetes.sh
|
||||
changed_when: true == false
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
- name: download join command
|
||||
fetch:
|
||||
src: /tmp/ansible/join_kubernetes.sh
|
||||
dest: /tmp/ansible/
|
||||
flat: yes
|
||||
changed_when: true == false
|
||||
tags:
|
||||
- always
|
||||
|
||||
|
||||
# always:
|
||||
|
||||
# - name: remove remote workdir
|
||||
# file:
|
||||
# path: "{{ item }}"
|
||||
# state: absent
|
||||
# with_items:
|
||||
# - /tmp/ansible/join_kubernetes.sh
|
||||
# changed_when: true == false
|
||||
|
||||
# when:
|
||||
# #- Kubernetes_Prime
|
||||
# #- KubernetesInit.stat.exists
|
||||
# - kubernetes_config.cluster.prime.name == inventory_hostname
|
||||
|
||||
|
||||
|
@ -1,46 +0,0 @@
|
||||
---
|
||||
# - name: configure non-prime nodes - check node health
|
||||
# shell: "curl http://localhost:10248/healthz || true"
|
||||
# register: health
|
||||
# changed_when: true == false
|
||||
|
||||
# - set_fact:
|
||||
# kube_joined: "{{ health.stdout == 'ok' }}"
|
||||
# changed_when: true == false
|
||||
# # when:
|
||||
# # - not Kubernetes_Prime
|
||||
|
||||
- name: configure non-prime nodes - create remote workdir
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
mode: 0700
|
||||
changed_when: true == false
|
||||
with_items:
|
||||
- /tmp/ansible/
|
||||
|
||||
|
||||
- ansible.builtin.shell: " wget http://localhost:10248/healthz -q -O - || true"
|
||||
register: health
|
||||
changed_when: true == false
|
||||
|
||||
|
||||
- set_fact:
|
||||
kube_joined: "{{ health.stdout == 'ok' }}"
|
||||
changed_when: true == false
|
||||
|
||||
|
||||
- name: get join command from ansible controller
|
||||
copy:
|
||||
src: /tmp/ansible/join_kubernetes.sh
|
||||
dest: /tmp/ansible/join_kubernetes.sh
|
||||
mode: 0700
|
||||
changed_when: true == false
|
||||
when:
|
||||
- not kube_joined
|
||||
|
||||
|
||||
- name: configure non-prime nodes - join node to kubernetes cluster
|
||||
command: sh /tmp/ansible/join_kubernetes.sh
|
||||
when:
|
||||
- not kube_joined
|
@ -1,10 +0,0 @@
|
||||
#
|
||||
# {{ item.name }} Container Registry Configuration
|
||||
# Managed by: Ansible
|
||||
#
|
||||
|
||||
server = "{{ item.server }}"
|
||||
|
||||
[host."{{ item.url }}"]
|
||||
capabilities = {{ item.capabilities | from_yaml_all }}
|
||||
skip_verify = {{ item.skip_verify | default(false) | lower }}
|
@ -1,250 +0,0 @@
|
||||
disabled_plugins = []
|
||||
imports = []
|
||||
oom_score = 0
|
||||
plugin_dir = ""
|
||||
required_plugins = []
|
||||
root = "/var/lib/containerd"
|
||||
state = "/run/containerd"
|
||||
temp = ""
|
||||
version = 2
|
||||
|
||||
[cgroup]
|
||||
path = ""
|
||||
|
||||
[debug]
|
||||
address = ""
|
||||
format = ""
|
||||
gid = 0
|
||||
level = ""
|
||||
uid = 0
|
||||
|
||||
[grpc]
|
||||
address = "/run/containerd/containerd.sock"
|
||||
gid = 0
|
||||
max_recv_message_size = 16777216
|
||||
max_send_message_size = 16777216
|
||||
tcp_address = ""
|
||||
tcp_tls_ca = ""
|
||||
tcp_tls_cert = ""
|
||||
tcp_tls_key = ""
|
||||
uid = 0
|
||||
|
||||
[metrics]
|
||||
address = ""
|
||||
grpc_histogram = false
|
||||
|
||||
[plugins]
|
||||
|
||||
[plugins."io.containerd.gc.v1.scheduler"]
|
||||
deletion_threshold = 0
|
||||
mutation_threshold = 100
|
||||
pause_threshold = 0.02
|
||||
schedule_delay = "0s"
|
||||
startup_delay = "100ms"
|
||||
|
||||
[plugins."io.containerd.grpc.v1.cri"]
|
||||
device_ownership_from_security_context = false
|
||||
disable_apparmor = false
|
||||
disable_cgroup = false
|
||||
disable_hugetlb_controller = true
|
||||
disable_proc_mount = false
|
||||
disable_tcp_service = true
|
||||
enable_selinux = false
|
||||
enable_tls_streaming = false
|
||||
enable_unprivileged_icmp = false
|
||||
enable_unprivileged_ports = false
|
||||
ignore_image_defined_volumes = false
|
||||
max_concurrent_downloads = 3
|
||||
max_container_log_line_size = 16384
|
||||
netns_mounts_under_state_dir = false
|
||||
restrict_oom_score_adj = false
|
||||
sandbox_image = "registry.k8s.io/pause:3.6"
|
||||
selinux_category_range = 1024
|
||||
stats_collect_period = 10
|
||||
stream_idle_timeout = "4h0m0s"
|
||||
stream_server_address = "127.0.0.1"
|
||||
stream_server_port = "0"
|
||||
systemd_cgroup = false
|
||||
tolerate_missing_hugetlb_controller = true
|
||||
unset_seccomp_profile = ""
|
||||
|
||||
[plugins."io.containerd.grpc.v1.cri".cni]
|
||||
bin_dir = "/opt/cni/bin"
|
||||
conf_dir = "/etc/cni/net.d"
|
||||
conf_template = ""
|
||||
ip_pref = ""
|
||||
max_conf_num = 1
|
||||
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd]
|
||||
default_runtime_name = "runc"
|
||||
disable_snapshot_annotations = true
|
||||
discard_unpacked_layers = false
|
||||
ignore_rdt_not_enabled_errors = false
|
||||
no_pivot = false
|
||||
snapshotter = "overlayfs"
|
||||
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
|
||||
base_runtime_spec = ""
|
||||
cni_conf_dir = ""
|
||||
cni_max_conf_num = 0
|
||||
container_annotations = []
|
||||
pod_annotations = []
|
||||
privileged_without_host_devices = false
|
||||
runtime_engine = ""
|
||||
runtime_path = ""
|
||||
runtime_root = ""
|
||||
runtime_type = ""
|
||||
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]
|
||||
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
|
||||
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
|
||||
base_runtime_spec = ""
|
||||
cni_conf_dir = ""
|
||||
cni_max_conf_num = 0
|
||||
container_annotations = []
|
||||
pod_annotations = []
|
||||
privileged_without_host_devices = false
|
||||
runtime_engine = ""
|
||||
runtime_path = ""
|
||||
runtime_root = ""
|
||||
runtime_type = "io.containerd.runc.v2"
|
||||
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
|
||||
BinaryName = ""
|
||||
CriuImagePath = ""
|
||||
CriuPath = ""
|
||||
CriuWorkPath = ""
|
||||
IoGid = 0
|
||||
IoUid = 0
|
||||
NoNewKeyring = false
|
||||
NoPivotRoot = false
|
||||
Root = ""
|
||||
ShimCgroup = ""
|
||||
SystemdCgroup = true
|
||||
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
|
||||
base_runtime_spec = ""
|
||||
cni_conf_dir = ""
|
||||
cni_max_conf_num = 0
|
||||
container_annotations = []
|
||||
pod_annotations = []
|
||||
privileged_without_host_devices = false
|
||||
runtime_engine = ""
|
||||
runtime_path = ""
|
||||
runtime_root = ""
|
||||
runtime_type = ""
|
||||
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]
|
||||
|
||||
[plugins."io.containerd.grpc.v1.cri".image_decryption]
|
||||
key_model = "node"
|
||||
|
||||
[plugins."io.containerd.grpc.v1.cri".registry]
|
||||
config_path = "/etc/containerd/certs.d"
|
||||
|
||||
[plugins."io.containerd.grpc.v1.cri".registry.auths]
|
||||
|
||||
[plugins."io.containerd.grpc.v1.cri".registry.configs]
|
||||
|
||||
[plugins."io.containerd.grpc.v1.cri".registry.headers]
|
||||
|
||||
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
|
||||
|
||||
[plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
|
||||
tls_cert_file = ""
|
||||
tls_key_file = ""
|
||||
|
||||
[plugins."io.containerd.internal.v1.opt"]
|
||||
path = "/opt/containerd"
|
||||
|
||||
[plugins."io.containerd.internal.v1.restart"]
|
||||
interval = "10s"
|
||||
|
||||
[plugins."io.containerd.internal.v1.tracing"]
|
||||
sampling_ratio = 1.0
|
||||
service_name = "containerd"
|
||||
|
||||
[plugins."io.containerd.metadata.v1.bolt"]
|
||||
content_sharing_policy = "shared"
|
||||
|
||||
[plugins."io.containerd.monitor.v1.cgroups"]
|
||||
no_prometheus = false
|
||||
|
||||
[plugins."io.containerd.runtime.v1.linux"]
|
||||
no_shim = false
|
||||
runtime = "runc"
|
||||
runtime_root = ""
|
||||
shim = "containerd-shim"
|
||||
shim_debug = false
|
||||
|
||||
[plugins."io.containerd.runtime.v2.task"]
|
||||
platforms = ["linux/amd64"]
|
||||
sched_core = false
|
||||
|
||||
[plugins."io.containerd.service.v1.diff-service"]
|
||||
default = ["walking"]
|
||||
|
||||
[plugins."io.containerd.service.v1.tasks-service"]
|
||||
rdt_config_file = ""
|
||||
|
||||
[plugins."io.containerd.snapshotter.v1.aufs"]
|
||||
root_path = ""
|
||||
|
||||
[plugins."io.containerd.snapshotter.v1.btrfs"]
|
||||
root_path = ""
|
||||
|
||||
[plugins."io.containerd.snapshotter.v1.devmapper"]
|
||||
async_remove = false
|
||||
base_image_size = ""
|
||||
discard_blocks = false
|
||||
fs_options = ""
|
||||
fs_type = ""
|
||||
pool_name = ""
|
||||
root_path = ""
|
||||
|
||||
[plugins."io.containerd.snapshotter.v1.native"]
|
||||
root_path = ""
|
||||
|
||||
[plugins."io.containerd.snapshotter.v1.overlayfs"]
|
||||
root_path = ""
|
||||
upperdir_label = false
|
||||
|
||||
[plugins."io.containerd.snapshotter.v1.zfs"]
|
||||
root_path = ""
|
||||
|
||||
[plugins."io.containerd.tracing.processor.v1.otlp"]
|
||||
endpoint = ""
|
||||
insecure = false
|
||||
protocol = ""
|
||||
|
||||
[proxy_plugins]
|
||||
|
||||
[stream_processors]
|
||||
|
||||
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
|
||||
accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
|
||||
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
|
||||
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
|
||||
path = "ctd-decoder"
|
||||
returns = "application/vnd.oci.image.layer.v1.tar"
|
||||
|
||||
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
|
||||
accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
|
||||
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
|
||||
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
|
||||
path = "ctd-decoder"
|
||||
returns = "application/vnd.oci.image.layer.v1.tar+gzip"
|
||||
|
||||
[timeouts]
|
||||
"io.containerd.timeout.bolt.open" = "0s"
|
||||
"io.containerd.timeout.shim.cleanup" = "5s"
|
||||
"io.containerd.timeout.shim.load" = "5s"
|
||||
"io.containerd.timeout.shim.shutdown" = "3s"
|
||||
"io.containerd.timeout.task.state" = "2s"
|
||||
|
||||
[ttrpc]
|
||||
address = ""
|
||||
gid = 0
|
||||
uid = 0
|
@ -1,2 +0,0 @@
|
||||
overlay
|
||||
br_netfilter
|
@ -1,263 +0,0 @@
|
||||
#
|
||||
# IP Tables Firewall Rules for Kubernetes
|
||||
#
|
||||
# Managed By ansible/role/nfc_kubernetes
|
||||
#
|
||||
# Dont edit this file directly as it will be overwritten. To grant a host API access
|
||||
# edit the cluster config, adding the hostname/ip to path kubernetes_config.cluster.access
|
||||
#
|
||||
# This file is periodicly called by cron
|
||||
#
|
||||
|
||||
{% set data = namespace(firewall_rules=[]) -%}
|
||||
|
||||
{%- if ansible_host is regex('^[a-z]') and ':' not in ansible_host -%} {#- Convert DNs name to IP Address -#}
|
||||
|
||||
{%- if ipv6 | default(false) -%}
|
||||
|
||||
{%- set ansible_host = query('community.dns.lookup', ansible_host + '.', type='AAAA' ) -%}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- set ansible_host = query('community.dns.lookup', ansible_host + '.', type='A' ) -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if ansible_host | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
|
||||
{%- set ansible_host = ansible_host | from_yaml_all | list -%}
|
||||
|
||||
{%- set ansible_host = ansible_host[0] -%}
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- for kubernetes_host in groups[kubernetes_type] -%}
|
||||
|
||||
|
||||
{%- if kubernetes_host is regex('^[a-z]') and ':' not in kubernetes_host -%} {#- Convert DNs name to IP Address -#}
|
||||
|
||||
{%- if ipv6 | default(false) -%}
|
||||
|
||||
{%- set kubernetes_host = query('community.dns.lookup', kubernetes_host + '.', type='AAAA' ) -%}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- set kubernetes_host = query('community.dns.lookup', kubernetes_host + '.', type='A' ) -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if kubernetes_host | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
|
||||
{%- set kubernetes_host = kubernetes_host | from_yaml_all | list -%}
|
||||
|
||||
{%- set kubernetes_host = kubernetes_host[0] -%}
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
|
||||
{%- for master_host in groups['kubernetes_master'] -%}
|
||||
|
||||
|
||||
{%- if master_host is regex('^[a-z]') and ':' not in master_host -%} {#- Convert DNs name to IP Address -#}
|
||||
|
||||
{%- if ipv6 | default(false) -%}
|
||||
|
||||
{%- set master_host = query('community.dns.lookup', master_host + '.', type='AAAA' ) -%}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- set master_host = query('community.dns.lookup', master_host + '.', type='A' ) -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if master_host | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
|
||||
{%- set master_host = master_host | from_yaml_all | list -%}
|
||||
|
||||
{%- set master_host = master_host[0] -%}
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
|
||||
{%- if Kubernetes_Master | default(false) | bool -%}
|
||||
|
||||
{%- if
|
||||
master_host == kubernetes_host
|
||||
and
|
||||
master_host != ansible_host
|
||||
and
|
||||
(
|
||||
(
|
||||
ipv6 | default(false)
|
||||
and
|
||||
':' in master_host
|
||||
)
|
||||
or
|
||||
(
|
||||
not ipv6 | default(false)
|
||||
and
|
||||
'.' in master_host
|
||||
)
|
||||
)
|
||||
-%}
|
||||
|
||||
{#- master hosts only -#}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-embedded-etcd -s ' + master_host + ' -j ACCEPT'] -%}
|
||||
{# {%- set data.firewall_rules = data.firewall_rules + ['-I INPUT -s ' + master_host + ' -p tcp -m multiport --dports 2380 -j ACCEPT'] -%} #}
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-api -s ' + master_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endfor -%}
|
||||
|
||||
{%- if
|
||||
ansible_host != kubernetes_host
|
||||
and
|
||||
(
|
||||
(
|
||||
ipv6 | default(false)
|
||||
and
|
||||
':' in kubernetes_host
|
||||
)
|
||||
or
|
||||
(
|
||||
not ipv6 | default(false)
|
||||
and
|
||||
'.' in kubernetes_host
|
||||
)
|
||||
)
|
||||
-%}
|
||||
|
||||
{#- All cluster Hosts -#}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubelet-metrics -s ' + kubernetes_host + ' -j ACCEPT'] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endfor -%}
|
||||
|
||||
|
||||
{%- for api_client in kubernetes_config.cluster.access | default([]) -%}
|
||||
|
||||
{%- if api_client is regex('^[a-z]') and ':' not in api_client -%} {#- Convert DNs name to IP Address -#}
|
||||
|
||||
{%- set api_client_dns_name = api_client -%}
|
||||
|
||||
{%- if ipv6 | default(false) -%}
|
||||
|
||||
{%- set api_client = query('community.dns.lookup', api_client + '.', type='AAAA' ) -%}
|
||||
|
||||
{%- else -%}
|
||||
|
||||
{%- set api_client = query('community.dns.lookup', api_client + '.', type='A' ) -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- if api_client | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
|
||||
|
||||
{%- set api_client = api_client | from_yaml_all | list -%}
|
||||
|
||||
{%- set api_client = api_client[0] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
|
||||
{%- if
|
||||
api_client != ansible_host
|
||||
and
|
||||
(
|
||||
(
|
||||
ipv6 | default(false)
|
||||
and
|
||||
':' in api_client
|
||||
)
|
||||
or
|
||||
(
|
||||
not ipv6 | default(false)
|
||||
and
|
||||
'.' in api_client
|
||||
)
|
||||
)
|
||||
-%}
|
||||
|
||||
{#- Hosts allowed to access API -#}
|
||||
|
||||
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-api -s ' + api_client + ' -m comment --comment "host: ' + api_client_dns_name | default(api_client) + '" -j ACCEPT'] -%}
|
||||
|
||||
{%- endif -%}
|
||||
|
||||
{%- endfor %}
|
||||
|
||||
*filter
|
||||
|
||||
{# -N kubernetes-embedded-etcd
|
||||
-A kubernetes-embedded-etcd -j RETURN
|
||||
|
||||
-A INPUT -p tcp -m multiport --dports 2379,2380 -m comment --comment "etcd. Servers only" -j kubernetes-embedded-etcd
|
||||
|
||||
|
||||
-N kubernetes-api
|
||||
-A kubernetes-api -j RETURN
|
||||
|
||||
-A INPUT -p tcp --dport 6443 -m comment --comment "Kubernetes API access. All Cluster hosts and end users" -j kubernetes-api
|
||||
|
||||
|
||||
-N kubernetes-flannel-vxlan
|
||||
-A kubernetes-flannel-vxlan -j RETURN
|
||||
|
||||
-A INPUT -p udp --dport 8472 -m comment --comment "Flannel. All cluster hosts" -j kubernetes-flannel-vxlan
|
||||
|
||||
|
||||
-N kubernetes-kubelet-metrics
|
||||
-A kubernetes-kubelet-metrics -j RETURN
|
||||
|
||||
-A INPUT -p tcp --dport 10250 -m comment --comment "Kubernetes Metrics. All cluster hosts" -j kubernetes-kubelet-metrics
|
||||
|
||||
|
||||
-N kubernetes-flannel-wg-four
|
||||
-A kubernetes-flannel-wg-four -j RETURN
|
||||
|
||||
-A INPUT -p udp --dport 51820 -m comment --comment "Flannel Wiregaurd IPv4. All cluster hosts" -j kubernetes-flannel-wg-four
|
||||
|
||||
|
||||
-N kubernetes-flannel-wg-six
|
||||
-A kubernetes-flannel-wg-six -j RETURN
|
||||
|
||||
-A INPUT -p udp --dport 51821 -m comment --comment "Flannel Wiregaurd IPv6. All cluster hosts" -j kubernetes-flannel-wg-six #}
|
||||
|
||||
|
||||
{% if data.firewall_rules | length | int > 0 -%}
|
||||
{% for rule in data.firewall_rules -%}
|
||||
{{ rule }}
|
||||
{% endfor -%}
|
||||
{% endif -%}
|
||||
|
||||
{#- #-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 6443 -j ACCEPT
|
||||
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 179 -j ACCEPT
|
||||
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 10250 -j ACCEPT
|
||||
|
||||
#-I INPUT -s 192.168.1.0/24 -p udp -m multiport --dports 4789 -j ACCEPT
|
||||
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 2379 -j ACCEPT
|
||||
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 2380 -j ACCEPT
|
||||
|
||||
|
||||
-I INPUT -p tcp -m multiport --dports 6443 -j ACCEPT
|
||||
-I INPUT -p tcp -m multiport --dports 179 -j ACCEPT
|
||||
-I INPUT -p tcp -m multiport --dports 10250 -j ACCEPT
|
||||
|
||||
-I INPUT -p udp -m multiport --dports 4789 -j ACCEPT
|
||||
-I INPUT -p tcp -m multiport --dports 2379 -j ACCEPT
|
||||
-I INPUT -p tcp -m multiport --dports 2380 -j ACCEPT #}
|
||||
|
||||
COMMIT
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
{# iptables -I kubernetes-api -s nww-au1.networkedweb.com -j ACCEPT #}
|
@ -1,29 +0,0 @@
|
||||
#
|
||||
# K3s Configuration for running Kubernetes
|
||||
#
|
||||
# Managed By ansible/role/nfc_kubernetes
|
||||
#
|
||||
# Dont edit this file directly as it will be overwritten.
|
||||
#
|
||||
|
||||
flannel-backend: none
|
||||
cluster-cidr: "{{ KubernetesPodSubnet }}"
|
||||
cluster-init: true
|
||||
{% if not Kubernetes_Prime | default(false) | bool -%}server: https://{{ hostvars[kubernetes_config.cluster.prime.name].ansible_host }}:6443{% endif %}
|
||||
service-cidr: "{{ KubernetesServiceSubnet }}"
|
||||
disable-network-policy: true
|
||||
disable:
|
||||
- traefik
|
||||
kube-apiserver-arg:
|
||||
- audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log
|
||||
- audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml
|
||||
# - admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml
|
||||
{% if kubernetes_oidc.enabled | default(false) | bool -%}
|
||||
- oidc-issuer-url={{ kubernetes_oidc.issuer_url }}
|
||||
- oidc-client-id={{ kubernetes_oidc.client_id }}
|
||||
- oidc-username-claim={{ kubernetes_oidc.username_claim }}
|
||||
{% if kubernetes_oidc.oidc_username_prefix | default('') != '' -%} - oidc-username-prefix={{ kubernetes_oidc.oidc_username_prefix }}{% endif %}
|
||||
- oidc-groups-claim={{ kubernetes_oidc.groups_claim }}
|
||||
{% if kubernetes_oidc.groups_prefix | default('') != '' %} - oidc-groups-prefix={{ kubernetes_oidc.groups_prefix }}{% endif %}
|
||||
{% endif %}
|
||||
{% if host_external_ip | default('') %} node-external-ip: "{{ host_external_ip }}"{% endif %}
|
Submodule website-template updated: 992b54805b...f5a82d3604
Reference in New Issue
Block a user