chore: migrated from internal repo

!1 nofusscomputing/infrastructure/config!28
2023-10-27 21:47:03 +09:30
parent e45190fab4
commit 93b63308ef
30 changed files with 7326 additions and 0 deletions

templates/calico.yaml.j2 (Normal file, 4983 lines)

File diff suppressed because it is too large


@@ -0,0 +1,10 @@
#
# {{ item.name }} Container Registry Configuration
# Managed by: Ansible
#
server = "{{ item.server }}"
[host."{{ item.url }}"]
capabilities = {{ item.capabilities | from_yaml_all }}
skip_verify = {{ item.skip_verify | default(false) | lower }}
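For context, the item fields consumed by this template (name, server, url, capabilities, skip_verify) presumably come from a registry list defined in the inventory, plausibly the same kubernetes_private_container_registry list used by the registries.yaml template later in this commit. A minimal sketch of one entry, with every value an illustrative assumption:

# Hypothetical inventory entry -- names and values are examples only
kubernetes_private_container_registry:
  - name: registry.local                  # rendered into the file header comment
    server: "https://registry.local"      # rendered as server = "..."
    url: "https://registry.local/v2"      # rendered as the [host."..."] table name
    capabilities: ["pull", "resolve"]
    skip_verify: false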


@@ -0,0 +1,20 @@
{
  "cniVersion": "0.3.1",
  "name": "crio",
  "type": "bridge",
  "bridge": "cni0",
  "isGateway": true,
  "ipMasq": true,
  "hairpinMode": true,
  "ipam": {
    "type": "host-local",
    "routes": [
      { "dst": "0.0.0.0/0" },
      { "dst": "1100:200::1/24" }
    ],
    "ranges": [
      [{ "subnet": "{{ KubernetesPodSubnet }}" }],
      [{ "subnet": "1100:200::/24" }]
    ]
  }
}
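Only KubernetesPodSubnet is templated in this file. Assuming a pod subnet of 10.85.0.0/16 (an invented example value), the ipam.ranges block would render as:

"ranges": [
  [{ "subnet": "10.85.0.0/16" }],
  [{ "subnet": "1100:200::/24" }]
]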


@@ -0,0 +1,250 @@
disabled_plugins = []
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/containerd"
state = "/run/containerd"
temp = ""
version = 2
[cgroup]
path = ""
[debug]
address = ""
format = ""
gid = 0
level = ""
uid = 0
[grpc]
address = "/run/containerd/containerd.sock"
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
tcp_address = ""
tcp_tls_ca = ""
tcp_tls_cert = ""
tcp_tls_key = ""
uid = 0
[metrics]
address = ""
grpc_histogram = false
[plugins]
[plugins."io.containerd.gc.v1.scheduler"]
deletion_threshold = 0
mutation_threshold = 100
pause_threshold = 0.02
schedule_delay = "0s"
startup_delay = "100ms"
[plugins."io.containerd.grpc.v1.cri"]
device_ownership_from_security_context = false
disable_apparmor = false
disable_cgroup = false
disable_hugetlb_controller = true
disable_proc_mount = false
disable_tcp_service = true
enable_selinux = false
enable_tls_streaming = false
enable_unprivileged_icmp = false
enable_unprivileged_ports = false
ignore_image_defined_volumes = false
max_concurrent_downloads = 3
max_container_log_line_size = 16384
netns_mounts_under_state_dir = false
restrict_oom_score_adj = false
sandbox_image = "registry.k8s.io/pause:3.6"
selinux_category_range = 1024
stats_collect_period = 10
stream_idle_timeout = "4h0m0s"
stream_server_address = "127.0.0.1"
stream_server_port = "0"
systemd_cgroup = false
tolerate_missing_hugetlb_controller = true
unset_seccomp_profile = ""
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
conf_template = ""
ip_pref = ""
max_conf_num = 1
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "runc"
disable_snapshot_annotations = true
discard_unpacked_layers = false
ignore_rdt_not_enabled_errors = false
no_pivot = false
snapshotter = "overlayfs"
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
BinaryName = ""
CriuImagePath = ""
CriuPath = ""
CriuWorkPath = ""
IoGid = 0
IoUid = 0
NoNewKeyring = false
NoPivotRoot = false
Root = ""
ShimCgroup = ""
SystemdCgroup = true
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]
[plugins."io.containerd.grpc.v1.cri".image_decryption]
key_model = "node"
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
[plugins."io.containerd.grpc.v1.cri".registry.auths]
[plugins."io.containerd.grpc.v1.cri".registry.configs]
[plugins."io.containerd.grpc.v1.cri".registry.headers]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
tls_cert_file = ""
tls_key_file = ""
[plugins."io.containerd.internal.v1.opt"]
path = "/opt/containerd"
[plugins."io.containerd.internal.v1.restart"]
interval = "10s"
[plugins."io.containerd.internal.v1.tracing"]
sampling_ratio = 1.0
service_name = "containerd"
[plugins."io.containerd.metadata.v1.bolt"]
content_sharing_policy = "shared"
[plugins."io.containerd.monitor.v1.cgroups"]
no_prometheus = false
[plugins."io.containerd.runtime.v1.linux"]
no_shim = false
runtime = "runc"
runtime_root = ""
shim = "containerd-shim"
shim_debug = false
[plugins."io.containerd.runtime.v2.task"]
platforms = ["linux/amd64"]
sched_core = false
[plugins."io.containerd.service.v1.diff-service"]
default = ["walking"]
[plugins."io.containerd.service.v1.tasks-service"]
rdt_config_file = ""
[plugins."io.containerd.snapshotter.v1.aufs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.btrfs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.devmapper"]
async_remove = false
base_image_size = ""
discard_blocks = false
fs_options = ""
fs_type = ""
pool_name = ""
root_path = ""
[plugins."io.containerd.snapshotter.v1.native"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.overlayfs"]
root_path = ""
upperdir_label = false
[plugins."io.containerd.snapshotter.v1.zfs"]
root_path = ""
[plugins."io.containerd.tracing.processor.v1.otlp"]
endpoint = ""
insecure = false
protocol = ""
[proxy_plugins]
[stream_processors]
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar"
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar+gzip"
[timeouts]
"io.containerd.timeout.bolt.open" = "0s"
"io.containerd.timeout.shim.cleanup" = "5s"
"io.containerd.timeout.shim.load" = "5s"
"io.containerd.timeout.shim.shutdown" = "3s"
"io.containerd.timeout.task.state" = "2s"
[ttrpc]
address = ""
gid = 0
uid = 0
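With config_path = "/etc/containerd/certs.d" set in the registry section above, containerd expects one sub-directory per registry host containing a hosts.toml file, which is presumably where the registry template at the top of this commit ends up. An illustrative layout (directory name invented):

/etc/containerd/certs.d/
└── registry.local/
    └── hosts.toml    # rendered from the registry Jinja2 template above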


@@ -0,0 +1,2 @@
overlay
br_netfilter
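overlay backs the overlayfs snapshotter configured above, and br_netfilter is what lets bridged pod traffic traverse the iptables rules below. These modules are commonly paired with a sysctl drop-in along the following lines; this is a conventional companion sketch, not a file present in this diff:

# e.g. /etc/sysctl.d/99-kubernetes.conf (assumed path)
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1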


@@ -0,0 +1,4 @@
#!/bin/bash
/sbin/iptables-restore < /etc/iptables-kubernetes.rules;
/sbin/ip6tables-restore < /etc/ip6tables-kubernetes.rules;
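How this script is scheduled is not shown in this diff; the firewall template below notes that the rules are applied periodically by cron, so a typical arrangement would be something like the following (path and schedule are placeholders):

# e.g. /etc/cron.d/kubernetes-firewall (hypothetical)
*/30 * * * * root /path/to/this-script.sh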


@@ -0,0 +1,23 @@
#
# IP Tables Firewall Rules for Kubernetes
#
# Managed By ansible/role/nfc_kubernetes
#
# Don't edit this file directly as it will be overwritten. To grant a host API access,
# edit the cluster config, adding the hostname/IP to the path kubernetes_config.cluster.access
#
*filter
-N sshd
-A sshd -j RETURN
-A INPUT -p tcp --dport 22 -m comment --comment "OpenSSH Server" -j sshd
-I sshd -m comment --comment "Allow all hosts" -j ACCEPT
COMMIT


@@ -0,0 +1,263 @@
#
# IP Tables Firewall Rules for Kubernetes
#
# Managed By ansible/role/nfc_kubernetes
#
# Don't edit this file directly as it will be overwritten. To grant a host API access,
# edit the cluster config, adding the hostname/IP to the path kubernetes_config.cluster.access
#
# This file is periodically applied by cron
#
{% set data = namespace(firewall_rules=[]) -%}
{%- if ansible_host is regex('^[a-z]') and ':' not in ansible_host -%} {#- Convert DNS name to IP address -#}
{%- if ipv6 | default(false) -%}
{%- set ansible_host = query('community.dns.lookup', ansible_host + '.', type='AAAA' ) -%}
{%- else -%}
{%- set ansible_host = query('community.dns.lookup', ansible_host + '.', type='A' ) -%}
{%- endif -%}
{%- if ansible_host | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
{%- set ansible_host = ansible_host | from_yaml_all | list -%}
{%- set ansible_host = ansible_host[0] -%}
{%- endif -%}
{%- endif -%}
{%- for kubernetes_host in groups[kubernetes_type] -%}
{%- if kubernetes_host is regex('^[a-z]') and ':' not in kubernetes_host -%} {#- Convert DNS name to IP address -#}
{%- if ipv6 | default(false) -%}
{%- set kubernetes_host = query('community.dns.lookup', kubernetes_host + '.', type='AAAA' ) -%}
{%- else -%}
{%- set kubernetes_host = query('community.dns.lookup', kubernetes_host + '.', type='A' ) -%}
{%- endif -%}
{%- if kubernetes_host | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
{%- set kubernetes_host = kubernetes_host | from_yaml_all | list -%}
{%- set kubernetes_host = kubernetes_host[0] -%}
{%- endif -%}
{%- endif -%}
{%- for master_host in groups['kubernetes_master'] -%}
{%- if master_host is regex('^[a-z]') and ':' not in master_host -%} {#- Convert DNS name to IP address -#}
{%- if ipv6 | default(false) -%}
{%- set master_host = query('community.dns.lookup', master_host + '.', type='AAAA' ) -%}
{%- else -%}
{%- set master_host = query('community.dns.lookup', master_host + '.', type='A' ) -%}
{%- endif -%}
{%- if master_host | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
{%- set master_host = master_host | from_yaml_all | list -%}
{%- set master_host = master_host[0] -%}
{%- endif -%}
{%- endif -%}
{%- if Kubernetes_Master | default(false) | bool -%}
{%- if
master_host == kubernetes_host
and
master_host != ansible_host
and
(
(
ipv6 | default(false)
and
':' in master_host
)
or
(
not ipv6 | default(false)
and
'.' in master_host
)
)
-%}
{#- master hosts only -#}
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-embedded-etcd -s ' + master_host + ' -j ACCEPT'] -%}
{# {%- set data.firewall_rules = data.firewall_rules + ['-I INPUT -s ' + master_host + ' -p tcp -m multiport --dports 2380 -j ACCEPT'] -%} #}
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-api -s ' + master_host + ' -j ACCEPT'] -%}
{%- endif -%}
{%- endif -%}
{%- endfor -%}
{%- if
ansible_host != kubernetes_host
and
(
(
ipv6 | default(false)
and
':' in kubernetes_host
)
or
(
not ipv6 | default(false)
and
'.' in kubernetes_host
)
)
-%}
{#- All cluster Hosts -#}
{%- set data.firewall_rules = data.firewall_rules + ['-I kubelet-metrics -s ' + kubernetes_host + ' -j ACCEPT'] -%}
{%- endif -%}
{%- endfor -%}
{%- for api_client in kubernetes_config.cluster.access | default([]) -%}
{%- if api_client is regex('^[a-z]') and ':' not in api_client -%} {#- Convert DNS name to IP address -#}
{%- set api_client_dns_name = api_client -%}
{%- if ipv6 | default(false) -%}
{%- set api_client = query('community.dns.lookup', api_client + '.', type='AAAA' ) -%}
{%- else -%}
{%- set api_client = query('community.dns.lookup', api_client + '.', type='A' ) -%}
{%- endif -%}
{%- if api_client | list | length > 0 -%} {#- Convert dns lookup to list, and select the first item -#}
{%- set api_client = api_client | from_yaml_all | list -%}
{%- set api_client = api_client[0] -%}
{%- endif -%}
{%- endif -%}
{%- if
api_client != ansible_host
and
(
(
ipv6 | default(false)
and
':' in api_client
)
or
(
not ipv6 | default(false)
and
'.' in api_client
)
)
-%}
{#- Hosts allowed to access API -#}
{%- set data.firewall_rules = data.firewall_rules + ['-I kubernetes-api -s ' + api_client + ' -m comment --comment "host: ' + api_client_dns_name | default(api_client) + '" -j ACCEPT'] -%}
{%- endif -%}
{%- endfor %}
*filter
{# -N kubernetes-embedded-etcd
-A kubernetes-embedded-etcd -j RETURN
-A INPUT -p tcp -m multiport --dports 2379,2380 -m comment --comment "etcd. Servers only" -j kubernetes-embedded-etcd
-N kubernetes-api
-A kubernetes-api -j RETURN
-A INPUT -p tcp --dport 6443 -m comment --comment "Kubernetes API access. All Cluster hosts and end users" -j kubernetes-api
-N kubernetes-flannel-vxlan
-A kubernetes-flannel-vxlan -j RETURN
-A INPUT -p udp --dport 8472 -m comment --comment "Flannel. All cluster hosts" -j kubernetes-flannel-vxlan
-N kubernetes-kubelet-metrics
-A kubernetes-kubelet-metrics -j RETURN
-A INPUT -p tcp --dport 10250 -m comment --comment "Kubernetes Metrics. All cluster hosts" -j kubernetes-kubelet-metrics
-N kubernetes-flannel-wg-four
-A kubernetes-flannel-wg-four -j RETURN
-A INPUT -p udp --dport 51820 -m comment --comment "Flannel Wireguard IPv4. All cluster hosts" -j kubernetes-flannel-wg-four
-N kubernetes-flannel-wg-six
-A kubernetes-flannel-wg-six -j RETURN
-A INPUT -p udp --dport 51821 -m comment --comment "Flannel Wireguard IPv6. All cluster hosts" -j kubernetes-flannel-wg-six #}
{% if data.firewall_rules | length | int > 0 -%}
{% for rule in data.firewall_rules -%}
{{ rule }}
{% endfor -%}
{% endif -%}
{#- #-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 6443 -j ACCEPT
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 179 -j ACCEPT
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 10250 -j ACCEPT
#-I INPUT -s 192.168.1.0/24 -p udp -m multiport --dports 4789 -j ACCEPT
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 2379 -j ACCEPT
#-I INPUT -s 192.168.1.0/24 -p tcp -m multiport --dports 2380 -j ACCEPT
-I INPUT -p tcp -m multiport --dports 6443 -j ACCEPT
-I INPUT -p tcp -m multiport --dports 179 -j ACCEPT
-I INPUT -p tcp -m multiport --dports 10250 -j ACCEPT
-I INPUT -p udp -m multiport --dports 4789 -j ACCEPT
-I INPUT -p tcp -m multiport --dports 2379 -j ACCEPT
-I INPUT -p tcp -m multiport --dports 2380 -j ACCEPT #}
COMMIT
{# iptables -I kubernetes-api -s nww-au1.networkedweb.com -j ACCEPT #}
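To make the generated output concrete: for a worker node (Kubernetes_Master false) where another cluster host resolves to 192.168.1.10 and 192.168.1.50 is listed under kubernetes_config.cluster.access (both addresses invented), the template renders, ignoring the literal header comments, roughly:

*filter
-I kubelet-metrics -s 192.168.1.10 -j ACCEPT
-I kubernetes-api -s 192.168.1.50 -m comment --comment "host: 192.168.1.50" -j ACCEPT
COMMIT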


@@ -0,0 +1,29 @@
#
# K3s Configuration for running Kubernetes
#
# Managed By ansible/role/nfc_kubernetes
#
# Don't edit this file directly as it will be overwritten.
#
flannel-backend: none
cluster-cidr: "{{ KubernetesPodSubnet }}"
cluster-init: true
{% if not Kubernetes_Prime | default(false) | bool -%}server: https://{{ hostvars[kubernetes_config.cluster.prime.name].ansible_host }}:6443{% endif %}
service-cidr: "{{ KubernetesServiceSubnet }}"
disable-network-policy: true
disable:
- traefik
kube-apiserver-arg:
- audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log
- audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml
# - admission-control-config-file=/var/lib/rancher/k3s/server/psa.yaml
{% if kubernetes_oidc.enabled | default(false) | bool -%}
- oidc-issuer-url={{ kubernetes_oidc.issuer_url }}
- oidc-client-id={{ kubernetes_oidc.client_id }}
- oidc-username-claim={{ kubernetes_oidc.username_claim }}
{% if kubernetes_oidc.oidc_username_prefix | default('') != '' -%} - oidc-username-prefix={{ kubernetes_oidc.oidc_username_prefix }}{% endif %}
- oidc-groups-claim={{ kubernetes_oidc.groups_claim }}
{% if kubernetes_oidc.groups_prefix | default('') != '' %} - oidc-groups-prefix={{ kubernetes_oidc.groups_prefix }}{% endif %}
{% endif %}
{% if host_external_ip | default('') %} node-external-ip: "{{ host_external_ip }}"{% endif %}
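The variables consumed here are defined elsewhere in the role or inventory; a hedged sketch of the kind of values this template expects (all values are placeholders, not taken from this commit):

# Illustrative inventory values only
KubernetesPodSubnet: 10.85.0.0/16
KubernetesServiceSubnet: 10.86.0.0/16
Kubernetes_Prime: false              # when false, the server: line points at the prime node
kubernetes_config:
  cluster:
    prime:
      name: k8s-prime                # inventory hostname whose ansible_host becomes the server URL
host_external_ip: ""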


@@ -0,0 +1,19 @@
#
# Private Container Registries for Kubernetes
#
# Managed By ansible/role/nfc_kubernetes
#
# Don't edit this file directly as it will be overwritten.
#
{% set registries = kubernetes_private_container_registry | default([]) -%}
{% if registries | length > 0 %}mirrors:
{% for entry in registries %}
  {{ entry.name }}:
    endpoint:
      - "{{ entry.url }}"
{%- endfor %}
{% endif %}
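Given the hypothetical registry entry sketched earlier (name registry.local, url https://registry.local/v2), this template would render roughly as:

mirrors:
  registry.local:
    endpoint:
      - "https://registry.local/v2"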


@@ -0,0 +1,240 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    authorization/description: >-
      Provides full access to everything.
      Using this cluster role should be avoided; create additional cluster
      roles to meet more specific authorization requirements instead.
    authorization/target: cluster, namespace
  labels:
    app.kubernetes.io/part-of: nfc_kubernetes
    app.kubernetes.io/managed-by: ansible
    app.kubernetes.io/version: ''
  name: authorization:common:full
rules:
  - apiGroups:
      - "*"
    resources:
      - "*"
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    authorization/description: |-
      Provides access for reading ALL non-secret items; this includes reading pod and node metrics.
      This role is designed for users who require access to audit/view/diagnose at either the
      cluster level (`ClusterRoleBinding`) or the namespace level (`RoleBinding`).
    authorization/target: namespace
  labels:
    app.kubernetes.io/part-of: nfc_kubernetes
    app.kubernetes.io/managed-by: ansible
    app.kubernetes.io/version: ''
  name: authorization:common:namespace:read
rules:
  - apiGroups: # Get Metrics
      - metrics.k8s.io
    resources:
      - pods
      - nodes
    verbs:
      - get
      - list
      - watch
  - apiGroups: # Read-only access to resources
      - "*"
    resources:
      - awx
      - cronjobs
      - daemonset
      - deployments
      - helmcharts
      - helmchartconfigs
      - ingress
      - jobs
      - namespaces
      - pods
      - pv
      - pvc
      - serviceaccount
      - services
      - statefulset
      - storageclasses
      - configmap
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    authorization/description: |-
      Provides read/write access to ALL items, including secrets.
      This role is designed for namespace owners and is intended to be
      bound to a namespace using a `RoleBinding`.
    authorization/target: namespace
  labels:
    app.kubernetes.io/part-of: nfc_kubernetes
    app.kubernetes.io/managed-by: ansible
    app.kubernetes.io/version: ''
  name: authorization:common:namespace:owner
rules:
  - apiGroups: # Read/write access to resources
      - "*"
    resources:
      - awx
      - cronjobs
      - daemonset
      - deployments
      - helmcharts
      - helmchartconfigs
      - ingress
      - jobs
      - pods
      - pvc
      - roles
      - rolebindings
      - secrets
      - serviceaccount
      - services
      - statefulset
      - storageclasses
      - configmap
    verbs:
      - create
      - get
      - list
      - watch
      - delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: authorization:common:cluster:view-metrics
rules:
  - apiGroups:
      - metrics.k8s.io
      - "" # Without this, metrics don't work; it also grants access to view nodes
    resources:
      - pods
      - nodes
    verbs:
      - get
      - list
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: authorization:read
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: authorization:common:namespace:read
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: Group
    name: administrators
  - apiGroup: rbac.authorization.k8s.io
    kind: Group
    name: technician
  - apiGroup: rbac.authorization.k8s.io
    kind: Group
    name: NodeRED
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: authorization:view-metrics
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: authorization:common:cluster:view-metrics
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: Group
    name: administrators
  - apiGroup: rbac.authorization.k8s.io
    kind: Group
    name: technician
# ---
# kind: ClusterRoleBinding
# apiVersion: rbac.authorization.k8s.io/v1
# metadata:
#   name: authorization:full
# roleRef:
#   apiGroup: rbac.authorization.k8s.io
#   kind: ClusterRole
#   name: authorization:full
# subjects:
#   - kind: Group
#     name: administrators
#   - kind: Group
#     name: technician
###################################################################################################################
# Namespace role binding
# ---
# apiVersion: rbac.authorization.k8s.io/v1
# kind: RoleBinding
# metadata:
#   # labels:
#   name: authorization:full
#   namespace: development
# roleRef:
#   apiGroup: rbac.authorization.k8s.io
#   kind: Role
#   name: authorization:full
# subjects:
#   - kind: Group
#     name: administrators
#     namespace: development
#   - kind: Group
#     name: technician
#   - kind: Group
#     name: NodeRED
# ---
# - apiVersion: rbac.authorization.k8s.io/v1
#   kind: Role
#   metadata:
#     labels:
#       app.kubernetes.io/description: |-
#         provide full access to the testing namespace
#     name: authorization:full
#     namespace: development
#   rules:
#     - apiGroups:
#         - ""
#       resources:
#         - ""
#       verbs:
#         - add
#         - delete
#         - edit
#         - get
#         - list
#         - watch
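As its annotation states, authorization:common:namespace:owner is meant to be granted per namespace via a RoleBinding. A minimal sketch, with the namespace and group names invented:

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: authorization:owner
  namespace: development
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: authorization:common:namespace:owner
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: Group
    name: developers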


@@ -0,0 +1,14 @@
/var/lib/docker/containers/*/*.log {
    daily
    missingok
    rotate 7
    compress
    delaycompress
    notifempty
    postrotate
        docker restart $(docker ps -q)
    endscript
}
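Note that without sharedscripts, logrotate runs the postrotate command once per matched log file, and restarting every container on each rotation is disruptive. A common alternative for Docker's JSON log files is copytruncate; a sketch, not taken from this commit:

/var/lib/docker/containers/*/*.log {
    daily
    rotate 7
    compress
    delaycompress
    missingok
    notifempty
    copytruncate
}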