9 Commits

30 changed files with 1001 additions and 517 deletions

View File

View File

@ -11,13 +11,6 @@ include:
- .gitlab-ci_common.yaml
- template/automagic.gitlab-ci.yaml
Documentation.Lint:
rules:
- when: never
Documentation.Build:
rules:
- when: never
Website.Lint:
extends: .Lint_Markdown_Docs
@ -48,7 +41,7 @@ Assemble.Website.Prepare:
#- ls -laR $CI_PROJECT_DIR
# remove ops placeholder index.html
# Project: Operations
- echo "[DEBUG] fetch operations docs"
- 'HTTP_STATUS_FILE=$(curl --location -o /dev/null --silent --head --write-out "%{http_code}" --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/32419575/jobs/artifacts/development/download?job=Documentation%2EBuild")'
- echo "[DEBUG] HTTP_STATUS_FILE=$HTTP_STATUS_FILE"
@ -64,7 +57,7 @@ Assemble.Website.Prepare:
rm -Rf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build";
fi
# Project: Gitlab-CI
- echo "[DEBUG] fetch gitlab-ci project docs"
- 'HTTP_STATUS_FILE=$(curl --location -o /dev/null --silent --head --write-out "%{http_code}" --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/28543717/jobs/artifacts/development/download?job=Documentation%2EBuild")'
- echo "[DEBUG] HTTP_STATUS_FILE=$HTTP_STATUS_FILE"
@ -80,7 +73,7 @@ Assemble.Website.Prepare:
rm -Rf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build";
fi
# Project: ansible role, git_configuration
- echo "[DEBUG] fetch git_configuration project docs"
- 'HTTP_STATUS_FILE=$(curl --location -o /dev/null --silent --head --write-out "%{http_code}" --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/45705596/jobs/artifacts/development/download?job=Documentation%2EBuild")'
- echo "[DEBUG] HTTP_STATUS_FILE=$HTTP_STATUS_FILE"
@ -96,7 +89,7 @@ Assemble.Website.Prepare:
rm -Rf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build";
fi
# Project: Docker Mail Server
- echo "[DEBUG] fetch docker-mail project docs"
- 'HTTP_STATUS_FILE=$(curl --location -o /dev/null --silent --head --write-out "%{http_code}" --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/33611657/jobs/artifacts/development/download?job=Documentation%2EBuild")'
- echo "[DEBUG] HTTP_STATUS_FILE=$HTTP_STATUS_FILE"
@ -112,7 +105,7 @@ Assemble.Website.Prepare:
rm -Rf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build";
fi
# Project: Ansible Execution Environment
- echo "[DEBUG] fetch execution_environment project docs"
- 'HTTP_STATUS_FILE=$(curl --location -o /dev/null --silent --head --write-out "%{http_code}" --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/45741845/jobs/artifacts/development/download?job=Documentation%2EBuild")'
- echo "[DEBUG] HTTP_STATUS_FILE=$HTTP_STATUS_FILE"
@ -128,142 +121,6 @@ Assemble.Website.Prepare:
rm -Rf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build";
fi
# Project: NodeRED LDAP Self Service
- echo "[DEBUG] fetch nodered_ldap_self_service project docs"
- 'HTTP_STATUS_FILE=$(curl --location -o /dev/null --silent --head --write-out "%{http_code}" --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/48321671/jobs/artifacts/development/download?job=Documentation%2EBuild")'
- echo "[DEBUG] HTTP_STATUS_FILE=$HTTP_STATUS_FILE"
- |
if [ "0$HTTP_STATUS_FILE" != "0200" ]; then
echo "[ERROR] Unable to fetch Job Artifacts due to HTTP status of $HTTP_STATUS_FILE";
# exit 1;
else
curl --location --output nodered_ldap_self_service-artifacts.zip --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/48321671/jobs/artifacts/development/download?job=Documentation%2EBuild";
unzip nodered_ldap_self_service-artifacts.zip;
rm -Rf "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/ldap_self_service";
cp -rvf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build/build/projects/ldap_self_service" "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/ldap_self_service/";
rm -Rf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build";
fi
# Project: docker GLPI
- echo "[DEBUG] fetch docker-glpi project docs"
- 'HTTP_STATUS_FILE=$(curl --location -o /dev/null --silent --head --write-out "%{http_code}" --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/12928828/jobs/artifacts/development/download?job=Documentation%2EBuild")'
- echo "[DEBUG] HTTP_STATUS_FILE=$HTTP_STATUS_FILE"
- |
if [ "0$HTTP_STATUS_FILE" != "0200" ]; then
echo "[ERROR] Unable to fetch Job Artifacts due to HTTP status of $HTTP_STATUS_FILE";
# exit 1;
else
curl --location --output docker-glpi-artifacts.zip --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/12928828/jobs/artifacts/development/download?job=Documentation%2EBuild";
unzip docker-glpi-artifacts.zip;
rm -Rf "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/glpi";
cp -rvf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build/build/projects/glpi" "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/glpi/";
rm -Rf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build";
fi
# Project: kubernetes monitoring helm chart
- echo "[DEBUG] fetch kubernetes_monitoring project docs"
- 'HTTP_STATUS_FILE=$(curl --location -o /dev/null --silent --head --write-out "%{http_code}" --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/50510268/jobs/artifacts/development/download?job=Documentation%2EBuild")'
- echo "[DEBUG] HTTP_STATUS_FILE=$HTTP_STATUS_FILE"
- |
if [ "0$HTTP_STATUS_FILE" != "0200" ]; then
echo "[ERROR] Unable to fetch Job Artifacts due to HTTP status of $HTTP_STATUS_FILE";
# exit 1;
else
curl --location --output kubernetes_monitoring.zip --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/50510268/jobs/artifacts/development/download?job=Documentation%2EBuild";
unzip kubernetes_monitoring.zip;
rm -Rf "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/kubernetes_monitoring";
cp -rvf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build/build/projects/kubernetes_monitoring" "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/kubernetes_monitoring/";
rm -Rf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build";
fi
# Project: Ansible Public Playbooks
- echo "[DEBUG] fetch Ansible Public Playbooks project docs"
- 'HTTP_STATUS_FILE=$(curl --location -o /dev/null --silent --head --write-out "%{http_code}" --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/46364551/jobs/artifacts/development/download?job=Documentation%2EBuild")'
- echo "[DEBUG] HTTP_STATUS_FILE=$HTTP_STATUS_FILE"
- |
if [ "0$HTTP_STATUS_FILE" != "0200" ]; then
echo "[ERROR] Unable to fetch Job Artifacts due to HTTP status of $HTTP_STATUS_FILE";
# exit 1;
else
curl --location --output ansible_playbooks.zip --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/46364551/jobs/artifacts/development/download?job=Documentation%2EBuild";
unzip ansible_playbooks.zip;
rm -Rf "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/ansible/playbooks";
cp -rvf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build/build/projects/ansible/playbooks" "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/ansible/playbooks/";
rm -Rf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build";
fi
# Project: Ansible role common
- echo "[DEBUG] fetch Ansible common project docs"
- 'HTTP_STATUS_FILE=$(curl --location -o /dev/null --silent --head --write-out "%{http_code}" --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/52226103/jobs/artifacts/development/download?job=Documentation%2EBuild")'
- echo "[DEBUG] HTTP_STATUS_FILE=$HTTP_STATUS_FILE"
- |
if [ "0$HTTP_STATUS_FILE" != "0200" ]; then
echo "[ERROR] Unable to fetch Job Artifacts due to HTTP status of $HTTP_STATUS_FILE";
# exit 1;
else
curl --location --output nfc_common.zip --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/52226103/jobs/artifacts/development/download?job=Documentation%2EBuild";
unzip nfc_common.zip;
rm -Rf "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/ansible/roles/common";
cp -rvf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build/build/projects/ansible/roles/common" "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/ansible/roles/common/";
rm -Rf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build";
fi
# Project: Ansible role firewall
- echo "[DEBUG] fetch Ansible firewall project docs"
- 'HTTP_STATUS_FILE=$(curl --location -o /dev/null --silent --head --write-out "%{http_code}" --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/51640016/jobs/artifacts/development/download?job=Documentation%2EBuild")'
- echo "[DEBUG] HTTP_STATUS_FILE=$HTTP_STATUS_FILE"
- |
if [ "0$HTTP_STATUS_FILE" != "0200" ]; then
echo "[ERROR] Unable to fetch Job Artifacts due to HTTP status of $HTTP_STATUS_FILE";
# exit 1;
else
curl --location --output nfc_firewall.zip --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/51640016/jobs/artifacts/development/download?job=Documentation%2EBuild";
unzip nfc_firewall.zip;
rm -Rf "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/ansible/roles/firewall";
cp -rvf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build/build/projects/ansible/roles/firewall" "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/ansible/roles/firewall/";
rm -Rf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build";
fi
# Project: Ansible role homeassistant
- echo "[DEBUG] fetch Ansible Public homeassistant project docs"
- 'HTTP_STATUS_FILE=$(curl --location -o /dev/null --silent --head --write-out "%{http_code}" --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/51020674/jobs/artifacts/development/download?job=Documentation%2EBuild")'
- echo "[DEBUG] HTTP_STATUS_FILE=$HTTP_STATUS_FILE"
- |
if [ "0$HTTP_STATUS_FILE" != "0200" ]; then
echo "[ERROR] Unable to fetch Job Artifacts due to HTTP status of $HTTP_STATUS_FILE";
# exit 1;
else
curl --location --output nfc_homeassistant.zip --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/51020674/jobs/artifacts/development/download?job=Documentation%2EBuild";
unzip nfc_homeassistant.zip;
rm -Rf "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/ansible/roles/homeassistant";
cp -rvf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build/build/projects/ansible/roles/homeassistant" "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/ansible/roles/homeassistant/";
rm -Rf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build";
fi
# Project: Ansible role kubernetes
- echo "[DEBUG] fetch Ansible kubernetes project docs"
- 'HTTP_STATUS_FILE=$(curl --location -o /dev/null --silent --head --write-out "%{http_code}" --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/51640029/jobs/artifacts/development/download?job=Documentation%2EBuild")'
- echo "[DEBUG] HTTP_STATUS_FILE=$HTTP_STATUS_FILE"
- |
if [ "0$HTTP_STATUS_FILE" != "0200" ]; then
echo "[ERROR] Unable to fetch Job Artifacts due to HTTP status of $HTTP_STATUS_FILE";
# exit 1;
else
curl --location --output nfc_kubernetes.zip --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/51640029/jobs/artifacts/development/download?job=Documentation%2EBuild";
unzip nfc_kubernetes.zip;
rm -Rf "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/ansible/roles/kubernetes";
cp -rvf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build/build/projects/ansible/roles/kubernetes" "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/ansible/roles/kubernetes/";
rm -Rf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build";
fi
# # below 2 lines commented out as need to figure out how to download artifacts.
# - rm -Rf "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/operations/index.html"
@ -505,17 +362,43 @@ Unit Tests:
# - echo "placeholder job for integration tests" > "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/DETEMEME.txt"
.Add_SSHKey: &Add_SSHKey_Before_Script |
mkdir -p ~/.ssh
chmod 700 ~/.ssh
eval $(ssh-agent -s)
SSH_KEY_NAME=SSH_PRIVATE_KEY_${ANSIBLE_USER}
echo "Debug SSH_KEY_NAME[$SSH_KEY_NAME]"
chmod 700 "${!SSH_KEY_NAME}"
ssh-add "${!SSH_KEY_NAME}"
#update next line so that ca key can be obtained. original source is ansible repo
#HOST_SSH_ID=$(cat roles/openssh-server/files/nww-nl/host_ca.pub)
HOST_SSH_ID=$(cat ${SSH_HOST_CA})
echo DEBUG HOST_SSH_ID[$HOST_SSH_ID]
echo "@cert-authority *.networkedweb.com $HOST_SSH_ID" > ~/.ssh/known_hosts
chmod 700 ~/.ssh/known_hosts
ls -la ~/.ssh
public_website:
stage: publish
image: alpine
image: debian:buster-slim
variables:
GIT_STRATEGY: none
before_script:
- ls -la /html
- if [ "0$ANSIBLE_USER" == "0" ]; then ANSIBLE_USER=deploy; fi
- echo Debug ANSIBLE_USER[$ANSIBLE_USER]
- apt update
- apt install --no-install-recommends -y ssh
- ls -la "$CI_PROJECT_DIR/artifacts/prepare/Assemble.Website.Prepare/build"
- mv "$CI_PROJECT_DIR/artifacts/prepare/Assemble.Website.Prepare/build" "$CI_PROJECT_DIR/public"
- rm -Rf "$CI_PROJECT_DIR/public/build"
- ls -la "$CI_PROJECT_DIR"
- ls -la "$CI_PROJECT_DIR/public"
- *Add_SSHKey_Before_Script
script:
- rm -rf /html/*
- cp -r "$CI_PROJECT_DIR/artifacts/prepare/Assemble.Website.Prepare/build"/* /html/
- ls -laR /html/
- ssh ${ANSIBLE_USER}@${HOST_PUBLIC_WEBSITE} sudo rm -Rf ${PUBLIC_WEBSITE_PATH}/*
- scp -r public/* ${ANSIBLE_USER}@${HOST_PUBLIC_WEBSITE}:${PUBLIC_WEBSITE_PATH}
needs: [ 'Assemble.Website.Prepare', 'Unit Tests']
resource_group: production
environment:
@ -525,6 +408,22 @@ public_website:
paths:
- public
rules:
# - if: '$CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH == "master" && $CI_PIPELINE_SOURCE == "push"'
# when: on_success
# - if: '$CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH == "development" && $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_PIPELINE_SOURCE == "push"'
# when: manual
# - if: '$CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != "development" && $CI_COMMIT_BRANCH != "master" && $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_PIPELINE_SOURCE == "push"'
# when: manual
# allow_failure: true
# Build docs on tag so they can be downloaded from the tag job and are always available.
# - if: # condition_git_tag
# $CI_COMMIT_TAG != null &&
# $CI_COMMIT_BRANCH == null
# exists:
# - '{docs/**,pages/**}/*.md'
# when: always
- if: # condition_master_branch_push
$CI_COMMIT_BRANCH == "master" &&
$CI_PIPELINE_SOURCE == "push"
@ -549,9 +448,20 @@ public_website:
# paths:
# - '{docs/**,pages/**}/*.md'
# compare_to: 'master'
allow_failure: true
when: manual
# - if: # condition_not_master_or_dev_push
# $CI_COMMIT_BRANCH != "master" &&
# $CI_COMMIT_BRANCH != "development" &&
# $CI_PIPELINE_SOURCE == "push"
# exists:
# - '{docs/**,pages/**}/*.md'
# changes:
# paths:
# - '{docs/**,pages/**}/*.md'
# compare_to: 'development'
# when: manual
# allow_failure: true
- when: never
tags:
- production
- website

View File

@ -2,7 +2,7 @@ INHERIT: website-template/mkdocs.yml
repo_name: Website
repo_url: https://gitlab.com/nofusscomputing/infrastructure/website
edit_uri: '/-/ide/project/nofusscomputing/projects/website/edit/development/-/pages/'
edit_uri: '/-/ide/project/nofusscomputing/projects/docker-mail/edit/development/-/pages/'
nav:
- Home: index.md
@ -11,9 +11,21 @@ nav:
- articles/index.md
- 2023:
- 2022:
- articles/2023/new_website.md
- articles/2022/gitlab_pipeline_from_github_actions.md
- articles/2022/fail2ban_running_considerations.md
- articles/2022/fail2ban_permanent_whitelist.md
- articles/2022/fail2ban_permanent_ban_closed_port_access.md
- articles/2022/fail2ban_block_suspisious_activity.md
- articles/2022/gitlab_piplines_vscode.md
- articles/2022/local_gitlab_pipeline.md
- 2015:
@ -29,58 +41,20 @@ nav:
- projects/index.md
- Ansible:
- Ansible Execution Environment:
- projects/ansible/index.md
- projects/execution_environment/index.md
- Ansible Execution Environment:
- projects/execution_environment/index.md
- Playbooks:
- projects/ansible/playbooks/index.md
- Roles:
- projects/ansible/roles/index.md
- Common:
- projects/ansible/roles/common/index.md
- Firewall:
- projects/ansible/roles/firewall/index.md
- Home Assistant:
- projects/ansible/roles/homeassistant/index.md
- Kubernetes:
- projects/ansible/roles/kubernetes/index.md
- Ansible Roles:
- Git Configuration:
- projects/git_configuration/index.md
- Docker:
- projects/docker/index.md
- Docker Mail: projects/docker-mail/index.md
- BIND DNS Server: projects/docker/bind/index.md
- Docker Mail: projects/docker-mail/index.md
- Gitlab CI: projects/gitlab-ci/index.md
- Docker GLPI: projects/glpi/index.md
- Kubernetes Monitoring: projects/kubernetes_monitoring/index.md
- LDAP Self Service: projects/ldap_self_service/index.md
- Python Gitlab Management: projects/python-gitlab-management/README.md
- Operations:

View File

@ -0,0 +1,194 @@
---
title: Setting up Fail2ban to Monitor Common TCP and UDP Ports for suspicious activity
description: An explanation of how to configure fail2ban to block suspicious activity.
date: 2022-06-12
template: article.html
type: blog
author: jon
about: https://www.fail2ban.org/
tags:
- Security
- Firewall
- Fail2ban
---
In this article, we'll explore how to set up an existing installation of Fail2ban to monitor common TCP and UDP ports. Fail2ban is a powerful tool that scans log files, detects suspicious activity, and automatically blocks the IP addresses of the offending hosts. By implementing Fail2ban to monitor common ports, we can enhance the security of our system and mitigate potential risks.
## Prerequisites
- An existing installation of Fail2ban
- Basic knowledge of working with the command line
## Step 1: Set up iptables Rules
1. Open the iptables configuration file in a text editor using root privileges:
```bash
sudo nano /etc/iptables/log_closed_ports.v4
```
2. Add the following rules for each specified port to log access to that port:
```bash
-A INPUT -p tcp --dport 80 -j LOG --log-prefix "[http-blocked-port-80] "
-A INPUT -p tcp --dport 443 -j LOG --log-prefix "[https-blocked-port-443] "
-A INPUT -p udp --dport 53 -j LOG --log-prefix "[dns-blocked-port-53] "
-A INPUT -p tcp --dport 22 -j LOG --log-prefix "[ssh-blocked-port-22] "
-A INPUT -p tcp --dport 3306 -j LOG --log-prefix "[mysql-blocked-port-3306] "
-A INPUT -p tcp --dport 5432 -j LOG --log-prefix "[postgresql-blocked-port-5432] "
```
Adjust the port numbers and log prefixes as needed for each port.
3. Save and exit the file (`Ctrl+O`, `Enter`, `Ctrl+X` in nano).
4. Restart the iptables service to apply the changes:
```bash
sudo service iptables restart
```
## Step 2: Configure Fail2ban Filters
1. Open the Fail2ban filters directory in a text editor:
```bash
sudo nano /etc/fail2ban/filter.d/iptables-port.conf
```
2. Add the following content to the file:
```ini
[Definition]
failregex = ^.*\[.*\] .* <HOST> .*$
ignoreregex =
actionban = iptables-multiport[logpath="/var/log/fail2ban_blocked_port_access.log", logprefix="[%(date)s] [%(name)s] [%(ip)s] "]
```
3. Save and exit the file (`Ctrl+O`, `Enter`, `Ctrl+X` in nano).
## Step 3: Configure Fail2ban Jail
1. Open the Fail2ban jail configuration file in a text editor:
```bash
sudo nano /etc/fail2ban/jail.d/custom.conf
```
2. Add the following configuration to the file:
```ini
[http-blocked-port-80]
enabled = true
filter = iptables-port
logpath = /var/log/iptables.log
maxretry = 3
banaction = iptables-multiport
[https-blocked-port-443]
enabled = true
filter = iptables-port
logpath = /var/log/iptables.log
maxretry = 3
banaction = iptables-multiport
[dns-blocked-port-53]
enabled = true
filter = iptables-port
logpath = /var/log/iptables.log
maxretry = 3
banaction = iptables-multiport
[ssh-blocked-port-22]
enabled = true
filter = iptables-port
logpath = /var/log/iptables.log
maxretry = 3
banaction = iptables-multiport
[mysql-blocked-port-3306]
enabled = true
filter = iptables-port
logpath = /var/log/iptables.log
maxretry = 3
banaction = iptables-multiport
[postgresql-blocked-port-5432]
enabled = true
filter = iptables-port
logpath = /var/log/iptables.log
maxretry = 3
banaction = iptables-multiport
```
Adjust the configuration as needed for each port.
3. Save and exit the file (`Ctrl+O`, `Enter`, `Ctrl+X` in nano).
## Step 4: Restart Fail2ban Service
1. Restart the Fail2ban service to apply the configuration changes:
```bash
sudo service fail2ban restart
```
Congratulations! You have successfully set up Fail2ban to monitor common TCP and UDP ports. Fail2ban will now log access attempts to the specified ports and automatically ban IP addresses that exceed the maximum number of allowed retries. The ban events will be logged in the `/var/log/fail2ban_blocked_port_access.log` file with the date, rule name, and IP address information.
!!! Alert
Please note that the provided configurations are examples, and you may need to modify them based on your specific needs and environment.
## Common Ports
Within this table you will find some common ports that may be useful to include additional rules for.
| Port | Protocol | Description |
|------|----------|------------------------------|
| 20 | TCP | FTP Data |
| 21 | TCP | FTP Control |
| 22 | TCP | SSH |
| 23 | TCP | Telnet |
| 25 | TCP | SMTP |
| 53 | TCP/UDP | DNS |
| 67 | UDP | DHCP Server |
| 68 | UDP | DHCP Client |
| 69 | UDP | TFTP |
| 80 | TCP | HTTP |
| 110 | TCP | POP3 |
| 115 | TCP | SFTP |
| 123 | UDP | NTP |
| 137 | UDP | NetBIOS Name Service |
| 138 | UDP | NetBIOS Datagram Service |
| 139 | TCP | NetBIOS Session Service |
| 143 | TCP | IMAP |
| 161 | UDP | SNMP |
| 389 | TCP/UDP | LDAP |
| 443 | TCP | HTTPS |
| 445 | TCP/UDP | SMB |
| 465 | TCP | SMTPS |
| 514 | TCP/UDP | Syslog |
| 587 | TCP | SMTP (Submission) |
| 636 | TCP/UDP | LDAPS |
| 993 | TCP | IMAPS |
| 995 | TCP | POP3S |
| 1433 | TCP | MS SQL Server |
| 1434 | UDP | MS SQL Server (UDP) |
| 1521 | TCP | Oracle Database |
| 2049 | TCP/UDP | NFS |
| 3306 | TCP | MySQL |
| 3389 | TCP | Remote Desktop Protocol (RDP) |
| 5432 | TCP | PostgreSQL |
| 5900 | TCP | VNC |
| 5985 | TCP | WinRM |
| 6379 | TCP | Redis |
| 8080 | TCP | HTTP (Alternate) |

View File

@ -0,0 +1,154 @@
---
title: Managing Permanent Bans in Fail2ban
description: An explanation of how to configure fail2ban to permanently ban closed-port access as suspicious activity.
date: 2022-06-12
template: article.html
type: blog
author: jon
about: https://www.fail2ban.org/
tags:
- Security
- Firewall
- Fail2ban
---
In this article, we'll continue from where we left off in the article "[Setting up Fail2ban to Monitor Common TCP and UDP Ports for suspicious activity](fail2ban_block_suspisious_activity.md)" and explore how to manage permanent bans in Fail2ban on Debian/Ubuntu. We'll specifically focus on checking the closed port log file, `/var/log/fail2ban_blocked_port_access.log`, and adding hosts to a permanent ban list if they are found three times. This additional step will further enhance the security of your system by permanently blocking repeat offenders.
When an attacker targets a system, they often perform port scanning to identify open ports that can be exploited. Accessing a closed port, on the other hand, is considered suspicious and indicative of potentially malicious activity. Fail2ban helps detect and respond to such behavior by monitoring the system's log files, including the closed port log. By examining this log, we can identify hosts that repeatedly attempt to access closed ports, indicating a persistent threat.
In this article, we'll cover the steps to check the closed port log, create a permanent ban list, and configure Fail2ban to add IP addresses to the ban list when they are found three times in the log. By doing so, we can effectively protect our system from attackers who repeatedly attempt to access closed ports.
## Prerequisites
- An existing installation of Fail2ban on Debian/Ubuntu
- Basic knowledge of working with the command line
## Step 1: Checking the Closed Port Log
1. Open the closed port log file, `/var/log/fail2ban_blocked_port_access.log`, using a text editor:
```bash
sudo nano /var/log/fail2ban_blocked_port_access.log
```
2. Inside the log file, you will see entries in the following format:
```
[2023-06-12] [http-blocked-port-80] [192.168.0.1] Host banned permanently.
```
3. Each entry consists of the date, rule name, IP address, and the indication of a permanent ban.
## Step 2: Creating the Permanent Ban List
1. Open the Fail2ban jail local configuration file in a text editor:
```bash
sudo nano /etc/fail2ban/jail.local
```
2. Scroll to the end of the file and add the following section to create a permanent ban list:
```ini
[permanent-bans]
enabled = true
filter = permanent-bans
logpath = /var/log/fail2ban_blocked_port_access.log
maxretry = 3
bantime = -1
action = iptables-allports
```
Adjust the `filter` parameter and `logpath` as per your system configuration.
3. Save and exit the file (`Ctrl+O`, `Enter`, `Ctrl+X` in nano).
## Step 3: Creating the Filter for Permanent Bans
1. Open the Fail2ban filter file for permanent bans in a text editor:
```bash
sudo nano /etc/fail2ban/filter.d/permanent-bans.conf
```
2. Add the following content to the file:
```ini
[Definition]
failregex = ^\[\d{4}-\d{2}-\d{2}\] \[.*\] \[(?P<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\].*Host banned permanently\.$
ignoreregex =
```
3. Save and exit the file (`Ctrl+O`, `Enter`, `Ctrl+X` in nano).
## Step 4: Restarting Fail2ban Service
1. Restart the Fail2ban service to apply the configuration changes:
```bash
sudo service fail2ban restart
```
2. Fail2ban will now read the closed port log file and permanently ban any IP address that appears three times in the log.
Congratulations! You have successfully set up permanent bans in Fail2ban on Debian/Ubuntu. By monitoring the closed port log and adding repeat offenders to a permanent ban list, you have added an extra layer of security to your system.
!!! Tip
Please note that while permanent bans provide increased protection, they should be used judiciously. Review the closed port log entries carefully before applying permanent bans to avoid unintended consequences. It's recommended to test and fine-tune the configuration in a controlled environment before applying it to production systems.
If you have any questions or encounter any issues, feel free to reach out. Stay secure!
## Common Network Ports
| Port | Protocol | Description |
|------|----------|------------------------------|
| 20 | TCP | FTP Data |
| 21 | TCP | FTP Control |
| 22 | TCP | SSH |
| 23 | TCP | Telnet |
| 25 | TCP | SMTP |
| 53 | TCP/UDP | DNS |
| 67 | UDP | DHCP Server |
| 68 | UDP | DHCP Client |
| 69 | UDP | TFTP |
| 80 | TCP | HTTP |
| 110 | TCP | POP3 |
| 115 | TCP | SFTP |
| 123 | UDP | NTP |
| 137 | UDP | NetBIOS Name Service |
| 138 | UDP | NetBIOS Datagram Service |
| 139 | TCP | NetBIOS Session Service |
| 143 | TCP | IMAP |
| 161 | UDP | SNMP |
| 389 | TCP/UDP | LDAP |
| 443 | TCP | HTTPS |
| 445 | TCP/UDP | SMB |
| 465 | TCP | SMTPS |
| 514 | TCP/UDP | Syslog |
| 587 | TCP | SMTP (Submission) |
| 636 | TCP/UDP | LDAPS |
| 993 | TCP | IMAPS |
| 995 | TCP | POP3S |
| 1433 | TCP | MS SQL Server |
| 1434 | UDP | MS SQL Server (UDP) |
| 1521 | TCP | Oracle Database |
| 2049 | TCP/UDP | NFS |
| 3306 | TCP | MySQL |
| 3389 | TCP | Remote Desktop Protocol (RDP) |
| 5432 | TCP | PostgreSQL |
| 5900 | TCP | VNC |
| 5985 | TCP | WinRM |
| 6379 | TCP | Redis |
| 8080 | TCP | HTTP (Alternate) |
Feel free to reach out if you have any questions or encounter any issues along the way. Stay secure!
!!! Note
Please note that the provided configurations and port table are examples, and you may need to modify them based on your specific needs and environment.

View File

@ -0,0 +1,68 @@
---
title: Configuring a Permanent Whitelist in Fail2ban
description: An explanation on how to configure fail2ban to permanently allow hosts specified on a whitelist.
date: 2022-06-12
template: article.html
type: blog
author: jon
about: https://www.fail2ban.org/
tags:
- Security
- Firewall
- Fail2ban
---
In this article, we will explore how to configure a permanent whitelist in Fail2ban. A whitelist allows specific IP addresses or DNS names to bypass any blocking rules enforced by Fail2ban. This can be useful when you want to ensure uninterrupted access for trusted hosts while still benefiting from the protection provided by Fail2ban against suspicious activity.
## Prerequisites
- An existing installation of Fail2ban on your system
- Basic knowledge of working with the command line
## Step 1: Open Fail2ban Configuration
1. Open the Fail2ban configuration file in a text editor using root privileges:
```bash
sudo nano /etc/fail2ban/jail.local
```
2. Locate the `[DEFAULT]` section in the file.
## Step 2: Configure the Permanent Whitelist
1. Add the `ignoreip` parameter under the `[DEFAULT]` section to specify the IP addresses or DNS names to be whitelisted. You can whitelist multiple entries by separating them with a space.
```ini
[DEFAULT]
ignoreip = 192.168.1.100 example.com
```
Replace `192.168.1.100` with the desired IP address or add more IP addresses as needed. You can also include DNS names like `example.com` to whitelist specific domains.
2. Save and exit the file (`Ctrl+O`, `Enter`, `Ctrl+X` in nano).
## Step 3: Restart Fail2ban Service
1. Restart the Fail2ban service to apply the configuration changes:
```bash
sudo service fail2ban restart
```
Congratulations! You have successfully configured a permanent whitelist in Fail2ban. The IP addresses or DNS names specified in the `ignoreip` parameter will now be exempted from any blocking rules enforced by Fail2ban.
It's important to regularly review and update the whitelist to ensure it remains accurate and secure. Remember that introducing DNS names in the whitelist adds a dependency on DNS resolution, so ensure that DNS resolution is functioning properly on your system.
By configuring a permanent whitelist, you can allow trusted hosts to access your system without being affected by Fail2ban's blocking mechanisms. This helps strike a balance between security and accessibility.
Feel free to reach out if you have any questions or encounter any issues along the way. Stay secure!
!!! Note
The provided instructions are based on the assumption that you have Fail2ban installed and have administrative privileges on your system. Modify the configuration as per your specific requirements and system configuration.

View File

@ -0,0 +1,59 @@
---
title: Running Fail2ban Considerations
description: A food-for-thought article on running Fail2ban, covering some considerations.
date: 2022-06-12
template: article.html
type: blog
author: jon
about: https://www.fail2ban.org/
tags:
- Security
- Firewall
- Fail2ban
---
Fail2ban is a powerful tool for enhancing the security of your system by automatically detecting and blocking suspicious activities. While Fail2ban can be installed and run in various environments, it's important to consider the best practices and potential challenges associated with running Fail2ban effectively. In this article, we will explore different methods of installing and running Fail2ban, and discuss why running Fail2ban within a Docker container may not be the optimal approach.
## Methods of Installing and Running Fail2ban
There are multiple methods available to install and run Fail2ban, including:
1. **Package Manager**: Many Linux distributions provide Fail2ban packages through their package managers. This method simplifies the installation process by automatically handling dependencies and providing system integration.
2. **Source Code**: Installing Fail2ban from source code gives you more control over the installation process and allows for customization. This method involves manually compiling and configuring Fail2ban on your system.
Now, let's delve into the reasons why running Fail2ban within a Docker container may not be a good idea.
## Reasons for Not Running Fail2ban within a Docker Container
While there may be scenarios where running Fail2ban within a Docker container seems appealing, it's important to consider the following reasons why it's generally not recommended:
1. **Limited Visibility**: Docker containers have their own isolated network stack, which can limit Fail2ban's visibility into the host system's network traffic. This can hinder Fail2ban's ability to accurately monitor and respond to malicious activities.
2. **Log File Monitoring**: Fail2ban relies on monitoring log files to detect and respond to malicious activities. When running within a Docker container, Fail2ban may have limited access to the host's log files, making it less effective in identifying and blocking malicious behavior.
3. **Network Filtering Limitations**: Fail2ban utilizes firewall rules to block malicious hosts. Running Fail2ban within a Docker container may limit its ability to apply firewall rules directly on the host system, reducing its effectiveness in mitigating threats.
4. **Complexity and Configuration Challenges**: Running Fail2ban within a Docker container introduces an additional layer of complexity and potential configuration challenges. It may require custom networking setups, log file sharing between the container and host, and intricate container-to-host communication mechanisms.
5. **Dependency on Docker Service**: When Fail2ban is running inside a Docker container, it becomes dependent on the Docker service itself. If the Docker service stops or encounters issues, Fail2ban will also be affected and may cease to function properly. This dependency introduces a single point of failure, potentially leaving your system vulnerable to malicious activities.
6. **Restart and Recovery Challenges**: When the Docker service restarts or if the host system reboots, Docker containers are typically not automatically started in a specific order. This can lead to a delay in Fail2ban being operational, leaving your system exposed to potential threats during that time.
Considering these reasons, it is generally recommended to install and run Fail2ban directly on the host system. By doing so, you ensure full visibility into the network traffic, unrestricted access to log files, seamless integration with firewall rules, simpler configuration setup, and avoid the potential issues associated with running Fail2ban within a Docker container.
## Conclusion
Installing and running Fail2ban using the package manager or from source code are common methods to enhance the security of your system. However, when it comes to running Fail2ban within a Docker container, reasons such as limited visibility, log file monitoring challenges, network filtering limitations, increased complexity, and the dependency on the Docker service indicate that it's not the optimal approach.
By following best practices and running Fail2ban directly on the host system, you can maximize its effectiveness in detecting and blocking malicious activities. Choose the installation method that best suits your needs and ensure regular updates to keep your system secure.
Remember to consider the reasons, including the scenario of Docker service stopping, presented here and evaluate the trade-offs before deciding to run Fail2ban within a Docker container. Prioritize the security of your system while maintaining simplicity and effectiveness.
If you have any questions or encounter any issues along the way, feel free to reach out. Stay secure!
!!! Note
The installation methods mentioned in this article are general guidelines. Refer to the official Fail2ban documentation and consult your specific Linux distribution's documentation for detailed instructions and any distribution-specific nuances.

View File

@ -0,0 +1,245 @@
---
title: Running GitLab Pipeline from GitHub Actions
description: An explanation on how to run a GitLab Pipeline or job from GitHub Actions.
date: 2022-06-12
template: article.html
type: blog
author: jon
about: https://docs.gitlab.com/runner/
tags:
- Gitlab
- Github
- Docker
- CD/CI
- Pipeline
---
Migrating from GitLab to GitHub or having existing GitLab configurations in your repository? No worries! In this article, we will explore how to seamlessly execute GitLab pipelines using GitHub Actions, enabling you to leverage the power of GitHub's CI/CD capabilities while maintaining your existing GitLab configurations.
If you already have a GitLab CI/CD pipeline defined in your repository, this guide will help you execute it without the need for major modifications. By configuring a self-hosted GitLab Runner Docker container in GitHub Actions and utilizing the .gitlab-ci.yml file, you can easily trigger your GitLab pipeline and benefit from GitHub's collaborative features.
Let's dive into the steps required to configure GitHub Actions, execute the GitLab Runner Docker container, and seamlessly run your GitLab pipeline from within your GitHub repository.
## Prerequisites
- Access to a GitHub repository with the desired project code.
- Basic knowledge of GitLab CI/CD and GitHub Actions concepts.
## Step 1: Configure GitHub Actions Workflow and Execute GitLab Runner Docker Container
1. Open your workflow configuration file (e.g., `.github/workflows/main.yml`) in your GitHub repository.
2. Specify the runner using the `runs-on` field:
```yaml
jobs:
build:
runs-on: self-hosted
steps:
- name: Checkout code
uses: actions/checkout@v2
```
The `runs-on` field is set to `self-hosted`, instructing GitHub Actions to use a self-hosted runner.
3. Update the workflow file to include the following step for executing the GitLab Runner Docker container:
```yaml
jobs:
build:
runs-on: self-hosted
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Start GitLab Runner Docker Container
run: |
docker run -d --name gitlab-runner \
-v /var/run/docker.sock:/var/run/docker.sock \
-v /path/to/runner/config:/etc/gitlab-runner \
-v ${{ github.workspace }}:/${{ github.workspace }} \
-w /${{ github.workspace }} \
gitlab/gitlab-runner:latest
```
Replace `/path/to/runner/config` with the actual path where you want to store the GitLab Runner configuration files.
4. Commit and push your workflow configuration file to your GitHub repository.
## Step 2: Use .gitlab-ci.yml for Jobs and Pipelines
1. Create or update the `.gitlab-ci.yml` file in your GitHub repository to define the jobs and pipelines for your GitLab Runner.
```yaml
stages:
- build
- test
- deploy
build:
stage: build
script:
- echo "Running build job"
test:
stage: test
script:
- echo "Running test job"
deploy:
stage: deploy
script:
- echo "Running deploy job"
```
Customize the jobs and their respective scripts according to your specific CI/CD requirements.
2. Commit and push the `.gitlab-ci.yml` file to your GitHub repository.
## Step 3: Execute GitLab Pipeline using GitHub Actions
1. With the changes pushed to your GitHub repository, the self-hosted GitLab Runner Docker container will utilize the `.gitlab-ci.yml` file to execute the defined jobs and pipelines.
2. To run a specific job, add the job name as a parameter to the GitLab Runner command. For example, to run only the `test` job, modify the workflow configuration file as follows:
```yaml
jobs:
build:
runs-on: self-hosted
steps:
- name: Checkout code
uses: actions/checkout@v2
test:
runs-on: self-hosted
steps:
- name: Run GitLab Runner job
run: |
docker exec gitlab-runner gitlab-runner exec docker test
```
In this example, the `test` job
is executed using the `docker exec` command.
3. To run the entire pipeline defined in `.gitlab-ci.yml`, remove the specific job parameter from the `docker exec` command:
```yaml
jobs:
build:
runs-on: self-hosted
steps:
- name: Checkout code
uses: actions/checkout@v2
test:
runs-on: self-hosted
steps:
- name: Run GitLab Runner pipeline
run: |
docker exec gitlab-runner gitlab-runner exec docker
```
By removing the job parameter, the entire pipeline will be executed.
## Conclusion
By configuring a self-hosted GitLab Runner Docker container in GitHub Actions and utilizing the `.gitlab-ci.yml` file, you can seamlessly execute GitLab pipelines from within your GitHub repositories. This enables you to leverage the powerful CI/CD capabilities of GitLab while still benefiting from the collaborative features of GitHub.
If you prefer to focus solely on running GitLab pipelines from GitHub Actions — without maintaining a self-hosted runner container — the following alternative approach uses GitHub-hosted runners instead.
## Alternative: Running GitLab Pipeline on GitHub-Hosted Runners
## Introduction
If you're looking to migrate from GitLab to GitHub or have existing GitLab configurations in your repository, this article is for you. We'll explore how to seamlessly execute GitLab pipelines using GitHub Actions, leveraging GitHub's CI/CD capabilities while maintaining your GitLab configurations.
By configuring GitHub Actions to trigger your GitLab pipeline, you can take advantage of GitHub's collaborative features while running your pipeline in a familiar environment.
## Prerequisites
Before we get started, make sure you have the following:
- A GitHub repository with your GitLab project code
- A `.gitlab-ci.yml` file defining your GitLab pipeline jobs
## Step 1: Configure GitHub Actions Workflow
1. In your GitHub repository, navigate to the **Actions** tab.
2. Click on **Set up a workflow yourself** to create a new workflow file.
3. Replace the contents of the workflow file with the following:
```yaml
name: Run GitLab Pipeline
on:
push:
branches:
- main
pull_request:
branches:
- main
jobs:
run-gitlab-pipeline:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v2
- name: Set up GitLab Runner
uses: docker://gitlab/gitlab-runner:latest
- name: Execute GitLab pipeline
run: |
# Customize this command to match your GitLab Runner configuration
gitlab-runner exec docker <your-pipeline-name>
```
Note: Replace `<your-pipeline-name>` with the name of a job defined in `.gitlab-ci.yml`. Keep in mind that `gitlab-runner exec` runs a single job at a time rather than a full pipeline.
4. Commit and push the workflow file to your GitHub repository.
## Step 2: Customize GitLab Runner Configuration
1. In your GitHub repository, navigate to the **Settings** tab.
2. Click on **Secrets** in the left sidebar.
3. Add any necessary secrets or environment variables required for your GitLab Runner configuration.
For example, you may need to set the `CI_JOB_TOKEN` secret to authenticate with your GitLab repository.
## Step 3: Trigger GitLab Pipeline
Any push or pull request events on the `main` branch will now trigger the GitHub Actions workflow, which in turn executes your GitLab pipeline.
## Conclusion
Congratulations! You've successfully configured GitHub Actions to run your GitLab pipeline. By leveraging GitHub's CI/CD capabilities, you can seamlessly execute your GitLab pipelines and benefit from the collaborative features provided by GitHub.
Remember to keep your `.gitlab-ci.yml` file up to date with your desired pipeline configurations. Feel free to explore other features of GitHub Actions to further enhance your CI/CD workflows.
If you have any questions or encounter any issues along the way, don't hesitate to reach out for assistance. Happy automating!
Please note that this article assumes you already have a working `.gitlab-ci.yml` file and focuses solely on executing the GitLab pipeline using GitHub Actions.
With this approach in place, your GitLab pipelines can run entirely on GitHub's hosted infrastructure.

View File

@ -0,0 +1,114 @@
---
title: Running GitLab Runner with VS Code Tasks
description: An explanation on how to setup VSCode Tasks to run Gitlab Pipelines directly from VSCode.
date: 2022-06-12
template: article.html
type: blog
author: jon
about: https://hub.docker.com/r/gitlab/gitlab-runner
tags:
- Gitlab
- Docker
- CD/CI
- Pipeline
- VSCode
---
In a previous article, we learned how to run GitLab CI/CD pipelines locally using the GitLab Runner container. In this article, we'll explore how to streamline the process by creating VS Code tasks that allow us to launch the GitLab Runner with a simple keyboard shortcut. This will provide a convenient way to execute our pipelines or specific jobs directly from within the VS Code environment.
## Prerequisites
- VS Code installed on your machine
- [Completion of the previous steps](local_gitlab_pipeline.md)
## Step 1: Create a `.vscode` Directory
First, open your project in VS Code. In the root directory of your project, create a new directory called `.vscode` if it doesn't already exist.
## Step 2: Create a `tasks.json` File
Inside the `.vscode` directory, create a new file called `tasks.json`. This file will define the tasks we want to create.
Add the following content to the `tasks.json` file:
```json
{
"version": "2.0.0",
"tasks": [
{
"label": "Run GitLab CI/CD Pipeline",
"type": "shell",
"command": "docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${workspaceFolder}:/builds/project -w /builds/project gitlab/gitlab-runner:latest gitlab-runner exec docker --docker-privileged"
},
{
"label": "Run Specific Job",
"type": "shell",
"command": "docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${workspaceFolder}:/builds/project -w /builds/project gitlab/gitlab-runner:latest gitlab-runner exec docker --docker-privileged",
"args": [
"--",
"<job-name>"
],
"problemMatcher": []
}
]
}
```
The `tasks` array contains two tasks: "Run GitLab CI/CD Pipeline" and "Run Specific Job". The commands specified in the `command` field are the same as the ones we used previously to execute the GitLab Runner container.
For the "Run Specific Job" task, we have an additional `args` field where you should replace `<job-name>` with the name of the specific job you want to execute.
## Step 3: Define Keybindings (Optional)
To make it even more convenient, you can define keybindings for the tasks. Open the keybindings settings in VS Code by going to **File** -> **Preferences** -> **Keyboard Shortcuts** (or by pressing `Ctrl+K Ctrl+S`).
Add the following keybindings to the keybindings settings file:
```json
[
{
"key": "ctrl+shift+p",
"command": "workbench.action.tasks.runTask",
"args": "Run GitLab CI/CD Pipeline"
},
{
"key": "ctrl+shift+j",
"command": "workbench.action.tasks.runTask",
"args": "Run Specific Job"
}
]
```
These keybindings assign the "Run GitLab CI/CD Pipeline" task to `Ctrl+Shift+P` and the "Run Specific Job" task to `Ctrl+Shift+J`. Note that `Ctrl+Shift+P` is bound to the Command Palette by default in VS Code, so either pick an unused combination or be aware that this overrides the default. Feel free to modify the keybindings according to your preference.
## Step 4: Adjust `.gitignore` File
To avoid inadvertently including artifacts generated during the local pipeline execution in your git commits, it's
essential to update your `.gitignore` file.
Open your project's `.gitignore` file and add the following line to exclude the generated artifacts:
```
# GitLab Runner artifacts
/builds/
```
This ensures that any files or directories created within the `/builds/` directory (which is used by the GitLab Runner container) will be ignored by git.
## Step 5: Run the Pipeline or Specific Job
You're now ready to run your GitLab CI/CD pipeline or specific job using the VS Code tasks. Press the assigned keybinding (`Ctrl+Shift+P` for the pipeline or `Ctrl+Shift+J` for a specific job) to launch the GitLab Runner container and execute the desired task.
## Conclusion
By creating VS Code tasks, we've made it even easier to run GitLab CI/CD pipelines or specific jobs locally using the GitLab Runner. With a simple keyboard shortcut, we can now execute our pipelines or test individual jobs directly from within the VS Code environment. Additionally, by adjusting the `.gitignore` file, we can ensure that artifacts generated during the local execution are not inadvertently included in git commits.
That's it! You've learned how to set up VS Code tasks to launch the GitLab Runner and exclude artifacts from being committed. Enjoy the streamlined process of running your pipelines and jobs with ease.

View File

@ -0,0 +1,65 @@
---
title: Running GitLab CI/CD Pipeline Locally with Docker-in-Docker using GitLab Runner Container
description: This article details how to use GitLab CI/CD pipelines locally which can be useful for testing and debugging purposes.
date: 2022-06-12
template: article.html
type: blog
author: jon
about: https://hub.docker.com/r/gitlab/gitlab-runner
tags:
- Gitlab
- Docker
- CD/CI
- Pipeline
- VSCode
---
Running GitLab CI/CD pipelines locally can be useful for testing and debugging purposes. While GitLab provides robust CI/CD capabilities on its platform, there may be scenarios where executing the pipeline locally within your development environment, such as Visual Studio Code (VS Code), can be beneficial. In this blog post, we'll explore how to set up and run a GitLab CI/CD pipeline locally using the GitLab Runner container with Docker-in-Docker (DinD) support.
## Prerequisites
- Docker installed on your machine
- Basic knowledge of GitLab CI/CD and Docker concepts
## Step 1: Pull the GitLab Runner Image
To begin, ensure that you have Docker installed on your machine. Open a terminal within VS Code or any command prompt and pull the latest GitLab Runner Docker image by executing the following command:
```shell
docker pull gitlab/gitlab-runner:latest
```
## Step 2: Execute the Pipeline or Multiple Jobs with DinD
Next, navigate to your project's root directory containing the `.gitlab-ci.yml` file. To execute the entire GitLab CI/CD pipeline locally or specific jobs within it with Docker-in-Docker functionality, use the following command:
```shell
docker run --rm -v /var/run/docker.sock:/var/run/docker.sock \
-v /path/to/your/project:/builds/project \
-w /builds/project gitlab/gitlab-runner:latest \
gitlab-runner exec docker --docker-privileged [<job-name1>,<job-name2>,<job-name3>,...]
```
Replace `/path/to/your/project` with the local path to your project directory, and `<job-name1>,<job-name2>,<job-name3>,...` with the comma-separated list of job names you want to execute (optional).
By mounting the Docker socket (`/var/run/docker.sock`) as a volume inside the container and using the `--docker-privileged` flag, the GitLab Runner container gains access to Docker-in-Docker functionality. This allows the execution of Docker commands within the runner.
If you don't specify any job names, the GitLab Runner container will execute all the jobs defined in your `.gitlab-ci.yml` file sequentially, providing a local simulation of the GitLab CI/CD pipeline execution.
If you specify one or more job names, only those specific jobs will be executed within the GitLab Runner container, allowing you to selectively test and debug multiple parts of your pipeline.
Make sure your project directory contains the necessary files, configurations, and dependencies required for the pipeline or jobs to run successfully.
Remember to exercise caution when running Docker-in-Docker, as it can have security implications. Ensure that your environment is appropriately secured and follow best practices.
## Conclusion
Running GitLab CI/CD pipelines locally within your development environment can greatly enhance the development and testing process. By leveraging the GitLab Runner container with Docker-in-Docker support, you can simulate the GitLab CI/CD pipeline execution within tools like VS Code. This enables you to test and validate your pipeline configurations before pushing them to the GitLab platform.
That's it! You now have the knowledge to run an entire GitLab CI/CD pipeline or multiple jobs locally with Docker-in-Docker using the GitLab Runner container. Happy pipeline testing and debugging!

View File

@ -1,29 +0,0 @@
---
title: Site Refurbishment
description: An update on the site refurbishment and the automation behind it.
date: 2023-08-01
template: article.html
type: blog
author: jon
copyrightHolder: jon
about: https://nofusscomputing.com
tags:
- Automation
- Website
- Update
---
It's been a while and, for all intents and purposes, prior to today you would not have been mistaken for thinking that our site was dead in the water. I could give reasons, but the reality is it's an excuse and we all know that *"they're like arseholes, everyone's got one!!"* As it currently stands, I find myself with a little extra time on my hands, so this site revamp is the start and first visibility of what I've been doing.
I've spent a good part of a few decades working with computers. Whilst this has been an interesting journey, in the last few years I've discovered Configuration-as-Code. The concept itself wasn't exactly new to me, I just put off learning anything about it past the name. As creatures of habit, us humans, once we have found our way, tend to stick to that routine or, better stated, with what we know.
Moving on from the early days (Norton Ghost and Clonezilla) with manually built images for every different type of machine — which became very monotonous to manually update with patches. The opportunity presented itself recently where, for the first time in over two decades, I'm required to rebuild my infrastructure from scratch. As daunting as this sounds, given the leaps and bounds that have occurred in the last decade, even more so in the last five years, technologies have evolved to the point where it now takes a fraction of the time to do what used to take upwards of a week. Upgrades are no longer "rebuild the image from scratch, clone and redeploy". Now, I punch the keyboard and characters show on the screen, then I run a program, Ansible. It reads the gibberish (JSON/YAML) and presto, Bob's your uncle, a deploy has occurred. Fresh deployment or updates, it doesn't matter: run the same code again and Ansible ensures it's set up how it's supposed to be. Need to update a piece of software? Too easy, change the version number in your config files.
Other things of note worthy mention:
- For family and friends, I host a password vault free of charge, <https://vault.nofusscomputing.com>. This enables you to install an app on your phone and within your web browser which lets you sync your passwords, identities and secrets — all using zero-trust full encryption. Best feature of all, you only have to remember your vault password, as everything else is stored in the vault.
- Helpdesk now deployed publicly, <https://helpdesk.nofusscomputing.com>. Along with automating everything else a Service Catalog is being extended to automate other tasks.
- Website updating now occurs automagically. We do this via Gitlab using CD/CI pipelines. Now I just edit a file, push the changes and the changes deploy my site on the interwebs.
- Our [projects](../../projects/index.md) from [GitHub](https://github.com/NoFussComputing) and [GitLab](https://gitlab.com/nofusscomputing) deploy their docs to our site, again automagically.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 136 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 136 KiB

View File

@ -8,6 +8,6 @@ hide:
- toc
---
<div style="background: url(assets/nfc_revamp.png) no-repeat center top; background-size: 282px 90px; width: 100%; height: 120px; display: flex;">
<span style="align-self: flex-end; width: 100%; text-align: center; color: #009900; font-weight: bold; font-size: 14pt;">Using Technology to make life easier</span>
</div>
![No Fuss Computing](assets/nfc_header.png)
Home Page

View File

@ -1,156 +0,0 @@
---
title: Ansible Projects
description: No Fuss Computings Ansible Projects
date: 2023-06-01
template: project.html
about: https://gitlab.com/nofusscomputing/projects/ansible
---
This section of the website contains Ansible projects and the details of how to use said projects. Across All of our Ansible projects we standardize as much as possible.
Any playbooks and roles we create are designed with the below Ansible setup in mind. Whilst there are many ways ~~to skin a cat~~ to set up Ansible, if you deviate from below you will be required to test to ensure that, if using our playbooks/roles, they work as intended. If you find that there is a better way of setting up Ansible, raise an issue with your proposal and we'll discuss.
- [No Fuss Computing playbooks](playbooks/index.md)
- [No Fuss Computing roles](roles/index.md)
## Inventory Setup
The inventory should be set up under an SCM, git in this case, so that a version history is maintained. This also fosters a full audit trail as part of the complete host lifecycle. Ideally the inventory, along with the directories `files` and `templates`, should be contained in its own git repository. Using this method ensures that the git history pertains only to the inventory, and therefore to any install/configuration of a host.
!!! tip
If you must include `playbooks` and `roles` within your inventory repository, it's a good idea that these still be their own repositories, added to the inventory repository as git sub-modules.
Ansible inventory directory structure.
``` bash
.
├── ansible.cfg
├── files
│   ├── all
│   │   ├──
│   │   ├──
│   ├── node1
│   │   ├──
│   │   ├──
│   ├── k3s-prod
│   │   ├── Ingress-manifest-AWX.yaml
│   │   └── deployment-manifest-test_web_server.yaml
│   ├── k3s-testing
│      └── deployment-manifest-test_web_server.yaml
|
├── .inventory_root
├── inventory
│   ├── development
│   │   ├── group_vars
│   │   │   ├── all.yaml
│   │   │   ├── debian.yaml
│   │   ├── hosts.yaml
│   │   └── host_vars
│   │   ├── laptop2.yaml
│   │   ├── node1.yaml
│   │   ├── node2.yaml
│   │   ├── node3.yaml
│   │   ├── node4.yaml
│   │   └── node5.yaml
│   └── production
│   ├── group_vars
│   │   ├── all
│   │   │   ├── main.yaml
│   │   │   ├── vault.yaml
│   │   │   └── versions_software.yaml
│   │   ├── awx.yaml
│   ├── hosts.yaml
│   └── host_vars
│   ├── node1.yaml
│   ├── k3s-prod
│   │   ├── backup.yaml
│   │   ├── kubernetes.yaml
│   │   ├── main.yaml
│   │   └── vault.yaml
│   ├── k3s-testing
│      ├── main.yaml
│      └── vault.yaml
├── playbooks
│   ├── all.yaml
├── README.md
└── templates
├── hosts
   │ └── k3s-prod
    └── HelmChart-manifest-NginX-ingress.yaml
└── groups
```
| name | Type | Description |
|:---|:---:|:---|
| ansible.cfg | _file_ | Ansible configuration file applicable to this inventory |
| files | _directory_ | Contain files that a host may require. Playbook task iterates over by hostname and group name. Sub-directories for hostname / group-name |
| .inventory_root | _file_ | This file is used by the `nfc_common` role to determine the root directory of the inventory. |
| inventory | _directory_ | Ansible inventory. If multiple inventories exist can use sub folders. |
| playbooks | _directory_ | Should be a git submodule. _This keeps inventory and playbooks SCM related to each only._ |
| README.md | _file_ | Inventory readme with applicable info. |
| templates | _directory_ | This directory is the same as the `files` directory except contains jinja templates. |
### Inventory
Naming of host inventory files is to use the hostname portion of the FQDN only. i.e. for a host with a FQDN of `myhostname.domain.tld`, its `inventory_hostname` would be `myhostname`. This requirement is a must as many parts of our roles and playbooks depend upon this value matching the DNS system.
#### hosts file
The hosts file `hosts.yaml` contains all hosts and the groups they are part of.
### Playbooks
### Templates
The templates directory contains only two sub-directories, `groups` and `hosts`. Under each of these folders are folders by group/host name that contain template files. Preference is leaning towards not using the `.j2` extension, as the IDE may lose functionality by using it.
Naming of template files is in the format `{item-type}-{what-uses}-{friendly name that uses underscores not hyphens}.{file_extension}`
| Item Type | what uses | Notes
|:---|:---:|:---|
| config | bind | Configuration file for bind dns server |
| dnszone | bind | a bind server DNS zone |
| `{kubernetes kind}` | manifest | A kubernetes manifest |
#### Feature gates
Templates, when added to the group folder, should be set up with a feature gate. This enables simple YAML to be defined to allow the template to deploy.
Example of a YAML declaration that a host/group would read.
``` yaml
feature_gates:
is_prime: false
monitoring: true
operator_awx: true
operator_grafana: true
operator_prometheus: true
postgres_cluster: true
rook_ceph: true
```
Setting a feature gate on a template is as simple as enclosing the entire contents of the file with a Jinja if statement. i.e. `{% if path.feature_gates.monitoring | default(false) | bool %}the content here{% endif %}`
## AWX / Tower / Automation Platform
### Prime host
If you use a system like AWX / Tower / Automation Platform the inventory should be designed in a way that you have a prime host. The prime host is a single host that once it exists, it's capable of rebuilding all of the infrastructure within the inventory. Using the prime host method, you only require the variable secrets (vault encrypted) of the prime host and only those pertinent to rebuilding the prime host. This should only be the backup decryption key (vault encrypted).
!!! warning Warning
Prime Host requires that the backup decryption key be updated within the inventory whenever it changes. There is also a requirement that the vault encryption key be available and not stored on infrastructure that, were it to not exist, would prevent you from accessing the vault key — i.e. keep it in a password manager.
## ToDo
- Explain usage of file `.inventory_root` which must exist as nfc_common _(todo: see kubernetes playbook/master)_ _may no longer be required a project structure is known along with using variable `playbook_dir`_

View File

@ -1,81 +0,0 @@
---
title: Ansible Roles
description: No Fuss Computings Ansible Roles Projects
date: 2023-11-10
template: project.html
about: https://gitlab.com/nofusscomputing/projects/ansible
---
This section of the website contains Ansible roles and the details of how to use said projects. Across All of our Ansible roles we standardize as much as possible. This document will contain the details of said standardization.
Our roles:
- Common
- Docker Management
- [Firewall](firewall/index.md)
- Git Configuration
- [Home Assistant](homeassistant/index.md)
- [Kubernetes](kubernetes/index.md)
- SSH
## Role Requirements
This section covers what will, by default, be part of all Ansible roles we create.
=== "ansible.builtin.set_stats"
As part of the role, setting of ansible stats with `ansible.builtin.set_stats` must be provided. This enables a single variable that can be used after the play has completed. Usage of a role that includes the usage of `ansible.builtin.set_stats` within AWX enables population of the artifacts and passing of the stats between workflows/job templates.
```yaml
- name: Stat Values
ansible.builtin.set_fact:
stat_values: |
{
"host_{{ inventory_hostname | replace('.', '_') | replace('-', '_') }}": {
"roles": {
role_name: {
"enabled": true,
"installed": false,
"empty_list": [],
"empty_dict": {}
}
},
playbooks: {
"{{ inventory_hostname }}": "here"
}
}
}
- name: Create Final Stats not Per Host
ansible.builtin.set_stats:
data: "{{ stat_values | from_yaml }}"
per_host: false
aggregate: true
- name: Clear Stat Values
ansible.builtin.set_fact:
stat_values: null
```
- `Stat Values` is only required if the variable names require expansion. _Can be omitted if no variable expansion required for variable name._
- `Create Final Stats not Per Host` sets the artifacts/stats.
- `Clear Stat Values` remove the stat fact. only required if using `Stat Values`.
!!! tip AWX Gotcha
    AWX requires that `per_host` be set to `false` when setting stats for artifacts to work. Hence the structure of the artifacts above uses the hostname prefixed with `host_`. This method enables programmatic checking of whether a stat is per host, due to the presence of `host_` in the dictionary name.
=== "Variable naming"
- All Role Variables to be prefixed with the role name.

View File

@ -1,5 +0,0 @@
# Docker GLPI
| :red_circle: This page is a placeholder for the python-gitlab-management repo's docs. |
|:---|
| _If you can see this page there has been an error, please report the issue on gitlab_ |

View File

@ -1,5 +0,0 @@
# Docker GLPI
| :red_circle: This page is a placeholder for the python-gitlab-management repo's docs. |
|:---|
| _If you can see this page there has been an error, please report the issue on gitlab_ |

View File

@ -91,20 +91,18 @@ class Data:
if 'gitlab.com/-/ide/project' not in url: # ignore gitlab ide links
if 'nofusscomputing.com' not in url: # ignore gitlab ide links
link = self.parse_url(url)
link = self.parse_url(url)
hyperlink_source_file = {'name': source_file, 'location': link_location}
hyperlink_source_file = {'name': source_file, 'location': link_location}
if link['url_id'] in data['hyperlinks']:
if link['url_id'] in data['hyperlinks']:
data['hyperlinks'][link['url_id']]['source_files'].append(hyperlink_source_file)
data['hyperlinks'][link['url_id']]['source_files'].append(hyperlink_source_file)
else:
else:
link['source_files'] = [ hyperlink_source_file ]
data['hyperlinks'][link['url_id']] = link
link['source_files'] = [ hyperlink_source_file ]
data['hyperlinks'][link['url_id']] = link
events = [self.process_browser_log_entry(entry) for entry in self.driver.get_log('performance')]

View File

@ -13,11 +13,7 @@ class Test:
def setup_method(self):
self.ignore_url_alive_check = {
'gitlab.com': [
'nofusscomputing/infrastructure/website//-/new/development'
]
}
pass
@pytest.mark.parametrize(
@ -40,29 +36,12 @@ class Test:
packages.urllib3.disable_warnings(category=InsecureRequestWarning)
request = get(data['url'], verify=False)
skip_test = False
print(str(data) + str(request.status_code))
if data['domain'] in self.ignore_url_alive_check:
if data['request_path'] in self.ignore_url_alive_check[data['domain']]:
skip_test = True
if not skip_test:
assert (
request.status_code == 200
or
request.status_code == 401
or
request.status_code == 403
), (
f"Hyperlink [{data['url_id']}] to location [{data['url']}] failed,"
f"with status [{request.status_code}].")
else:
assert True
assert request.status_code == 200, (
f"Hyperlink [{data['url_id']}] to location [{data['url']}] failed,"
f"with status [{request.status_code}].")
@pytest.mark.parametrize(