Compare commits

9 Commits (infrastruc... → articles)

SHA1: fb75dd57b1, bab0100b1e, 455e3832ac, b71d5971d3, c5da8111b8, ef7233075a, deafb52e21, 2d300e02c0, 02843243ee

385  .gitlab-ci.yml
@@ -5,9 +5,6 @@ variables:

include:

  - local: .gitlab/project_docs.gitlab-ci.yml

  - project: nofusscomputing/projects/gitlab-ci
    ref: development
    file:
@@ -15,87 +12,6 @@ include:
      - template/automagic.gitlab-ci.yaml


Get Project Documentation:
  extends: .fetch_project_docs
  parallel:
    matrix:

      - ASSEMBLE_PROJECT_NAME: phpipam_scan_agent
        ASSEMBLE_PROJECT_ID: 55052132
        ASSEMBLE_PROJECT_PATH: projects/ansible/collection/phpipam_scan_agent

      - ASSEMBLE_PROJECT_NAME: gitlab-ci
        ASSEMBLE_PROJECT_ID: 28543717
        ASSEMBLE_PROJECT_PATH: projects/gitlab-ci

      - ASSEMBLE_PROJECT_NAME: operations
        ASSEMBLE_PROJECT_ID: 32419575
        ASSEMBLE_PROJECT_PATH: operations

      - ASSEMBLE_PROJECT_NAME: git_configuration
        ASSEMBLE_PROJECT_ID: 45705596
        ASSEMBLE_PROJECT_PATH: projects/ansible/roles/git_configuration

      - ASSEMBLE_PROJECT_NAME: docker-mail
        ASSEMBLE_PROJECT_ID: 33611657
        ASSEMBLE_PROJECT_PATH: projects/docker-mail

      - ASSEMBLE_PROJECT_NAME: execution_environment
        ASSEMBLE_PROJECT_ID: 45741845
        ASSEMBLE_PROJECT_PATH: projects/ansible/execution_environment

      - ASSEMBLE_PROJECT_NAME: ldap_self_service
        ASSEMBLE_PROJECT_ID: 48321671
        ASSEMBLE_PROJECT_PATH: projects/ldap_self_service

      - ASSEMBLE_PROJECT_NAME: docker-glpi
        ASSEMBLE_PROJECT_ID: 12928828
        ASSEMBLE_PROJECT_PATH: projects/glpi

      - ASSEMBLE_PROJECT_NAME: kubernetes_monitoring
        ASSEMBLE_PROJECT_ID: 50510268
        ASSEMBLE_PROJECT_PATH: projects/kubernetes_monitoring

      - ASSEMBLE_PROJECT_NAME: ansible_playbooks
        ASSEMBLE_PROJECT_ID: 46364551
        ASSEMBLE_PROJECT_PATH: projects/ansible/playbooks

      - ASSEMBLE_PROJECT_NAME: common
        ASSEMBLE_PROJECT_ID: 52226103
        ASSEMBLE_PROJECT_PATH: projects/ansible/roles/common

      - ASSEMBLE_PROJECT_NAME: firewall
        ASSEMBLE_PROJECT_ID: 51640016
        ASSEMBLE_PROJECT_PATH: projects/ansible/roles/firewall

      - ASSEMBLE_PROJECT_NAME: homeassistant
        ASSEMBLE_PROJECT_ID: 51020674
        ASSEMBLE_PROJECT_PATH: projects/ansible/roles/homeassistant

      - ASSEMBLE_PROJECT_NAME: kubernetes
        ASSEMBLE_PROJECT_ID: 51640029
        ASSEMBLE_PROJECT_PATH: projects/ansible/roles/kubernetes

      - ASSEMBLE_PROJECT_NAME: itil_runbooks
        ASSEMBLE_PROJECT_ID: 54680811
        ASSEMBLE_PROJECT_PATH: projects/itil/runbooks

      # - ASSEMBLE_PROJECT_NAME:
      #   ASSEMBLE_PROJECT_ID:
      #   ASSEMBLE_PROJECT_PATH:


Documentation.Lint:
  rules:
    - when: never


Documentation.Build:
  rules:
    - when: never


Website.Lint:
  extends: .Lint_Markdown_Docs
  variables:
@@ -108,46 +24,136 @@ Website.Build:
  resource_group: build


Merge Project Docs:
  extends: .merge_project_docs
  needs:
    - job: Get Project Documentation
      artifacts: true
    - job: Website.Build
      artifacts: true


pages:
  stage: deploy
  variables:
    GIT_STRATEGY: none
Assemble.Website.Prepare:
  # extends: .MKDocs_Build
  stage: prepare
  # image: python:3.7.5-buster
  # turn the mkdocs build template script section into a command template so that customizations can be added.
  script:
    - mkdir -p "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/source"
    # - mkdir -p "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build"
    - echo "fetch artifacts from child repos"
    - echo "copy static pages source to" "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/source"
    - echo "copy sub-repo source to (merge)" "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/source"
    - echo "mkdocs build source dir"
    # - mkdir -p "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/full-site"
    - mv "$CI_PROJECT_DIR/artifacts/build/Website.Build/build" "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/"
    # - ls -laR $CI_PROJECT_DIR
    # remove ops placeholder index.html

    - echo "[DEBUG] fetch operations docs"
    - 'HTTP_STATUS_FILE=$(curl --location -o /dev/null --silent --head --write-out "%{http_code}" --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/32419575/jobs/artifacts/development/download?job=Documentation%2EBuild")'
    - echo "[DEBUG] HTTP_STATUS_FILE=$HTTP_STATUS_FILE"
    - |
      if [ -d "$CI_PROJECT_DIR/artifacts/prepare/Merge.Project.Docs/build" ]; then

        mv "$CI_PROJECT_DIR/artifacts/prepare/Merge.Project.Docs/build" public;

      if [ "0$HTTP_STATUS_FILE" != "0200" ]; then
        echo "[ERROR] Unable to fetch Job Artifacts due to HTTP status of $HTTP_STATUS_FILE";
        # exit 1;
      else

        mv "$CI_PROJECT_DIR/artifacts/build/Website.Build/build" public;

        curl --location --output operations-artifacts.zip --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/32419575/jobs/artifacts/development/download?job=Documentation%2EBuild";
        unzip operations-artifacts.zip;
        rm -Rf "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/operations/index.html";
        cp -rvf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build/build/operations" "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/";
        rm -Rf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build";
      fi
  needs:
    - job: Website.Build
      artifacts: true
    - job: 'Merge Project Docs'
      optional: true
  environment:
    name: Gitlab Pages
    url: $CI_PAGES_URL

    - echo "[DEBUG] fetch gitlab-ci project docs"
    - 'HTTP_STATUS_FILE=$(curl --location -o /dev/null --silent --head --write-out "%{http_code}" --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/28543717/jobs/artifacts/development/download?job=Documentation%2EBuild")'
    - echo "[DEBUG] HTTP_STATUS_FILE=$HTTP_STATUS_FILE"
    - |
      if [ "0$HTTP_STATUS_FILE" != "0200" ]; then
        echo "[ERROR] Unable to fetch Job Artifacts due to HTTP status of $HTTP_STATUS_FILE";
        # exit 1;
      else
        curl --location --output gitlab-ci-artifacts.zip --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/28543717/jobs/artifacts/development/download?job=Documentation%2EBuild";
        unzip gitlab-ci-artifacts.zip;
        rm -Rf "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/gitlab-ci";
        cp -rvf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build/build/projects/gitlab-ci" "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/gitlab-ci/";
        rm -Rf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build";
      fi

    - echo "[DEBUG] fetch git_configuration project docs"
    - 'HTTP_STATUS_FILE=$(curl --location -o /dev/null --silent --head --write-out "%{http_code}" --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/45705596/jobs/artifacts/development/download?job=Documentation%2EBuild")'
    - echo "[DEBUG] HTTP_STATUS_FILE=$HTTP_STATUS_FILE"
    - |
      if [ "0$HTTP_STATUS_FILE" != "0200" ]; then
        echo "[ERROR] Unable to fetch Job Artifacts due to HTTP status of $HTTP_STATUS_FILE";
        # exit 1;
      else
        curl --location --output git_configuration-artifacts.zip --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/45705596/jobs/artifacts/development/download?job=Documentation%2EBuild";
        unzip git_configuration-artifacts.zip;
        rm -Rf "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/git_configuration";
        cp -rvf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build/build/projects/git_configuration" "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/git_configuration/";
        rm -Rf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build";
      fi

    - echo "[DEBUG] fetch docker-mail project docs"
    - 'HTTP_STATUS_FILE=$(curl --location -o /dev/null --silent --head --write-out "%{http_code}" --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/33611657/jobs/artifacts/development/download?job=Documentation%2EBuild")'
    - echo "[DEBUG] HTTP_STATUS_FILE=$HTTP_STATUS_FILE"
    - |
      if [ "0$HTTP_STATUS_FILE" != "0200" ]; then
        echo "[ERROR] Unable to fetch Job Artifacts due to HTTP status of $HTTP_STATUS_FILE";
        # exit 1;
      else
        curl --location --output docker-mail-artifacts.zip --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/33611657/jobs/artifacts/development/download?job=Documentation%2EBuild";
        unzip docker-mail-artifacts.zip;
        rm -Rf "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/docker-mail";
        cp -rvf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build/build/projects/docker-mail" "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/docker-mail/";
        rm -Rf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build";
      fi

    - echo "[DEBUG] fetch execution_environment project docs"
    - 'HTTP_STATUS_FILE=$(curl --location -o /dev/null --silent --head --write-out "%{http_code}" --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/45741845/jobs/artifacts/development/download?job=Documentation%2EBuild")'
    - echo "[DEBUG] HTTP_STATUS_FILE=$HTTP_STATUS_FILE"
    - |
      if [ "0$HTTP_STATUS_FILE" != "0200" ]; then
        echo "[ERROR] Unable to fetch Job Artifacts due to HTTP status of $HTTP_STATUS_FILE";
        # exit 1;
      else
        curl --location --output execution_environment-artifacts.zip --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/45741845/jobs/artifacts/development/download?job=Documentation%2EBuild";
        unzip execution_environment-artifacts.zip;
        rm -Rf "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/execution_environment";
        cp -rvf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build/build/projects/execution_environment" "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/execution_environment/";
        rm -Rf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build";
      fi

    # # below 2 lines commented out as need to figure out how to download artifacts.
    # - rm -Rf "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/operations/index.html"
    # - echo "cp -rvn" "$CI_PROJECT_DIR/artifacts/build/Website.Build/build/operations" "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/"

    # # copy ops pages into main site, not overwriting
    # #- cp -rvn "$CI_PROJECT_DIR/artifacts/build/Static Pages/build/"* "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/"

    # # below line commented out as need to figure out how to download artifacts.
    # - cp -rvf "$CI_PROJECT_DIR/artifacts/build/Website.Build/build/operations" "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/"

    - echo "copy prepare files (sitemap, search file) to fullsite (overwrite)"
    - echo "copy each sub-repo build to fullsite (merge)"
    # - echo mv "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/full-site" public
  needs:
    - 'Website.Build'
  # only available in gitlab premium
  # use: - "curl -O --header 'PRIVATE-TOKEN: ${GITLAB_API_TOKEN}' https://gitlab.example.com/api/v4/projects/${PROJECT_A_ID}/jobs/${REMOTE_JOB_ID}/artifacts/${REMOTE_FILENAME}"
  # - project: nofusscomputing/ops
  #   job: Static Pages
  #   ref: development
  #   artifacts: true
  artifacts:
    expire_in: 24 hrs
    when: always
    paths:
      - public
      - "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/*"
  resource_group: build
  rules:
    # - if: '$CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != "master" && $CI_PIPELINE_SOURCE == "push"'
    #   when: on_success
    #   when: always

    # Build docs on tag so they can be downloaded from the tag job and are always available.
    - if: # condition_git_tag
@@ -155,14 +161,14 @@ pages:
        $CI_COMMIT_BRANCH == null
      exists:
        - '{docs/**,pages/**}/*.md'
      when: on_success
      when: always

    - if: # condition_master_branch_push
        $CI_COMMIT_BRANCH == "master" &&
        $CI_PIPELINE_SOURCE == "push"
      exists:
        - '{docs/**,pages/**}/*.md'
      when: on_success
      when: always

    - if: # condition_dev_branch_push
        $CI_COMMIT_BRANCH == "development" &&
@@ -181,7 +187,7 @@ pages:
      # paths:
      #   - '{docs/**,pages/**}/*.md'
      # compare_to: 'master'
      when: on_success
      when: always

    - if: # condition_not_master_or_dev_push
        $CI_COMMIT_BRANCH != "master" &&
@@ -193,7 +199,73 @@ pages:
        paths:
          - '{docs/**,pages/**}/*.md'
        compare_to: 'development'
      when: on_success
      when: always

    - when: never


pages:
  stage: deploy
  variables:
    GIT_STRATEGY: none
  script:
    - mv "$CI_PROJECT_DIR/artifacts/prepare/Assemble.Website.Prepare/build" public
  needs: [ 'Assemble.Website.Prepare' ]
  environment:
    name: staging
    url: $CI_PAGES_URL
  artifacts:
    paths:
      - public
  rules:
    # - if: '$CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != "master" && $CI_PIPELINE_SOURCE == "push"'
    #   when: on_success

    # Build docs on tag so they can be downloaded from the tag job and are always available.
    - if: # condition_git_tag
        $CI_COMMIT_TAG != null &&
        $CI_COMMIT_BRANCH == null
      exists:
        - '{docs/**,pages/**}/*.md'
      when: always

    - if: # condition_master_branch_push
        $CI_COMMIT_BRANCH == "master" &&
        $CI_PIPELINE_SOURCE == "push"
      exists:
        - '{docs/**,pages/**}/*.md'
      when: always

    - if: # condition_dev_branch_push
        $CI_COMMIT_BRANCH == "development" &&
        (
          $CI_PIPELINE_SOURCE == "pipeline"
          ||
          $CI_PIPELINE_SOURCE == "push"
          ||
          $CI_PIPELINE_SOURCE == "schedule"
        )
      # See nofusscomputing/projects/gitlab-ci#34 for extra $CI_PIPELINE_SOURCE
      exists:
        - '{docs/**,pages/**}/*.md'
      # No changes check # See nofusscomputing/projects/gitlab-ci#34
      # changes:
      #   paths:
      #     - '{docs/**,pages/**}/*.md'
      #   compare_to: 'master'
      when: always

    - if: # condition_not_master_or_dev_push
        $CI_COMMIT_BRANCH != "master" &&
        $CI_COMMIT_BRANCH != "development" &&
        $CI_PIPELINE_SOURCE == "push"
      exists:
        - '{docs/**,pages/**}/*.md'
      changes:
        paths:
          - '{docs/**,pages/**}/*.md'
        compare_to: 'development'
      when: always

    - when: never

@@ -290,18 +362,44 @@ Unit Tests:
  # - echo "placeholder job for integration tests" > "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/DETEMEME.txt"



.Add_SSHKey: &Add_SSHKey_Before_Script |
  mkdir -p ~/.ssh
  chmod 700 ~/.ssh
  eval $(ssh-agent -s)
  SSH_KEY_NAME=SSH_PRIVATE_KEY_${ANSIBLE_USER}
  echo "Debug SSH_KEY_NAME[$SSH_KEY_NAME]"
  chmod 700 "${!SSH_KEY_NAME}"
  ssh-add "${!SSH_KEY_NAME}"
  # update next line so that ca key can be obtained. original source is ansible repo
  # HOST_SSH_ID=$(cat roles/openssh-server/files/nww-nl/host_ca.pub)
  HOST_SSH_ID=$(cat ${SSH_HOST_CA})
  echo DEBUG HOST_SSH_ID[$HOST_SSH_ID]
  echo "@cert-authority *.networkedweb.com $HOST_SSH_ID" > ~/.ssh/known_hosts
  chmod 700 ~/.ssh/known_hosts
  ls -la ~/.ssh


public_website:
  stage: publish
  image: alpine
  image: debian:buster-slim
  variables:
    GIT_STRATEGY: none
  before_script:
    - ls -la /html
    - if [ "0$ANSIBLE_USER" == "0" ]; then ANSIBLE_USER=deploy; fi
    - echo Debug ANSIBLE_USER[$ANSIBLE_USER]
    - apt update
    - apt install --no-install-recommends -y ssh
    - ls -la "$CI_PROJECT_DIR/artifacts/prepare/Assemble.Website.Prepare/build"
    - mv "$CI_PROJECT_DIR/artifacts/prepare/Assemble.Website.Prepare/build" "$CI_PROJECT_DIR/public"
    - rm -Rf "$CI_PROJECT_DIR/public/build"
    - ls -la "$CI_PROJECT_DIR"
    - ls -la "$CI_PROJECT_DIR/public"
    - *Add_SSHKey_Before_Script
  script:
    - rm -rf /html/*
    - cp -r "$CI_PROJECT_DIR/artifacts/prepare/Merge.Project.Docs/build"/* /html/
    - ls -laR /html/
  needs: [ 'Merge Project Docs', 'Unit Tests']
    - ssh ${ANSIBLE_USER}@${HOST_PUBLIC_WEBSITE} sudo rm -Rf ${PUBLIC_WEBSITE_PATH}/*
    - scp -r public/* ${ANSIBLE_USER}@${HOST_PUBLIC_WEBSITE}:${PUBLIC_WEBSITE_PATH}
  needs: [ 'Assemble.Website.Prepare', 'Unit Tests']
  resource_group: production
  environment:
    name: production
@@ -310,6 +408,22 @@ public_website:
    paths:
      - public
  rules:
    # - if: '$CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH == "master" && $CI_PIPELINE_SOURCE == "push"'
    #   when: on_success
    # - if: '$CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH == "development" && $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_PIPELINE_SOURCE == "push"'
    #   when: manual
    # - if: '$CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != "development" && $CI_COMMIT_BRANCH != "master" && $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_PIPELINE_SOURCE == "push"'
    #   when: manual
    #   allow_failure: true

    # Build docs on tag so they can be downloaded from the tag job and are always available.
    # - if: # condition_git_tag
    #     $CI_COMMIT_TAG != null &&
    #     $CI_COMMIT_BRANCH == null
    #   exists:
    #     - '{docs/**,pages/**}/*.md'
    #   when: always

    - if: # condition_master_branch_push
        $CI_COMMIT_BRANCH == "master" &&
        $CI_PIPELINE_SOURCE == "push"
@@ -334,9 +448,20 @@ public_website:
      # paths:
      #   - '{docs/**,pages/**}/*.md'
      # compare_to: 'master'
      allow_failure: true
      when: manual

    # - if: # condition_not_master_or_dev_push
    #     $CI_COMMIT_BRANCH != "master" &&
    #     $CI_COMMIT_BRANCH != "development" &&
    #     $CI_PIPELINE_SOURCE == "push"
    #   exists:
    #     - '{docs/**,pages/**}/*.md'
    #   changes:
    #     paths:
    #       - '{docs/**,pages/**}/*.md'
    #     compare_to: 'development'
    #   when: manual
    #   allow_failure: true

    - when: never
  tags:
    - production
    - website

@@ -1,154 +0,0 @@
---

# Variables required:
#   ASSEMBLE_PROJECT_NAME: ''
#   ASSEMBLE_PROJECT_ID: ''
#   ASSEMBLE_PROJECT_PATH: ''
.fetch_project_docs:
  stage: chores
  needs: []

  variables:
    GIT_STRATEGY: none
  before_script:
    - mkdir -p "$CI_PROJECT_DIR/artifacts/project_docs"

  script:
    - echo "[DEBUG] Fetch Project ${ASSEMBLE_PROJECT_NAME}"

    - 'HTTP_STATUS_FILE=$(curl --location -o /dev/null --silent --head --write-out "%{http_code}" --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/${ASSEMBLE_PROJECT_ID}/jobs/artifacts/development/download?job=Documentation%2EBuild")'

    - echo "[DEBUG] HTTP_STATUS_FILE=$HTTP_STATUS_FILE"

    - |
      if [ "0$HTTP_STATUS_FILE" != "0200" ]; then

        echo "[ERROR] Unable to fetch Job Artifacts due to HTTP status of $HTTP_STATUS_FILE";

        # exit 1;

      else

        curl --location --output artifacts.zip --header "PRIVATE-TOKEN: ${GIT_COMMIT_TOKEN}" "https://gitlab.com/api/v4/projects/${ASSEMBLE_PROJECT_ID}/jobs/artifacts/development/download?job=Documentation%2EBuild";

        unzip artifacts.zip;

        # rm -Rf "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/$CI_JOB_NAME/build/projects/gitlab-ci";

        cp -rvf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build/build/${ASSEMBLE_PROJECT_PATH}" "$CI_PROJECT_DIR/artifacts/project_docs/$(echo -n "${ASSEMBLE_PROJECT_PATH}" | sed -e "s/\//./g")/";

        rm -Rf "$CI_PROJECT_DIR/artifacts/build/Documentation.Build";
      fi

  artifacts:
    expire_in: 24 hrs
    when: always
    paths:
      - "$CI_PROJECT_DIR/artifacts/"
  rules:

    # Docs must always be built for:
    # - git tag
    # - dev branch
    # - master branch
    - if: # condition_git_tag
        (
          $CI_COMMIT_TAG
        )
        ||
        (
          $CI_COMMIT_BRANCH == "development"
          &&
          (
            $CI_PIPELINE_SOURCE == "pipeline"
            ||
            $CI_PIPELINE_SOURCE == "push"
            ||
            $CI_PIPELINE_SOURCE == "schedule"
          )
        )
        ||
        (
          $CI_COMMIT_BRANCH == "master"
          &&
          $CI_PIPELINE_SOURCE == "push"
        )
      exists:
        - '{docs/**,pages/**}/*.md'
      when: always

    - when: never



.merge_project_docs:
  stage: prepare
  variables:
    GIT_STRATEGY: none
  before_script:
    - mkdir -p $CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/Merge.Project.Docs
    - mv "$CI_PROJECT_DIR/artifacts/build/Website.Build/build" "$CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/Merge.Project.Docs/"
  script:

    - ls -l ${CI_PROJECT_DIR}/artifacts/project_docs/

    - | # Merge Project docs to website
      for i in `ls ${CI_PROJECT_DIR}/artifacts/project_docs/`; do

        echo "Found Path - $i";

        export JOIN_PROJECT_PATH=$(echo -n "${i}" | sed -e "s/\./\//g");

        echo "set as project path [${JOIN_PROJECT_PATH}]";

        rm -Rf $CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/Merge.Project.Docs/build/${JOIN_PROJECT_PATH};

        cp -rvf $CI_PROJECT_DIR/artifacts/project_docs/${i} $CI_PROJECT_DIR/artifacts/$CI_JOB_STAGE/Merge.Project.Docs/build/${JOIN_PROJECT_PATH}/;

      done

    - | # Assemble website and project sources to build search index
      echo "ToDo: build website and docs together to get the search index to add to the website";

    - | # Join sitemaps together
      echo "ToDo: Join all Sitemaps together into one";

  artifacts:
    expire_in: 24 hrs
    when: always
    paths:
      - "$CI_PROJECT_DIR/artifacts/"

  rules:

    # Docs must always be built for:
    # - git tag
    # - dev branch
    # - master branch
    - if: # condition_git_tag
        (
          $CI_COMMIT_TAG
        )
        ||
        (
          $CI_COMMIT_BRANCH == "development"
          &&
          (
            $CI_PIPELINE_SOURCE == "pipeline"
            ||
            $CI_PIPELINE_SOURCE == "push"
            ||
            $CI_PIPELINE_SOURCE == "schedule"
          )
        )
        ||
        (
          $CI_COMMIT_BRANCH == "master"
          &&
          $CI_PIPELINE_SOURCE == "push"
        )
      exists:
        - '{docs/**,pages/**}/*.md'
      when: always

    - when: never
Submodule gitlab-ci updated: 3fa71fe91a...1a03324ecc
96  mkdocs.yml
@@ -2,7 +2,7 @@ INHERIT: website-template/mkdocs.yml

repo_name: Website
repo_url: https://gitlab.com/nofusscomputing/infrastructure/website
edit_uri: '/-/ide/project/nofusscomputing/projects/website/edit/development/-/pages/'
edit_uri: '/-/ide/project/nofusscomputing/projects/docker-mail/edit/development/-/pages/'

nav:
  - Home: index.md
@@ -11,9 +11,21 @@ nav:

    - articles/index.md

    - 2023:
    - 2022:

      - articles/2023/new_website.md
      - articles/2022/gitlab_pipeline_from_github_actions.md

      - articles/2022/fail2ban_running_considerations.md

      - articles/2022/fail2ban_permanent_whitelist.md

      - articles/2022/fail2ban_permanent_ban_closed_port_access.md

      - articles/2022/fail2ban_block_suspisious_activity.md

      - articles/2022/gitlab_piplines_vscode.md

      - articles/2022/local_gitlab_pipeline.md

    - 2015:

@@ -29,86 +41,20 @@ nav:

  - projects/index.md

  - Ansible:
  - Ansible Execution Environment:

    - projects/ansible/index.md
    - projects/execution_environment/index.md

    - Ansible Execution Environment:
  - Ansible Roles:

      - projects/ansible/execution_environment/index.md
    - Git Configuration:

    - Collections:
      - projects/git_configuration/index.md

      - projects/ansible/collection/index.md

      - phpIPAM Scan Agent:

        - projects/ansible/collection/phpipam_scan_agent/index.md

    - Playbooks:

      - projects/ansible/playbooks/index.md

    - Roles:

      - projects/ansible/roles/index.md

      - Common:

        - projects/ansible/roles/common/index.md

      - Firewall:

        - projects/ansible/roles/firewall/index.md

      - Git Configuration:

        - projects/ansible/roles/git_configuration/index.md

      - Home Assistant:

        - projects/ansible/roles/homeassistant/index.md

      - Kubernetes:

        - projects/ansible/roles/kubernetes/index.md

      - Testing:

        - projects/ansible/roles/ansible_test/index.md

  - Docker:

    - projects/docker/index.md
  - Docker Mail: projects/docker-mail/index.md

    - BIND DNS Server:

      - projects/docker/bind/index.md

  - Docker GLPI:

    - projects/glpi/index.md

  - Docker Mail:

    - projects/docker-mail/index.md

  - Gitlab CI: projects/gitlab-ci/index.md

  - Infrastructure:

    - projects/infrastructure/index.md

  - ITIL:

    - Runbooks:

      - projects/itil/runbooks/index.md

  - Kubernetes Monitoring: projects/kubernetes_monitoring/index.md

  - LDAP Self Service: projects/ldap_self_service/index.md

  - Python Gitlab Management: projects/python-gitlab-management/README.md

  - Operations:
194  pages/articles/2022/fail2ban_block_suspisious_activity.md  (new file)
@@ -0,0 +1,194 @@
---
title: Setting up Fail2ban to Monitor Common TCP and UDP Ports for Suspicious Activity
description: An explanation of how to configure Fail2ban to block suspicious activity.
date: 2022-06-12
template: article.html
type: blog
author: jon
about: https://www.fail2ban.org/
tags:
- Security
- Firewall
- Fail2ban

---

In this article, we'll explore how to set up an existing installation of Fail2ban to monitor common TCP and UDP ports. Fail2ban is a powerful tool that scans log files, detects suspicious activity, and automatically blocks the IP addresses of the offending hosts. By implementing Fail2ban to monitor common ports, we can enhance the security of our system and mitigate potential risks.


## Prerequisites

- An existing installation of Fail2ban

- Basic knowledge of working with the command line


## Step 1: Set up iptables Rules

1. Open the iptables configuration file in a text editor using root privileges:

    ```bash
    sudo nano /etc/iptables/log_closed_ports.v4
    ```

2. Add a rule for each port whose access you want logged:

    ```bash
    -A INPUT -p tcp --dport 80 -j LOG --log-prefix "[http-blocked-port-80] "
    -A INPUT -p tcp --dport 443 -j LOG --log-prefix "[https-blocked-port-443] "
    -A INPUT -p udp --dport 53 -j LOG --log-prefix "[dns-blocked-port-53] "
    -A INPUT -p tcp --dport 22 -j LOG --log-prefix "[ssh-blocked-port-22] "
    -A INPUT -p tcp --dport 3306 -j LOG --log-prefix "[mysql-blocked-port-3306] "
    -A INPUT -p tcp --dport 5432 -j LOG --log-prefix "[postgresql-blocked-port-5432] "
    ```

    Adjust the port numbers and log prefixes as needed for each port.

3. Save and exit the file (`Ctrl+O`, `Enter`, `Ctrl+X` in nano).

4. Restart the iptables service to apply the changes:

    ```bash
    sudo service iptables restart
    ```
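
Note that the iptables `LOG` target writes its matches to the kernel log, not directly to the `/var/log/iptables.log` file the jails below read. A minimal sketch of a rule that captures these entries into that file, assuming rsyslog is in use and the `-blocked-port-` prefixes from above (the drop-in filename is an assumption):

```
# /etc/rsyslog.d/10-iptables.conf (filename is an assumption)
# Write any message containing one of our "-blocked-port-" log prefixes to a
# dedicated file, then stop processing so it does not also land in the syslog.
:msg, contains, "-blocked-port-" /var/log/iptables.log
& stop
```

Restart rsyslog (`sudo service rsyslog restart`) after adding the rule so the file starts being populated.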

## Step 2: Configure Fail2ban Filters

1. Create the Fail2ban filter file in a text editor:

    ```bash
    sudo nano /etc/fail2ban/filter.d/iptables-port.conf
    ```

2. Add the following content to the file:

    ```ini
    [Definition]
    failregex = ^.*\[.*\] .* <HOST> .*$
    ignoreregex =

    actionban = iptables-multiport[logpath="/var/log/fail2ban_blocked_port_access.log", logprefix="[%(date)s] [%(name)s] [%(ip)s] "]
    ```

3. Save and exit the file (`Ctrl+O`, `Enter`, `Ctrl+X` in nano).


## Step 3: Configure Fail2ban Jail

1. Open the Fail2ban jail configuration file in a text editor:

    ```bash
    sudo nano /etc/fail2ban/jail.d/custom.conf
    ```

2. Add the following configuration to the file:

    ```ini
    [http-blocked-port-80]
    enabled = true
    filter = iptables-port
    logpath = /var/log/iptables.log
    maxretry = 3
    banaction = iptables-multiport

    [https-blocked-port-443]
    enabled = true
    filter = iptables-port
    logpath = /var/log/iptables.log
    maxretry = 3
    banaction = iptables-multiport

    [dns-blocked-port-53]
    enabled = true
    filter = iptables-port
    logpath = /var/log/iptables.log
    maxretry = 3
    banaction = iptables-multiport

    [ssh-blocked-port-22]
    enabled = true
    filter = iptables-port
    logpath = /var/log/iptables.log
    maxretry = 3
    banaction = iptables-multiport

    [mysql-blocked-port-3306]
    enabled = true
    filter = iptables-port
    logpath = /var/log/iptables.log
    maxretry = 3
    banaction = iptables-multiport

    [postgresql-blocked-port-5432]
    enabled = true
    filter = iptables-port
    logpath = /var/log/iptables.log
    maxretry = 3
    banaction = iptables-multiport
    ```

    Adjust the configuration as needed for each port.

3. Save and exit the file (`Ctrl+O`, `Enter`, `Ctrl+X` in nano).


## Step 4: Restart Fail2ban Service

1. Restart the Fail2ban service to apply the configuration changes:

    ```bash
    sudo service fail2ban restart
    ```

Congratulations! You have successfully set up Fail2ban to monitor common TCP and UDP ports. Fail2ban will now log access attempts to the specified ports and automatically ban IP addresses that exceed the maximum number of allowed retries. Ban events are logged to `/var/log/fail2ban_blocked_port_access.log` with the date, rule name, and IP address.
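
To confirm the jails came up after the restart, they can be queried with `fail2ban-client`; a quick check, using the jail names from Step 3:

```bash
# List all jails fail2ban currently knows about.
sudo fail2ban-client status

# Inspect one of the port jails: monitored file, failure counts, banned hosts.
sudo fail2ban-client status http-blocked-port-80
```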

!!! Alert
    Please note that the provided configurations are examples; you may need to modify them based on your specific needs and environment.


## Common Ports

The table below lists some common ports for which it may be useful to add additional rules.

| Port | Protocol | Description |
|------|----------|------------------------------|
| 20 | TCP | FTP Data |
| 21 | TCP | FTP Control |
| 22 | TCP | SSH |
| 23 | TCP | Telnet |
| 25 | TCP | SMTP |
| 53 | TCP/UDP | DNS |
| 67 | UDP | DHCP Server |
| 68 | UDP | DHCP Client |
| 69 | UDP | TFTP |
| 80 | TCP | HTTP |
| 110 | TCP | POP3 |
| 115 | TCP | SFTP |
| 123 | UDP | NTP |
| 137 | UDP | NetBIOS Name Service |
| 138 | UDP | NetBIOS Datagram Service |
| 139 | TCP | NetBIOS Session Service |
| 143 | TCP | IMAP |
| 161 | UDP | SNMP |
| 389 | TCP/UDP | LDAP |
| 443 | TCP | HTTPS |
| 445 | TCP/UDP | SMB |
| 465 | TCP | SMTPS |
| 514 | TCP/UDP | Syslog |
| 587 | TCP | SMTP (Submission) |
| 636 | TCP/UDP | LDAPS |
| 993 | TCP | IMAPS |
| 995 | TCP | POP3S |
| 1433 | TCP | MS SQL Server |
| 1434 | UDP | MS SQL Server (UDP) |
| 1521 | TCP | Oracle Database |
| 2049 | TCP/UDP | NFS |
| 3306 | TCP | MySQL |
| 3389 | TCP | Remote Desktop Protocol (RDP) |
| 5432 | TCP | PostgreSQL |
| 5900 | TCP | VNC |
| 5985 | TCP | WinRM |
| 6379 | TCP | Redis |
| 8080 | TCP | HTTP (Alternate) |
154  pages/articles/2022/fail2ban_permanent_ban_closed_port_access.md  (new file)
@@ -0,0 +1,154 @@
---
title: Managing Permanent Bans in Fail2ban
description: An explanation of how to configure Fail2ban to permanently ban access to closed ports as suspicious activity.
date: 2022-06-12
template: article.html
type: blog
author: jon
about: https://www.fail2ban.org/
tags:
- Security
- Firewall
- Fail2ban

---

In this article, we'll continue from where we left off in "[Setting up Fail2ban to Monitor Common TCP and UDP Ports for Suspicious Activity](fail2ban_block_suspisious_activity.md)" and explore how to manage permanent bans in Fail2ban on Debian/Ubuntu. We'll specifically focus on checking the closed port log file, `/var/log/fail2ban_blocked_port_access.log`, and adding hosts to a permanent ban list if they are found three times. This additional step further enhances the security of your system by permanently blocking repeat offenders.

When an attacker targets a system, they often perform port scanning to identify open ports that can be exploited. Accessing a closed port, on the other hand, is considered suspicious and indicative of potentially malicious activity. Fail2ban helps detect and respond to such behavior by monitoring the system's log files, including the closed port log. By examining this log, we can identify hosts that repeatedly attempt to access closed ports, indicating a persistent threat.

In this article, we'll cover the steps to check the closed port log, create a permanent ban list, and configure Fail2ban to add IP addresses to the ban list when they are found three times in the log. By doing so, we can effectively protect our system from attackers who repeatedly attempt to access closed ports.


## Prerequisites

- An existing installation of Fail2ban on Debian/Ubuntu

- Basic knowledge of working with the command line


## Step 1: Checking the Closed Port Log

1. Open the closed port log file, `/var/log/fail2ban_blocked_port_access.log`, using a text editor:

    ```bash
    sudo nano /var/log/fail2ban_blocked_port_access.log
    ```

2. Inside the log file, you will see entries in the following format:

    ```
    [2023-06-12] [http-blocked-port-80] [192.168.0.1] Host banned permanently.
    ```

3. Each entry consists of the date, rule name, IP address, and the indication of a permanent ban.


## Step 2: Creating the Permanent Ban List

1. Open the Fail2ban jail local configuration file in a text editor:

    ```bash
    sudo nano /etc/fail2ban/jail.local
    ```

2. Scroll to the end of the file and add the following section to create a permanent ban list:

    ```ini
    [permanent-bans]
    enabled = true
    filter = permanent-bans
    logpath = /var/log/fail2ban_blocked_port_access.log
    maxretry = 3
    bantime = -1
    action = iptables-allports
    ```

    Adjust the `filter` parameter and `logpath` as per your system configuration. `maxretry = 3` matches the three-strike policy described above, and `bantime = -1` makes the resulting ban permanent.

3. Save and exit the file (`Ctrl+O`, `Enter`, `Ctrl+X` in nano).


## Step 3: Creating the Filter for Permanent Bans

1. Open the Fail2ban filter file for permanent bans in a text editor:

    ```bash
    sudo nano /etc/fail2ban/filter.d/permanent-bans.conf
    ```

2. Add the following content to the file:

    ```ini
    [Definition]
    failregex = ^\[\d{4}-\d{2}-\d{2}\] \[.*\] \[<HOST>\].*Host banned permanently\.$
    ignoreregex =
    ```

3. Save and exit the file (`Ctrl+O`, `Enter`, `Ctrl+X` in nano).


## Step 4: Restarting Fail2ban Service

1. Restart the Fail2ban service to apply the configuration changes:

    ```bash
    sudo service fail2ban restart
    ```

2. Fail2ban will now read the closed port log file and permanently ban any IP address that appears three times in the log.
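
To verify the new jail picked up its filter and is watching the right file, it can be inspected with `fail2ban-client`, using the jail name configured above:

```bash
# Show the permanent-bans jail: the monitored log file and any banned IPs.
sudo fail2ban-client status permanent-bans
```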

Congratulations! You have successfully set up permanent bans in Fail2ban on Debian/Ubuntu. By monitoring the closed port log and adding repeat offenders to a permanent ban list, you have added an extra layer of security to your system.

!!! Tip
    While permanent bans provide increased protection, they should be used judiciously. Review the closed port log entries carefully before applying permanent bans to avoid unintended consequences. It's recommended to test and fine-tune the configuration in a controlled environment before applying it to production systems.

If you have any questions or encounter any issues, feel free to reach out. Stay secure!

## Common Network Ports

| Port | Protocol | Description |
|------|----------|------------------------------|
| 20 | TCP | FTP Data |
| 21 | TCP | FTP Control |
| 22 | TCP | SSH |
| 23 | TCP | Telnet |
| 25 | TCP | SMTP |
| 53 | TCP/UDP | DNS |
| 67 | UDP | DHCP Server |
| 68 | UDP | DHCP Client |
| 69 | UDP | TFTP |
| 80 | TCP | HTTP |
| 110 | TCP | POP3 |
| 115 | TCP | SFTP |
| 123 | UDP | NTP |
| 137 | UDP | NetBIOS Name Service |
| 138 | UDP | NetBIOS Datagram Service |
| 139 | TCP | NetBIOS Session Service |
| 143 | TCP | IMAP |
| 161 | UDP | SNMP |
| 389 | TCP/UDP | LDAP |
| 443 | TCP | HTTPS |
| 445 | TCP/UDP | SMB |
| 465 | TCP | SMTPS |
| 514 | TCP/UDP | Syslog |
| 587 | TCP | SMTP (Submission) |
| 636 | TCP/UDP | LDAPS |
| 993 | TCP | IMAPS |
| 995 | TCP | POP3S |
| 1433 | TCP | MS SQL Server |
| 1434 | UDP | MS SQL Server (UDP) |
| 1521 | TCP | Oracle Database |
| 2049 | TCP/UDP | NFS |
| 3306 | TCP | MySQL |
| 3389 | TCP | Remote Desktop Protocol (RDP) |
| 5432 | TCP | PostgreSQL |
| 5900 | TCP | VNC |
| 5985 | TCP | WinRM |
| 6379 | TCP | Redis |
| 8080 | TCP | HTTP (Alternate) |

!!! Note
    Please note that the provided configurations and port table are examples; you may need to modify them based on your specific needs and environment.
68  pages/articles/2022/fail2ban_permanent_whitelist.md  (new file)
@@ -0,0 +1,68 @@
---
title: Configuring a Permanent Whitelist in Fail2ban
description: An explanation of how to configure Fail2ban to permanently allow hosts specified on a whitelist.
date: 2022-06-12
template: article.html
type: blog
author: jon
about: https://www.fail2ban.org/
tags:
- Security
- Firewall
- Fail2ban

---

In this article, we will explore how to configure a permanent whitelist in Fail2ban. A whitelist allows specific IP addresses or DNS names to bypass any blocking rules enforced by Fail2ban. This can be useful when you want to ensure uninterrupted access for trusted hosts while still benefiting from the protection provided by Fail2ban against suspicious activity.


## Prerequisites

- An existing installation of Fail2ban on your system

- Basic knowledge of working with the command line


## Step 1: Open Fail2ban Configuration

1. Open the Fail2ban configuration file in a text editor using root privileges:

    ```bash
    sudo nano /etc/fail2ban/jail.local
    ```

2. Locate the `[DEFAULT]` section in the file.


## Step 2: Configure the Permanent Whitelist

1. Add the `ignoreip` parameter under the `[DEFAULT]` section to specify the IP addresses or DNS names to be whitelisted. You can whitelist multiple entries by separating them with a space.

    ```ini
    [DEFAULT]
    ignoreip = 192.168.1.100 example.com
    ```

    Replace `192.168.1.100` with the desired IP address or add more IP addresses as needed. You can also include DNS names like `example.com` to whitelist specific domains.

2. Save and exit the file (`Ctrl+O`, `Enter`, `Ctrl+X` in nano).


## Step 3: Restart Fail2ban Service

1. Restart the Fail2ban service to apply the configuration changes:

    ```bash
    sudo service fail2ban restart
    ```
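
Once the service is back up, the effective whitelist can be checked per jail. A quick query, assuming a jail named `sshd` exists (`fail2ban-client get <jail> ignoreip` is available in Fail2ban 0.10 and later):

```bash
# Print the ignore list the sshd jail is actually using.
sudo fail2ban-client get sshd ignoreip
```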

Congratulations! You have successfully configured a permanent whitelist in Fail2ban. The IP addresses or DNS names specified in the `ignoreip` parameter will now be exempted from any blocking rules enforced by Fail2ban.

It's important to regularly review and update the whitelist to ensure it remains accurate and secure. Remember that introducing DNS names in the whitelist adds a dependency on DNS resolution, so ensure that DNS resolution is functioning properly on your system.

By configuring a permanent whitelist, you can allow trusted hosts to access your system without being affected by Fail2ban's blocking mechanisms. This helps strike a balance between security and accessibility.

Feel free to reach out if you have any questions or encounter any issues along the way. Stay secure!

!!! Note
    The provided instructions are based on the assumption that you have Fail2ban installed and have administrative privileges on your system. Modify the configuration as per your specific requirements and system configuration.
59  pages/articles/2022/fail2ban_running_considerations.md  (new file)
@@ -0,0 +1,59 @@
---
title: Running Fail2ban Considerations
description: A food-for-thought article on running Fail2ban, with some considerations.
date: 2022-06-12
template: article.html
type: blog
author: jon
about: https://www.fail2ban.org/
tags:
- Security
- Firewall
- Fail2ban

---

Fail2ban is a powerful tool for enhancing the security of your system by automatically detecting and blocking suspicious activities. While Fail2ban can be installed and run in various environments, it's important to consider the best practices and potential challenges associated with running Fail2ban effectively. In this article, we will explore different methods of installing and running Fail2ban, and discuss why running Fail2ban within a Docker container may not be the optimal approach.


## Methods of Installing and Running Fail2ban

There are multiple methods available to install and run Fail2ban, including:

1. **Package Manager**: Many Linux distributions provide Fail2ban packages through their package managers. This method simplifies the installation process by automatically handling dependencies and providing system integration.

2. **Source Code**: Installing Fail2ban from source code gives you more control over the installation process and allows for customization. This method involves manually compiling and configuring Fail2ban on your system.

Now, let's delve into the reasons why running Fail2ban within a Docker container may not be a good idea.

## Reasons for Not Running Fail2ban within a Docker Container

While there may be scenarios where running Fail2ban within a Docker container seems appealing, it's generally not recommended for the following reasons (a sketch of what such a deployment would actually require follows the list):

1. **Limited Visibility**: Docker containers have their own isolated network stack, which can limit Fail2ban's visibility into the host system's network traffic. This can hinder Fail2ban's ability to accurately monitor and respond to malicious activities.

2. **Log File Monitoring**: Fail2ban relies on monitoring log files to detect and respond to malicious activities. When running within a Docker container, Fail2ban may have limited access to the host's log files, making it less effective in identifying and blocking malicious behavior.

3. **Network Filtering Limitations**: Fail2ban utilizes firewall rules to block malicious hosts. Running Fail2ban within a Docker container may limit its ability to apply firewall rules directly on the host system, reducing its effectiveness in mitigating threats.

4. **Complexity and Configuration Challenges**: Running Fail2ban within a Docker container introduces an additional layer of complexity and potential configuration challenges. It may require custom networking setups, log file sharing between the container and host, and intricate container-to-host communication mechanisms.

5. **Dependency on Docker Service**: When Fail2ban is running inside a Docker container, it becomes dependent on the Docker service itself. If the Docker service stops or encounters issues, Fail2ban will also be affected and may cease to function properly. This dependency introduces a single point of failure, potentially leaving your system vulnerable to malicious activities.

6. **Restart and Recovery Challenges**: When the Docker service restarts, or if the host system reboots, Docker containers are typically not started in a specific order. This can lead to a delay before Fail2ban is operational, leaving your system exposed to potential threats during that time.
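
To make points 1 to 4 concrete, here is a minimal sketch of the invocation such a containerized deployment would need; the image name `crazymax/fail2ban` is only an illustrative assumption:

```bash
# Everything here exists to claw back what the container isolation takes away:
# --network host        : undo the isolated network stack (point 1)
# --cap-add NET_ADMIN   : let the container edit the host firewall (point 3)
# -v /var/log:/var/log  : expose the host's log files inside the container (point 2)
docker run -d --name fail2ban \
  --network host \
  --cap-add NET_ADMIN --cap-add NET_RAW \
  -v /var/log:/var/log:ro \
  crazymax/fail2ban
```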

Considering these reasons, it is generally recommended to install and run Fail2ban directly on the host system. By doing so, you ensure full visibility into the network traffic, unrestricted access to log files, seamless integration with firewall rules, and a simpler configuration setup, and you avoid the potential issues associated with running Fail2ban within a Docker container.


## Conclusion

Installing and running Fail2ban using the package manager or from source code are common methods to enhance the security of your system. When it comes to running Fail2ban within a Docker container, however, limited visibility, log file monitoring challenges, network filtering limitations, increased complexity, and the dependency on the Docker service indicate that it's not the optimal approach.

By following best practices and running Fail2ban directly on the host system, you can maximize its effectiveness in detecting and blocking malicious activities. Choose the installation method that best suits your needs and ensure regular updates to keep your system secure.

Remember to weigh the reasons presented here, including the scenario of the Docker service stopping, and evaluate the trade-offs before deciding to run Fail2ban within a Docker container. Prioritize the security of your system while maintaining simplicity and effectiveness.

If you have any questions or encounter any issues along the way, feel free to reach out. Stay secure!

!!! Note
    The installation methods mentioned in this article are general guidelines. Refer to the official Fail2ban documentation and consult your specific Linux distribution's documentation for detailed instructions and any distribution-specific nuances.
245  pages/articles/2022/gitlab_pipeline_from_github_actions.md  (new file)
@@ -0,0 +1,245 @@
---
title: Running GitLab Pipeline from GitHub Actions
description: An explanation of how to run a GitLab pipeline or job from GitHub Actions.
date: 2022-06-12
template: article.html
type: blog
author: jon
about: https://www.fail2ban.org/
tags:
- Gitlab
- Github
- Docker
- CD/CI
- Pipeline
---

Migrating from GitLab to GitHub, or have existing GitLab configurations in your repository? No worries! In this article, we will explore how to seamlessly execute GitLab pipelines using GitHub Actions, enabling you to leverage the power of GitHub's CI/CD capabilities while maintaining your existing GitLab configurations.

If you already have a GitLab CI/CD pipeline defined in your repository, this guide will help you execute it without the need for major modifications. By configuring a self-hosted GitLab Runner Docker container in GitHub Actions and utilizing the `.gitlab-ci.yml` file, you can easily trigger your GitLab pipeline and benefit from GitHub's collaborative features.

Let's dive into the steps required to configure GitHub Actions, execute the GitLab Runner Docker container, and seamlessly run your GitLab pipeline from within your GitHub repository.


## Prerequisites

- Access to a GitHub repository with the desired project code.

- Basic knowledge of GitLab CI/CD and GitHub Actions concepts.


## Step 1: Configure GitHub Actions Workflow and Execute GitLab Runner Docker Container

1. Open your workflow configuration file (e.g., `.github/workflows/main.yml`) in your GitHub repository.

2. Specify the runner using the `runs-on` field:

    ```yaml
    jobs:
      build:
        runs-on: self-hosted
        steps:
          - name: Checkout code
            uses: actions/checkout@v2
    ```

    The `runs-on` field is set to `self-hosted`, instructing GitHub Actions to use a self-hosted runner.

3. Update the workflow file to include the following step for executing the GitLab Runner Docker container:

    ```yaml
    jobs:
      build:
        runs-on: self-hosted
        steps:
          - name: Checkout code
            uses: actions/checkout@v2

          - name: Start GitLab Runner Docker Container
            run: |
              docker run -d --name gitlab-runner \
                -v /var/run/docker.sock:/var/run/docker.sock \
                -v /path/to/runner/config:/etc/gitlab-runner \
                -v ${{ github.workspace }}:/${{ github.workspace }} \
                -w /${{ github.workspace }} \
                gitlab/gitlab-runner:latest
    ```

    Replace `/path/to/runner/config` with the actual path where you want to store the GitLab Runner configuration files.

4. Commit and push your workflow configuration file to your GitHub repository.


## Step 2: Use .gitlab-ci.yml for Jobs and Pipelines

1. Create or update the `.gitlab-ci.yml` file in your GitHub repository to define the jobs and pipelines for your GitLab Runner.

    ```yaml
    stages:
      - build
      - test
      - deploy

    build:
      stage: build
      script:
        - echo "Running build job"

    test:
      stage: test
      script:
        - echo "Running test job"

    deploy:
      stage: deploy
      script:
        - echo "Running deploy job"
    ```

    Customize the jobs and their respective scripts according to your specific CI/CD requirements.

2. Commit and push the `.gitlab-ci.yml` file to your GitHub repository.


## Step 3: Execute GitLab Pipeline using GitHub Actions

1. With the changes pushed to your GitHub repository, the self-hosted GitLab Runner Docker container will use the `.gitlab-ci.yml` file to execute the defined jobs.

2. To run a specific job, pass the job name to the GitLab Runner command. For example, to run only the `test` job, modify the workflow configuration file as follows:

    ```yaml
    jobs:
      build:
        runs-on: self-hosted
        steps:
          - name: Checkout code
            uses: actions/checkout@v2

      test:
        runs-on: self-hosted
        steps:
          - name: Run GitLab Runner job
            run: |
              docker exec gitlab-runner gitlab-runner exec docker test
    ```

    In this example, the `test` job is executed using the `docker exec` command.

3. `gitlab-runner exec` runs a single job per invocation, so to run all of the jobs defined in `.gitlab-ci.yml`, invoke it once per job:

    ```yaml
    jobs:
      build:
        runs-on: self-hosted
        steps:
          - name: Checkout code
            uses: actions/checkout@v2

      test:
        runs-on: self-hosted
        steps:
          - name: Run GitLab Runner pipeline
            run: |
              docker exec gitlab-runner gitlab-runner exec docker build
              docker exec gitlab-runner gitlab-runner exec docker test
              docker exec gitlab-runner gitlab-runner exec docker deploy
    ```

    Running each job in turn executes the stages of the pipeline in order.


## Conclusion

By configuring a self-hosted GitLab Runner Docker container in GitHub Actions and utilizing the `.gitlab-ci.yml` file, you can seamlessly execute GitLab pipelines from within your GitHub repositories. This enables you to leverage the powerful CI/CD capabilities of GitLab while still benefiting from the collaborative features of GitHub.
# Article 7: Running GitLab Pipeline from GitHub Actions

## Introduction

If you're looking to migrate from GitLab to GitHub, or have existing GitLab configurations in your repository, this article is for you. We'll explore how to seamlessly execute GitLab pipelines using GitHub Actions, leveraging GitHub's CI/CD capabilities while maintaining your GitLab configurations.

By configuring GitHub Actions to trigger your GitLab pipeline, you can take advantage of GitHub's collaborative features while running your pipeline in a familiar environment.

## Prerequisites

Before we get started, make sure you have the following:

- A GitHub repository with your GitLab project code

- A `.gitlab-ci.yml` file defining your GitLab pipeline jobs

## Step 1: Configure GitHub Actions Workflow

1. In your GitHub repository, navigate to the **Actions** tab.

2. Click on **Set up a workflow yourself** to create a new workflow file.

3. Replace the contents of the workflow file with the following:
```yaml
name: Run GitLab Pipeline

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

jobs:
  run-gitlab-pipeline:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2

      - name: Execute GitLab pipeline job
        run: |
          # Customize this command to match your GitLab Runner configuration
          docker run --rm -v /var/run/docker.sock:/var/run/docker.sock \
            -v $PWD:/builds/project -w /builds/project \
            gitlab/gitlab-runner:latest \
            gitlab-runner exec docker <job-name>
```

Note: Replace `<job-name>` with the name of a job defined in `.gitlab-ci.yml`. `gitlab-runner exec` runs a single job per invocation, so repeat the command for each job you want to execute.
4. Commit and push the workflow file to your GitHub repository.

## Step 2: Customize GitLab Runner Configuration

1. In your GitHub repository, navigate to the **Settings** tab.

2. Click on **Secrets** in the left sidebar.

3. Add any secrets or environment variables required for your GitLab Runner configuration.

For example, you may need to set the `CI_JOB_TOKEN` secret to authenticate with your GitLab repository.
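As a sketch of how such a secret could then reach the runner (this step is an illustration, not part of the original workflow; it assumes the secret was named `CI_JOB_TOKEN`):

```yaml
- name: Execute GitLab pipeline job
  env:
    CI_JOB_TOKEN: ${{ secrets.CI_JOB_TOKEN }}   # exposed to this step only
  run: |
    docker run --rm -v /var/run/docker.sock:/var/run/docker.sock \
      -v $PWD:/builds/project -w /builds/project \
      -e CI_JOB_TOKEN \
      gitlab/gitlab-runner:latest \
      gitlab-runner exec docker <job-name>
```

The `-e CI_JOB_TOKEN` flag passes the variable through to the runner container without writing its value into the workflow file.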
## Step 3: Trigger GitLab Pipeline

Any push or pull request event on the `main` branch will now trigger the GitHub Actions workflow, which in turn executes your GitLab pipeline.
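If you would also like to start the pipeline by hand, GitHub Actions supports a manual trigger; the sketch below adds it to the `on:` block of the workflow shown earlier (the rest of the workflow is unchanged):

```yaml
on:
  workflow_dispatch:   # adds a "Run workflow" button in the Actions tab
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
```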
## Conclusion

Congratulations! You've successfully configured GitHub Actions to run your GitLab pipeline. By leveraging GitHub's CI/CD capabilities, you can seamlessly execute your GitLab pipelines and benefit from the collaborative features provided by GitHub.

Remember to keep your `.gitlab-ci.yml` file up to date with your desired pipeline configurations. Feel free to explore other features of GitHub Actions to further enhance your CI/CD workflows.

If you have any questions or encounter any issues along the way, don't hesitate to reach out for assistance. Happy automating!

Please note that this article assumes you already have a working `.gitlab-ci.yml` file and focuses solely on executing the GitLab pipeline using GitHub Actions.
114 pages/articles/2022/gitlab_piplines_vscode.md Normal file
@ -0,0 +1,114 @@
---
title: Running GitLab Runner with VS Code Tasks
description: An explanation of how to set up VS Code tasks to run GitLab pipelines directly from VS Code.
date: 2022-06-12
template: article.html
type: blog
author: jon
about: https://hub.docker.com/r/gitlab/gitlab-runner
tags:
  - Gitlab
  - Docker
  - CD/CI
  - Pipeline
  - VSCode
---

In a previous article, we learned how to run GitLab CI/CD pipelines locally using the GitLab Runner container. In this article, we'll explore how to streamline the process by creating VS Code tasks that allow us to launch the GitLab Runner with a simple keyboard shortcut. This provides a convenient way to execute our pipelines or specific jobs directly from within the VS Code environment.
## Prerequisites

- VS Code installed on your machine

- [Completion of the previous steps](local_gitlab_pipeline.md)

## Step 1: Create a `.vscode` Directory

First, open your project in VS Code. In the root directory of your project, create a new directory called `.vscode` if it doesn't already exist.

## Step 2: Create a `tasks.json` File

Inside the `.vscode` directory, create a new file called `tasks.json`. This file will define the tasks we want to create.

Add the following content to the `tasks.json` file:

```json
{
    "version": "2.0.0",
    "tasks": [
        {
            "label": "Run GitLab CI/CD Pipeline",
            "type": "shell",
            "command": "docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${workspaceFolder}:/builds/project -w /builds/project gitlab/gitlab-runner:latest gitlab-runner exec docker --docker-privileged"
        },
        {
            "label": "Run Specific Job",
            "type": "shell",
            "command": "docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${workspaceFolder}:/builds/project -w /builds/project gitlab/gitlab-runner:latest gitlab-runner exec docker --docker-privileged",
            "args": [
                "--",
                "<job-name>"
            ],
            "problemMatcher": []
        }
    ]
}
```

The `tasks` array contains two tasks: "Run GitLab CI/CD Pipeline" and "Run Specific Job". The commands specified in the `command` field are the same as the ones we used previously to execute the GitLab Runner container.

For the "Run Specific Job" task, there is an additional `args` field where you should replace `<job-name>` with the name of the specific job you want to execute.
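If you'd rather not hard-code the job name, VS Code tasks can prompt for it at run time via an `inputs` definition. The following is a sketch only (the `jobName` input id and the task label are names chosen here for illustration):

```json
{
    "version": "2.0.0",
    "tasks": [
        {
            "label": "Run Specific Job (prompt)",
            "type": "shell",
            "command": "docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${workspaceFolder}:/builds/project -w /builds/project gitlab/gitlab-runner:latest gitlab-runner exec docker --docker-privileged ${input:jobName}",
            "problemMatcher": []
        }
    ],
    "inputs": [
        {
            "id": "jobName",
            "type": "promptString",
            "description": "Name of the GitLab CI job to run"
        }
    ]
}
```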
## Step 3: Define Keybindings (Optional)

To make it even more convenient, you can define keybindings for the tasks. Open the keybindings settings in VS Code by going to **File** -> **Preferences** -> **Keyboard Shortcuts** (or by pressing `Ctrl+K Ctrl+S`).

Add the following keybindings to the keybindings settings file:

```json
[
    {
        "key": "ctrl+shift+p",
        "command": "workbench.action.tasks.runTask",
        "args": "Run GitLab CI/CD Pipeline"
    },
    {
        "key": "ctrl+shift+j",
        "command": "workbench.action.tasks.runTask",
        "args": "Run Specific Job"
    }
]
```

These keybindings assign the "Run GitLab CI/CD Pipeline" task to `Ctrl+Shift+P` and the "Run Specific Job" task to `Ctrl+Shift+J`. Note that `Ctrl+Shift+P` is VS Code's default Command Palette shortcut, so you will likely want to pick unused combinations; feel free to modify the keybindings according to your preference.
## Step 4: Adjust `.gitignore` File

To avoid inadvertently including artifacts generated during local pipeline execution in your git commits, it's essential to update your `.gitignore` file.

Open your project's `.gitignore` file and add the following line to exclude the generated artifacts:

```
# GitLab Runner artifacts
/builds/
```

This ensures that any files or directories created within the `/builds/` directory (which is used by the GitLab Runner container) will be ignored by git.
## Step 5: Run the Pipeline or Specific Job

You're now ready to run your GitLab CI/CD pipeline or a specific job using the VS Code tasks. Press the assigned keybinding to launch the GitLab Runner container and execute the desired task.

## Conclusion

By creating VS Code tasks, we've made it even easier to run GitLab CI/CD pipelines or specific jobs locally using the GitLab Runner. With a simple keyboard shortcut, we can now execute our pipelines or test individual jobs directly from within the VS Code environment. Additionally, by adjusting the `.gitignore` file, we can ensure that artifacts generated during local execution are not inadvertently included in git commits.

That's it! You've learned how to set up VS Code tasks to launch the GitLab Runner and exclude artifacts from being committed. Enjoy the streamlined process of running your pipelines and jobs with ease.
65 pages/articles/2022/local_gitlab_pipeline.md Normal file
@ -0,0 +1,65 @@
---
title: Running GitLab CI/CD Pipeline Locally with Docker-in-Docker using GitLab Runner Container
description: This article details how to run GitLab CI/CD pipelines locally, which can be useful for testing and debugging purposes.
date: 2022-06-12
template: article.html
type: blog
author: jon
about: https://hub.docker.com/r/gitlab/gitlab-runner
tags:
  - Gitlab
  - Docker
  - CD/CI
  - Pipeline
  - VSCode
---

Running GitLab CI/CD pipelines locally can be useful for testing and debugging purposes. While GitLab provides robust CI/CD capabilities on its platform, there may be scenarios where executing the pipeline locally within your development environment, such as Visual Studio Code (VS Code), can be beneficial. In this blog post, we'll explore how to set up and run a GitLab CI/CD pipeline locally using the GitLab Runner container with Docker-in-Docker (DinD) support.
## Prerequisites

- Docker installed on your machine

- Basic knowledge of GitLab CI/CD and Docker concepts

## Step 1: Pull the GitLab Runner Image

To begin, ensure that you have Docker installed on your machine. Open a terminal within VS Code or any command prompt and pull the latest GitLab Runner Docker image by executing the following command:

```shell
docker pull gitlab/gitlab-runner:latest
```
## Step 2: Execute Pipeline Jobs with DinD

Next, navigate to your project's root directory containing the `.gitlab-ci.yml` file. To execute a GitLab CI/CD job locally with Docker-in-Docker functionality, use the following command:

```shell
docker run --rm -v /var/run/docker.sock:/var/run/docker.sock \
    -v /path/to/your/project:/builds/project \
    -w /builds/project gitlab/gitlab-runner:latest \
    gitlab-runner exec docker --docker-privileged <job-name>
```

Replace `/path/to/your/project` with the local path to your project directory, and `<job-name>` with the name of the job you want to execute. Note that `gitlab-runner exec` runs a single job per invocation, so to execute multiple jobs, or to simulate the whole pipeline, run the command once for each job defined in your `.gitlab-ci.yml` file.

By mounting the Docker socket (`/var/run/docker.sock`) as a volume inside the container and using the `--docker-privileged` flag, the GitLab Runner container gains access to Docker-in-Docker functionality. This allows the execution of Docker commands within the runner.

Executing jobs one at a time in this way allows you to selectively test and debug individual parts of your pipeline, or to step through the whole pipeline in stage order.

Make sure your project directory contains the necessary files, configurations, and dependencies required for the jobs to run successfully.

Remember to exercise caution when running Docker-in-Docker, as it can have security implications. Ensure that your environment is appropriately secured and follow best practices.
## Conclusion

Running GitLab CI/CD pipelines locally within your development environment can greatly enhance the development and testing process. By leveraging the GitLab Runner container with Docker-in-Docker support, you can simulate GitLab CI/CD job execution within tools like VS Code. This enables you to test and validate your pipeline configurations before pushing them to the GitLab platform.

That's it! You now have the knowledge to run GitLab CI/CD jobs locally with Docker-in-Docker using the GitLab Runner container. Happy pipeline testing and debugging!
@ -1,29 +0,0 @@
---
title: Site Refurbishment
description: it's an update.
date: 2023-08-01
template: article.html
type: blog
author: jon
copyrightHolder: jon
about: https://nofusscomputing.com
tags:
  - Automation
  - Website
  - Update
---

It's been a while and, for all intents and purposes, prior to today you would not have been mistaken in thinking that our site was dead in the water. I could give reasons, but the reality is they'd be excuses, and we all know that *"they're like arseholes, everyone's got one!!"* As it currently stands, I find myself with a little extra time on my hands, so this site revamp is the start and first visibility of what I've been doing.

I've spent a good part of a few decades working with computers. Whilst this has been an interesting journey, in the last few years I've discovered Configuration-as-Code. The concept itself wasn't exactly new to me, I just put off learning anything about it past the name. As creatures of habit, us humans, once we have found our way we tend to stick to that routine or, better stated, with what we know.

I've moved on from the early days (Norton Ghost and Clonezilla) of manually built images for every different type of machine, which became very monotonous to keep updated with patches. The opportunity recently presented itself where, for the first time in over two decades, I'm required to rebuild my infrastructure from scratch. As daunting as this sounds, given the leaps and bounds that have occurred in the last decade (even more so in the last five years), technologies have evolved to the point where it now takes a fraction of the time to do what used to take upwards of a week. Upgrades are no longer rebuild the image from scratch, clone and redeploy. Now, I punch the keyboard and characters show on the screen, then I run a program, Ansible. It reads the gibberish (JSON/YAML) and presto, Bob's your uncle, a deploy has occurred. Fresh deployment or updates, doesn't matter; run the same code again and Ansible ensures it's set up how it's supposed to be. Need to update a piece of software? Too easy, change the version number in your config files.

Other things of noteworthy mention:

- For family and friends, free of course, I host a password vault, <https://vault.nofusscomputing.com>. This enables you to install an app on your phone and within your web browser which lets you sync your passwords, identities and secrets, using zero-trust full encryption. Best feature of all: you only have to remember your vault password, as everything else is stored in the vault.

- Our helpdesk is now deployed publicly, <https://helpdesk.nofusscomputing.com>. Along with automating everything else, a Service Catalog is being extended to automate other tasks.

- Website updating now occurs automagically. We do this via GitLab using CD/CI pipelines. Now I just edit a file, push the changes, and the changes deploy my site on the interwebs.

- Our [projects](../../projects/index.md) from [GitHub](https://github.com/NoFussComputing) and [GitLab](https://gitlab.com/nofusscomputing) deploy their docs to our site, again automagically.
Binary file not shown.
Before Width: | Height: | Size: 136 KiB
@ -8,7 +8,6 @@ hide:
  - toc
---

<div style="background: url(assets/nfc_revamp.png) no-repeat center top; background-size: 282px 90px; width: 100%; height: 120px; display: flex;">
<span style="align-self: flex-end; width: 100%; text-align: center; color: #009900; font-weight: bold; font-size: 14pt;">Using Technology to make life easier</span>
</div>


Home Page
5 pages/projects/ansible-roles/README.md Normal file
@ -0,0 +1,5 @@
# Ansible Roles

| :red_circle: This page is a placeholder for the ansible-roles repo's docs. |
|:---|
| _If you can see this page there has been an error, please report the issue on gitlab_ |
@ -1,9 +0,0 @@
---
title: Ansible Collections
description: No Fuss Computings Ansible Collections
date: 2024-02-21
template: project.html
about: https://gitlab.com/nofusscomputing/projects/ansible
---

This section of the website contains Ansible Collection projects and the details of how to use said projects.
@ -1,510 +0,0 @@
---
title: Ansible Projects
description: No Fuss Computings Ansible Projects
date: 2023-06-01
template: project.html
about: https://gitlab.com/nofusscomputing/projects/ansible
---

This section of the website contains Ansible projects and the details of how to use said projects. Across all of our Ansible projects we standardize as much as possible.

Any playbooks and roles we create are designed with the below Ansible setup in mind. Whilst there are many ways ~~to skin a cat~~ to set up Ansible, if you deviate from the below you will be required to test that our playbooks/roles, if you use them, work as intended. If you find that there is a better way of setting up Ansible, raise an issue with your proposal and we'll discuss.

- [No Fuss Computing playbooks](playbooks/index.md)

- [No Fuss Computing roles](roles/index.md)

## Inventory Setup

The inventory should be kept under an SCM, git in this case, so that a version history is maintained. This also fosters a full audit trail as part of the complete host lifecycle. Ideally the inventory, along with the `files` and `templates` directories, should be contained in its own git repository. Using this method ensures that the git history pertains to the inventory alone, and therefore to any install/configuration of a host.

!!! tip
    If you must include `playbooks` and `roles` within your inventory repository, it's a good idea that these still be their own repositories, added to the inventory repository as git sub-modules.

Ansible inventory directory structure.
``` bash
.
├── ansible.cfg
├── files
│   ├── all
│   │   ├──
│   │   ├──
│   ├── node1
│   │   ├──
│   │   ├──
│   ├── k3s-prod
│   │   ├── Ingress-manifest-AWX.yaml
│   │   └── deployment-manifest-test_web_server.yaml
│   ├── k3s-testing
│   │   └── deployment-manifest-test_web_server.yaml
│
├── .inventory_root
│
├── inventory
│   ├── development
│   │   ├── group_vars
│   │   │   ├── all.yaml
│   │   │   ├── debian.yaml
│   │   ├── hosts.yaml
│   │   └── host_vars
│   │       ├── laptop2.yaml
│   │       ├── node1.yaml
│   │       ├── node2.yaml
│   │       ├── node3.yaml
│   │       ├── node4.yaml
│   │       └── node5.yaml
│   └── production
│       ├── group_vars
│       │   ├── all
│       │   │   ├── main.yaml
│       │   │   ├── vault.yaml
│       │   │   └── versions_software.yaml
│       │   ├── awx.yaml
│       ├── hosts.yaml
│       └── host_vars
│           ├── node1.yaml
│           ├── k3s-prod
│           │   ├── backup.yaml
│           │   ├── kubernetes.yaml
│           │   ├── main.yaml
│           │   └── vault.yaml
│           └── k3s-testing
│               ├── main.yaml
│               └── vault.yaml
├── playbooks
│   ├── all.yaml
├── README.md
└── templates
    ├── hosts
    │   └── k3s-prod
    │       └── HelmChart-manifest-NginX-ingress.yaml
    │
    └── groups
```
| name | Type | Description |
|:---|:---:|:---|
| ansible.cfg | _file_ | Ansible configuration file applicable to this inventory |
| files | _directory_ | Contains files that a host may require. A playbook task iterates over this by hostname and group name. Sub-directories are per hostname / group-name |
| .inventory_root | _file_ | This file is used by the `nfc_common` role to determine the root directory of the inventory. |
| inventory | _directory_ | Ansible inventory. If multiple inventories exist, sub-folders can be used. |
| playbooks | _directory_ | Should be a git submodule. _This keeps inventory and playbooks SCM related to each other only._ |
| README.md | _file_ | Inventory readme with applicable info. |
| templates | _directory_ | This directory is the same as the `files` directory except it contains jinja templates. |
### Inventory

Naming of host inventory files uses the hostname portion of the FQDN only. i.e. for a host with a FQDN of `myhostname.domain.tld`, its `inventory_hostname` would be `myhostname`. This requirement is a must, as many parts of our roles and playbooks depend upon this value matching the DNS system.

#### hosts file

The hosts file `hosts.yaml` contains all hosts and the groups they are part of.
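For illustration only, a minimal `hosts.yaml` following this convention might look like the following sketch (group and host names are examples, not a prescribed layout):

``` yaml
all:
  children:
    desktops:
      hosts:
        laptop2:
    kubernetes:
      hosts:
        node1:
        node2:
```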
### Variable Files

Preference for variable files is one file per subject. i.e. the variables for a keycloak deployment should all be in one variable file, under a directory matching the host/group name.
### Playbooks

For playbook usage in AWX / Ansible Automation Platform, the following changes are required to be made to **all** playbooks:

- variable `nfc_pb_host` is used as a template survey variable providing a host selector for limiting hosts when running a play.

Example implementation

``` yaml
- name: Desktops and Sub-groups
  hosts: |-
    {%- if
      nfc_pb_host is defined
      and
      nfc_pb_host in groups.desktops
    -%}
      {{ nfc_pb_host }}
    {%- else -%}
      {{ groups.desktops }}
    {%- endif %}
```

!!! warning "Important"
    The building of the variable is dynamic and must check whether the host is part of the group the playbook targets. This ensures that the playbook will only ever run for a host that is part of that group.
- variable `nfc_pb_kubernetes_cluster_name` is used as a template survey variable for the dynamic building of the cluster group name.

Example implementation

``` yaml
- name: Kubernetes Group and sub-groups
  hosts: |-
    {%- if
      nfc_pb_host is defined
      and
      nfc_pb_host in groups.kubernetes
    -%}
      {{ nfc_pb_host }}
    {%- elif nfc_pb_kubernetes_cluster_name is defined -%}
      kubernetes_cluster_{{ nfc_pb_kubernetes_cluster_name }}
    {%- else -%}
      {{ groups.kubernetes }}
    {%- endif %}
```

Note the use of `elif`; Jinja2 has no `elseif` keyword.
### Templates

The templates directory contains only two sub-directories, `groups` and `hosts`; under each of these are folders by group/host name that contain the template files. Preference leans toward not using the `.j2` extension, as the IDE may lose functionality when it is used.

Naming of template files is in the format `{item-type}-{what-uses}-{friendly name that uses underscores not hyphens}.{file_extension}`

| Item Type | what uses | Notes |
|:---|:---:|:---|
| config | bind | Configuration file for bind dns server |
| dnszone | bind | a bind server DNS zone |
| `{kubernetes kind}` | manifest | A kubernetes manifest |
#### Feature gates

Templates, when added to the group folder, should be set up with a feature gate. This enables simple yaml to be defined to allow the template to deploy.

Example of a yaml declaration that a host/group would read:

``` yaml
feature_gates:
  is_prime: false
  monitoring: true
  operator_awx: true
  operator_grafana: true
  operator_prometheus: true
  postgres_cluster: true
  rook_ceph: true
```

Setting a feature gate on a template is as simple as enclosing the entire contents of the file with a jinja if statement. i.e. `{% if path.feature_gates.monitoring | default(false) | bool %}the content here{% endif %}`
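As a concrete sketch (the manifest content and the `path.feature_gates` lookup are illustrative; match the lookup to wherever the gate is defined for your host or group), a gated template file could look like:

``` yaml
{% if path.feature_gates.monitoring | default(false) | bool %}
---
apiVersion: v1
kind: Namespace
metadata:
  name: monitoring
{% endif %}
```

When the gate is `false` or undefined, the template renders to an empty file, so there is nothing to deploy.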
## Playbooks

Playbooks are used for grouping hosts and/or groups for a task or set of tasks that are required to be run. All playbooks must return artifacts; these exist to serve the purpose of having information on the play that can be used in further automations.

### Artifacts

The artifacts returned are set using the `ansible.builtin.set_stats` module. Prior to setting these facts with the `stats` module, they must first be set as facts using the `ansible.builtin.set_fact` module; the latter enables passing of the artifacts via cli, and `stats` from within AWX / Ansible Automation Platform.

!!! tip
    When setting the artifacts, ensure `per_host=false` is set so that artifacts work within AWX / Ansible Automation Platform.

Common artifacts structure. **ALL** playbooks must set these variables.
``` yaml
# 'nfc_automation', dict. Global variable, set from within the first playbook
# run and updated as required, with the end time updated by the last playbook.
nfc_automation:
  error: 0                                            # Boolean, 0=no error, 1=error occurred
  time:
    start: "{{ '%Y-%m-%dT%H:%M:%S %z' | strftime }}"  # String of date time, set at time of setting 'nfc_automation'
    end: 0                                            # String of date time, set when play finished, and updated by subsequent plays.
                                                      # Determine end time of play, or duration of play when used with start time, even on error.

# 'nfc_task', list. Every playbook creates its own task dict to add to this list.
nfc_task:
  - name: "glpi"
    start: "{{ '%Y-%m-%dT%H:%M:%S %z' | strftime }}"
    tags: "{{ ansible_run_tags }}"
```

The above must be set from within every playbook, regardless of what else is in the playbook.

Example playbook to set artifacts and variables:
``` yaml
---

#
# Playbook Template
#
# This playbook template is the base template for all of our playbooks.
#
# No Fuss Computing <https://nofusscomputing.gitlab.io/projects/ansible/ansible_playbooks/projects/ansible/>
#
# Requirements:
#   - ansible >= 2.8
#

- name: Playbook Template
  hosts: localhost
  become: false


  pre_tasks:


    # Play and task start time
    - name: Set Start Time
      ansible.builtin.set_fact:
        nfc_task_starttime: "{{ ('%Y-%m-%dT%H:%M:%S %z' | strftime) | string }}"
      no_log: "{{ nfc_pb_no_log_setup_facts | default(true) | bool }}"
      tags:
        - always


    # Setup dictionary 'nfc_automation'
    - name: Set Automation Facts
      ansible.builtin.set_fact:
        nfc_automation: {
          "time": {
            "start": "{{ nfc_task_starttime | string }}",
            "end": 0
          }
        }
      no_log: "{{ nfc_pb_no_log_setup_facts | default(true) | bool }}"
      when: nfc_automation is not defined
      tags:
        - always


    # Setup dictionary 'nfc_task'
    - name: Set Automation Facts
      ansible.builtin.set_fact:
        nfc_task: {
          "name": "{{ ansible_play_name | lower | string }}",
          "error": 0,
          "roles": "{{ ansible_play_role_names | string }}",
          "skip_tags": "{{ ansible_skip_tags | list }}",
          "start": "{{ nfc_task_starttime | string }}",
          "tags": "{{ ansible_run_tags | list }}"
        }
      no_log: "{{ nfc_pb_no_log_setup_facts | default(true) | bool }}"
      tags:
        - always


    - name: Block - pre_tasks
      block:


        ########################################################################
        #
        # Your tasks here
        #
        ########################################################################


      rescue:

        # there was an error, set error object
        - name: Set error fact
          ansible.builtin.set_fact:
            nfc_task: "{{ nfc_task | combine({
              'error': 1
            }) }}"
          no_log: "{{ nfc_pb_no_log_setup_facts | default(true) | bool }}"
          tags:
            - always


      always:


        # Check if an error occurred and fail the task
        - name: Error Check
          ansible.builtin.assert:
            that:
              - nfc_task.error | int == 0
            msg: Error occurred, fail the play run
          tags:
            - always


  # Don't use the 'roles' section.
  roles: []
  # If the included role(s) do not contain a rescue block, the playbook may stop
  # executing in this section (roles) with the post_tasks not running. This will
  # cause the artifacts to be incomplete. It's recommended to include your roles
  # in section(s) 'pre_tasks', 'tasks' or 'post_tasks' and from within a block with
  # rescue so that the playbook can complete and ensure that all artifacts are set.


  tasks:


    - name: Block - tasks
      block:

        # Check for error and fail play on error
        - name: Error Check
          ansible.builtin.assert:
            that:
              - nfc_task.error | int == 0
            msg: Error occurred, follow error path to fail play


        ########################################################################
        #
        # Your tasks here
        #
        ########################################################################


      rescue:


        # there was an error, set error object
        - name: Set error fact
          ansible.builtin.set_fact:
            nfc_task: "{{ nfc_task | combine({
              'error': 1
            }) }}"
          no_log: "{{ nfc_pb_no_log_setup_facts | default(true) | bool }}"
          tags:
            - always


      always:


        # Check if an error occurred and fail the task
        - name: Error Check
          ansible.builtin.assert:
            that:
              - nfc_task.error | int == 0
            msg: Error occurred, fail the play run
          tags:
            - always


  post_tasks:

    - name: Tasks post_task
      block:


        # Check for error and fail play on error
        - name: Error Check
          ansible.builtin.assert:
            that:
              - nfc_task.error | int == 0
            msg: Error occurred, follow error path to fail play
          tags:
            - always


        ########################################################################
        #
        # Your tasks here
        #
        ########################################################################


      rescue:


        # there was an error, set error object
        - name: Set error fact
          ansible.builtin.set_fact:
            nfc_task: "{{ nfc_task | combine({
              'error': 1
            }) }}"
          no_log: "{{ nfc_pb_no_log_setup_facts | default(true) | bool }}"
          tags:
            - always


      always:


        # Task and automation end time.
        - name: Fetch End time
          ansible.builtin.set_fact:
            nfc_task_endtime: "{{ '%Y-%m-%dT%H:%M:%S %z' | strftime }}"
          no_log: "{{ nfc_pb_no_log_setup_facts | default(true) | bool }}"
          tags:
            - always


        # Set task end time
        - name: Set task Facts
          ansible.builtin.set_fact:
            nfc_tasks: "{{ nfc_tasks | default([]) + [ nfc_task | combine({
              'end': nfc_task_endtime | string
            }) ] }}"
          no_log: "{{ nfc_pb_no_log_setup_facts | default(true) | bool }}"
          tags:
            - always


        # Set automation end time.
        # Every playbook must set this variable so that the end time
        # is equal to the fail time or the end of a group of playbooks.
        - name: Set automation end time
          ansible.builtin.set_fact:
            nfc_automation: "{{ nfc_automation | combine({
              'time': nfc_automation.time | combine({
                'end': nfc_task_endtime | string
              })
            }) }}"
            nfc_task_endtime: null
          no_log: "{{ nfc_pb_no_log_setup_facts | default(true) | bool }}"
          tags:
            - always


        # Set the artifacts as a fact for subsequent playbook usage.
        # Note: variable 'per_host' must be 'false' so that the artifacts
        # work within AWX / Ansible Automation Platform.
        - name: Create Automation Artifact
          ansible.builtin.set_stats:
            data:
              nfc_automation: "{{ nfc_automation }}"
              nfc_tasks: "{{ nfc_tasks }}"
            per_host: false
          tags:
            - always


        # Final error check to fail the entire play run on error
        - name: Error Check
          ansible.builtin.assert:
            that:
              - nfc_task.error | int == 0
            msg: Error occurred, fail the play run
          tags:
            - always


  vars: {}
```
The above template playbook is designed for post-automation, should it be required to run. `nfc_automation` is for the entire play/workflow, with `nfc_tasks` being a list of the `nfc_task` dictionaries from each playbook. `nfc_task` is there for you to add your own artifacts to and, without any additional effort from you, it will be added to the global artifacts.

### Playbook Variables

Within any playbook that we create, any variable that is set within the playbook is to be prefixed with `nfc_pb_`. Currently we have the following variables that are reserved and set as part of how we structure our playbooks:

- `nfc_automation` Details on the play/run. See artifacts above for details.

- `nfc_pb_no_log_setup_facts` Boolean value used as a feature gate on whether to log `set_fact` tasks that are for setting up the play, i.e. artifacts. Setting this value to `false` will cause the tasks to be logged; see the example below this list.

- `nfc_tasks` List of all `nfc_task` dictionaries of the play. See artifacts above for details.
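For example, to see the setup tasks in the job output while debugging, the gate can be flipped at run time. A sketch only; the playbook path is illustrative:

``` bash
ansible-playbook playbooks/all.yaml -e nfc_pb_no_log_setup_facts=false
```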
## AWX / Tower / Automation Platform

### Prime host

If you use a system like AWX / Tower / Automation Platform, the inventory should be designed in a way that you have a prime host. The prime host is a single host that, once it exists, is capable of rebuilding all of the infrastructure within the inventory. Using the prime host method, you only require the variable secrets (vault encrypted) of the prime host, and only those pertinent to rebuilding the prime host. This should only be the backup decryption key (vault encrypted).

!!! warning "Warning"
    The prime host method requires that the backup decryption key be updated within the inventory whenever it changes. The vault encryption key must also be available somewhere that does not depend on this infrastructure existing, i.e. a password manager; otherwise you can't access the vault key when you need it most.

## ToDo

- Explain usage of file `.inventory_root`, which must exist as nfc_common _(todo: see kubernetes playbook/master)_ _may no longer be required as the project structure is known, along with using variable `playbook_dir`_
@ -1,81 +0,0 @@
---
title: Ansible Roles
description: No Fuss Computings Ansible Roles Projects
date: 2023-11-10
template: project.html
about: https://gitlab.com/nofusscomputing/projects/ansible
---

This section of the website contains Ansible roles and the details of how to use said projects. Across all of our Ansible roles we standardize as much as possible. This document contains the details of said standardization.

Our roles:

- Common

- Docker Management

- [Firewall](firewall/index.md)

- Git Configuration

- [Home Assistant](homeassistant/index.md)

- [Kubernetes](kubernetes/index.md)

- SSH

## Role Requirements

This section covers what must, by default, be part of all Ansible roles we create.

=== "ansible.builtin.set_stats"

    As part of the role, setting of Ansible stats with `ansible.builtin.set_stats` must be provided. This enables a single variable that can be used after the play has completed. Usage of a role that includes `ansible.builtin.set_stats` within AWX enables population of the artifacts and passing of the stats between workflows/job templates.
    ```yaml
    - name: Stat Values
      ansible.builtin.set_fact:
        stat_values: |
          {
            "host_{{ inventory_hostname | replace('.', '_') | replace('-', '_') }}": {
              "roles": {
                role_name: {
                  "enabled": true,
                  "installed": false,
                  "empty_list": [],
                  "empty_dict": {}
                }
              },
              playbooks: {
                "{{ inventory_hostname }}": "here"
              }
            }
          }

    - name: Create Final Stats not Per Host
      ansible.builtin.set_stats:
        data: "{{ stat_values | from_yaml }}"
        per_host: false
        aggregate: true

    - name: Clear Stat Values
      ansible.builtin.set_fact:
        stat_values: null
    ```

    - `Stat Values` is only required if the variable names require expansion. _Can be omitted if no variable expansion is required for the variable name._

    - `Create Final Stats not Per Host` sets the artifacts/stats.

    - `Clear Stat Values` removes the stat fact. Only required if using `Stat Values`.

    !!! tip "AWX Gotcha"
        AWX requires that `per_host` be set to `false` when setting stats for artifacts to work. Hence the structure of the artifacts above uses the hostname prefixed with `host_`. This method enables programmatic checking of whether a value is per host, due to the presence of `host_` in the dictionary name.

=== "Variable naming"

    - All role variables are to be prefixed with the role name.
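    A sketch of this convention, assuming a role named `firewall` (the variable names are illustrative only):

    ```yaml
    firewall_enabled: true          # every variable carries the role name as its prefix
    firewall_default_policy: drop
    ```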
@ -1,5 +0,0 @@
# Docker GLPI

| :red_circle: This page is a placeholder for the python-gitlab-management repo's docs. |
|:---|
| _If you can see this page there has been an error, please report the issue on gitlab_ |
@ -1,122 +0,0 @@
---
title: Infrastructure
description: No Fuss Computings Infrastructure project using open source software
date: 2024-01-13
template: project.html
about: https://gitlab.com/nofusscomputing/projects/ansible
---

This infrastructure project exists as an example of computing infrastructure that is based on open source software. The idea is to demonstrate ways of using open source software for your computing infrastructure, whether it be for the home or enterprise user. Whilst a lot of my hardware is not enterprise grade equipment, the devices I use will serve as a proof of concept. Particular focus in this project is going to be spent on automation. Why? Simply put, it reduces the requirement for additional people to achieve the same economy of effort. _Oh, and 'cause I can!!_

## Goal

Design, build and deploy computing infrastructure using open source software that could be used by both the enterprise and home user and, wherever possible, simplify to lower the bar of entry.

## Requirements

- Automated

- Multi-site

- Multi-tenant

## Areas

The areas of infrastructure that are covered are as detailed:

- Certificate Authority

- HyperVisor - _KubeVirt via K3s Kubernetes_

- Identity Management - _IPA_

- Identity Provider (SSO) - _Keycloak_

- IT Operations (ITOps)

    - Backup Management

    - Computer Lifecycle Management - _GLPI_

    - Help desk - _GLPI_

    - IP Address Management (IPAM) - _phpIPAM_

    - Knowledge Management

        - Playbooks

        - Runbooks

    - Logging Management

    - Metrics Management

    - Remote Desktop Support - _Mesh Central_

    - Software Library - _Pulp?????_

- IT Service Management (ITSM)

    - Change Management - _GLPI_

    - Config Management - _AWX_

    - Enterprise Service Management (ESM)

    - Incident Management - _GLPI_

    - IT Asset Management - _GLPI_

    - Patch Management - _Foreman_

    - Problem Management - _GLPI_

    - Request Management - _GLPI_

    - Service Catalog - _GLPI_

- Secrets Management - _Hashicorp Vault_

- Security Management

- Networking

    - DNS - _Bind??_

    - DHCP

    - Radius

    - TFTP

- Password Storage

- Storage - _Ceph_

- Virtual Desktop Infrastructure (VDI) - _Mesh Central ???_

- Website - _Markdown built with MKDocs_

## Workflow

Within the IT world there are multiple working components that support the business goal. With this in mind, workflows are required for the start and end points of the lifecycle of those components. This can be broken down into two items that become the workflows for the infrastructure; they are:

- Devices

- People

The remaining infrastructure and services workflows fall under one of the areas mentioned above. There is an argument to be made that devices do too; however, they are defined here so that, on the rare occasion the ITOps workflows are not followed, the devices are still captured.

### Devices

Of particular importance are the infrastructure's devices, whether they be computers, servers, laptops etc. There must be workflows that cater for discovery, existence and absence; without this it's likely that you will miss a device.

### People

Without a workflow for people, why are you even building the infrastructure?? As people access the system, there must be workflows, like those for devices, that cater for: new, discovery, existence, absence and leaving. Defining these workflows will aid in management, as well as define the requirements which can be used as the starting point for automating the workflows.
@ -1,5 +0,0 @@
# ITIL Runbooks

| :red_circle: This page is a placeholder for the python-gitlab-management repo's docs. |
|:---|
| _If you can see this page there has been an error, please report the issue on gitlab_ |
@ -1,5 +0,0 @@
# Docker GLPI

| :red_circle: This page is a placeholder for the python-gitlab-management repo's docs. |
|:---|
| _If you can see this page there has been an error, please report the issue on gitlab_ |
@ -91,20 +91,18 @@ class Data:

                if 'gitlab.com/-/ide/project' not in url:    # ignore gitlab ide links
                if 'nofusscomputing.com' not in url:    # ignore gitlab ide links

                    link = self.parse_url(url)

                    hyperlink_source_file = {'name': source_file, 'location': link_location}

                    if link['url_id'] in data['hyperlinks']:

                        data['hyperlinks'][link['url_id']]['source_files'].append(hyperlink_source_file)

                    else:

                        link['source_files'] = [ hyperlink_source_file ]
                        data['hyperlinks'][link['url_id']] = link


        events = [self.process_browser_log_entry(entry) for entry in self.driver.get_log('performance')]
@ -13,11 +13,7 @@ class Test:


    def setup_method(self):
        self.ignore_url_alive_check = {
            'gitlab.com': [
                'nofusscomputing/infrastructure/website/-/new/development'
            ]
        }
        pass


    @pytest.mark.parametrize(
@ -40,29 +36,12 @@ class Test:
        packages.urllib3.disable_warnings(category=InsecureRequestWarning)

        request = get(data['url'], verify=False)
        skip_test = False

        print(str(data) + str(request.status_code))

        if data['domain'] in self.ignore_url_alive_check:
            if data['request_path'] in self.ignore_url_alive_check[data['domain']]:
                skip_test = True

        if not skip_test:

            assert (
                request.status_code == 200
                or
                request.status_code == 401
                or
                request.status_code == 403
            ), (
                f"Hyperlink [{data['url_id']}] to location [{data['url']}] failed,"
                f"with status [{request.status_code}].")
        else:
            assert True
        assert request.status_code == 200, (
            f"Hyperlink [{data['url_id']}] to location [{data['url']}] failed,"
            f"with status [{request.status_code}].")


    @pytest.mark.parametrize(
Submodule website-template updated: f5a82d3604...992b54805b