chore!: separated galaxy deps and own collections; modified ansible script generation to use two paths for collections

REQUIRES REGENERATING ansible.cfg!
This commit is contained in:
NaeiKinDus 2025-02-23 00:00:00 +00:00
parent 4af69c31ce
commit 888590ed9f
Signed by: WoodSmellParticle
GPG key ID: 8E52ADFF7CA8AE56
188 changed files with 30 additions and 30 deletions

View file

@@ -0,0 +1,3 @@
# Ansible Collection - nullified.infrastructure
Documentation for the collection.

View file

@@ -0,0 +1,60 @@
---
# playbook file that contains the call for your role
- name: Fail if molecule group is missing
  hosts: localhost
  tasks:
    - name: Print some info
      ansible.builtin.debug:
        msg: "{{ groups }}"
    - name: Assert group existence
      ansible.builtin.assert:
        that: "'molecule' in groups"
        fail_msg: |
          molecule group was not found inside inventory groups: {{ groups }}
- name: Converge
  hosts: molecule
  gather_facts: true
  vars_files:
    - ../../../../../../../inventory/group_vars/all/vars.yml
    - ../../../../../../../inventory/group_vars/all/vault.yml
    - ./platform_vars.yml
  tasks:
    # FQCN + explicit task names below (were bare, unnamed `include_vars`
    # entries, flagged by ansible-lint fqcn[action-core] and name[missing];
    # every other action in this file already uses the ansible.builtin prefix)
    - name: Load shared inventory vars
      ansible.builtin.include_vars: ../../../../../../../inventory/group_vars/all/vars.yml
    - name: Load vault vars
      ansible.builtin.include_vars: ../../../../../../../inventory/group_vars/all/vault.yml
    - name: Load platform vars
      ansible.builtin.include_vars: ./platform_vars.yml
    - name: Platform hostvars
      ansible.builtin.debug:
        msg: "{{ hostvars[inventory_hostname] }}"
        verbosity: 1
    - name: Enable guest console access
      become: true
      ansible.builtin.systemd_service:
        name: serial-getty@ttyS0.service
        enabled: true
        state: restarted
    - name: Testing security role
      ansible.builtin.include_role:
        name: nullified.infrastructure.security
        tasks_from: main.yml
    - name: Testing common role
      ansible.builtin.include_role:
        name: nullified.infrastructure.common
        tasks_from: main.yml
    - name: Testing server role
      ansible.builtin.include_role:
        name: nullified.infrastructure.server
        tasks_from: main.yml
    - name: Testing development role
      ansible.builtin.include_role:
        name: nullified.infrastructure.development
        tasks_from: main.yml
    - name: Testing workstation role
      ansible.builtin.include_role:
        name: nullified.infrastructure.workstation
        tasks_from: main.yml
    - name: Testing gaming role
      ansible.builtin.include_role:
        name: nullified.infrastructure.gaming
        tasks_from: main.yml

View file

@@ -0,0 +1,47 @@
---
# playbook file used for creating the instances and storing data in instance-config
- name: Create
  hosts: localhost
  gather_facts: false
  connection: local
  tasks:
    # `vagrant` is a custom module shipped with the molecule-vagrant driver,
    # hence no fully-qualified collection name
    - name: Create instances
      vagrant:
        instances: "{{ molecule_yml.platforms }}"
        default_box: "{{ molecule_yml.driver.default_box | default('debian/bookworm64') }}"
        provider_name: "{{ molecule_yml.driver.provider.name | default(omit, true) }}"
        provision: "{{ molecule_yml.driver.provision | default(omit) }}"
        cachier: "{{ molecule_yml.driver.cachier | default(omit) }}"
        parallel: "{{ molecule_yml.driver.parallel | default(omit) }}"
        state: up
      register: server
    - name: VMs info
      ansible.builtin.debug:
        msg: "{{ server.results }}"
    # only rebuild the instance config when the vagrant module reported a change
    - name: Create molecule instances configuration
      when: server is changed # noqa no-handler
      block:
        - name: Populate instance config dict
          ansible.builtin.set_fact:
            instance_conf_dict:
              {
                "instance": "{{ item.Host }}",
                "address": "{{ item.HostName }}",
                "user": "{{ item.User }}",
                "port": "{{ item.Port }}",
                "identity_file": "{{ item.IdentityFile }}",
              }
          loop: "{{ server.results }}"
          loop_control:
            label: "{{ item.Host }}"
          register: instance_config_dict
        - name: Convert instance config dict to a list
          ansible.builtin.set_fact:
            instance_conf: "{{ instance_config_dict.results | map(attribute='ansible_facts.instance_conf_dict') | list }}"
        # the to_json | from_json round-trip normalizes AnsibleUnsafe wrappers
        # before dumping plain YAML to the instance-config file
        - name: Dump instance config
          ansible.builtin.copy:
            content: "{{ instance_conf | to_json | from_json | to_yaml }}"
            dest: "{{ molecule_instance_config }}"
            mode: "0600"

View file

@@ -0,0 +1,28 @@
---
# playbook used by molecule to tear down instances and reset the instance config
- name: Destroy
  hosts: localhost
  connection: local
  gather_facts: false
  tasks:
    - name: Destroy molecule instance(s) # noqa fqcn[action]
      vagrant:
        instances: "{{ molecule_yml.platforms }}"
        default_box: "{{ molecule_yml.driver.default_box | default('debian/bookworm64') }}"
        provider_name: "{{ molecule_yml.driver.provider.name | default(omit, true) }}"
        cachier: "{{ molecule_yml.driver.cachier | default(omit) }}"
        # NOTE(review): `item` is referenced without a loop here — presumably this
        # always falls back to the default(true); confirm against the driver docs
        force_stop: "{{ item.force_stop | default(true) }}"
        state: destroy
      register: server
    # reset to an empty mapping so the dump below clears stale instances
    - name: Populate instance config
      ansible.builtin.set_fact:
        instance_conf: {}
    # NOTE(review): sibling create.yml expresses this as `server is changed` —
    # same effect; consider aligning the two files
    - name: Dump instance config # noqa no-handler
      ansible.builtin.copy:
        content: |
          # Molecule managed
          {{ instance_conf | to_json | from_json | to_yaml }}
        dest: "{{ molecule_instance_config }}"
        mode: "0600"
      when: server.changed | bool

View file

@@ -0,0 +1,41 @@
---
# central configuration entry point for Molecule per scenario
dependency:
  name: galaxy
  options:
    requirements-file: requirements.yml
driver:
  name: vagrant
  provider:
    name: libvirt
  # driver-level flags read by create.yml (molecule_yml.driver.*)
  provision: false
  cachier: machine
  parallel: true
  default_box: debian/bookworm64
platforms:
  - name: debian-bookworm
    box: debian/bookworm64
    memory: 2048
    cpus: 4
    hostname: debian-bookworm
    interfaces:
      - auto_config: true
        network_name: private_network
        type: dhcp
    instance_raw_config_args: []
    config_options:
      # canonical boolean: the original `yes` is a YAML 1.1 truthy spelling
      # (yamllint `truthy`); `true` is unambiguous and loads identically
      ssh.keep_alive: true
      ssh.remote_user: 'vagrant'
    provider_options:
      video_type: vga
    provider_raw_config_args: []
    groups:
      - molecule
provisioner:
  name: ansible
  config_options:
    defaults:
      vault_password_file: ${HOME}/.config/ansible/vault-id
  inventory:
    group_vars:
      # NOTE(review): intentionally empty — group vars for `molecule` are
      # supplied by a separate vars file; confirm Molecule accepts a null here
      molecule:

View file

@@ -0,0 +1,4 @@
# scenario-level variable overrides loaded by converge.yml
# (presumably platform_vars.yml — TODO confirm filename)
custom_base_user_account: 'vagrant'
security_firewall_mangle_drop_privatenets: false
security_configure_resolve_conf: true
global_ip_dualstack: false

View file

@@ -0,0 +1,4 @@
# galaxy dependencies installed by molecule's `dependency` stage (requirements-file)
# NOTE(review): no version pins — these float to latest; pin for reproducible runs
collections:
  - community.docker
  - ansible.netcommon
  - kubernetes.core

View file

@@ -0,0 +1,13 @@
# Fetch a container's log (container id taken from the includer's `item`)
# and abort the play with the collected stderr.
- name: Retrieve container log
  ansible.builtin.command:
    cmd: >-
      {% raw %}
      docker logs
      {% endraw %}
      {{ item.stdout_lines[0] }}
  changed_when: false
  register: logfile_cmd
# NOTE(review): unconditional fail — this task file always aborts the run;
# presumably a failure-diagnostics helper included only on error paths. Confirm.
- name: Display container log
  ansible.builtin.fail:
    msg: "{{ logfile_cmd.stderr }}"

View file

@@ -0,0 +1,67 @@
### REQUIRED
# The namespace of the collection. This can be a company/brand/organization or product namespace under which all
# content lives. May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with
# underscores or numbers and cannot contain consecutive underscores
namespace: nullified
# The name of the collection. Has the same character restrictions as 'namespace'
name: infrastructure
# The version of the collection. Must be compatible with semantic versioning
version: 1.0.0
# The path to the Markdown (.md) readme file. This path is relative to the root of the collection
readme: README.md
# A list of the collection's content authors. Can be just the name or in the format 'Full Name <email> (url)
# @nicks:irc/im.site#channel'
authors:
  - Florian L. <git@0x2a.ninja>
### OPTIONAL but strongly recommended
# A short summary description of the collection
# NOTE(review): still the scaffold placeholder — replace before publishing
description: your collection description
# Either a single license or a list of licenses for content inside of a collection. Ansible Galaxy currently only
# accepts L(SPDX,https://spdx.org/licenses/) licenses. This key is mutually exclusive with 'license_file'
license:
  - GPL-2.0-or-later
# The path to the license file for the collection. This path is relative to the root of the collection. This key is
# mutually exclusive with 'license'
# NOTE(review): 'license' is set above; these keys are documented as mutually
# exclusive — consider removing this empty entry
license_file: ''
# A list of tags you want to associate with the collection for indexing/searching. A tag name has the same character
# requirements as 'namespace' and 'name'
tags: []
# Collections that this collection requires to be installed for it to be usable. The key of the dict is the
# collection label 'namespace.name'. The value is a version range
# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version
# range specifiers can be set and are separated by ','
dependencies: {}
# The URL of the originating SCM repository
repository: http://example.com/repository
# The URL to any online docs
documentation: http://docs.example.com
# The URL to the homepage of the collection/project
homepage: http://example.com
# The URL to the collection issue tracker
issues: http://example.com/issue/tracker
# A list of file glob-like patterns used to filter any files or directories that should not be included in the build
# artifact. A pattern is matched from the relative path of the file or directory of the collection directory. This
# uses 'fnmatch' to match the files or directories. Some directories and files like 'galaxy.yml', '*.pyc', '*.retry',
# and '.git' are always filtered. Mutually exclusive with 'manifest'
build_ignore: []
# A dict controlling use of manifest directives used in building the collection artifact. The key 'directives' is a
# list of MANIFEST.in style
# L(directives,https://packaging.python.org/en/latest/guides/using-manifest-in/#manifest-in-commands). The key
# 'omit_default_directives' is a boolean that controls whether the default directives are used. Mutually exclusive
# with 'build_ignore'
# manifest: null

View file

@@ -0,0 +1,2 @@
---
# Minimum ansible-core version required by this collection.
# Aligned with the rest of the commit: roles declare min_ansible_version 2.15
# and the github_artifact module declares version_added 2.15.0 — the previous
# '>=2.9.10' contradicted both.
requires_ansible: '>=2.15.0'

View file

@@ -0,0 +1,31 @@
# Collections Plugins Directory
This directory can be used to ship various plugins inside an Ansible collection. Each plugin is placed in a folder that
is named after the type of plugin it is in. It can also include the `module_utils` and `modules` directory that
would contain module utils and modules respectively.
Here is an example directory of the majority of plugins currently supported by Ansible:
```
└── plugins
    ├── action
    ├── become
    ├── cache
    ├── callback
    ├── cliconf
    ├── connection
    ├── filter
    ├── httpapi
    ├── inventory
    ├── lookup
    ├── module_utils
    ├── modules
    ├── netconf
    ├── shell
    ├── strategy
    ├── terminal
    ├── test
    └── vars
```
A full list of plugin types can be found at [Working With Plugins](https://docs.ansible.com/ansible-core/2.15/plugins/plugins.html).

View file

@@ -0,0 +1,407 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2023, Florian L. <git+ansible@pounce.tech>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
__metaclass__ = type
DOCUMENTATION = r'''
---
module: github_artifact
short_description: fetch assets from a GitHub repository release
description: vaguely similar to a package manager, but for GitHub artifacts.
version_added: "2.15.0"
options:
asset_name:
description: filename of the asset to retrieve, used only for release type; supports templating
type: str
required: false
default: ""
asset_type:
description: whether the asset is a release or just a tagged asset
type: str
required: true
choices:
- release
- tag
cmds:
description: commands to execute in order to install the downloaded asset; supports templating
type: list
elements: str
required: false
default: []
creates:
description: if provided and target file / directory exists, this step will **not** be run unless `--force` is specified
type: str
required: false
force:
description: forces the re-installation of the package no matter its state
type: bool
required: false
default: false
github_token:
description: a GitHub app token if you have one; limits impact of rate-limiting errors
type: str
required: false
repository:
description: repository to query, formatted like "<owner>/<repo>"
required: true
type: str
version:
description: version of the asset to fetch; defaults to `latest`
required: false
type: str
default: latest
notes:
- "Strings that allow the use of templating variables support the following:"
- " V(version): version of the system, B(NOT) the asset's;"
- " V(system): type of the OS, retrieved from C(platform.system), e.g. I(Linux), I(Darwin);"
- " V(machine): machine architecture, retrieved from C(platform.machine), e.g. I(x86_64), I(i386);"
- " V(asset_name): name of the selected asset from the GitHub metadata results, e.g. I(dive_0.11.0_linux_amd64.deb);"
- " V(asset_dirname): directory where the downloaded asset is located, e.g. I(/tmp/ansible-moduletmp-1695757626.5862153-xjpc5ip8);"
- " V(asset_filename): name of the download asset file, e.g. I(dive_0.11.0_linux_amd64bnha_1dr.deb);"
- " V(asset_version): version of the asset, retrieved directly from the GitHub metadata;"
- " all variables defined in C(/etc/os-release), lowercase."
author:
- Florian L. (@NaeiKinDus)
'''
EXAMPLES = r'''
- name: Install dependencies from GitHub
become: true
tags:
- molecule-idempotence-notest
nullified.infrastructure.github_artifact:
asset_type: tag
repository: smxi/inxi
cmds:
- tar -zxf {asset_dirname}/{asset_filename}
- install --group=root --mode=755 --owner=root smxi-inxi-*/inxi /usr/bin
- install --group=root --mode=644 --owner=root smxi-inxi-*/inxi.1 /usr/share/man/man1
- apt-get install libdata-dump-perl
creates: /usr/bin/inxi
'''
RETURN = r'''
# These are examples of possible return values, and in general should use other names for return values.
original_message:
description: The original name param that was passed in.
type: str
returned: always
sample: "hello world"
'''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib # noqa
from ansible.module_utils.urls import fetch_url, fetch_file # noqa
# Global AnsibleModule handle, assigned in main(); helper functions use it
# for fetch_url / fail_json / exit_json and to observe check_mode.
ANSIBLE_MODULE: AnsibleModule | None = None
# Flipped to True once the guarded stdlib imports below succeed.
LIB_IMPORTED = False
GITHUB_API_BASE = "https://api.github.com"
GITHUB_DOWNLOAD_BASE = "https://github.com"
GITHUB_API_VERSION = "2022-11-28"
# Base headers for every GitHub request; main() may add an Authorization
# bearer token when `github_token` is supplied.
DEFAULT_HEADERS: dict[str, str] = {
    "Content-Type": "application/json",
    "Accept": "application/json, application/vnd.github+json;q=0.8",
}
# Substitution values available to templated `asset_name` / `cmds` strings;
# populated in main() from platform facts and /etc/os-release entries.
TEMPLATE_ASSET_NAME_VARS: dict[str, str] = {
    "version": "",  # platform.version(), e.g. "12 (bookworm)"
    "system": "",   # platform.system(), e.g. "Linux", "Darwin"
    "machine": ""   # platform.machine(), e.g. "x86_64", "i386"
}
# Stdlib imports are guarded so a missing module is reported via Ansible's
# missing_required_lib in main() instead of an unhandled ImportError.
try:
    from datetime import datetime
    from difflib import SequenceMatcher
    from json import loads
    from os import environ, sep, path
    from platform import system, machine
    from typing import Any
    LIB_IMPORTED = True
except ModuleNotFoundError as excp:
    import traceback
    IMPORT_LIB_ERROR = traceback.format_exc()
    IMPORT_LIB_NAME = excp.name
try:
    from platform import freedesktop_os_release
except (ModuleNotFoundError, ImportError):
    FREEDESKTOP_OS_RELEASE_FILE = '/etc/os-release'

    def freedesktop_os_release() -> dict[str, str]:
        """Minimal fallback for platform.freedesktop_os_release() (Python < 3.10).

        Parses FREEDESKTOP_OS_RELEASE_FILE into a key/value dict with
        surrounding double quotes stripped from values; returns an empty
        dict when the file does not exist.
        """
        try:
            with open(FREEDESKTOP_OS_RELEASE_FILE, 'r') as fd:
                data = fd.read()
            os_info: dict[str, str] = {}
            for line in data.splitlines():
                line = line.strip()
                # blank lines and '#' comments are valid in os-release(5);
                # the original dict-comprehension crashed on them
                if not line or line.startswith('#'):
                    continue
                # split on the FIRST '=' only: values such as
                # VERSION="12 (bookworm)" or URLs may themselves contain '=',
                # which made the original `line.split('=')` unpacking raise
                # ValueError (too many values)
                key, _, value = line.partition('=')
                os_info[key] = value.strip('"')
        except FileNotFoundError:
            return dict()
        return os_info
def find_compatible_asset(assets_list: list[dict[str, str | int | float]], asset_name: str) -> dict[str, str] | None:
""" takes a list of assets and tries to find the most relevant one; assumes only one asset is required """
best_match: dict[str, str] = {}
matched_name_ratio: float = 0.0
if len(assets_list) == 0:
return None
elif len(assets_list) == 1:
return {
"asset_name": assets_list[0]["name"],
"download_url": assets_list[0]["browser_download_url"],
"match_ratio": "1.0"
}
sm = SequenceMatcher(a=asset_name, b="")
for asset in assets_list:
if asset_name == asset["name"]:
return {
"asset_name": asset["name"],
"download_url": asset["browser_download_url"],
"match_ratio": "1.0"
}
sm.set_seq2(asset["name"])
ratio = sm.ratio()
if ratio > matched_name_ratio:
best_match = asset
matched_name_ratio = ratio
if not best_match:
return None
return {
"asset_name": best_match["name"],
"download_url": best_match["browser_download_url"],
"match_ratio": "{:.5f}".format(matched_name_ratio)
}
def fetch_github_data(url: str) -> tuple[dict | None, dict[str, int]]:
    """ query GitHub API and return a JSON formatted response along with HTTP info data """
    # fetch_url relies on the module-level ANSIBLE_MODULE (set in main()) and
    # DEFAULT_HEADERS, which may carry a Bearer token when github_token was given
    response, info = fetch_url(ANSIBLE_MODULE, url, headers=DEFAULT_HEADERS)
    # fetch_url reports transport problems through `info`; 999 means "no status"
    http_status: int = info.get("status", 999)
    if http_status >= 400:
        # caller inspects `info` for the failure details
        return None, info
    return loads(response.read().decode("utf-8")), info
def get_released_asset(artifact: dict[str, str]) -> tuple[dict[str, str], dict[str, int] | None]:
    """ fetch asset metadata using the release GitHub API """
    repository: str = artifact["repository"]
    version: str = artifact["version"]
    # "latest" hits /releases/latest, anything else /releases/tags/<version>
    releases_url: str = "{}/repos/{}/releases/{}{}".format(
        GITHUB_API_BASE,
        repository,
        "tags/" if version != "latest" else "",
        version
    )
    # check mode: fabricate a plausible answer without any network traffic
    if ANSIBLE_MODULE.check_mode:
        return {
            "asset_name": "{}/{}.ext".format(repository, version),
            "download_url": "download_url",
            "match_confidence": "match_ratio",
            "version": version
        }, {}
    response_data, info = fetch_github_data(releases_url)
    if not response_data:
        # an "error" key in the metadata signals failure to fetch_metadata/main
        return {"error": "No release found for version {}. Requested source: {}".format(version, releases_url)}, info
    # asset_name may contain {version}/{system}/{machine}/os-release placeholders
    asset_name = artifact.get("asset_name", "").format(**TEMPLATE_ASSET_NAME_VARS)
    asset = find_compatible_asset(response_data["assets"], asset_name=asset_name)
    if not asset:
        if not asset_name:
            return {"error": "No matching asset detected, try specifying the desired asset name in arguments list"}, info
        return {"error": "No asset matching name {} found".format(asset_name)}, info
    return {
        "asset_name": asset["asset_name"],
        "download_url": asset["download_url"],
        "match_confidence": asset["match_ratio"],
        "version": response_data["tag_name"] or response_data["name"]
    }, info
def get_tagged_asset(artifact: dict[str, Any]) -> tuple[dict[str, str], dict[str, int] | None]:
    """ fetch asset metadata using the tags GitHub API """
    repository: str = artifact["repository"]
    version: str = artifact["version"]
    tags_url: str = "{}/repos/{}/tags?per_page=1".format(GITHUB_API_BASE, repository)
    # a pinned version maps directly onto the tag tarball URL — no API call needed
    if version != "latest":
        return {
            "asset_name": "{}.tar.gz".format(version),
            "download_url": "{}/{}/archive/refs/tags/{}.tar.gz".format(GITHUB_DOWNLOAD_BASE, repository, version),
            "version": version
        }, None
    # check mode: return placeholders without touching the network
    if ANSIBLE_MODULE.check_mode:
        return {
            "asset_name": "asset_name",
            "download_url": "download_url",
            "version": version
        }, {}
    response_data, info = fetch_github_data(tags_url)
    if not response_data:
        return {
            "error": "No tagged asset found for '{}'".format(tags_url)
        }, info
    # per_page=1 → the first (and only) entry is the most recent tag
    latest_tag = response_data[0]
    return {
        "asset_name": "{}.tar.gz".format(latest_tag.get("name", "unknown")),
        "download_url": latest_tag.get("tarball_url"),
        "version": latest_tag.get("name", "latest")
    }, info
def fetch_metadata(artifact: dict[str, str | list[str]]) -> dict[str, str] | None:
    """ retrieve metadata from the specified repository """
    # dispatch on asset_type; both helpers return (metadata, http_info)
    fetcher = get_tagged_asset if artifact["asset_type"] == "tag" else get_released_asset
    metadata, info = fetcher(artifact)
    if info:
        # surface GitHub's rate-limit headers so callers can diagnose throttling
        reset_stamp = info.get("x-ratelimit-reset", None)
        metadata["rate_limit_max"] = info.get("x-ratelimit-limit", "unknown")
        metadata["rate_limit_remaining"] = info.get("x-ratelimit-remaining", "unknown")
        if reset_stamp:
            metadata["rate_limit_reset_date"] = datetime.fromtimestamp(float(reset_stamp)).isoformat()
        else:
            metadata["rate_limit_reset_date"] = "unknown"
    return metadata
def main():
    """Module entry point: resolve a GitHub asset, download it and run the
    configured install commands, reporting state through exit_json/fail_json."""
    global ANSIBLE_MODULE
    module_args: dict[str, dict[str, Any]] = {
        "asset_name": {
            "type": "str",
            "required": False,
            "default": ""
        },
        "asset_type": {
            "type": "str",
            "required": True,
            "choices": ["release", "tag"],
        },
        "cmds": {
            "type": "list",
            "elements": "str",
            "required": False,
            "default": []
        },
        "creates": {
            "type": "str",
            "required": False
        },
        "force": {
            "type": "bool",
            "required": False,
            "default": False
        },
        "repository": {
            "type": "str",
            "required": True
        },
        "version": {
            "type": "str",
            "required": False,
            "default": "latest"
        },
        "github_token": {
            "type": "str",
            "required": False,
            "no_log": True
        },
    }
    result: dict[str, Any] = {
        "changed": False,
        "commands": [],
        "failed": False,
        "filepath": "",
        "msg": "",
        "state": "",
        "version": ""
    }
    ANSIBLE_MODULE = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
    # Bail out on missing stdlib dependencies FIRST: `environ` and `path` below are
    # bound inside the guarded import block, and the original code only checked
    # LIB_IMPORTED after using them — a failed import therefore surfaced as a raw
    # NameError instead of a clean missing_required_lib message.
    if not LIB_IMPORTED:
        ANSIBLE_MODULE.fail_json(msg=missing_required_lib(IMPORT_LIB_NAME), exception=IMPORT_LIB_ERROR)  # pylint: disable=used-before-assignment
    # test hook: force check_mode from the environment (used by manual runs)
    if "FORCE_CHECK_MODE" in environ:
        # environ values are always strings, so only string spellings can match
        ANSIBLE_MODULE.check_mode = environ.get("FORCE_CHECK_MODE", False) in ["1", "True", "true"]
    if ANSIBLE_MODULE.params["github_token"]:
        DEFAULT_HEADERS["Authorization"] = "Bearer {}".format(ANSIBLE_MODULE.params["github_token"])
    # `creates` short-circuit: skip all work when the target exists, unless forced
    creates_file: str | None = ANSIBLE_MODULE.params.get("creates", None)
    if creates_file and path.exists(creates_file) and not ANSIBLE_MODULE.params.get("force", False):
        result["state"] = "ignored"
        ANSIBLE_MODULE.exit_json(**result)
    # expose os-release entries (lowercased keys) plus platform facts to templating
    TEMPLATE_ASSET_NAME_VARS.update({key.lower(): value for key, value in freedesktop_os_release().items()})
    TEMPLATE_ASSET_NAME_VARS["system"] = system().lower()
    TEMPLATE_ASSET_NAME_VARS["machine"] = machine().lower()
    artifact: dict[str, str | list[str]] = {}
    for param_name in ["asset_name", "asset_type", "cmds", "repository", "version"]:
        artifact[param_name] = ANSIBLE_MODULE.params[param_name]
    # normalize explicit empty/None values back to the documented defaults
    if not artifact["version"]:
        artifact["version"] = "latest"
    if not artifact["cmds"]:
        artifact["cmds"] = []
    asset_data: dict[str, str] = fetch_metadata(artifact)
    result["rate_limit_remaining"] = asset_data.get("rate_limit_remaining", "unknown")
    result["rate_limit_max"] = asset_data.get("rate_limit_max", "unknown")
    result["version"] = asset_data.get("version", artifact.get("version"))
    if "error" in asset_data:
        result["state"] = "fetch failed"
        result["msg"] = asset_data.get("error", "unknown error encountered")
        result["failed"] = True
        ANSIBLE_MODULE.fail_json(**result)
    # download artifact
    if ANSIBLE_MODULE.check_mode:
        result["filepath"] = "unknown"
    else:
        result["filepath"] = fetch_file(ANSIBLE_MODULE, asset_data.get("download_url", "unknown"), decompress=False)
    TEMPLATE_ASSET_NAME_VARS["asset_name"] = asset_data.get("asset_name", "unknown")
    TEMPLATE_ASSET_NAME_VARS["asset_version"] = asset_data.get("version", "unknown")
    # split the downloaded path into dirname/filename for command templating
    parts = result["filepath"].rsplit(sep, 1)
    TEMPLATE_ASSET_NAME_VARS["asset_dirname"] = parts[0] if len(parts) > 1 else ""
    TEMPLATE_ASSET_NAME_VARS["asset_filename"] = parts[1] if len(parts) > 1 else parts[0]
    # install artifact: expand templates, then run each command in the module tmpdir
    artifact_commands = [line.format(**TEMPLATE_ASSET_NAME_VARS) for line in artifact["cmds"]]
    if ANSIBLE_MODULE.check_mode:
        result["commands"] = artifact_commands
        result["state"] = "should be installed" if len(artifact_commands) else "should be downloaded"
    else:
        for command_line in artifact_commands:
            cmd_rc, cmd_out, cmd_err = ANSIBLE_MODULE.run_command(command_line, use_unsafe_shell=True, cwd=ANSIBLE_MODULE.tmpdir)
            result["changed"] = True
            result["commands"].append({
                "command": command_line,
                "stdout": cmd_out,
                "stderr": cmd_err,
                "ret_code": cmd_rc
            })
            # abort on the first failing command, keeping its output in `commands`
            if cmd_rc:
                result["state"] = "installation failed"
                result["msg"] = cmd_err
                result["failed"] = True
                ANSIBLE_MODULE.fail_json(**result)
    result["state"] = "installed" if len(artifact_commands) else "downloaded"
    result["msg"] = "Successful"
    ANSIBLE_MODULE.exit_json(**result)


if __name__ == "__main__":
    main()

View file

@@ -0,0 +1,16 @@
{
"ANSIBLE_MODULE_ARGS": {
"github_token": "",
"asset_type": "tag",
"repository": "smxi/inxi",
"version": "3.3.29-1",
"cmds": [
"echo \"asset_name: {asset_name}\nasset_dirname: {asset_dirname}\"",
"echo \"asset_filename: {asset_filename}\nasset_version: {asset_version}\"",
"echo \"system: {system}\nmachine: {machine}\nversion: {version}\"",
"ls -lahv {asset_dirname}",
"test -f {asset_dirname}/{asset_filename}"
],
"creates": "/usr/local/bin/inxi2"
}
}

View file

@@ -0,0 +1,13 @@
---
# role defaults for nullified.infrastructure.common — override per host/group
common_apt_packages: []
# components appended to the stock Debian apt sources
common_apt_source_components: ["contrib", "non-free", "non-free-firmware"]
common_apt_use_deb822_format: false
common_git_email: ""
common_git_enabled: false
common_git_force_sign: false
common_git_signing_key: ""
common_git_username: ""
# falls back to an empty string when no custom token is provided
common_github_token: "{{ custom_github_token | default('') }}"
common_install_firmware_tools: true
common_install_fonts: false
common_user_account: "{{ custom_base_user_account }}"

View file

@@ -0,0 +1,22 @@
---
galaxy_info:
  author: Florian L.
  namespace: nullified
  description: Setup common tasks (e.g. users, CLI tools)
  # issue_tracker_url: http://example.com/issue/tracker
  license: MIT
  # quoted: unquoted 2.15 is parsed as a YAML float (and e.g. 2.10 would
  # silently collapse to 2.1) — version strings must stay strings
  min_ansible_version: "2.15"
  # https://galaxy.ansible.com/api/v1/platforms/
  platforms:
    - name: Debian
      versions:
        - bookworm
  galaxy_tags:
    - github
    - assets
    - utils
    - system
dependencies: []

View file

@@ -0,0 +1,41 @@
---
# Create the user's base directory layout and drop common dotfiles in place.
- name: '[home] get user account information'
  ansible.builtin.getent:
    database: passwd
    key: "{{ common_user_account }}"
    split: ":"
  changed_when: false
  # only query passwd when facts don't already hold this user's entry
  when: ansible_facts['getent_passwd'] is undefined or common_user_account not in ansible_facts['getent_passwd']
- name: '[home] create common directories'
  become: true
  become_user: "{{ common_user_account }}"
  ansible.builtin.file:
    # getent_passwd[user][4] is the home-directory field of the passwd entry
    path: "{{ ansible_facts['getent_passwd'][common_user_account][4] }}/{{ item }}"
    state: directory
    mode: '0750'
  loop:
    - .local/bin
    - .local/share/fonts
    - .config
    - .ssh
- name: '[home] setup home files'
  become: true
  become_user: "{{ common_user_account }}"
  block:
    - name: '[home] git configuration'
      ansible.builtin.template:
        src: ../templates/home/.gitconfig.j2
        dest: "{{ ansible_facts['getent_passwd'][common_user_account][4] }}/.gitconfig"
        mode: '0640'
      when: common_git_enabled is truthy
    - name: '[home] basic files'
      ansible.builtin.copy:
        src: "../templates/home/{{ item.name }}"
        dest: "{{ ansible_facts['getent_passwd'][common_user_account][4] }}/{{ item.name }}"
        # default file mode unless the loop item specifies one
        mode: "{{ item.mode | default('0640') }}"
      loop:
        - { name: ".lessfilter", mode: '0750' }
        - { name: ".pythonrc" }

View file

@@ -0,0 +1,127 @@
---
# Main task list for the common role: apt sources, base packages, GitHub tools,
# then per-user home and shell setup.
- name: '[apt] verify components of default sources'
  become: true
  block:
    # legacy one-line sources.list format
    - name: '[apt] default source.list'
      ansible.builtin.replace:
        path: '/etc/apt/sources.list'
        # append each component only when not already present on the deb line
        regexp: '^(deb((?!{{ item }}).)+)$'
        replace: '\1 {{ item }}'
      loop: '{{ common_apt_source_components }}'
      when: common_apt_use_deb822_format is falsy
    # deb822 (debian.sources) format
    - name: '[apt] default deb822 debian.sources'
      ansible.builtin.replace:
        path: '/etc/apt/sources.list.d/debian.sources'
        regexp: '^(Components: ((?!{{ item }}).)+)$'
        replace: '\1 {{ item }}'
      loop: '{{ common_apt_source_components }}'
      when: common_apt_use_deb822_format is truthy
- name: '[apt] install dependencies and tools'
  become: true
  ansible.builtin.apt:
    update_cache: true
    force_apt_get: true
    cache_valid_time: 0
    pkg:
      - acl
      - apt-transport-https
      - alpine
      - bzip2
      - catimg
      - cron
      - curl
      - dateutils
      - emacs-nox
      - firmware-misc-nonfree
      - firmware-linux-nonfree
      - git
      - iotop
      - ioping
      - jq
      - knot-dnsutils
      - less
      - libdata-dump-perl # inxi
      - libxml-dumper-perl # inxi
      - lm-sensors
      - ncdu
      - nvme-cli
      - procps
      - python3-pygments
      - rsync
      - smartmontools
      - tree
      - xz-utils
    state: present
- name: 'install firmware management tools'
  become: true
  ansible.builtin.apt:
    update_cache: true
    force_apt_get: true
    cache_valid_time: 0
    pkg:
      - fwupd
      - gir1.2-fwupd-2.0 # fwupd
  when: common_install_firmware_tools
# install tools published as GitHub releases/tags via the collection's module
- name: '[github] install tools'
  become: true
  nullified.infrastructure.github_artifact:
    asset_name: "{{ item.asset_name | default('') }}"
    asset_type: "{{ item.asset_type }}"
    cmds: "{{ item.cmds | default([]) }}"
    creates: "{{ item.creates | default('') }}"
    github_token: "{{ common_github_token }}"
    repository: "{{ item.repository }}"
    version: "{{ item.version | default('') }}"
  loop:
    - repository: smxi/inxi
      asset_type: tag
      cmds:
        - tar -zxf {asset_dirname}/{asset_filename}
        - install --group=root --mode=755 --owner=root smxi-inxi-*/inxi /usr/local/bin
        - install --group=root --mode=644 --owner=root smxi-inxi-*/inxi.1 /usr/share/man/man1
      creates: /usr/local/bin/inxi
    - repository: sharkdp/bat
      asset_name: bat_{version}_amd64.deb
      asset_type: release
      creates: /usr/bin/bat
      cmds:
        - dpkg -i {asset_dirname}/{asset_filename}
    - repository: aristocratos/btop
      asset_name: btop-x86_64-linux-musl.tbz
      asset_type: release
      creates: /usr/bin/btop
      cmds:
        - tar -xjf {asset_dirname}/{asset_filename}
        - install --group=root --mode=755 --owner=root btop/bin/btop /usr/bin
        - mkdir /usr/share/btop || true
        - cp -pr btop/themes /usr/share/btop
    - repository: eza-community/eza
      asset_name: eza_x86_64-unknown-linux-gnu.tar.gz
      asset_type: release
      creates: /usr/bin/eza
      cmds:
        - tar -zxf {asset_dirname}/{asset_filename}
        - install --group=root --mode=755 --owner=root eza /usr/bin
    - repository: muesli/duf
      asset_name: duf_{version}_linux_amd64.deb
      asset_type: release
      creates: /usr/bin/duf
      cmds:
        - dpkg -i {asset_dirname}/{asset_filename}
    # NOTE(review): this entry looks truncated — `asset_name` is empty and the
    # required `asset_type` is missing, so github_artifact would fail on this
    # item; restore the full mikefarah/yq entry — TODO confirm against the repo
    - repository: mikefarah/yq
      asset_name:
- name: '[apt] install custom packages'
  become: true
  ansible.builtin.apt:
    update_cache: true
    force_apt_get: true
    cache_valid_time: 3600
    pkg: "{{ common_apt_packages }}"
# NOTE(review): bare include_tasks — consider ansible.builtin.include_tasks + names
- include_tasks: home_setup.yml
- include_tasks: shell_customization.yml

View file

@@ -0,0 +1,89 @@
---
# Install ZSH + Oh-My-ZSH + powerlevel10k, optional fonts, and switch the
# user's login shell.
- name: '[home] get user account information'
  ansible.builtin.getent:
    database: passwd
    key: "{{ common_user_account }}"
    split: ":"
  changed_when: false
  # only query passwd when facts don't already hold this user's entry
  when: ansible_facts['getent_passwd'] is undefined or common_user_account not in ansible_facts['getent_passwd']
- name: '[shell] install ZSH and dependencies'
  become: true
  ansible.builtin.apt:
    update_cache: true
    force_apt_get: true
    cache_valid_time: 3600
    pkg:
      - git
      - zsh
    state: present
- name: '[shell] install custom fonts'
  become: true
  become_user: "{{ common_user_account }}"
  block:
    # package install must run as root even though the block runs as the user
    - name: '[fonts] add fonts tooling'
      become_user: root
      ansible.builtin.apt:
        update_cache: true
        force_apt_get: true
        cache_valid_time: 3600
        pkg:
          - fontconfig
    - name: '[fonts] adding fonts'
      ansible.builtin.copy:
        src: ../assets/fonts/
        dest: "{{ ansible_facts['getent_passwd'][common_user_account][4] }}/.local/share/fonts"
        mode: '0640'
    - name: '[fonts] refresh fonts cache'
      ansible.builtin.command:
        cmd: fc-cache
      changed_when: false
  when: common_install_fonts is truthy
- name: '[shell] install Oh-My-ZSH'
  become: true
  become_user: "{{ common_user_account }}"
  block:
    - name: '[omz] get install script'
      ansible.builtin.get_url:
        url: https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh
        dest: /tmp/zsh-install.sh
        mode: '0750'
    # `creates` makes this idempotent: skipped once ~/.oh-my-zsh exists
    - name: '[omz] install OMZ'
      ansible.builtin.command:
        cmd: sh /tmp/zsh-install.sh --unattended
        creates: "{{ ansible_facts['getent_passwd'][common_user_account][4] }}/.oh-my-zsh"
- name: '[shell] install powerlevel10k customization for OMZ'
  become: true
  become_user: "{{ common_user_account }}"
  ansible.builtin.git:
    repo: https://github.com/romkatv/powerlevel10k.git
    dest: "{{ ansible_facts['getent_passwd'][common_user_account][4] }}/.oh-my-zsh/custom/themes/powerlevel10k"
    depth: 1
- name: '[home] copy zsh files'
  become: true
  become_user: "{{ common_user_account }}"
  ansible.builtin.copy:
    src: "../templates/home/{{ item }}"
    dest: "{{ ansible_facts['getent_passwd'][common_user_account][4] }}/{{ item }}"
    mode: '0640'
  loop:
    - .p10k.zsh
    - .zsh_aliases
    - .zsh_completions
    - .zsh_exports
    - .zsh_functions
    - .zshrc
- name: '[shell] update user shell to ZSH'
  become: true
  ansible.builtin.user:
    name: "{{ common_user_account }}"
    shell: "/usr/bin/zsh"
    state: present

View file

@ -0,0 +1,107 @@
[user]
name = {{ common_git_username }}
email = {{ common_git_email }}
{%- if common_git_force_sign and common_git_signing_key +%}
signingkey = {{ common_git_signing_key }}
{%- endif +%}
[commit]
{% if common_git_force_sign -%}
gpgsign = true
{%- else -%}
gpgsign = false
{%- endif +%}
[tag]
{% if common_git_force_sign -%}
gpgsign = true
{%- else -%}
gpgsign = false
{%- endif +%}
{%- raw %}
[alias]
br = "branch"
ci = "commit"
cmp = "!f() { git log --graph --color --boundary --oneline HEAD...origin/$1; }; f"
co = "checkout"
cp = "cherry-pick"
cpo = "cherry-pick --strategy=recursive -Xours --allow-empty"
cpt = "cherry-pick --strategy=recursive -Xtheirs --allow-empty"
dm = "log --graph --color --boundary --oneline HEAD...origin/master"
dup = "!git log --graph --color --boundary --oneline HEAD...origin/$(git rev-parse --abbrev-ref HEAD)"
psuo = "!git push --set-upstream origin $(git rev-parse --abbrev-ref HEAD)"
rf = "!git reflog --date=iso"
ru = "remote update"
rup = "!f() { git remote update && git pull --rebase; }; f"
pr = "pull --rebase"
st = "status"
subup = "!git submodule foreach git remote update"
undo = "!f() { git reset --soft HEAD~${1:-1}; }; f"
lg = lg1
lg1 = lg1-specific --all
lg2 = lg2-specific --all
lg3 = lg3-specific --all
lg1-specific = log --graph --abbrev-commit --decorate --format=format:'%C(bold blue)%h%C(reset) - %C(bold green)(%ar)%C(reset) %C(white)%s%C(reset) %C(dim white)- %an%C(reset)%C(auto)%d%C(reset)'
lg2-specific = log --graph --abbrev-commit --decorate --format=format:'%C(bold blue)%h%C(reset) - %C(bold cyan)%aD%C(reset) %C(bold green)(%ar)%C(reset)%C(auto)%d%C(reset)%n'' %C(white)%s%C(reset) %C(dim white)- %an%C(reset)'
lg3-specific = log --graph --abbrev-commit --decorate --format=format:'%C(bold blue)%h%C(reset) - %C(bold cyan)%aD%C(reset) %C(bold green)(%ar)%C(reset) %C(bold cyan)(committed: %cD)%C(reset) %C(auto)%d%C(reset)%n'' %C(white)%s%C(reset)%n'' %C(dim white)- %an <%ae> %C(reset) %C(dim white)(committer: %cn <%ce>)%C(reset)'
[core]
editor = emacs
autocrlf = input
pager = delta
[color]
status = auto
branch = auto
interactive = auto
diff = auto
ui = true
pager = true
[delta]
navigate = true
line-numbers = true
[push]
default = simple
[merge]
conflictstyle = diff3
[pull]
rebase = true
[diff]
colorMoved = default
[diff "pdf"]
textconv = pdfinfo
[diff "pdfdiff"]
command = diffpdf
[init]
defaultBranch = main
[interactive]
diffFilter = delta --color-only
[submodule]
recurse = true
[advice]
addEmbeddedRepo = false
addEmptyPathspec = false
addIgnoredFile = false
amWorkDir = false
checkoutAmbiguousRemoteBranchName = false
commitBeforeMerge = false
detachedHead = false
fetchShowForcedUpdates = false
ignoredHook = false
implicitIdentity = false
nestedTag = false
pushAlreadyExists = true
pushFetchFirst = true
pushNeedsForce = true
pushNonFFCurrent = true
pushNonFFMatching = true
pushRefNeedsUpdate = true
pushUnqualifiedRefname = true
pushUpdateRejected = true
resetQuiet = false
resolveConflict = true
rmHints = false
sequencerInUse = false
statusAheadBehind = false
statusHints = false
statusUoption = false
submoduleAlternateErrorStrategyDie = false
waitingForEditor = false
{% endraw %}

View file

@ -0,0 +1,68 @@
#!/bin/sh
# Best effort auto-pygmentization with transparent decompression
# (c) Reuben Thomas 2012
# This program is in the public domain.
# Strategy: first see if pygmentize can find a lexer; if not, ask file; if that finds nothing, fail
# Set the environment variable PYGMENTIZE_OPTS to configure pygments.
# This program can be used as a .lessfilter for the less pager to auto-color less's output
# FIX: "$1" is now quoted everywhere so filenames containing spaces or glob
# characters work; backticks replaced by $(...). $PYGMENTIZE_OPTS and
# $file_common_opts stay deliberately unquoted: word splitting is wanted there.
if [ "$(pygmentize -N "$1")" != "text" ]; then
    pygmentize $PYGMENTIZE_OPTS "$1"
    exit 0
fi
file_common_opts="--brief --dereference --uncompress"
unset lexer
case $(file --mime-type $file_common_opts "$1") in
    application/xml|image/svg+xml) lexer=xml;;
    text/html) lexer=html;;
    text/troff) lexer=nroff;;
    text/x-asm) lexer=nasm;;
    text/x-awk) lexer=awk;;
    text/x-c) lexer=c;;
    text/x-c++) lexer=cpp;;
    text/x-diff) lexer=diff;;
    text/x-fortran) lexer=fortran;;
    text/x-gawk) lexer=gawk;;
    text/x-java) lexer=java;;
    text/x-lisp) lexer=common-lisp;;
    text/x-lua) lexer=lua;;
    text/x-makefile) lexer=make;;
    text/x-msdos-batch) lexer=bat;;
    text/x-nawk) lexer=nawk;;
    text/x-pascal) lexer=pascal;;
    text/x-perl) lexer=perl;;
    text/x-php) lexer=php;;
    text/x-po) lexer=po;;
    text/x-python) lexer=python;;
    text/x-ruby) lexer=ruby;;
    text/x-shellscript) lexer=sh;;
    text/x-tcl) lexer=tcl;;
    text/x-tex|text/x-texinfo) lexer=latex;; # FIXME: texinfo really needs its own lexer
    # Types that file outputs which pygmentize didn't support as of file 5.11, pygments 1.5
    # text/calendar
    # text/PGP
    # text/rtf
    # text/texmacs
    # text/x-bcpl
    # text/x-info
    # text/x-m4
    # text/x-vcard
    # text/x-xmcd
esac
encoding=$(file --mime-encoding $file_common_opts "$1")
if [ -n "$lexer" ]; then
    # FIXME: Specify input encoding rather than output encoding https://bitbucket.org/birkenfeld/pygments-main/issue/800
    # FIXME: Encoding argument ignored on stdin https://bitbucket.org/birkenfeld/pygments-main/issue/799
    pygmentize -O encoding=$encoding,outencoding=UTF-8 $PYGMENTIZE_OPTS -l $lexer "$1"
    exit 0
fi
exit 1

View file

@ -0,0 +1,7 @@
# Interactive Python startup file: enable tab completion when readline is
# available (pointed to by PYTHONSTARTUP — TODO confirm against the caller).
try:
    import readline
except ImportError:
    # readline is unavailable on some builds/platforms; degrade gracefully.
    print("Module readline not available.")
else:
    # Importing rlcompleter registers the default completer used by readline.
    import rlcompleter
    readline.parse_and_bind("tab: complete")

View file

@ -0,0 +1,36 @@
# Utilities
alias dig=ydig
alias e="emacs"
alias grep="egrep --color"
alias cpr="rsync -rlptgoDAXhP --info=all0,progress2"
alias rcp="cpr"
alias ll="eza -lah --color"
alias l="eza -l -g --icons --all --all"
alias ls="eza"
alias xclip="xclip -sel clipboard"
alias git='GIT_COMMITTER_DATE="$(date +%Y-%m-%d) 00:00:00+0000" GIT_AUTHOR_DATE="$(date +%Y-%m-%d) 00:00:00+0000" git'
alias cat=batcat
alias less=batcat
alias bat=batcat
alias emacs='emacsclient --create-frame --alternate-editor=""'
# Dev
alias dcl="docker container ls -a --format='{{ .ID }}\t{{ .Names }}\t{{ index (split .Status \" \") 0 }}' | sort -k3r -k2 | column -t -N ID,Name,State"
alias composer80="composerX 8.0"
alias composer81="composerX 8.1"
alias composer82="composerX 8.2"
alias phpqa="phpqa82"
alias phpqa81='docker run --init -it --rm -v "$(pwd):/project" -v "$(pwd)/tmp-phpqa:/tmp" -w /project jakzal/phpqa:php8.1-alpine'
alias phpqa82='docker run --init -it --rm -v "$(pwd):/project" -v "$(pwd)/tmp-phpqa:/tmp" -w /project jakzal/phpqa:php8.2-alpine'
# DevOps / Admin
alias ssl_scan="docker run -ti --rm drwetter/testssl.sh"
alias tf="tofu"
alias terraform="tofu"
alias ks="kubeshell"
# Personal
alias awesome_test="Xephyr -screen 1440x1080 :5 & sleep 1 ; DISPLAY=:5 awesome"
alias clean_pa='pkill -U ${USER} pulseaudio; systemctl --user stop pulseaudio.socket && systemctl --user start pulseaudio.socket'
alias ssh_jump="ssh -qTNn"
alias x11_paste='sleep 2; xdotool type "$(xclip -o -selection clipboard)"'

View file

@ -0,0 +1,26 @@
# https://github.com/zsh-users/zsh-completions
fpath+=${ZSH_CUSTOM:-${ZSH:-~/.oh-my-zsh}/custom}/plugins/zsh-completions/src
fpath+=/usr/lib/x86_64-linux-gnu/rubygems-integration/3.1.0/gems/vagrant-2.3.8.dev/contrib/zsh
# shellcheck disable=SC1090
if command -v ansible &> /dev/null; then
source <(register-python-argcomplete ansible)
source <(register-python-argcomplete ansible-config)
source <(register-python-argcomplete ansible-console)
source <(register-python-argcomplete ansible-doc)
source <(register-python-argcomplete ansible-galaxy)
source <(register-python-argcomplete ansible-inventory)
source <(register-python-argcomplete ansible-playbook)
source <(register-python-argcomplete ansible-pull)
source <(register-python-argcomplete ansible-vault)
fi
command -v boundary &> /dev/null && complete -o nospace -C /usr/bin/boundary boundary || true
command -v molecule &> /dev/null && source <(_MOLECULE_COMPLETE=zsh_source molecule) || true
command -v helm &> /dev/null && source <(helm completion zsh) || true
command -v noseyparker &> /dev/null && source <(noseyparker shell-completions --shell zsh) || true
[ -s "$NVM_DIR/bash_completion" ] && source "$NVM_DIR/bash_completion"
[ -s "$HOME/.pyenv/completions/pyenv.zsh" ] && source "$HOME/.pyenv/completions/pyenv.zsh"
autoload -U compinit && compinit

View file

@ -0,0 +1,39 @@
# Versions
export RUBY_VERSION="3.1"
# Settings
export DEFAULT_USER=$(id -un)
export EDITOR="emacsclient -t"
export TERM=xterm-256color
export LESS='-R'
export LESSOPEN='|~/.lessfilter %s'
export DOTNET_CLI_TELEMETRY_OPTOUT=1
export SSH_KEY_PATH="${HOME}/.ssh/id_ed25519"
export PAGER=less
export GPG_TTY="${TTY}"
export PINENTRY_USER_DATA="connect/greeter"
# Switch the pinentry profile when the session arrives over SSH.
{ [ -n "${SSH_CLIENT}" ] || [ -n "${SSH_CONNECTION}" ] || [ -n "${SSH_TTY}" ]; } && export PINENTRY_USER_DATA="connect/ssh"
# Dev
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && source "$NVM_DIR/nvm.sh" # This loads nvm
# shellcheck disable=SC1090
[ -d "${HOME}/.local/share/gems/ruby/${RUBY_VERSION}" ] && export GEM_DIR="${HOME}/.local/share/gems/ruby/${RUBY_VERSION}"
# FIX: ~/.cargo/env is a file written by rustup, not a directory — the old
# `-d` test was always false so the cargo environment was never sourced.
[ -f "${HOME}/.cargo/env" ] && source "${HOME}/.cargo/env"
export PYENV_ROOT="$HOME/.pyenv"
if [[ -d $PYENV_ROOT/bin ]]; then
  export PATH="$PYENV_ROOT/bin:$PATH"
  # FIX: this file is sourced by zsh (via .zshrc), so request zsh init code;
  # `pyenv init - bash` emitted bash-flavored code into a zsh session.
  eval "$(pyenv init - zsh)"
  eval "$(pyenv virtualenv-init -)"
fi
# Path
export PATH="${PATH}:${HOME}/.local/bin"
[ -d "${HOME}/.nvm/current/bin" ] && export PATH="${PATH}:${HOME}/.nvm/current/bin"
[ -d "${GEM_DIR}/bin" ] && export PATH="${PATH}:${GEM_DIR}/bin"
[ -d "/usr/local/go/bin" ] && export PATH="${PATH}:/usr/local/go/bin"
[ -d "$HOME/.cargo/bin" ] && export PATH="${PATH}:$HOME/.cargo/bin"
[ -d "$HOME/.local/share/JetBrains/Toolbox/scripts" ] && export PATH="${PATH}:$HOME/.local/share/JetBrains/Toolbox/scripts"
[ -d "${HOME}/.config/emacs/bin" ] && export PATH="${PATH}:${HOME}/.config/emacs/bin"

View file

@ -0,0 +1,75 @@
function ydig() {
\dig +yaml "${@}" | yq '.[].message.response_message_data|{"answer": .ANSWER_SECTION, "status": .status}'
}
# Mount a remote directory locally over SSH.
# Usage: sshfs-target HOST REMOTE_DIR MOUNT_POINT
# Connects as $DEFAULT_USER, defaulting it to the current user if unset.
function sshfs-target() {
    local host="${1}"
    local remote_dir="${2}"
    local mount_point="${3}"
    sshfs "${DEFAULT_USER:=$(id -un)}@${host}:${remote_dir}" "${mount_point}"
}
# Ex: ssh-forward 0.0.0.0:3306 bastion.host protected-sql-server.remote:3306
function ssh-forward() {
local localIp="${1}"
shift
local jumpHost="${1}"
shift
local targetHost="${1}"
shift
ssh -qN -L "${localIp}":"${targetHost}" "${jumpHost}" "${@}"
}
# Run composer under a specific PHP version.
# Usage: composerX PHP_VERSION [composer args...]   e.g. composerX 8.2 install
function composerX() {
    local version="${1}"
    shift
    local php_bin composer_bin
    php_bin=$(command -v "php${version}")
    composer_bin=$(command -v composer)
    ${php_bin} "${composer_bin}" "${@}"
}
function kubeshell() {
if [ $# -lt 3 ]; then
print "Invalid parameters: kubeshell NAMESPACE CONTAINER POD "
return 1
fi
local kubeBinary
local namespace="${1}"
shift
local container="${1}"
shift
local pod="${1}"
shift
kubeBinary=$(command -v kubectl)
"${kubeBinary}" exec -i -t -n "${namespace}" "${pod}" -c "${container}" "${@}" -- sh -c "clear; (zsh || bash || ash || sh)"
}
# Countdown timer with a live elapsed-time display and an audible alarm.
# Usage: timer DURATION   (DURATION in `sleep` format, e.g. 2m, 90s)
# Tunables: TIMER_SOUND_FILE, TIMER_SOUND_REPEAT_COUNT, TIMER_NOTIF_STEP.
function timer() {
  if [ $# != 1 ]; then
    printf "Missing duration in \`sleep\` format (e.g. 2m)\n"
    return 1
  fi
  # Start `sleep` in the background inside a process substitution and read
  # its PID ($!) back on stdin.
  read -r < <(sleep "$1" & echo $!)
  local sleep_pid="${REPLY}"
  # On interrupt, kill the background sleep so it does not linger.
  trap 'kill "${sleep_pid}" ; return 0' SIGINT SIGTERM SIGQUIT
  while :
  do
    # `ps -p` fails once the sleep has exited -> the timer has elapsed.
    if ! output_cmd=$(ps -o etime= -o args= -p "${sleep_pid}"); then
      echo -ne "Buzz!\033[0K\r\n"
      repeat "${TIMER_SOUND_REPEAT_COUNT:-3}" aplay "${TIMER_SOUND_FILE:-${HOME}/Music/Sounds/timer_done.wav}" &> /dev/null
      return 0
    else
      # Rewrite the ps output into "Elapsed: X of Y" and redraw in place.
      echo -ne "${output_cmd}\033[0K\r" | sed -E 's/\s+([0-9:]+)\s+sleep\s+(.+)/Elapsed: \1 of \2/'
      sleep "${TIMER_NOTIF_STEP:-1}"
    fi
  done
  return 1
}

View file

@ -0,0 +1,75 @@
# Enable Powerlevel10k instant prompt. Should stay close to the top of ~/.zshrc.
# Initialization code that may require console input (password prompts, [y/n]
# confirmations, etc.) must go above this block; everything else may go below.
if [[ -r "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh" ]]; then
source "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh"
fi
# Path to your oh-my-zsh installation.
export ZSH="${HOME}/.oh-my-zsh"
export TERM="xterm-256color"
ZSH_THEME="powerlevel10k/powerlevel10k"
CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion.
# Case-sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to automatically update without prompting.
# DISABLE_UPDATE_PROMPT="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line if pasting URLs and other text is messed up.
# DISABLE_MAGIC_FUNCTIONS=true
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
# COMPLETION_WAITING_DOTS="true"
HIST_STAMPS="yyyy.mm.dd"
plugins=(
autopep8
aws
branch
colored-man-pages
colorize
composer
docker
docker-compose
git
kubectl
kube-ps1
npm
nvm
pass
pep8
pip
redis-cli
rsync
rust
terraform
)
source $ZSH/oh-my-zsh.sh
[ -f ~/.env ] && source ~/.env
[ -f ~/.zsh_exports ] && source ~/.zsh_exports
[ -f ~/.p10k.zsh ] && source ~/.p10k.zsh
[ -f ~/.zsh_functions ] && source ~/.zsh_functions
[ -f ~/.zsh_aliases ] && source ~/.zsh_aliases
[ -f ~/.zsh_completions ] && source ~/.zsh_completions

View file

@ -0,0 +1,2 @@
localhost

View file

@ -0,0 +1,10 @@
---
development_user_account: "{{ custom_base_user_account }}"
development_github_token: "{{ custom_github_token | default('') }}"
development_virtualbox_version: "7.0"
development_docker_userns: true
development_rust_enabled: true
development_docker_remap_user: "{{ development_user_account }}"
development_docker_remap_group: "{{ development_user_account }}"
development_docker_systemd_slice: docker.slice
development_virtualbox_enabled: false

View file

@ -0,0 +1,8 @@
---
- name: '[docker] restart service'
become: true
ansible.builtin.systemd_service:
name: docker
enabled: true
state: restarted
when: ansible_facts['virtualization_type'] is not match("docker")

View file

@ -0,0 +1,22 @@
---
galaxy_info:
author: Florian L.
namespace: nullified
description: Install tools for development environment
# issue_tracker_url: http://example.com/issue/tracker
license: MIT
min_ansible_version: 2.15
# https://galaxy.ansible.com/api/v1/platforms/
platforms:
- name: Debian
versions:
- bookworm
galaxy_tags:
- github
- assets
- utils
- system
dependencies: []

View file

@ -0,0 +1,342 @@
---
# Gather only the fact subsets this role consumes; cheap when facts were
# already collected by the play.
- name: '[setup] gather facts if not already done'
  ansible.builtin.setup:
    gather_subset:
      - distribution
      - distribution_release
      - kernel
      - virtualization_type
# Resolve the target account's passwd entry (field 4 is the home directory),
# used by later tasks to build per-user paths.
- name: '[home] get user account information'
  ansible.builtin.getent:
    database: passwd
    key: "{{ development_user_account }}"
    split: ":"
  changed_when: false
  when: ansible_facts['getent_passwd'] is undefined or development_user_account not in ansible_facts['getent_passwd']
- name: '[apt] install dependencies and tools'
become: true
ansible.builtin.apt:
update_cache: true
force_apt_get: true
cache_valid_time: 3600
pkg:
- apt-transport-https # docker-ce
- autoconf
- automake
- bc
- build-essential
- ca-certificates # docker-ce
- curl
- g++
- gcc
- git
- git-lfs
- gnupg2 # docker-ce
- jq
- libasound2 # draw.io
- libatspi2.0-0 # draw.io
- libcairo2
- libcairo2-dev
- libcurl4-openssl-dev
- libffi-dev
- libgtk-3-0 # draw.io
- libnotify4 # draw.io
- libnss3 # draw.io
- libsecret-1-0 # draw.io
- libssl-dev
- libtool
- libvirt0
- libvirt-clients
- libvirt-clients-qemu
- libvirt-daemon
- libvirt-daemon-config-network
- libvirt-daemon-driver-lxc
- libvirt-daemon-driver-vbox
- libvirt-daemon-system
- libvirt-daemon-system-systemd
- libxss1 # draw.io
- libxtst6 # draw.io
- linux-headers-amd64
- "linux-headers-{{ ansible_facts['kernel'] }}"
- make
- mariadb-client
- pipx
- postgresql-client
- python3-dev
- python3-pip
- python3-virtualenv
- shellcheck
- sqlite3
- vagrant
- valgrind
- xdg-utils # draw.io
state: present
- name: '[github] install tools'
become: true
nullified.infrastructure.github_artifact:
github_token: '{{ development_github_token }}'
asset_name: "{{ item.asset_name | default('') }}"
asset_type: "{{ item.asset_type }}"
cmds: "{{ item.cmds | default([]) }}"
creates: "{{ item.creates | default('') }}"
repository: "{{ item.repository }}"
version: "{{ item.version | default('') }}"
loop:
- asset_name: kind-linux-amd64
asset_type: release
repository: kubernetes-sigs/kind
creates: /usr/local/bin/kind
cmds:
- install --group=root --owner=root --mode=755 {asset_dirname}/{asset_filename} /usr/local/bin/kind
- rm {asset_dirname}/{asset_filename}
- asset_name: dive_{version}_linux_amd64.deb
asset_type: release
repository: wagoodman/dive
creates: /usr/bin/dive
cmds:
- dpkg -i {asset_dirname}/{asset_filename}
- asset_name: kubeconform-linux-amd64.tar.gz
asset_type: release
repository: yannh/kubeconform
creates: /usr/local/bin/kubeconform
cmds:
- tar -zxf {asset_dirname}/{asset_filename}
- install --group=root --mode=755 --owner=root kubeconform /usr/local/bin
- asset_name: git-delta_{version}_amd64.deb
asset_type: release
repository: dandavison/delta
creates: /usr/bin/delta
cmds:
- dpkg -i {asset_dirname}/{asset_filename}
- asset_name: docker-compose-linux-x86_64
asset_type: release
repository: docker/compose
creates: /usr/local/bin/docker-compose
cmds:
- install --group=root --mode=755 --owner=root {asset_dirname}/{asset_filename} /usr/local/bin/docker-compose
- test -d /usr/local/lib/docker/cli-plugins && (rm /usr/local/lib/docker/cli-plugins/docker-compose; ln -s /usr/local/bin/docker-compose /usr/local/lib/docker/cli-plugins) || true
- test -d /usr/local/libexec/docker/cli-plugins && (rm /usr/local/libexec/docker/cli-plugins/docker-compose; ln -s /usr/local/bin/docker-compose /usr/local/libexec/docker/cli-plugins) || true
- test -d /usr/lib/docker/cli-plugins && (rm /usr/lib/docker/cli-plugins/docker-compose; ln -s /usr/local/bin/docker-compose /usr/lib/docker/cli-plugins) || true
- test -d /usr/libexec/docker/cli-plugins && (rm /usr/libexec/docker/cli-plugins/docker-compose; ln -s /usr/local/bin/docker-compose /usr/libexec/docker/cli-plugins) || true
- asset_name: buildx-{version}.linux-amd64
asset_type: release
repository: docker/buildx
creates: /usr/local/bin/docker-buildx
cmds:
- install --group=root --mode=755 --owner=root {asset_dirname}/{asset_filename} /usr/local/bin/docker-buildx
- test -d /usr/local/lib/docker/cli-plugins && (rm /usr/local/lib/docker/cli-plugins/docker-compose; ln -s /usr/local/bin/docker-compose /usr/local/lib/docker/cli-plugins) || true
- test -d /usr/local/libexec/docker/cli-plugins && (rm /usr/local/libexec/docker/cli-plugins/docker-compose; ln -s /usr/local/bin/docker-compose /usr/local/libexec/docker/cli-plugins) || true
- test -d /usr/lib/docker/cli-plugins && (rm /usr/lib/docker/cli-plugins/docker-compose; ln -s /usr/local/bin/docker-compose /usr/lib/docker/cli-plugins) || true
- test -d /usr/libexec/docker/cli-plugins && (rm /usr/libexec/docker/cli-plugins/docker-compose; ln -s /usr/local/bin/docker-compose /usr/libexec/docker/cli-plugins) || true
- asset_name: drawio-amd64-{version}.deb
asset_type: release
repository: jgraph/drawio-desktop
creates: /usr/bin/drawio
cmds:
- dpkg -i {asset_dirname}/{asset_filename}
- asset_name: OpenLens-{version}.amd64.deb
asset_type: release
repository: MuhammedKalkan/OpenLens
creates: /usr/bin/open-lens
cmds:
- dpkg -i {asset_dirname}/{asset_filename}
- asset_name: stern_{version}_linux_amd64.tar.gz
asset_type: release
repository: stern/stern
creates: /usr/local/bin/stern
cmds:
- tar -zxf {asset_dirname}/{asset_filename}
- install --group=root --mode=755 --owner=root stern /usr/local/bin
- asset_name: tofu_{version}_amd64.deb
asset_type: release
repository: opentofu/opentofu
creates: /usr/bin/tofu
cmds:
- dpkg -i {asset_dirname}/{asset_filename}
- name: install kubectl
ansible.builtin.include_role:
name: nullified.infrastructure.kubectl
- name: install helm
ansible.builtin.include_role:
name: nullified.infrastructure.helm
- name: '[vbox] install Virtualbox'
become: true
when: development_virtualbox_enabled is truthy
block:
- name: '[vbox] add repository key'
ansible.builtin.get_url:
url: https://www.virtualbox.org/download/oracle_vbox_2016.asc
dest: /etc/apt/trusted.gpg.d/virtualbox.asc
mode: '0644'
- name: '[apt key] add repository'
ansible.builtin.apt_repository:
repo: "deb [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/virtualbox.asc] https://download.virtualbox.org/virtualbox/{{ ansible_facts['distribution'] | lower }} {{ ansible_facts['distribution_release']}} contrib"
state: present
filename: virtualbox
update_cache: true
- name: '[apt] install Virtualbox'
ansible.builtin.apt:
force_apt_get: true
cache_valid_time: 3600
pkg:
- "virtualbox-{{ development_virtualbox_version }}"
state: present
# Add Docker's official apt repository, install the engine, and deploy the
# daemon configuration (user namespace remapping + cgroup slice).
- name: '[custom] install Docker CE repository'
  become: true
  block:
    - name: '[apt key] add docker key'
      ansible.builtin.get_url:
        url: "https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }}/gpg"
        dest: /etc/apt/trusted.gpg.d/docker.asc
        mode: '0644'
    - name: '[apt key] add source'
      ansible.builtin.apt_repository:
        repo: "deb [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/docker.asc] https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }} {{ ansible_facts['distribution_release'] }} stable"
        state: present
        filename: docker
        update_cache: true
    - name: '[apt] install Docker CE'
      ansible.builtin.apt:
        update_cache: true
        force_apt_get: true
        cache_valid_time: 3600
        pkg:
          - docker-ce
          - docker-ce-cli
          - containerd.io
        state: present
    - name: '[docker] update daemon configuration'
      ansible.builtin.template:
        src: ../templates/docker-ce/daemon.json.j2
        dest: /etc/docker/daemon.json
        mode: '0644'
      when: development_docker_userns is truthy
      # FIX: the task previously carried two `notify:` keys — duplicate
      # mapping keys are invalid YAML and most parsers silently keep only
      # the last one. A single notify is sufficient.
      notify:
        - 'development : [docker] restart service'
- name: '[python] install tools'
become: true
become_user: "{{ development_user_account }}"
ansible.builtin.command:
cmd: "pipx install {{ item.cmd }}"
creates: "{{ ansible_facts['getent_passwd'][development_user_account][4] }}/.local/bin/{{ item.creates }}"
loop:
- { "cmd": "black", "creates": "black" }
- { "cmd": "flake8", "creates": "flake8" }
- name: '[python] install pipx packages dependencies'
become: true
become_user: "{{ development_user_account }}"
ansible.builtin.command:
cmd: "pipx inject {{ item.venv }} {{ item.extension }}"
creates:
"{{ ansible_facts['getent_passwd'][development_user_account][4] }}/.local/pipx/venvs/{{ item.venv }}/lib/python3.11/site-packages/{{ item.creates }}"
loop:
- venv: "flake8"
extension: "flake8-annotations-complexity"
creates: "flake8_annotations_complexity"
- venv: "flake8"
extension: "flake8-bandit"
creates: "flake8_bandit.py"
- venv: "flake8"
extension: "flake8-breakpoint"
creates: "flake8_breakpoint"
- venv: "flake8"
extension: "flake8-bugbear"
creates: "bugbear.py"
- venv: "flake8"
extension: "flake8-builtins"
creates: "flake8_builtins.py"
- venv: "flake8"
extension: "flake8-comprehensions"
creates: "flake8_comprehensions"
- venv: "flake8"
extension: "flake8-docstrings"
creates: "flake8_docstrings.py"
- venv: "flake8"
extension: "flake8-eradicate"
creates: "flake8_eradicate.py"
- venv: "flake8"
extension: "flake8-expression-complexity"
creates: "flake8_expression_complexity"
- venv: "flake8"
extension: "flake8-if-expr"
creates: "flake8_if_expr"
- venv: "flake8"
extension: "flake8-isort"
creates: "flake8_isort.py"
- venv: "flake8"
extension: "flake8-logging-format"
creates: "logging_format"
- venv: "flake8"
extension: "flake8-print"
creates: "flake8_print.py"
- venv: "flake8"
extension: "flake8-pytest"
creates: "flake8_pytest.py"
- venv: "flake8"
extension: "flake8-pytest-style"
creates: "flake8_pytest_style"
- venv: "flake8"
extension: "flake8-requirements"
creates: "flake8_requirements"
- venv: "flake8"
extension: "flake8-return"
creates: "flake8_return"
- venv: "flake8"
extension: "flake8-rst-docstrings"
creates: "flake8_rst_docstrings.py"
- venv: "flake8"
extension: "pep8-naming"
creates: "pep8ext_naming.py"
# Probe for an existing rustc so the installer only runs once per user.
# FIX: use `stat` for the existence check — the previous `ansible.builtin.file`
# call relied on `failed_when: false` to mask a genuine module failure.
- name: '[rust] check if rust is already installed'
  ansible.builtin.stat:
    path: "{{ ansible_facts['getent_passwd'][development_user_account][4] }}/.cargo/bin/rustc"
  register: rustc_stat
  when: development_rust_enabled is truthy
- name: '[rust] rust'
  become: true
  # Jinja `and` short-circuits, so `rustc_stat.stat` is only evaluated when
  # the probe above actually ran.
  when: development_rust_enabled is truthy and not rustc_stat.stat.exists
  block:
    - name: '[rust] download installer'
      ansible.builtin.get_url:
        url: https://sh.rustup.rs
        dest: /tmp/rustup.sh
        mode: '0750'
        owner: "{{ development_user_account }}"
        group: "{{ development_user_account }}"
    - name: '[rust] install rust toolchain'
      become_user: "{{ development_user_account }}"
      ansible.builtin.command:
        cmd: /tmp/rustup.sh -qy
        # idempotence guard mirroring the probe above
        creates: "{{ ansible_facts['getent_passwd'][development_user_account][4] }}/.cargo/bin/rustc"
# Grant the development account access to container and virtualization
# facilities; `append: true` preserves the user's existing groups.
- name: '[user] add default user to groups'
  become: true
  ansible.builtin.user:
    name: "{{ development_user_account }}"
    append: true
    groups:
      - docker
      - kvm
      - libvirt
      - libvirt-qemu
    state: present

View file

@ -0,0 +1,4 @@
{
"userns-remap": "{{ development_docker_remap_user }}:{{ development_docker_remap_group }}",
"cgroup-parent": "{{ development_docker_systemd_slice }}"
}

View file

@ -0,0 +1,2 @@
localhost

View file

@ -0,0 +1,3 @@
---
gaming_user_account: "{{ custom_base_user_account }}"
gaming_github_token: "{{ custom_github_token | default('') }}"

View file

@ -0,0 +1,21 @@
---
galaxy_info:
author: Florian L.
namespace: nullified
description: Install games and gaming related software
# issue_tracker_url: http://example.com/issue/tracker
license: MIT
min_ansible_version: 2.15
# https://galaxy.ansible.com/api/v1/platforms/
platforms:
- name: Debian
versions:
- bookworm
galaxy_tags:
- github
- steam
- games
dependencies: []

View file

@ -0,0 +1,56 @@
---
# Enable the i386 dpkg architecture, add Valve's repository, and install the
# Steam launcher with its 32/64-bit Mesa dependencies.
- name: '[games] install Steam'
  become: true
  block:
    - name: '[system] get existing architectures'
      ansible.builtin.command:
        cmd: dpkg --print-foreign-architectures
      register: dpkg_archs
      changed_when: false
      # NOTE(review): `dpkg_archs` is normally undefined here, so this guard
      # is almost always true — kept for safety; confirm it is intentional.
      when: dpkg_archs is not defined
    - name: '[steam] enable i386 architecture'
      # FIX: use the FQCN like every other task in this file.
      ansible.builtin.command:
        cmd: dpkg --add-architecture i386
      # FIX: single quotes — in a YAML double-quoted scalar, "\b" is the
      # backspace escape, not the regex word-boundary the author intended.
      when: dpkg_archs.stdout is not regex('(^|\b)i386($|\b)', multiline = true)
    - name: '[apt key] add Steam GPG key'
      ansible.builtin.get_url:
        url: "https://repo.steampowered.com/steam/archive/stable/steam.gpg"
        dest: /usr/share/keyrings/steam.gpg
        mode: '0644'
    - name: '[apt key] add source'
      ansible.builtin.apt_repository:
        repo: "{{ item }} [arch=amd64,i386 signed-by=/usr/share/keyrings/steam.gpg] https://repo.steampowered.com/steam/ stable steam"
        state: present
        filename: steam-stable
        update_cache: true
      loop:
        - deb
        - deb-src
    - name: '[steam] install dependencies'
      ansible.builtin.apt:
        update_cache: true
        force_apt_get: true
        cache_valid_time: 3600
        pkg:
          - libgl1-mesa-dri:amd64
          - libgl1-mesa-dri:i386
          - libgl1-mesa-glx:amd64
          - libgl1-mesa-glx:i386
          - steam-launcher
        state: present
- name: '[games] install Heroic Games Launcher'
become: true
block:
- name: '[hgl] fetch assets from github'
nullified.infrastructure.github_artifact:
github_token: '{{ gaming_github_token }}'
asset_name: heroic_{version}_amd64.deb
asset_type: release
repository: Heroic-Games-Launcher/HeroicGamesLauncher
creates: /usr/bin/heroic
cmds:
- dpkg -i {asset_dirname}/{asset_filename}

View file

@ -0,0 +1,2 @@
localhost

View file

@ -0,0 +1,29 @@
Helm
=========
This role handles the installation of the Helm binary and optionally supports version pinning or unconditional upgrade.
Requirements
------------
Only tested and used on Debian (uses the `ansible.builtin.apt` module).
Role Variables
--------------
### `helm_binary_path`
Path where the Helm binary is installed (no discovery is performed so pre-existing binaries will remain untouched).
**Default value**: `helm_binary_path: /usr/local/bin/helm`
### `helm_install_version`
Version of Helm to install. It allows multiple values:
- empty: will install the binary if not already present, else nothing is changed,
- 'vX.Y.Z': will install the specified version (e.g. `v3.15.2`), upgrading / downgrading the local binary if required,
- latest: will install the latest available version unless the local binary is already up-to-date.
**Default value**: `helm_install_version: latest`
License
-------
MIT

View file

@ -0,0 +1,3 @@
---
helm_binary_path: '/usr/local/bin/helm'
helm_install_version: latest

View file

@ -0,0 +1,2 @@
---
# handlers file for helm

View file

@ -0,0 +1,15 @@
galaxy_info:
author: Florian L.
description: Install helm binary
issue_tracker_url: https://gitlab.0x2a.ninja/infrastructure/configuration
license: MIT
min_ansible_version: 2.15
platforms:
- name: Debian
versions:
- 11
- 12
galaxy_tags:
- helm
- kubernetes
dependencies: []

View file

@ -0,0 +1,64 @@
---
- name: install required packages
become: true
ansible.builtin.apt:
update_cache: true
force_apt_get: true
cache_valid_time: 3600
pkg:
- curl
- jq
- name: find if binary is already installed
ansible.builtin.file:
path: '{{ helm_binary_path }}'
register: helm_stat
changed_when: false
failed_when: false
- name: find current installed version
when: helm_stat.state != "absent"
ansible.builtin.command: "{{ helm_binary_path }} version --template='{% raw %}{{.Version}}{% endraw %}'"
changed_when: false
register: helm_local_version_exec
- name: find latest available version
connection: local
ansible.builtin.shell: |-
curl -sSL https://api.github.com/repos/helm/helm/releases/latest | jq -r '.tag_name'
register: latest_helm_version_exec
when: helm_install_version is falsy or helm_install_version == "latest"
changed_when: false
- name: set helm facts
ansible.builtin.set_fact:
helm_latest_version_available: '{{ latest_helm_version_exec.get("stdout", "") if latest_helm_version_exec is defined }}'
helm_local_version: '{{ helm_local_version_exec.get("stdout", "") if helm_local_version_exec is defined }}'
helm_target_install_version: '{{ helm_install_version if helm_install_version != "latest" else latest_helm_version_exec.get("stdout", "") }}'
- name: install binary
become: true
when: helm_stat.state == "absent" or (helm_local_version != helm_target_install_version and helm_install_version is not falsy)
block:
- name: create temporary directory
ansible.builtin.tempfile:
state: directory
register: tmp_dir
changed_when: false
- name: retrieve archive
ansible.builtin.unarchive:
remote_src: true
src: "https://get.helm.sh/helm-{{ helm_install_version if helm_install_version is not match('^$|^latest$') else helm_latest_version_available }}-linux-amd64.tar.gz"
dest: '{{ tmp_dir.path }}'
- name: install binary
ansible.builtin.copy:
remote_src: true
src: '{{ tmp_dir.path }}/linux-amd64/helm'
dest: '{{ helm_binary_path }}'
owner: root
group: root
mode: '0755'
- name: cleanup
ansible.builtin.file:
path: '{{ tmp_dir.path }}'
state: absent

View file

@ -0,0 +1,2 @@
localhost

View file

@ -0,0 +1,5 @@
---
- hosts: localhost
remote_user: root
roles:
- helm

View file

@ -0,0 +1,2 @@
---
# vars file for helm

View file

@ -0,0 +1,10 @@
---
k3s_cluster_name: default
k3s_cluster_role: server
k3s_kube_context: default
k3s_extra_args: ''
k3s_operator_ips: []
k3s_cluster_cidr: '10.42.0.0/16'
k3s_service_cidr: '10.43.0.0/16'
k3s_cluster_additional_helm_charts: []
k3s_cluster_helm_customizations: []

View file

@ -0,0 +1,14 @@
---
- name: restart firewall service
become: true
ansible.builtin.systemd_service:
name: nftables.service
enabled: true
state: restarted
- name: restart k3s service
become: true
ansible.builtin.systemd_service:
name: k3s.service
enabled: true
state: restarted

View file

@ -0,0 +1,20 @@
---
galaxy_info:
author: Florian L.
namespace: nullified
description: Install and configure K3S and related tools
# issue_tracker_url: http://example.com/issue/tracker
license: MIT
min_ansible_version: 2.15
# https://galaxy.ansible.com/api/v1/platforms/
platforms:
- name: Debian
versions:
- bookworm
galaxy_tags:
- kubernetes
- k3s
dependencies: []

View file

@ -0,0 +1,18 @@
---
# Agent-role task file — placeholder, not yet functional.
# TODO: implement
# TODO: disable swap
# NOTE(review): `failed_when: true` makes the first task always fail, so
# every task below it is currently unreachable; drop the guard once the
# agent flow is implemented.
- name: operation not supported
  ansible.builtin.debug:
    msg: Operation currently not supported
  failed_when: true
- name: setup firewall rules
  become: true
  ansible.builtin.template:
    src: ../templates/nftables.d/k3s_agents.nft.j2
    dest: /etc/nftables.d/k3s_agents.nft
    mode: '0600'
  notify:
    - 'k3s : restart firewall service'
    - 'k3s : restart k3s service'

View file

@ -0,0 +1,55 @@
---
# Role entry point: groups hosts per cluster/role, derives membership and
# firewall facts, temporarily grants the operator passwordless sudo, then
# dispatches to the server or agent task file before restoring sudoers.
- name: group by cluster name
  ansible.builtin.group_by:
    key: "k3s_clusters_{{ k3s_cluster_name }}_{{ k3s_cluster_role }}"
  changed_when: false
- name: determine cluster type and members
  ansible.builtin.set_fact:
    k3s_cluster_type: "{{ 'ha' if groups['k3s_clusters_' ~ k3s_cluster_name ~ '_' ~ k3s_cluster_role] | length > 1 else 'single' }}"
    k3s_cluster_servers: "{{ groups['k3s_clusters_' ~ k3s_cluster_name ~ '_server'] }}"
    k3s_cluster_agents: "{{ groups['k3s_clusters_' ~ k3s_cluster_name ~ '_agent'] | default([]) }}"
    k3s_nft_servers4: "{{ groups['k3s_clusters_' ~ k3s_cluster_name ~ '_server'] | default([]) | map('extract', hostvars, ['k3s_cluster_ip']) | ansible.utils.ipv4 }}"
    k3s_nft_agents4: "{{ groups['k3s_clusters_' ~ k3s_cluster_name ~ '_agent'] | default([]) | map('extract', hostvars, ['k3s_cluster_ip']) | ansible.utils.ipv4 }}"
    k3s_nft_servers6: "{{ groups['k3s_clusters_' ~ k3s_cluster_name ~ '_server'] | default([]) | map('extract', hostvars, ['k3s_cluster_ip']) | ansible.utils.ipv6 }}"
    k3s_nft_agents6: "{{ groups['k3s_clusters_' ~ k3s_cluster_name ~ '_agent'] | default([]) | map('extract', hostvars, ['k3s_cluster_ip']) | ansible.utils.ipv6 }}"
    k3s_nft_operators4: "{{ k3s_operator_ips | ansible.utils.ipv4 }}"
    k3s_nft_operators6: "{{ k3s_operator_ips | ansible.utils.ipv6 }}"
  changed_when: false
- name: setup permissions
  become: true
  block:
    - name: install sudo
      ansible.builtin.apt:
        update_cache: true
        force_apt_get: true
        cache_valid_time: 3600
        pkg: [ sudo ]
        state: present
    - name: add operator to sudoers
      ansible.builtin.lineinfile:
        backup: true
        path: /etc/sudoers
        # Single quotes keep `\b` a literal regex word boundary; in a
        # double-quoted YAML scalar `\b` is the backspace escape (0x08)
        # and the pattern would never match.
        regexp: '^{{ k3s_operator_username }}\b.+$'
        line: "{{ k3s_operator_username }} ALL=(ALL) NOPASSWD: ALL"
        state: present
        # Refuse to install a syntactically broken sudoers file.
        validate: /usr/sbin/visudo -cf %s
      register: backup_sudoers
      changed_when: false
- name: setup server role
  ansible.builtin.include_tasks: server.yml
  tags: [helm, opentofu]
  when: k3s_cluster_role is match("server")
- name: setup agent role
  ansible.builtin.include_tasks: agent.yml
  tags: [helm, opentofu]
  when: k3s_cluster_role is match("agent")
- name: reset permissions
  become: true
  ansible.builtin.command:
    cmd: "mv {{ backup_sudoers.backup }} /etc/sudoers"
    removes: "{{ backup_sudoers.backup }}"
  # lineinfile only returns a `backup` key when it actually changed the
  # file; default to '' so an unchanged run does not raise on the lookup.
  when: backup_sudoers.backup | default('') | length > 0
  changed_when: false

View file

@ -0,0 +1,77 @@
- name: deploy OpenTofu resource
  connection: local
  block:
    - name: set TF resource facts
      ansible.builtin.set_fact:
        k3s_tf_safe_item_name: "{{ item.name | regex_replace('[^\\w]', '') }}"
        k3s_tf_project_git_path: "{{ provisioner_facts.artifacts_dir }}/{{ item.name | regex_replace('[^\\w]', '') }}.git"
      changed_when: false
    - name: check pre-existing TF state file
      # stat (not file) is the read-only way to probe for an existing
      # local state file; the result gates the copy/backup tasks below.
      ansible.builtin.stat:
        path: "{{ provisioner_facts.k8s_states_dir }}/{{ k3s_tf_safe_item_name }}.tfstate"
      register: tfstate_file_info
      when: item.get("backend_override", false) is falsy
    - name: fetch git repository
      ansible.builtin.git:
        repo: '{{ item.git_repository }}'
        dest: '{{ k3s_tf_project_git_path }}'
        version: '{{ item.git_revision }}'
        force: true
    - name: prepare variables file
      ansible.builtin.copy:
        content: '{{ item.tfvars_content }}'
        dest: '{{ k3s_tf_project_git_path }}/{{ item.terraform_dir }}/terraform.tfvars'
        mode: '0600'
        force: true
    - name: prepare tfstate file
      ansible.builtin.copy:
        src: '{{ provisioner_facts.k8s_states_dir }}/{{ k3s_tf_safe_item_name }}.tfstate'
        dest: '{{ k3s_tf_project_git_path }}/{{ item.terraform_dir }}/terraform.tfstate'
        force: true
        mode: '0600'
      # Skip on the very first deployment, when no source state exists yet.
      when: item.get("backend_override", false) is falsy and tfstate_file_info.stat.exists
    - name: dump custom backend override
      ansible.builtin.copy:
        content: '{{ item.backend_override }}'
        dest: '{{ k3s_tf_project_git_path }}/{{ item.terraform_dir }}/backend_override.tf'
        mode: '0600'
      when: item.get("backend_override", false) is truthy
      changed_when: false
    - name: deploy k8s resources
      community.general.terraform:
        binary_path: "{{ provisioner_facts.tofu_binary_path }}"
        project_path: '{{ k3s_tf_project_git_path }}/{{ item.terraform_dir }}'
        provider_upgrade: true
        force_init: true
      environment: '{{ item.get("backend_env", {}) }}'
    - name: cleanup override file
      ansible.builtin.file:
        path: '{{ k3s_tf_project_git_path }}/{{ item.terraform_dir }}/backend_override.tf'
        state: absent
      when: item.get("backend_override", false) is truthy
      changed_when: false
    - name: backup source state file
      ansible.builtin.copy:
        src: '{{ provisioner_facts.k8s_states_dir }}/{{ k3s_tf_safe_item_name }}.tfstate'
        dest: '{{ provisioner_facts.k8s_states_dir }}/{{ k3s_tf_safe_item_name }}.tfstate.previous'
        force: true
        mode: '0600'
      # Nothing to back up on the first run.
      when: item.get("backend_override", false) is falsy and tfstate_file_info.stat.exists
    - name: update source tfstate file
      ansible.builtin.copy:
        src: '{{ k3s_tf_project_git_path }}/{{ item.terraform_dir }}/terraform.tfstate'
        dest: '{{ provisioner_facts.k8s_states_dir }}/{{ k3s_tf_safe_item_name }}.tfstate'
        force: true
        mode: '0600'
      when: item.get("backend_override", false) is falsy

View file

@ -0,0 +1,75 @@
---
# Server-role provisioning: firewall rules, k3s installation via k3sup,
# then Helm manifests/charts and OpenTofu add-ons.
# TODO: disable swap
- name: setup firewall rules
  become: true
  ansible.builtin.template:
    src: ../templates/nftables.d/k3s_servers.nft.j2
    dest: /etc/nftables.d/k3s_servers.nft
    mode: '0600'
  notify:
    - 'k3s : restart firewall service'
    - 'k3s : restart k3s service'
- name: flush handlers
  # Apply firewall rules now so the k3sup traffic below is allowed through.
  ansible.builtin.meta: flush_handlers
- name: install K3S cluster, single server
  connection: local
  ansible.builtin.command:
    argv:
      - "{{ provisioner_facts.k3sup_binary_path }}"
      - install
      - "--merge"
      - "--local-path"
      - "{{ provisioner_facts.kubeconfig_repository }}/{{ k3s_cluster_name }}.kubeconfig"
      - "--context"
      - "{{ k3s_kube_context }}"
      - "--k3s-extra-args"
      - "{{ k3s_extra_args }}"
      - "--user"
      - "{{ k3s_operator_username }}"
      - "--ssh-key"
      - "{{ k3s_operator_ssh_key_path }}"
      - "--host"
      - "{{ inventory_hostname }}.{{ global_dns_domainname }}"
  register: k3s_init
  when: k3s_cluster_type is match("single")
  # k3sup prints this marker when the service was already up to date.
  changed_when: not "No change detected so skipping service start" in k3s_init.stdout
- name: install K3S cluster, HA
  connection: local
  ansible.builtin.debug:
    msg: Not supported yet
  when: k3s_cluster_type is match("ha")
  # Deliberate hard stop: HA provisioning is not implemented yet.
  failed_when: true
- name: install K3S Helm customizations
  become: true
  ansible.builtin.copy:
    dest: "{{ k3s_manifests_dir }}/{{ item.name | regex_replace('[^\\w]', '') }}.yaml"
    content: '{{ item.content }}'
    mode: '0600'
    owner: root
    group: root
  loop: '{{ k3s_cluster_helm_customizations }}'
  loop_control:
    label: '{{ item.name }}'
  # Manifests may embed credentials; keep their content out of the logs.
  no_log: true
- name: install Helm charts
  connection: local
  kubernetes.core.helm: '{{ item }}'
  loop: '{{ k3s_cluster_additional_helm_charts }}'
  loop_control:
    label: '{{ item.release_name }}'
  no_log: true
  tags: [helm]
- name: install OpenTofu resources
  ansible.builtin.include_tasks:
    file: opentofu.yml
    apply:
      tags: [opentofu]
  loop: '{{ k3s_cluster_additional_tf_resources }}'
  loop_control:
    label: '{{ item.name }}'
  tags: [opentofu]

View file

@ -0,0 +1,22 @@
# K3S source: agents
table inet filter {
chain input {
# inter-node communication
## UDP
{%+ if k3s_nft_servers4 or k3s_nft_agents4 %}ip saddr { {{ (k3s_nft_servers4 + k3s_nft_agents4) | join(', ') }} } udp dport { 8472, 51820 } accept{%- endif +%}
{%+ if k3s_nft_servers6 or k3s_nft_agents6 %}ip6 saddr { {{ (k3s_nft_servers6 + k3s_nft_agents6) | join(', ') }} } udp dport { 8472, 51821 } accept{%- endif +%}
## TCP
{%+ if k3s_nft_servers4 or k3s_nft_agents4 %}ip saddr { {{ (k3s_nft_servers4 + k3s_nft_agents4) | join(', ') }} } tcp dport { 5001, 6443, 10250 } accept{%- endif +%}
{%+ if k3s_nft_servers6 or k3s_nft_agents6 %}ip6 saddr { {{ (k3s_nft_servers6 + k3s_nft_agents6) | join(', ') }} } tcp dport { 5001, 6443, 10250 } accept{%- endif +%}
}
chain output {
# inter-node communication
## UDP
{%+ if k3s_nft_servers4 or k3s_nft_agents4 %}ip daddr { {{ (k3s_nft_servers4 + k3s_nft_agents4) | join(', ') }} } udp dport { 8472, 51820 } accept{%- endif +%}
{%+ if k3s_nft_servers6 or k3s_nft_agents6 %}ip6 daddr { {{ (k3s_nft_servers6 + k3s_nft_agents6) | join(', ') }} } udp dport { 8472, 51821 } accept{%- endif +%}
## TCP
{%+ if k3s_nft_servers4 or k3s_nft_agents4 %}ip daddr { {{ (k3s_nft_servers4 + k3s_nft_agents4) | join(', ') }} } tcp dport { 5001, 6443, 10250 } accept{%- endif +%}
{%+ if k3s_nft_servers6 or k3s_nft_agents6 %}ip6 daddr { {{ (k3s_nft_servers6 + k3s_nft_agents6) | join(', ') }} } tcp dport { 5001, 6443, 10250 } accept{%- endif +%}
}
}

View file

@ -0,0 +1,44 @@
# K3S source: servers
table inet filter {
    chain input {
        # operators access (kube-api); note ip6 for the IPv6 operator list
        {%+ if k3s_nft_operators4 %}ip saddr { {{ k3s_nft_operators4 | join(', ') }} } tcp dport { 6443 } accept{%- endif +%}
        {%+ if k3s_nft_operators6 %}ip6 saddr { {{ k3s_nft_operators6 | join(', ') }} } tcp dport { 6443 } accept{%- endif +%}
        # required only for HA with embedded etcd
        {%+ if k3s_nft_servers4 %}ip saddr { {{ k3s_nft_servers4 | join(', ') }} } tcp dport { 2379, 2380 } accept{%- endif +%}
        {%+ if k3s_nft_servers6 %}ip6 saddr { {{ k3s_nft_servers6 | join(', ') }} } tcp dport { 2379, 2380 } accept{%- endif +%}
        # inter-node communication
        ## UDP
        {%+ if k3s_nft_servers4 or k3s_nft_agents4 %}ip saddr { {{ (k3s_nft_servers4 + k3s_nft_agents4) | join(', ') }} } udp dport { 8472, 51820 } accept{%- endif +%}
        {%+ if k3s_nft_servers6 or k3s_nft_agents6 %}ip6 saddr { {{ (k3s_nft_servers6 + k3s_nft_agents6) | join(', ') }} } udp dport { 8472, 51821 } accept{%- endif +%}
        ## TCP
        {%+ if k3s_nft_servers4 or k3s_nft_agents4 %}ip saddr { {{ (k3s_nft_servers4 + k3s_nft_agents4) | join(', ') }} } tcp dport { 5001, 6443, 10250 } accept{%- endif +%}
        {%+ if k3s_nft_servers6 or k3s_nft_agents6 %}ip6 saddr { {{ (k3s_nft_servers6 + k3s_nft_agents6) | join(', ') }} } tcp dport { 5001, 6443, 10250 } accept{%- endif +%}
        {%+ if k3s_cluster_cidr | ansible.utils.ipv4 %}ip saddr {{ k3s_cluster_cidr }} meta l4proto { tcp, udp } accept{%- endif +%}
        {%+ if k3s_cluster_cidr | ansible.utils.ipv6 %}ip6 saddr {{ k3s_cluster_cidr }} meta l4proto { tcp, udp } accept{%- endif +%}
        {%+ if k3s_service_cidr | ansible.utils.ipv4 %}ip saddr {{ k3s_service_cidr }} meta l4proto { tcp, udp } accept{%- endif +%}
        {%+ if k3s_service_cidr | ansible.utils.ipv6 %}ip6 saddr {{ k3s_service_cidr }} meta l4proto { tcp, udp } accept{%- endif +%}
    }
    chain output {
        # required only for HA with embedded etcd
        {%+ if k3s_nft_servers4 %}ip daddr { {{ k3s_nft_servers4 | join(', ') }} } tcp dport { 2379, 2380 } accept{%- endif +%}
        {%+ if k3s_nft_servers6 %}ip6 daddr { {{ k3s_nft_servers6 | join(', ') }} } tcp dport { 2379, 2380 } accept{%- endif +%}
        # inter-node communication
        ## UDP
        {%+ if k3s_nft_servers4 or k3s_nft_agents4 %}ip daddr { {{ (k3s_nft_servers4 + k3s_nft_agents4) | join(', ') }} } udp dport { 8472, 51820 } accept{%- endif +%}
        {%+ if k3s_nft_servers6 or k3s_nft_agents6 %}ip6 daddr { {{ (k3s_nft_servers6 + k3s_nft_agents6) | join(', ') }} } udp dport { 8472, 51821 } accept{%- endif +%}
        ## TCP
        {%+ if k3s_nft_servers4 or k3s_nft_agents4 %}ip daddr { {{ (k3s_nft_servers4 + k3s_nft_agents4) | join(', ') }} } tcp dport { 5001, 6443, 10250 } accept{%- endif +%}
        {%+ if k3s_nft_servers6 or k3s_nft_agents6 %}ip6 daddr { {{ (k3s_nft_servers6 + k3s_nft_agents6) | join(', ') }} } tcp dport { 5001, 6443, 10250 } accept{%- endif +%}
        {%+ if k3s_cluster_cidr | ansible.utils.ipv4 %}ip daddr {{ k3s_cluster_cidr }} meta l4proto { tcp, udp } accept{%- endif +%}
        {%+ if k3s_cluster_cidr | ansible.utils.ipv6 %}ip6 daddr {{ k3s_cluster_cidr }} meta l4proto { tcp, udp } accept{%- endif +%}
        {%+ if k3s_service_cidr | ansible.utils.ipv4 %}ip daddr {{ k3s_service_cidr }} meta l4proto { tcp, udp } accept{%- endif +%}
        {%+ if k3s_service_cidr | ansible.utils.ipv6 %}ip6 daddr {{ k3s_service_cidr }} meta l4proto { tcp, udp } accept{%- endif +%}
    }
}

View file

@ -0,0 +1,2 @@
localhost

View file

@ -0,0 +1,2 @@
---
k3s_manifests_dir: '/var/lib/rancher/k3s/server/manifests'

View file

@ -0,0 +1,29 @@
Kubectl
=========
This role handles the installation of the kubectl binary and optionally supports version pinning or unconditional upgrade.
Requirements
------------
Only tested and used on Debian (uses the `ansible.builtin.apt` module).
Role Variables
--------------
### `kubectl_binary_path`
Path where the kubectl binary is installed (no discovery is performed so pre-existing binaries will remain untouched).
**Default value**: `kubectl_binary_path: /usr/local/bin/kubectl`
### `kubectl_install_version`
Version of kubectl to install. It allows multiple values:
- empty: will install the binary if not already present, else nothing is changed,
- 'vX.Y.Z': will install the specified version (e.g. `v1.29.2`), upgrading / downgrading the local binary if required,
- latest: will install the latest available version unless the local binary is already up-to-date.
**Default value**: `kubectl_install_version: latest`
License
-------
MIT

View file

@ -0,0 +1,3 @@
---
# Filesystem location for the managed kubectl binary.
kubectl_binary_path: '/usr/local/bin/kubectl'
# "latest", a pinned tag such as "v1.29.2", or empty (install only if missing).
kubectl_install_version: latest

View file

@ -0,0 +1,2 @@
---
# handlers file for kubectl

View file

@ -0,0 +1,15 @@
---
galaxy_info:
  author: Florian L.
  # Keep the namespace consistent with the other role meta files of the
  # nullified.infrastructure collection.
  namespace: nullified
  description: Install kubectl binary
  issue_tracker_url: https://gitlab.0x2a.ninja/infrastructure/configuration
  license: MIT
  min_ansible_version: 2.15
  platforms:
    - name: Debian
      versions:
        - 11
        - 12
  galaxy_tags:
    - kubectl
    - kubernetes
dependencies: []

View file

@ -0,0 +1,47 @@
---
# Installs kubectl, honouring kubectl_install_version (empty / "latest" /
# pinned tag) and kubectl_binary_path.
- name: install required packages
  become: true
  ansible.builtin.apt:
    update_cache: true
    force_apt_get: true
    cache_valid_time: 3600
    pkg:
      - curl
      - jq
- name: find if binary is already installed
  # stat is the read-only probe; the file module would try to manage the path.
  ansible.builtin.stat:
    path: '{{ kubectl_binary_path }}'
  register: kubectl_stat
- name: find current installed version
  when: kubectl_stat.stat.exists
  # shell (not command) is required for the pipe into jq.
  ansible.builtin.shell: "{{ kubectl_binary_path }} version --client=true --output=json | jq --raw-output '.clientVersion.gitVersion'"
  changed_when: false
  register: kubectl_local_version_exec
- name: find latest available version
  connection: local
  ansible.builtin.command: curl -L -s https://dl.k8s.io/release/stable.txt
  register: latest_kubectl_version_exec
  when: kubectl_install_version is falsy or kubectl_install_version == "latest"
  changed_when: false
- name: set kubectl facts
  ansible.builtin.set_fact:
    kubectl_latest_version_available: '{{ latest_kubectl_version_exec.get("stdout", "") if latest_kubectl_version_exec is defined }}'
    kubectl_local_version: '{{ kubectl_local_version_exec.get("stdout", "") if kubectl_local_version_exec is defined }}'
    kubectl_target_install_version: '{{ kubectl_install_version if kubectl_install_version != "latest" else latest_kubectl_version_exec.get("stdout", "") }}'
- name: install binary
  become: true
  when: not kubectl_stat.stat.exists or (kubectl_local_version != kubectl_target_install_version and kubectl_install_version is not falsy)
  ansible.builtin.get_url:
    url: "https://dl.k8s.io/release/{{ kubectl_target_install_version }}/bin/linux/amd64/kubectl"
    # Honour the configurable path instead of a hard-coded /usr/local/bin.
    dest: '{{ kubectl_binary_path }}'
    owner: root
    group: root
    mode: '0755'
    force: true

View file

@ -0,0 +1,2 @@
localhost

View file

@ -0,0 +1,5 @@
---
- hosts: localhost
remote_user: root
roles:
- kubectl

View file

@ -0,0 +1,2 @@
---
# vars file for kubectl

View file

@ -0,0 +1,12 @@
---
# Install the server and/or client packages.
mariadb_install_server: true
mariadb_install_client: false
# Restrict input/output nftables rules to the allow-lists below
# (false = accept the MariaDB port from/to anywhere).
mariadb_nft_filter_input: false
mariadb_nft_filter_output: false
mariadb_nft_allowed_ingress_list: []
mariadb_nft_allowed_egress_list: []
mariadb_server_port: 3306
# Run the hardening SQL (mysql_secure_installation equivalent) and/or the
# custom SQL below on configuration changes.
mariadb_server_run_init_sql: false
mariadb_server_run_custom_sql: false
mariadb_server_custom_sql: ""
# false, or a list of addresses rendered into bind-address.
mariadb_server_bind_addresses: false

View file

@ -0,0 +1,7 @@
---
- name: restart mariadb service
become: true
ansible.builtin.systemd_service:
name: mariadb.service
enabled: true
state: restarted

View file

@ -0,0 +1,21 @@
---
galaxy_info:
author: Florian L.
namespace: nullified
description: Install MariaDB and configure it, along with firewall rules
license: MIT
min_ansible_version: 2.15
# https://galaxy.ansible.com/api/v1/platforms/
platforms:
- name: Debian
versions:
- bookworm
galaxy_tags:
- github
- database
- mariadb
- sql
dependencies: []

View file

@ -0,0 +1,10 @@
---
- name: install client packages
become: true
ansible.builtin.apt:
update_cache: true
cache_valid_time: 3600
force_apt_get: true
pkg:
- mariadb-client
- mariadb-client-core

View file

@ -0,0 +1,21 @@
---
# Dispatch to server/client installation, then manage firewall rules.
- name: setup server
  ansible.builtin.include_tasks: server.yml
  when: mariadb_install_server is truthy
- name: setup client
  ansible.builtin.include_tasks: client.yml
  when: mariadb_install_client is truthy
- name: install firewall rules
  become: true
  ansible.builtin.template:
    src: ../templates/nftables.d/mariadb.nft.j2
    dest: /etc/nftables.d/mariadb.nft
    mode: '0600'
  register: nft_rule
- name: load firewall rules
  become: true
  ansible.builtin.command: /usr/sbin/nft -f /etc/nftables.d/mariadb.nft
  # `is changed` is the idiomatic test on a registered result.
  when: nft_rule is changed

View file

@ -0,0 +1,36 @@
---
- name: install server packages
  become: true
  ansible.builtin.apt:
    update_cache: true
    cache_valid_time: 3600
    force_apt_get: true
    pkg:
      - mariadb-common
      - mariadb-server
      - mariadb-server-core
- name: initialize database
  become: true
  when: mariadb_server_run_init_sql is truthy or mariadb_server_run_custom_sql is truthy
  block:
    - name: export initialization SQL file
      ansible.builtin.template:
        src: ../templates/mariadb_init.sql.j2
        # The rendered SQL may contain the root password: keep it under
        # root's home (root-only directory) instead of a predictable path
        # in the world-writable /tmp.
        dest: /root/.mariadb_init.sql
        mode: '0600'
      register: sql_init
    - name: run initialization file
      ansible.builtin.shell: mysql < /root/.mariadb_init.sql
      # Only re-run when the rendered SQL actually changed (idempotence).
      when: sql_init is changed
- name: update bind addresses to allow external connections
  become: true
  ansible.builtin.lineinfile:
    path: /etc/mysql/mariadb.conf.d/50-server.cnf
    regexp: '^bind-address\b.+'
    line: "bind-address = {{ mariadb_server_bind_addresses|join(',') }}"
    state: present
  when: mariadb_server_bind_addresses is truthy
  notify:
    - 'mariadb : restart mariadb service'

View file

@ -0,0 +1,22 @@
{% if mariadb_server_run_init_sql %}
# Run hardening steps from `mysql_secure_installation`
DELETE FROM mysql.global_priv WHERE User='';
DELETE FROM mysql.global_priv WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1');
DROP DATABASE IF EXISTS test;
DELETE FROM mysql.db WHERE Db='test' OR Db='test\\_%';
UPDATE mysql.global_priv
SET priv=json_set(
priv,
'$.password_last_changed', UNIX_TIMESTAMP(),
'$.plugin', 'mysql_native_password',
'$.authentication_string', PASSWORD('{{ mariadb_server_root_password }}'),
'$.auth_or', json_array(json_object(), json_object('plugin', 'unix_socket'))
)
WHERE User='root';
FLUSH PRIVILEGES;
{% endif %}
{% if mariadb_server_run_custom_sql and mariadb_server_custom_sql|length %}
{{ mariadb_server_custom_sql }}
{% endif %}

View file

@ -0,0 +1,26 @@
{%- set allowed_ingress_list4 = mariadb_nft_allowed_ingress_list | ansible.utils.ipv4 -%}
{%- set allowed_ingress_list6 = mariadb_nft_allowed_ingress_list | ansible.utils.ipv6 -%}
{%- set allowed_egress_list4 = mariadb_nft_allowed_egress_list | ansible.utils.ipv4 | default([], true) -%}
{%- set allowed_egress_list6 = mariadb_nft_allowed_egress_list | ansible.utils.ipv6 | default([], true) -%}
table inet filter {
{% if mariadb_install_server %}
    chain input {
{% if mariadb_nft_filter_input %}
        {%+ if allowed_ingress_list4 %}ip saddr { {{ allowed_ingress_list4 | join(', ') }} } tcp dport {{ mariadb_server_port }} accept{% endif +%}
        {%+ if allowed_ingress_list6 %}ip6 saddr { {{ allowed_ingress_list6 | join(', ') }} } tcp dport {{ mariadb_server_port }} accept{% endif +%}
{% else %}
        tcp dport {{ mariadb_server_port }} accept
{% endif %}
    }
{% endif %}
{% if mariadb_install_client %}
    chain output {
{% if mariadb_nft_filter_output %}
        {%+ if allowed_egress_list4 %}ip daddr { {{ allowed_egress_list4 | join(', ') }} } tcp dport {{ mariadb_server_port }} accept{% endif +%}
        {# ip6 (not ip) is required to match the IPv6 egress allow-list #}
        {%+ if allowed_egress_list6 %}ip6 daddr { {{ allowed_egress_list6 | join(', ') }} } tcp dport {{ mariadb_server_port }} accept{% endif +%}
{% else %}
        tcp dport {{ mariadb_server_port }} accept
{% endif %}
    }
{% endif %}
}

View file

@ -0,0 +1,2 @@
localhost

View file

@ -0,0 +1,8 @@
---
nginx_extra_packages: []
nginx_custom_config: false
nginx_limits_nofile: 524288
nginx_service_user: nginx
nginx_service_group: nginx
nginx_sites: []
nginx_streams: []

View file

@ -0,0 +1,28 @@
---
- name: restart firewall service
become: true
ansible.builtin.systemd_service:
name: nftables.service
enabled: true
state: restarted
- name: reload nginx service
become: true
ansible.builtin.systemd_service:
name: nginx.service
enabled: true
state: reloaded
- name: check configuration is valid
become: true
ansible.builtin.command: /usr/sbin/nginx -t -q
changed_when: false
listen: 'nginx : restart nginx service'
- name: restart service
become: true
ansible.builtin.systemd_service:
name: nginx.service
enabled: true
state: restarted
listen: 'nginx : restart nginx service'

View file

@ -0,0 +1,20 @@
---
galaxy_info:
author: Florian L.
namespace: nullified
description: Install and configure Nginx webserver
# issue_tracker_url: http://example.com/issue/tracker
license: MIT
min_ansible_version: 2.15
# https://galaxy.ansible.com/api/v1/platforms/
platforms:
- name: Debian
versions:
- bookworm
galaxy_tags:
- nginx
- webserver
dependencies: []

View file

@ -0,0 +1,97 @@
---
- name: install requirements
become: true
ansible.builtin.apt:
update_cache: true
force_apt_get: true
cache_valid_time: 3600
pkg:
- ca-certificates
- curl
- debian-archive-keyring
- gnupg2
- lsb-release
- name: install nginx repository
  become: true
  ansible.builtin.deb822_repository:
    allow_downgrade_to_insecure: false
    allow_insecure: false
    allow_weak: false
    components:
      - nginx
    enabled: true
    name: nginx
    signed_by: 'https://nginx.org/keys/nginx_signing.key'
    state: present
    suites: '{{ ansible_facts.distribution_release }}'
    # trusted: true would tell apt to skip signature verification entirely,
    # defeating the purpose of signed_by; verify against the nginx key.
    trusted: false
    uris: 'http://nginx.org/packages/mainline/debian'
- name: pin nginx packages
become: true
ansible.builtin.copy:
content: |-
Package: *
Pin: origin nginx.org
Pin: release o=nginx
Pin-Priority: 900
dest: /etc/apt/preferences.d/55-nginx
mode: '0600'
owner: root
group: root
- name: update cache and install nginx package
become: true
ansible.builtin.apt:
cache_valid_time: 0
force_apt_get: true
update_cache: true
pkg: '{{ nginx_extra_packages | default([]) + ["nginx"] }}'
- ansible.builtin.include_tasks:
file: nginx-config.yml
apply:
tags: [webserver-config]
tags: [webserver-config]
- name: setup firewall rules
become: true
ansible.builtin.template:
src: ../templates/ingress_http_nginx.nft.j2
dest: /etc/nftables.d/ingress_http_nginx.nft
owner: root
group: root
mode: '0600'
notify:
- 'nginx : restart firewall service'
- ansible.builtin.include_tasks:
file: nginx-service-entry.yml
apply:
tags: [webserver-sites]
tags: [webserver-sites]
vars:
nginx_entry_type: site
loop: '{{ nginx_sites }}'
loop_control:
label: '{{ item.name }}'
- ansible.builtin.include_tasks:
file: nginx-service-entry.yml
apply:
tags: [webserver-streams]
tags: [webserver-streams]
vars:
nginx_entry_type: stream
loop: '{{ nginx_streams }}'
loop_control:
label: '{{ item.name }}'
- name: set permissions
  become: true
  ansible.builtin.file:
    path: /etc/nginx
    owner: '{{ nginx_service_user }}'
    # Use the dedicated group variable (was mistakenly nginx_service_user).
    group: '{{ nginx_service_group }}'
    mode: 'u=rwX,g=rX,o='
    recurse: true

View file

@ -0,0 +1,65 @@
---
- name: setup configuration directories
  become: true
  ansible.builtin.file:
    path: '/etc/nginx/{{ item }}'
    state: directory
    owner: '{{ nginx_service_user }}'
    group: '{{ nginx_service_group }}'
    mode: '0750'
  loop:
    - conf.d
    - ssl
    - ssl/certificates
    - ssl/keys
    - sites-available
    - sites-enabled
    - streams-available
    - streams-enabled
- name: remove default unneeded files
  become: true
  ansible.builtin.file:
    path: '/etc/nginx/conf.d/default.conf'
    state: absent
- name: generate dhparams.pem file
  become: true
  ansible.builtin.command:
    cmd: /usr/bin/openssl dhparam -out /etc/nginx/ssl/dhparams.pem 4096
    creates: /etc/nginx/ssl/dhparams.pem
  notify:
    - 'nginx : restart nginx service'
- name: setup nginx.conf
  become: true
  block:
    - name: use default configuration
      ansible.builtin.template:
        src: ../templates/nginx.conf.j2
        dest: /etc/nginx/nginx.conf
        owner: '{{ nginx_service_user }}'
        group: '{{ nginx_service_group }}'
        mode: '0640'
      when: nginx_custom_config is falsy
      # notify must sit on each task: blocks do not support notify, and
      # previously only the custom-config path triggered a restart, so
      # default-config changes were never applied.
      notify:
        - 'nginx : restart nginx service'
    - name: use custom configuration
      ansible.builtin.copy:
        content: '{{ nginx_custom_config }}'
        dest: /etc/nginx/nginx.conf
        owner: '{{ nginx_service_user }}'
        group: '{{ nginx_service_group }}'
        mode: '0640'
      when: nginx_custom_config is truthy
      notify:
        - 'nginx : restart nginx service'
- name: set process limits
  become: true
  ansible.builtin.template:
    src: ../templates/nginx_limits.conf.j2
    dest: /etc/security/limits.d/nginx.conf
    owner: root
    group: root
    mode: '0600'
  notify:
    - 'nginx : restart nginx service'

View file

@ -0,0 +1,49 @@
---
# Manage one nginx site or stream entry (item): copy its config into the
# *-available directory, then enable/disable/delete the matching symlink.
- name: set facts
  ansible.builtin.set_fact:
    safe_filename: "{{ item.name | regex_replace('[^\\w]', '') }}"
    nginx_entry_type: '{{ nginx_entry_type | default(item.get("entry_type", None)) }}'
- name: perform sanity checks
  ansible.builtin.assert:
    that:
      - nginx_entry_type in ["stream", "site"]
    fail_msg: Invalid value for `nginx_entry_type`; expected "stream" or "site", got "{{ nginx_entry_type }}"
- name: 'copy entry in {{ nginx_entry_type }}s-available'
  become: true
  ansible.builtin.copy:
    content: '{{ item.content }}'
    dest: "/etc/nginx/{{ nginx_entry_type }}s-available/{{ safe_filename }}.conf"
    owner: '{{ nginx_service_user }}'
    # Use the dedicated group variable (was mistakenly nginx_service_user).
    group: '{{ nginx_service_group }}'
    mode: '0640'
  when: item.get('state', 'present') == 'present'
  notify:
    - 'nginx : reload nginx service'
- name: 'enable {{ nginx_entry_type }}'
  become: true
  ansible.builtin.file:
    src: "/etc/nginx/{{ nginx_entry_type }}s-available/{{ safe_filename }}.conf"
    path: "/etc/nginx/{{ nginx_entry_type }}s-enabled/{{ safe_filename }}.conf"
    owner: '{{ nginx_service_user }}'
    group: '{{ nginx_service_group }}'
    state: 'link'
  when: item.get('state', 'present') == 'present'
  notify:
    - 'nginx : reload nginx service'
- name: 'disable {{ nginx_entry_type }}'
  become: true
  ansible.builtin.file:
    path: "/etc/nginx/{{ nginx_entry_type }}s-enabled/{{ safe_filename }}.conf"
    state: absent
  when: item.get('state', 'present') in ['disabled', 'deleted']
- name: 'remove {{ nginx_entry_type }}'
  become: true
  ansible.builtin.file:
    path: "/etc/nginx/{{ nginx_entry_type }}s-available/{{ safe_filename }}.conf"
    state: absent
  when: item.get('state', 'present') == 'deleted'

View file

@ -0,0 +1,9 @@
table inet filter {
chain input {
meta nfproto { ipv4, ipv6 } tcp dport { http, https } accept
}
chain output {
meta nfproto { ipv4, ipv6 } tcp sport { http, https } accept
}
}

View file

@ -0,0 +1,46 @@
user {{ nginx_service_user }};
worker_processes auto;

error_log /var/log/nginx/error.log notice;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;

    sendfile on;
    server_tokens off;

    ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
    ssl_dhparam /etc/nginx/ssl/dhparams.pem;
    ssl_prefer_server_ciphers off;
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ecdh_curve x25519:secp256r1:secp521r1:secp384r1;

    keepalive_timeout 65;

    # conf.d snippets are http-scoped in the upstream package layout; an
    # include at main context would fail `nginx -t` for any server{} snippet.
    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*.conf;
}

stream {
    log_format proxy '$remote_addr [$time_local] '
                     '$protocol $status $bytes_sent $bytes_received '
                     '$session_time "$upstream_addr" "-- $ssl_preread_server_name --"'
                     '"$upstream_bytes_sent" "$upstream_bytes_received" "$upstream_connect_time"';
    access_log /var/log/nginx/stream-access.log proxy;
    include /etc/nginx/streams-enabled/*.conf;
}
View file

@ -0,0 +1 @@
nginx - nofile {{ nginx_limits_nofile }}

View file

@ -0,0 +1,2 @@
localhost

View file

@ -0,0 +1,5 @@
---
- hosts: localhost
remote_user: root
roles:
- nginx

Some files were not shown because too many files have changed in this diff Show more