local lxd tests

This commit is contained in:
RDF 2022-09-15 00:18:11 -03:00
parent 08b37b1b95
commit f65bc3b264
40 changed files with 2571 additions and 98 deletions

4
.gitignore vendored
View File

@ -1,4 +1,6 @@
.idea
ansible.cfg
Vault/
.sudo_pass
.sudo_pass
**/.ssh
**/.*

View File

@ -1,2 +1,7 @@
[ubuntu_lxd]
mps.org.uy ansible_ssh_port=8032
mps.org.uy ansible_ssh_port=8032
[lxd_instances:vars]
ansible_ssh_private_key_file=/root/.ssh/lxd
ansible_user=ubuntu
ansible_ssh_common_args='-o ProxyCommand="ssh -p 8032 -W %h:%p -q renzo@mps.org.uy"'

7
LDIFs/base.ldif Normal file
View File

@ -0,0 +1,7 @@
dn: ou=people,dc=mps,dc=org
objectClass: organizationalUnit
ou: people
dn: ou=groups,dc=mps,dc=org
objectClass: organizationalUnit
ou: groups

View File

@ -8,26 +8,38 @@
- hosts: ubuntu_lxd
gather_facts: false
tags: deploy
vars:
service_name: OpenLDAP-01
DIR: "/home/{{ansible_user}}/OpenLDAP"
ssh_key_passphrase: open
ssh_key_name: OpenLDAP
roles:
- lxc_configure_ssh
service_name: OpenLDAP
service_dir: "/home/{{ansible_user}}/OpenLDAP"
profile: default
ssh_key_file: "/root/.ssh/lxd"
tasks:
# Copio el manifest.
- file: path="{{DIR}}" state=directory mode='0755'
- copy: src=Manifests/OpenLDAP/main.tf dest="{{DIR}}/OpenLDAP-01.tf"
- file: path="{{service_dir}}" state=directory mode='0755'
- copy: src=Manifests/{{service_name}}/main.tf dest="{{service_dir}}/{{service_name}}.tf"
# Ejecuto el manifest, creo la instancia.
# Se necesita sudo para conectarse con el socket de LXD
- community.general.terraform:
project_path: "{{DIR}}"
project_path: "{{service_dir}}"
force_init: true
state: present
binary_path: "/home/renzo/.local/bin/terraform"
become: true
register: terraform
register: terraform
- name: Add LXD instance to group 'lxd_instances'
ansible.builtin.add_host:
name: "{{ terraform.outputs.container_ip_address.value }}"
groups: lxd_instances
ansible_lxd_remote: "{{ inventory_hostname }}"
plugin: community.general.lxd
- hosts: lxd_instances
gather_facts: false
become: true
tasks:
- ansible.builtin.ping:
# Borrar clave ssh de mi pc

View File

@ -2,12 +2,14 @@
- hosts: ubuntu_lxd
gather_facts: true
gather_subset:
- "min"
tags:
- deploy
become: false
vars:
ssh_key_file: "/root/.ssh/lxd"
tasks:
- name: Update, Upgrade & Install dependencies
become: true
- become: true
block:
- apt: update_cache=yes upgrade=full
- package: name="{{item}}" state=present
@ -18,7 +20,7 @@
- unzip
- shell: python3 -m pip install --upgrade pip
#- include_role: name=init_lxd
- include_role: name=install_lxd
- include_role: name=install_terraform
vars:
terraform_version: latest

View File

@ -11,21 +11,22 @@ provider "lxd" {
}
resource "lxd_cached_image" "image" {
source_remote = "ubuntu"
source_image = "focal/amd64"
source_remote = "centos"
source_image = "8/amd64"
}
resource "lxd_container" "container1" {
name = "OpenLDAP"
image = lxd_cached_image.image.fingerprint
ephemeral = false
config = {
"boot.autostart" = true
}
limits = {
cpu = 2
cpu = 1
}
}
output "container_info" {
value = lxd_container.container1
output "container_ip_address" {
value = lxd_container.container1.ip_address
}
output "container_name" {
value = lxd_container.container1.name
}

View File

@ -15,14 +15,18 @@ resource "lxd_cached_image" "image" {
source_image = "focal/amd64"
}
resource "lxd_container" "container1" {
name = "PHPLDAPAdmin"
name = "OpenLDAP-test"
image = lxd_cached_image.image.fingerprint
ephemeral = false
config = {
"boot.autostart" = true
}
limits = {
cpu = 2
cpu = 1
}
}
output "container_ip_address" {
value = lxd_container.container1.ip_address
}
output "container_name" {
value = lxd_container.container1.name
}

View File

View File

@ -0,0 +1,150 @@
# Ansible Role: LXD
Installs the latest version of `lxd`.
## Requirements
- Linux: distributions that support `snap`
- Tested on Debian, Ubuntu, Pop!_OS
## Role Variables
#### Settable Variables
```yaml
lxd:
host: # targets a LXD host installation
preseed:
... # the elements found in an LXD preseed configuration
... # see: https://linuxcontainers.org/lxd/docs/master/preseed/
extra_profiles: # list of profile objects
... # see: https://docs.ansible.com/ansible/latest/collections/community/general/lxd_profile_module.html#parameters
instance: # targets a LXD container/VM installation
create_mode: # mandatory, one of ['skip','build']
name: # the name of the lxc instance
dns_address: # DNS or IP address of the instance
type: # see: https://docs.ansible.com/ansible/latest/collections/community/general/lxd_container_module.html#parameter-type
server: # URL of LXC image host, defaults to https://images.linuxcontainers.org
alias: # as listed when running command `lxc image list images:`
protocol: # defaults to 'simplestreams', one of ['simplestreams','lxd']
devices:
... # see: https://linuxcontainers.org/lxd/docs/master/instances/#devices-configuration
... # see: https://docs.ansible.com/ansible/latest/collections/community/general/lxd_container_module.html#parameter-devices
profiles: # list of the names of profile names declared on the host
config:
... # see: https://linuxcontainers.org/lxd/docs/master/instances/#key-value-configuration
... # see: https://docs.ansible.com/ansible/latest/collections/community/general/lxd_container_module.html#parameter-config
# note that the following fields are mapped onto the above `config`, after converted from human-intuitive description. Any preexisting `config` value will be overridden.
cpu_mem:
cpus: # list of logical core ids
# becomes: `config.limits.cpu`
# ids increment by logical cores, grouped by physical core
# ie. [P0L0,P0L1,P1L0,P1L1] = [0,1,2,3]
memory: # human friendly amount, eg 4GiB
# becomes: `config.limits.memory`
hugepages: # boolean, source memory from hugepages reservation
# becomes: `config.limits.memory.hugepages`
priority: # 1-10, shared CPU scheduling priority
# becomes: `config.limits.cpu.priority`
host_idmap: # names of user and group IDs to map from host to guest
# becomes: `config.raw.idmap`
both: # only for when user and group ID are same value
users: # for just user names
groups: # for just group names
```
Note: for any pre-existing `lxd.host.preseed.storage_pools` declared, having `driver == 'zfs'`, the entire dataset will be destroyed and recreated in the parent zpool.
Note: for any LXC instance, when `create_mode == 'build'`, any pre-existing instance (vm or container) will be deleted to make way for the new one.
## Dependencies
Ansible collection `community.general` >= 4.1.0
- This role requires the `community.general.lxd_container` Ansible module at version [4.1.0](https://github.com/ansible-collections/community.general/blob/4.1.0/plugins/modules/cloud/lxd/lxd_container.py#L109) or newer. This is because v4.1.0 introduces the `type` option, which allows the module to create a virtual machine under QEMU.
- Note: The version of `community.general` that comes with Ansible CORE may not be recent, and must be [upgraded](https://github.com/ansible-collections/community.general#using-this-collection). Run the following command to upgrade:
```
ansible-galaxy collection install community.general --upgrade
```
- See relevant documentation using:
```
ansible-doc community.general.lxd_container
```
## Example Playbook
Host example
```yaml
- hosts: servers
roles:
- role: lxd
vars:
lxd:
host:
preseed:
networks: []
storage_pools:
- name: default
description: "default storage pool (zfs)"
driver: zfs
config:
source: tank/lxc
profiles:
- name: default
description: "default profile"
config: {}
devices:
root:
path: /
pool: default
type: disk
eth0:
name: eth0
nictype: bridged
parent: br0
type: nic
extra_profiles:
- name: docker_support
description: basic support for docker
config:
security.nesting: "true"
```
Instance example
```yaml
- hosts: lxd_containers
roles:
- role: lxd
vars:
lxd:
instance:
create_mode: build
name: mylxc
dns_address: mylxc.home.lan
alias: ubuntu/focal/cloud/amd64
devices:
eth0:
name: eth0
nictype: bridged
parent: br0
type: nic
hwaddr: F1-09-CE-07-C0-70
profiles:
- default
- docker_support
config:
boot.autostart: "true"
host_idmap:
both:
- root
```
## License
MIT
## TODO:
* Enhance support for `qemu` virtual machines
* Virtual machine feature parity with `libvirt` role
* Run, backup, snapshot, restore to/from ZFS
* --> update ZFS trim & FSTrim roles

View File

@ -0,0 +1,106 @@
---
## ------------------------------------------------ ##
## Installs LXD (includes LXC), and default profile ##
## ------------------------------------------------ ##
#- name: install snapd
#  ansible.builtin.package:
#    name: snapd

#- name: install snap core
#  community.general.snap:
#    name: core

# XXX: simplest way to void a prior LXD installation's configuration
#- name: uninstall prior LXD
#  ansible.builtin.command: snap remove lxd --purge
#  changed_when: true

# Destroy any ZFS-backed storage pools declared in the preseed so that
# `lxd init` below starts from a clean slate.
- name: clear preseed storage_pools
  loop: "{{ lxd.host.preseed.storage_pools | default([]) }}"
  loop_control:
    loop_var: lxd_storage_pool
  ansible.builtin.include_tasks: reset_storage.yml

#- name: install LXD
#  become: true
#  community.general.snap:
#    name: lxd

- name: check LXD executable accessible by Ansible
  ansible.builtin.shell: which lxd
  ignore_errors: true
  # probe only — never report a change (mirrors the lxc check below)
  changed_when: false
  register: which_lxd

# When `lxd` is not on Ansible's PATH, install a /usr/bin shim that forwards
# to the snap-installed binary.
- name: install LXD wrapper
  when: which_lxd is failed
  vars:
    lxd_path: /snap/bin/lxd
  block:
    - name: verify snap lxd installation exists
      ansible.builtin.stat:
        path: "{{ lxd_path }}"
        get_checksum: false
      register: lxd_stat
      failed_when: not lxd_stat.stat.exists
    - name: create lxd wrapper if lxd is not in PATH
      ansible.builtin.template:
        src: wrapper.sh.j2
        dest: /usr/bin/lxd
        owner: root
        group: root
        mode: u=rx,g=rx,o=rx
      vars:
        snap_app: lxd
        snap_app_path: "{{ lxd_path }}"

- name: check LXC executable accessible by Ansible
  ansible.builtin.shell: which lxc
  ignore_errors: true
  changed_when: false
  register: which_lxc

# BUG FIX: this block was also named "install LXD wrapper", duplicating the
# task name above — renamed to reflect that it installs the lxc shim.
- name: install LXC wrapper
  when: which_lxc is failed
  vars:
    lxc_path: /snap/bin/lxc
  block:
    - name: detect if lxc is in PATH
      ansible.builtin.stat:
        path: "{{ lxc_path }}"
        get_checksum: false
      register: lxc_stat
      failed_when: not lxc_stat.stat.exists
    - name: create lxc wrapper if lxc is not in PATH
      ansible.builtin.template:
        src: wrapper.sh.j2
        dest: /usr/bin/lxc
        owner: root
        group: root
        mode: u=rx,g=rx,o=rx
      vars:
        snap_app: lxc
        snap_app_path: "{{ lxc_path }}"

- name: init LXD without preseed
  when: not lxd.host.preseed is defined
  ansible.builtin.command: lxd init --auto

# BUG FIX: the command line ended with a stray "< " — `command` runs no shell,
# so redirection syntax is never interpreted; input goes through the module's
# `stdin` argument. The preseed is a dict and must be serialized to YAML here,
# otherwise stdin receives its Python repr, which `lxd init` cannot parse.
- name: init LXD from preseed
  when: lxd.host.preseed is defined
  ansible.builtin.command: lxd init --preseed
  args:
    stdin: "{{ lxd.host.preseed | to_yaml }}"

- name: apply extra profiles
  loop: "{{ lxd.host.extra_profiles | default([]) }}"
  loop_control:
    loop_var: lxd_profile
  community.general.lxd_profile:
    name: "{{ lxd_profile.name | mandatory }}"
    description: "{{ lxd_profile.description | default(omit) }}"
    state: "{{ lxd_profile.state | default(omit) }}"
    config: "{{ lxd_profile.config | default(omit) }}"
    devices: "{{ lxd_profile.devices | default(omit) }}"

View File

@ -0,0 +1,58 @@
---
## -------------------------- ##
## Build or Restore Instance ##
## -------------------------- ##
# Validates `lxd.instance.create_mode`, optionally rebuilds the instance from
# scratch, then waits until its SSH daemon answers on port 22.
- name: check invalid create mode
when: lxd.instance.create_mode | mandatory not in ['skip', 'build']
ansible.builtin.fail:
msg: "invalid lxd.instance.create_mode: {{ lxd.instance.create_mode }}"
- name: handle create instance
when: lxd.instance.create_mode == 'build'
block:
# both instance types are removed because a pre-existing instance of either
# kind would collide on the name
- name: clear pre-existing instance
with_items:
- container
- virtual-machine
community.general.lxd_container:
name: "{{ lxd.instance.name | mandatory }}"
type: "{{ item }}"
state: absent
force_stop: true
# XXX: ansible will always indicate a change when volatile options are present in the LXD config
ignore_volatile_options: false
# populates the `lxd_custom_config` fact (cpu/memory limits, raw.idmap)
- name: define custom instance configuration 'lxd_custom_config'
ansible.builtin.include_tasks: instance_config.yml
# user-supplied `lxd.instance.config` is the base; computed overrides win
# on key conflicts (combine's right-hand side takes precedence)
- name: merge and override instance base config with custom config
when: lxd.instance.config is defined
ansible.builtin.set_fact:
lxd_custom_config: "{{ ( lxd.instance.config ) | combine(lxd_custom_config | default({}), recursive=True) }}"
- name: create & (re)start instance
community.general.lxd_container:
name: "{{ lxd.instance.name }}"
type: "{{ lxd.instance.type | default(omit) }}"
state: restarted
source:
server: "{{ lxd.instance.server | default('https://images.linuxcontainers.org') }}"
alias: "{{ lxd.instance.alias | mandatory }}"
# 'simplestreams' seems more reliable than the default protocol 'lxd'
protocol: "{{ lxd.instance.protocol | default('simplestreams') }}"
type: image
mode: pull
# XXX: ansible will always indicate a change when volatile options are present in the LXD config
ignore_volatile_options: false
config: "{{ lxd_custom_config | default(omit) }}"
devices: "{{ lxd.instance.devices | default(omit) }}"
profiles: "{{ lxd.instance.profiles | default(omit) }}"
wait_for_ipv4_addresses: true
timeout: 600
# NOTE(review): assumes the chosen image ships an SSH server — confirm for
# minimal images, otherwise this wait will always time out
- name: "wait for {{ lxd.instance.name }} to respond with SSHd"
ansible.builtin.wait_for:
host: "{{ lxd.instance.dns_address | mandatory }}"
port: 22

View File

@ -0,0 +1,128 @@
---
## ----------------------------- ##
## Assemble custom configuration ##
## ----------------------------- ##
# this should build `lxd_custom_config` for use externally
# Translates the human-friendly `lxd.instance.cpu_mem` / `lxd.instance.host_idmap`
# descriptions into the flat LXD config keys (limits.cpu, limits.memory,
# raw.idmap). Each later section merges into the fact via combine(); the cpu
# section may safely *replace* the fact because it runs first and sets only
# the 'limits.cpu' key.
- name: process cpu + memory limit overrides
when: lxd.instance.cpu_mem is defined
block:
- name: apply 'config.limits.cpu' override
when: lxd.instance.cpu_mem.cpus is defined
block:
- name: get /proc/cpuinfo
ansible.builtin.command: cat /proc/cpuinfo
changed_when: false
register: proc_cpuinfo
# NOTE(review): 'asNative' is a project-local filter (not an Ansible builtin);
# presumably it maps the requested logical core ids onto host-native ids
# using /proc/cpuinfo — verify against the repo's filter_plugins
- name: transform 'cpus' into 'config.limits.cpu'
ansible.builtin.set_fact:
lxd_custom_config:
limits.cpu: "{{ lxd.instance.cpu_mem.cpus | asNative(proc_cpuinfo.stdout_lines) | sort | unique }}"
# multiple cores are expressed as a comma-separated pin list
- name: handle multiple cores
when: lxd_custom_config['limits.cpu'] | length > 1
ansible.builtin.set_fact:
lxd_custom_config:
limits.cpu: "{{ lxd_custom_config['limits.cpu'] | join(',') }}"
# a single core is expressed as the degenerate range "N-N"
- name: handle single core
when: lxd_custom_config['limits.cpu'] | length == 1
ansible.builtin.set_fact:
lxd_custom_config:
limits.cpu: "{{ lxd_custom_config['limits.cpu'][0] }}-{{ lxd_custom_config['limits.cpu'][0] }}"
- name: apply 'config.limits.memory' override
when: lxd.instance.cpu_mem.memory is defined
vars:
tmp_lxd_config:
limits.memory: "{{ lxd.instance.cpu_mem.memory }}"
ansible.builtin.set_fact:
lxd_custom_config: "{{ ( lxd_custom_config | default({}) ) | combine(tmp_lxd_config, recursive=True) }}"
- name: apply 'config.limits.memory.hugepages' override
when: lxd.instance.cpu_mem.hugepages is defined
vars:
tmp_lxd_config:
limits.memory.hugepages: "{{ lxd.instance.cpu_mem.hugepages | string }}"
ansible.builtin.set_fact:
lxd_custom_config: "{{ ( lxd_custom_config | default({}) ) | combine(tmp_lxd_config, recursive=True) }}"
# NOTE(review): the role README documents this key as `cpu_mem.priority`, but
# the code reads `cpu_mem.cpu_priority` — confirm which name is intended
- name: apply 'config.limits.cpu.priority'
when: lxd.instance.cpu_mem.cpu_priority is defined
vars:
tmp_lxd_config:
limits.cpu.priority: "{{ lxd.instance.cpu_mem.cpu_priority }}"
ansible.builtin.set_fact:
lxd_custom_config: "{{ ( lxd_custom_config | default({}) ) | combine(tmp_lxd_config, recursive=True) }}"
- name: process user + group + both idmap
when: lxd.instance.host_idmap is defined
block:
- name: load UID map
ansible.builtin.getent:
database: passwd
- name: load GID map
ansible.builtin.getent:
database: group
# the raw.idmap needs to have its ids appear in ascending order
- name: handle combined idmaps
when: lxd.instance.host_idmap.both is defined
block:
- name: get UID from username
loop: "{{ lxd.instance.host_idmap.both }}"
loop_control:
loop_var: lxd_host_id
ansible.builtin.set_fact:
lxd_both_idmap: "{{ lxd_both_idmap | default([]) + [ ansible_facts.getent_passwd[lxd_host_id][1] | int ] }}"
- name: build raw "both" mapping
loop: "{{ lxd_both_idmap | sort }}"
loop_control:
loop_var: lxd_host_id
ansible.builtin.set_fact:
lxd_raw_idmap: "{{ lxd_raw_idmap | default([]) + [ 'both ' + lxd_host_id | string + ' ' + lxd_host_id | string ] }}"
- name: handle user idmaps
when: lxd.instance.host_idmap.users is defined
block:
- name: uid mapping
loop: "{{ lxd.instance.host_idmap.users }}"
loop_control:
loop_var: lxd_host_id
ansible.builtin.set_fact:
lxd_user_idmap: "{{ lxd_user_idmap | default([]) + [ ansible_facts.getent_passwd[lxd_host_id][1] | int ] }}"
- name: build raw "uid" mapping
loop: "{{ lxd_user_idmap | sort }}"
loop_control:
loop_var: lxd_host_id
ansible.builtin.set_fact:
lxd_raw_idmap: "{{ lxd_raw_idmap | default([]) + [ 'uid ' + lxd_host_id | string + ' ' + lxd_host_id | string ] }}"
- name: handle group idmaps
when: lxd.instance.host_idmap.groups is defined
block:
- name: gid mapping
loop: "{{ lxd.instance.host_idmap.groups }}"
loop_control:
loop_var: lxd_host_id
ansible.builtin.set_fact:
lxd_group_idmap: "{{ lxd_group_idmap | default([]) + [ ansible_facts.getent_group[lxd_host_id][1] | int ] }}"
- name: build raw "gid" mapping
loop: "{{ lxd_group_idmap | sort }}"
loop_control:
loop_var: lxd_host_id
ansible.builtin.set_fact:
lxd_raw_idmap: "{{ lxd_raw_idmap | default([]) + [ 'gid ' + lxd_host_id | string + ' ' + lxd_host_id | string ] }}"
# raw.idmap is a newline-separated text blob, one "type hostid guestid" per line
- name: transform 'host_idmap' into 'config.raw.idmap'
vars:
tmp_lxd_config:
raw.idmap: "{{ lxd_raw_idmap | join('\n') }}"
ansible.builtin.set_fact:
lxd_custom_config: "{{ ( lxd_custom_config | default({}) ) | combine(tmp_lxd_config, recursive=True) }}"

View File

@ -0,0 +1,23 @@
---
## --------------------------------------------------------------- ##
## Automates the installation and setup of LXD hosts and instances ##
## --------------------------------------------------------------- ##
# XXX: https://github.com/lxc/lxd/tree/master/doc
# Role entry point: dispatches to host.yml and/or instance.yml depending on
# which sub-keys of the `lxd` variable the caller supplies. Both may run in
# one invocation if both `lxd.host` and `lxd.instance` are defined.
- name: install LXD to the host
when: lxd.host is defined
ansible.builtin.include_tasks: host.yml
# XXX: externally add user to 'lxd' group
# TODO: where is the default backing storage located? ZFS dataset?
# TODO: permissions supporting running docker in LXC
- name: setup target as lxd instance
when: lxd.instance is defined
ansible.builtin.include_tasks: instance.yml
# TODO: https://lxd.readthedocs.io/en/latest/backup/#container-backup-and-restore
# XXX: adding the user to the user group 'lxd' enables root-like access to LXD
# this is similar in behavior with the `docker` group

View File

@ -0,0 +1,19 @@
---
## ---------------------------------------------------------------------------- ##
## Delete all storage pools that exist outside of the normal LXD uninstallation ##
## ---------------------------------------------------------------------------- ##
# Expects `lxd_storage_pool` (one preseed storage-pool entry) to be supplied
# by the caller's loop (see host.yml). Only ZFS-backed pools are handled.
- name: clear preseed zfs datasets detected
  when: lxd_storage_pool.driver == 'zfs'
  block:
    # BUG FIX: the task name interpolated `lxd_storage_pool.driver`, which is
    # always "zfs" inside this block — show the dataset being destroyed instead.
    - name: "delete ZFS dataset: {{ lxd_storage_pool.config.source }}"
      ansible.builtin.include_role:
        name: zfs
        tasks_from: destroy_dataset
      vars:
        zfs_target_dataset: "{{ lxd_storage_pool.config.source }}"
        # child datasets LXD itself creates under the pool; expected to exist
        zfs_expected_children:
          - images
          - containers
          - virtual-machines
          - custom

View File

@ -0,0 +1,15 @@
#!/bin/sh
# {{ ansible_managed }}
# this file was directly copied from an Ubuntu/PopOS installation process
# PATH shim: forwards every argument to the snap-installed binary at
# {{ snap_app_path }}; if the snap is not installed, prints install
# instructions to stderr and exits non-zero.
if ! [ -x {{ snap_app_path }} ]; then
echo "" >&2
echo "Command '$0' requires the {{ snap_app }} snap to be installed." >&2
echo "Please install it with:" >&2
echo "" >&2
echo "snap install {{ snap_app }}" >&2
echo "" >&2
exit 1
fi
exec {{ snap_app_path }} "$@"

View File

@ -0,0 +1,54 @@
---
## ---------------------------------- ##
## Adds a user to the list of sudoers ##
## ---------------------------------- ##
# TODO: add to "admin" groups when needed on macOS/BSD
# https://stackoverflow.com/questions/33359404/ansible-best-practice-for-maintaining-list-of-sudoers
- name: install sudo package
  ansible.builtin.package:
    name: sudo
    state: present

# determine default group with sudo privileges
- name: get all groups
  # pipefail ensures a getent failure is not masked by awk's exit status
  ansible.builtin.shell: set -o pipefail && getent group | awk -F":" '{print $1}'
  args:
    # absolute path: `set -o pipefail` needs bash, and `executable`
    # expects a path to an interpreter, not a bare command name
    executable: /bin/bash
  changed_when: false
  register: getent_groups

- name: find default sudoers group
  # BUG FIX: the condition referenced `item`, which is undefined here because
  # loop_control renames the loop variable to `sudoer_group`
  when: sudoer_group in getent_groups.stdout_lines
  # XXX: in ascending order of precedence
  # see: https://wiki.archlinux.org/index.php/Sudo#Example_entries
  loop:
    - sudo
    - wheel
  loop_control:
    loop_var: sudoer_group
  ansible.builtin.set_fact:
    default_sudoers_group: "{{ sudoer_group }}"

- name: find preexisting custom sudoers files
  ansible.builtin.find:
    paths: "/etc/sudoers.d"
    patterns: "custom_*"
  register: find_custom_sudoers

# (the former `when: find_custom_sudoers.files` guard was redundant:
# looping over an empty result list is already a no-op)
- name: reset custom sudoers files
  loop: "{{ find_custom_sudoers.files }}"
  loop_control:
    loop_var: custom_sudoer_file
  ansible.builtin.file:
    path: "{{ custom_sudoer_file.path }}"
    state: absent

- name: add custom sudoers file
  community.general.sudoers:
    name: custom_nologin
    state: present
    group: "{{ default_sudoers_group }}"
    commands: ALL
    nopassword: true

View File

@ -0,0 +1,33 @@
# ansible-playbook Playbooks/01-UBUNTU-LXD.yml -i Inventories/QA -v -t 'deploy' --become-password-file .sudo_pass
# Provision the "virtualmin" LXD instance on localhost via Terraform, then
# reach it through the community.general.lxd connection plugin.
- hosts: localhost
  gather_facts: false
  become: true
  roles:
    #- role: lxd # ansible-galaxy collection install rjlasko.ansible
    #  vars:
    #    lxd:
    #      host: {}
  tasks:
    #- become: true
    #  apt: update_cache=yes upgrade=full
    # sudo is required to reach the LXD socket
    - community.general.terraform:
        project_path: "./terraform/"
        force_init: true
        state: present
      become: true
      register: terraform
    - name: Add LXD instance to group 'lxd_instances'
      ansible.builtin.add_host:
        name: "virtualmin"
        groups: lxd_instances
        ansible_lxd_remote: "localhost"
        # BUG FIX: `plugin:` was a meaningless host variable — selecting the
        # LXD connection plugin for the follow-up play requires setting
        # `ansible_connection`; without it Ansible would attempt SSH to the
        # (unresolvable) container name
        ansible_connection: community.general.lxd
- hosts: lxd_instances
  gather_facts: false
  become: true
  tasks:
    - ansible.builtin.ping: {}
    #- ansible.builtin.include_role: name=sudo

View File

@ -0,0 +1,45 @@
#!/bin/bash
# Provision a container: create an admin user, install base packages
# (sshd, cron), open the firewall for SSH, then run the Virtualmin installer.
set -x

# Update the system and install the packages sshd/cron need, fully unattended.
aptGet() {
    export DEBIAN_FRONTEND=noninteractive
    export APT_LISTCHANGES_FRONTEND=none
    apt-get -fuy autoremove
    apt-get clean
    apt-get update
    # BUG FIX: upgrade/dist-upgrade lacked --yes and would block on the
    # confirmation prompt during unattended provisioning
    apt-get upgrade --yes
    apt-get dist-upgrade --yes
    apt-get install \
        openssh-server \
        cron \
        --yes
}

# Open the firewall for SSH.
sshConfig() {
    ufw allow ssh
}

## Admin user
# NOTE(review): hard-coded default credentials — rotate or inject via the
# provisioning pipeline before exposing SSH publicly
U="admin"
P="admin"
G="sudo"
UUID="420" # numeric UID for the admin account
UHOME="/home/admin"
# BUG FIX: --gecos "" skips the interactive finger-information prompt,
# which would otherwise block an unattended run
adduser \
    --disabled-login \
    --gecos "" \
    --home "$UHOME" \
    --ingroup "$G" \
    --uid "$UUID" \
    --quiet "$U"
echo "$U:$P" | chpasswd
getent group "$G"
#echo "$U ALL=(ALL) /02-Virtualmin.sh" | EDITOR='tee -a' visudo
#chmod 775 /*.sh && chown $U:$G /*.sh
aptGet
sshConfig
sh /02-Virtualmin.sh --verbose --minimal --force --hostname virtualmin.container.test

View File

@ -0,0 +1,985 @@
#!/bin/sh
# shellcheck disable=SC2059 disable=SC2181 disable=SC2154
# virtualmin-install.sh
# Copyright 2005-2019 Virtualmin, Inc.
# Simple script to grab the virtualmin-release and virtualmin-base packages.
# The packages do most of the hard work, so this script can be small-ish and
# lazy-ish.
# WARNING: Anything not listed in the currently supported systems list is not
# going to work, despite the fact that you might see code that detects your
# OS and acts on it. If it isn't in the list, the code is not complete and
# will not work. More importantly, the repos that this script uses do not
# exist, if the OS isn't listed. Don't even bother trying it.
#
# A manual install might work for you though.
# See here: https://www.virtualmin.com/documentation/installation/manual/
# License and version
SERIAL=GPL
KEY=GPL
VER=6.2.2
vm_version=6
# Currently supported systems:
supported=" CentOS/RHEL Linux 7, and 8 on x86_64
Debian 9, and 10 on i386 and amd64
Ubuntu 16.04 LTS, 18.04 LTS, and 20.04 LTS on i386 and amd64"
log=/root/virtualmin-install.log
skipyesno=0
# Print usage info, if --help, set mode, etc.
# Temporary colors
RED="$(tput setaf 1)"
YELLOW="$(tput setaf 3)"
CYAN="$(tput setaf 6)"
NORMAL="$(tput sgr0)"
# Set defaults
bundle='LAMP' # Other option is LEMP
mode='full' # Other option is minimal
usage () {
# shellcheck disable=SC2046
printf "Usage: %s %s [options]\\n" "${CYAN}" $(basename "$0")
echo
echo " If called without arguments, installs Virtualmin."
echo
printf " ${YELLOW}--uninstall|-u${NORMAL} - Removes all Virtualmin packages (do not use on a production system)\\n"
printf " ${YELLOW}--help|-h${NORMAL} - This message\\n"
printf " ${YELLOW}--force|-f${NORMAL} - Skip confirmation message\\n"
printf " ${YELLOW}--hostname|-n${NORMAL} - Set fully qualified hostname\\n"
printf " ${YELLOW}--verbose|-v${NORMAL} - Verbose\\n"
printf " ${YELLOW}--setup|-s${NORMAL} - Setup software repositories and exit (no installation or configuration)\\n"
printf " ${YELLOW}--minimal|-m${NORMAL} - Install a smaller subset of packages for low-memory/low-resource systems\\n"
printf " ${YELLOW}--bundle|-b <name>${NORMAL} - Choose bundle to install (LAMP or LEMP, defaults to LAMP)\\n"
printf " ${YELLOW}--disable <feature>${NORMAL} - Disable feature [SCL]\\n"
echo
}
while [ "$1" != "" ]; do
case $1 in
--help|-h)
usage
exit 0
;;
--uninstall|-u)
shift
mode="uninstall"
;;
--force|-f|--yes|-y)
shift
skipyesno=1
;;
--hostname|-n)
shift
forcehostname=$1
shift
;;
--verbose|-v)
shift
VERBOSE=1
;;
--setup|-s)
shift
setup_only=1
mode='setup'
break
;;
--minimal|-m)
shift
mode='minimal'
;;
--disable)
shift
case "$1" in
SCL)
shift
DISABLE_SCL=1
;;
EPEL)
shift
DISABLE_EPEL=1
;;
*)
printf "Unknown feature ${YELLOW}$1${NORMAL}: exiting\\n"
exit 1
;;
esac
;;
--bundle|-b)
shift
case "$1" in
LAMP)
shift
bundle='LAMP'
;;
LEMP)
shift
bundle='LEMP'
;;
*)
printf "Unknown bundle ${YELLOW}$1${NORMAL}: exiting\\n"
exit 1
;;
esac
;;
*)
printf "Unrecognized option: $1\\n\\n"
usage
exit 1
;;
esac
done
# Make sure Perl is installed
printf "Checking for Perl..." >> $log
# loop until we've got a Perl or until we can't try any more
while true; do
perl="$(which perl 2>/dev/null)"
if [ -z "$perl" ]; then
if [ -x /usr/bin/perl ]; then
perl=/usr/bin/perl
break
elif [ -x /usr/local/bin/perl ]; then
perl=/usr/local/bin/perl
break
elif [ -x /opt/csw/bin/perl ]; then
perl=/opt/csw/bin/perl
break
elif [ "$perl_attempted" = 1 ] ; then
printf "${RED}Perl could not be installed - Installation cannot continue.${NORMAL}\\n"
exit 2
fi
# couldn't find Perl, so we need to try to install it
echo 'Perl was not found on your system - Virtualmin requires it to run.'
echo 'Attempting to install it now...'
if [ -x /usr/bin/dnf ]; then
dnf -y install perl >> $log
elif [ -x /usr/bin/yum ]; then
yum -y install perl >> $log
elif [ -x /usr/bin/apt-get ]; then
apt-get update >> $log
apt-get -q -y install perl >> $log
fi
perl_attempted=1
# Loop. Next loop should either break or exit.
else
break
fi
done
printf "found Perl at $perl\\n" >> $log
# Check for wget or curl or fetch
printf "Checking for HTTP client..." >> $log
while true; do
if [ -x "/usr/bin/wget" ]; then
download="/usr/bin/wget -nv"
break
elif [ -x "/usr/bin/curl" ]; then
download="/usr/bin/curl -f -s -L -O"
break
elif [ -x "/usr/bin/fetch" ]; then
download="/usr/bin/fetch"
break
elif [ "$wget_attempted" = 1 ]; then
printf "${RED}No HTTP client available. Could not install wget. Cannot continue.${NORMAL}\\n"
exit 1
fi
# Made it here without finding a downloader, so try to install one
wget_attempted=1
if [ -x /usr/bin/dnf ]; then
dnf -y install wget >> $log
elif [ -x /usr/bin/yum ]; then
yum -y install wget >> $log
elif [ -x /usr/bin/apt-get ]; then
apt-get update >> /dev/null
apt-get -y -q install wget >> $log
fi
done
if [ -z "$download" ]; then
echo "Tried to install downloader, but failed. Do you have working network and DNS?"
fi
printf "found %s\\n" "$download" >> $log
# Check for gpg, debian 10 doesn't install by default!?
if [ -x /usr/bin/apt-get ]; then
if [ ! -x /usr/bin/gpg ]; then
printf "gpg not found, attempting to install..." >> $log
apt-get update >> /dev/null
apt-get -y -q install gnupg >> $log
fi
fi
arch="$(uname -m)"
if [ "$arch" = "i686" ]; then
arch="i386"
fi
if [ "$SERIAL" = "GPL" ]; then
LOGIN=""
PRODUCT="GPL"
repopath="gpl/"
else
LOGIN="$SERIAL:$KEY@"
PRODUCT="Professional"
repopath=""
fi
# Virtualmin-provided packages
vmgroup="'Virtualmin Core'"
debvmpackages="virtualmin-core"
deps=
sclgroup="'Software Collections PHP 7.2 Environment'"
# This has to be installed before anything else, so it can be disabled during
# install, and turned back on after. This is ridiculous.
debpredeps="fail2ban"
if [ "$mode" = 'full' ]; then
if [ "$bundle" = 'LAMP' ]; then
rhgroup="'Virtualmin LAMP Stack'"
debdeps="postfix virtualmin-lamp-stack"
ubudeps="postfix virtualmin-lamp-stack"
elif [ "$bundle" = 'LEMP' ]; then
rhgroup="'Virtualmin LEMP Stack'"
debdeps="postfix php*-fpm virtualmin-lemp-stack"
ubudeps="postfix php*-fpm virtualmin-lemp-stack"
fi
elif [ "$mode" = 'minimal' ]; then
if [ "$bundle" = 'LAMP' ]; then
rhgroup="'Virtualmin LAMP Stack Minimal'"
debdeps="postfix virtualmin-lamp-stack-minimal"
ubudeps="postfix virtualmin-lamp-stack-minimal"
elif [ "$bundle" = 'LEMP' ]; then
rhgroup="'Virtualmin LEMP Stack Minimal'"
debdeps="postfix php*-fpm virtualmin-lemp-stack-minimal"
ubudeps="postfix php*-fpm virtualmin-lemp-stack-minimal"
fi
fi
# Find temp directory
if [ -z "$TMPDIR" ]; then
TMPDIR=/tmp
fi
# Check whether $TMPDIR is mounted noexec (everything will fail, if so)
# XXX: This check is imperfect. If $TMPDIR is a full path, but the parent dir
# is mounted noexec, this won't catch it.
TMPNOEXEC="$(grep $TMPDIR /etc/mtab | grep noexec)"
if [ -n "$TMPNOEXEC" ]; then
echo "${RED}Fatal:${NORMAL} $TMPDIR directory is mounted noexec. Installation cannot continue."
exit 1
fi
if [ -z "$tempdir" ]; then
tempdir="$TMPDIR/.virtualmin-$$"
if [ -e "$tempdir" ]; then
rm -rf "$tempdir"
fi
mkdir "$tempdir"
fi
# "files" subdir for libs
mkdir "$tempdir/files"
srcdir="$tempdir/files"
if ! cd "$srcdir"; then
echo "Failed to cd to $srcdir"
exit 1
fi
# Download the slib (source: http://github.com/virtualmin/slib)
# Lots of little utility functions.
$download https://software.virtualmin.com/lib/slib.sh
chmod +x slib.sh
# shellcheck disable=SC1091
. ./slib.sh
# Check the serial number and key
serial_ok "$SERIAL" "$KEY"
# Setup slog
# shellcheck disable=SC2034
LOG_PATH="$log"
# Setup run_ok
# shellcheck disable=SC2034
RUN_LOG="$log"
# Exit on any failure during shell stage
# shellcheck disable=SC2034
RUN_ERRORS_FATAL=1
# Console output level; ignore debug level messages.
if [ "$VERBOSE" = "1" ]; then
# shellcheck disable=SC2034
LOG_LEVEL_STDOUT="DEBUG"
else
# shellcheck disable=SC2034
LOG_LEVEL_STDOUT="INFO"
fi
# Log file output level; catch literally everything.
# shellcheck disable=SC2034
LOG_LEVEL_LOG="DEBUG"
# log_fatal calls log_error
log_fatal() {
log_error "$1"
}
remove_virtualmin_release () {
# shellcheck disable=SC2154
case "$os_type" in
"fedora" | "centos" | "rhel" | "amazon" )
run_ok "rpm -e virtualmin-release" "Removing virtualmin-release"
;;
"debian" | "ubuntu" )
grep -v "virtualmin" /etc/apt/sources.list > "$tempdir"/sources.list
mv "$tempdir"/sources.list /etc/apt/sources.list
rm -f /etc/apt/sources.list.d/virtualmin.list
;;
esac
}
fatal () {
echo
log_fatal "Fatal Error Occurred: $1"
printf "${RED}Cannot continue installation.${NORMAL}\\n"
remove_virtualmin_release
if [ -x "$tempdir" ]; then
log_warning "Removing temporary directory and files."
rm -rf "$tempdir"
fi
log_fatal "If you are unsure of what went wrong, you may wish to review the log"
log_fatal "in $log"
exit 1
}
success () {
log_success "$1 Succeeded."
}
# Function to find out if Virtualmin is already installed, so we can get
# rid of some of the warning message. Nobody reads it, and frequently
# folks run the install script on a production system; either to attempt
# to upgrade, or to "fix" something. That's never the right thing.
is_installed () {
if [ -f /etc/virtualmin-license ]; then
# looks like it's been installed before
return 0
fi
# XXX Probably not installed? Maybe we should remove license on uninstall, too.
return 1
}
# This function performs a rough uninstallation of Virtualmin
# It is neither complete, nor correct, but it almost certainly won't break
# anything. It is primarily useful for cleaning up a botched install, so you
# can run the installer again.
uninstall () {
  # Very destructive, ask first.
  echo
  printf " ${REDBG}WARNING${NORMAL}\\n"
  echo
  echo " This operation is very destructive. It removes nearly all of the packages"
  echo " installed by the Virtualmin installer. Never run this on a production system."
  echo
  printf " Continue? (y/n) "
  if ! yesno; then
    exit
  fi
  # This is a crummy way to detect package manager...but going through
  # half the installer just to get here is even crummier.
  if which rpm 1>/dev/null 2>&1; then package_type=rpm
  elif which dpkg 1>/dev/null 2>&1; then package_type=deb
  fi
  case "$package_type" in
    rpm)
      # Remove the yum groups first, then individual stragglers.
      yum groupremove -y --setopt="groupremove_leaf_only=true" "Virtualmin Core"
      yum groupremove -y --setopt="groupremove_leaf_only=true" "Virtualmin LAMP Stack"
      yum groupremove -y --setopt="groupremove_leaf_only=true" "Virtualmin LEMP Stack"
      yum groupremove -y --setopt="groupremove_leaf_only=true" "Virtualmin LAMP Stack Minimal"
      yum groupremove -y --setopt="groupremove_leaf_only=true" "Virtualmin LEMP Stack Minimal"
      yum remove -y virtualmin-base
      yum remove -y wbm-virtual-server wbm-virtualmin-htpasswd wbm-virtualmin-dav wbm-virtualmin-mailman wbm-virtualmin-awstats wbm-php-pear wbm-ruby-gems wbm-virtualmin-registrar wbm-virtualmin-init wbm-jailkit wbm-virtualmin-git wbm-virtualmin-slavedns wbm-virtual-server wbm-virtualmin-sqlite wbm-virtualmin-svn
      yum remove -y wbt-virtual-server-mobile
      yum remove -y virtualmin-config perl-Term-Spinner-Color
      yum remove -y webmin usermin awstats
      yum remove -y nginx
      yum remove -y fail2ban
      yum clean all; yum clean all
      os_type="centos"
      ;;
    deb)
      rm -rf /etc/fail2ban/jail.d/00-firewalld.conf
      rm -f /etc/fail2ban/jail.local
      apt-get remove --assume-yes --purge virtualmin-base virtualmin-core virtualmin-lamp-stack virtualmin-lemp-stack
      apt-get remove --assume-yes --purge virtualmin-lamp-stack-minimal virtualmin-lemp-stack-minimal
      apt-get remove --assume-yes --purge virtualmin-config libterm-spinner-color-perl
      apt-get remove --assume-yes --purge webmin-virtual-server webmin-virtualmin-htpasswd webmin-virtualmin-git webmin-virtualmin-slavedns webmin-virtualmin-dav webmin-virtualmin-mailman webmin-virtualmin-awstats webmin-php-pear webmin-ruby-gems webmin-virtualmin-registrar webmin-virtualmin-init webmin-jailkit webmin-virtual-server webmin-virtualmin-sqlite webmin-virtualmin-svn
      apt-get remove --assume-yes --purge webmin-virtual-server-mobile
      apt-get remove --assume-yes --purge fail2ban
      apt-get remove --assume-yes --purge apache2*
      apt-get remove --assume-yes --purge nginx*
      apt-get remove --assume-yes --purge webmin usermin
      apt-get autoremove --assume-yes
      os_type="debian"
      apt-get clean
      ;;
    *)
      echo "I don't know how to uninstall on this operating system."
      ;;
  esac
  echo 'Removing nameserver 127.0.0.1 from /etc/resolv.conf'
  # "d" deletes matching lines; the previous "g" command only overwrote
  # each match with the (empty) hold space, leaving blank lines behind.
  sed -i '/nameserver 127.0.0.1/d' /etc/resolv.conf
  echo 'Removing virtualmin repo configuration'
  remove_virtualmin_release
  echo "Removing /etc/virtualmin-license, if it exists."
  # -f matches the "if it exists" message — no error when it is absent.
  rm -f /etc/virtualmin-license
  echo "Done. There's probably quite a bit of related packages and such left behind"
  echo "but all of the Virtualmin-specific packages have been removed."
  exit 0
}
# Run the destructive uninstall path immediately when requested;
# uninstall() exits and never returns.
if [ "$mode" = "uninstall" ]; then
  uninstall
fi
# Calculate disk space requirements (this is a guess, for now)
if [ "$mode" = 'minimal' ]; then
  disk_space_required=500
else
  disk_space_required=650
fi
# Message to display in interactive mode
# Prints the welcome banner (product, version, supported systems, disk
# requirement) and asks for confirmation; exits if the user declines.
install_msg() {
  cat <<EOF
Welcome to the Virtualmin ${GREEN}$PRODUCT${NORMAL} installer, version ${GREEN}$VER${NORMAL}
This script must be run on a freshly installed supported OS. It does not
perform updates or upgrades (use your system package manager) or license
changes (use the "virtualmin change-license" command).
The systems currently supported by install.sh are:
EOF
  echo "${CYAN}$supported${NORMAL}"
  cat <<EOF
If your OS/version/arch is not listed, installation ${RED}will fail${NORMAL}. More
details about the systems supported by the script can be found here:
${UNDERLINE}https://www.virtualmin.com/os-support${NORMAL}
The selected package bundle is ${CYAN}${bundle}${NORMAL} and the size of install is
${CYAN}${mode}${NORMAL}. It will require up to ${CYAN}${disk_space_required} MB${NORMAL} of disk space.
Exit and re-run this script with ${CYAN}--help${NORMAL} flag to see available options.
EOF
  # Abort unless the user explicitly confirms.
  printf " Continue? (y/n) "
  if ! yesno; then
    exit
  fi
}
# Show the banner only for interactive (non --yes), non setup-only runs.
if [ "$skipyesno" -ne 1 ] && [ -z "$setup_only" ]; then
  install_msg
fi
# Warn — and require explicit confirmation — when a previous Virtualmin
# install is detected (see is_installed); exits if the user declines.
already_installed_msg() {
  # Double check if installed, just in case above error ignored.
  if is_installed; then
    cat <<EOF
${REDBG}WARNING${NORMAL}
Virtualmin may already be installed. This can happen if an installation failed,
and can be ignored in that case.
But, if Virtualmin has already successfully installed you should not run this
script again! It will cause breakage to your existing configuration.
Updates and upgrades can be performed from within Virtualmin. To change
license details, use the 'virtualmin change-license' command.
Changing the license never requires re-installation.
EOF
    printf " Really Continue? (y/n) "
    if ! yesno; then
      exit
    fi
  fi
}
# Same gating as install_msg: interactive, non setup-only runs.
if [ "$skipyesno" -ne 1 ] && [ -z "$setup_only" ]; then
  already_installed_msg
fi
# Check memory
# Full installs need ~1.5GiB; minimal installs get a lower floor.
if [ "$mode" = "full" ]; then
  minimum_memory=1610613
else
  # minimal mode probably needs less memory to succeed
  minimum_memory=1048576
fi
if ! memory_ok "$minimum_memory"; then
  log_fatal "Too little memory, and unable to create a swap file. Consider adding swap"
  log_fatal "or more RAM to your system."
  exit 1
fi
# Check for localhost in /etc/hosts
if [ -z "$setup_only" ]; then
  # grep -q has the same exit-status semantics as the old
  # ">/dev/null; [ "$?" != 0 ]" dance, without the intermediate check.
  if ! grep -q localhost /etc/hosts; then
    log_warning "There is no localhost entry in /etc/hosts. This is required, so one will be added."
    if ! run_ok "echo 127.0.0.1 localhost >> /etc/hosts" "Editing /etc/hosts"; then
      log_error "Failed to configure a localhost entry in /etc/hosts."
      log_error "This may cause problems, but we'll try to continue."
    fi
  fi
fi
# download()
# Use $download to download the provided filename or exit with an error.
# $1 - full URL; only its basename is shown in the progress message.
download() {
  # XXX Check this to make sure run_ok is doing the right thing.
  # Especially make sure failure gets logged right.
  # ${1##*/} keeps everything after the last "/" (the filename) —
  # replaces the previous echo|awk pipeline with pure shell expansion.
  download_file="${1##*/}"
  if ! run_ok "$download $1" "Downloading $download_file"; then
    fatal "Failed to download $1. Cannot continue. Check your network connection and DNS settings."
  fi
  return 0
}
# Only root can run this
id | grep -i "uid=0(" >/dev/null
if [ "$?" != "0" ]; then
  # Exception: under Cygwin there is no uid 0; allow any user there.
  uname -a | grep -i CYGWIN >/dev/null
  if [ "$?" != "0" ]; then
    fatal "${RED}Fatal:${NORMAL} The Virtualmin install script must be run as root"
  fi
fi
log_info "Started installation log in $log"
echo
# Phase banner: a single phase when only repo setup was requested,
# otherwise three (Setup / Installation / Configuration).
if [ -n "$setup_only" ]; then
  log_debug "Phase 1 of 1: Setup"
  printf "${YELLOW}${NORMAL} Phase ${YELLOW}1${NORMAL} of ${GREEN}1${NORMAL}: Setup\\n"
else
  log_debug "Phase 1 of 3: Setup"
  printf "${YELLOW}${CYAN}□□${NORMAL} Phase ${YELLOW}1${NORMAL} of ${GREEN}3${NORMAL}: Setup\\n"
fi
# Print out some details that we gather before logging existed
log_debug "Install mode: $mode"
log_debug "Product: Virtualmin $PRODUCT"
log_debug "install.sh version: $VER"
# Check for a fully qualified hostname
log_debug "Checking for fully qualified hostname..."
name="$(hostname -f)"
if [ -n "$forcehostname" ]; then set_hostname "$forcehostname"
elif ! is_fully_qualified "$name"; then set_hostname
fi
# Insert the serial number and password into /etc/virtualmin-license
log_debug "Installing serial number and license key into /etc/virtualmin-license"
echo "SerialNumber=$SERIAL" > /etc/virtualmin-license
echo "LicenseKey=$KEY" >> /etc/virtualmin-license
# License file is root-only: it contains the license key.
chmod 700 /etc/virtualmin-license
cd ..
# Populate some distro version globals
get_distro
log_debug "Operating system name: $os_real"
log_debug "Operating system version: $os_version"
log_debug "Operating system type: $os_type"
log_debug "Operating system major: $os_major_version"
# Configure the system package manager (dnf/yum or apt) to pull from the
# Virtualmin repositories, after gating on supported OS versions.
# Reads globals from get_distro ($os_type, $os_version, $os_major_version)
# and option parsing ($LOGIN, $vm_version, $repopath, $arch, $ubudeps,
# $debdeps); sets $package_type plus the $install* command strings used by
# the install_with_* functions.
install_virtualmin_release () {
  # Grab virtualmin-release from the server
  log_debug "Configuring package manager for ${os_real} ${os_version}..."
  case "$os_type" in
    rhel|centos|fedora|amazon)
      # Minimum supported version gate for the RPM family.
      # NOTE(review): the ubuntu/debian arms of this inner case can never
      # match — the outer case already selected an RPM os_type. Looks like
      # dead code carried over from a refactor; confirm before removing.
      case "$os_type" in
        rhel|centos)
          if [ "$os_major_version" -lt 7 ]; then
            printf "${RED}${os_type} ${os_version} is not supported by this installer.${NORMAL}\\n"
            exit 1
          fi
          ;;
        fedora)
          if [ "$os_version" -lt 33 ]; then
            printf "${RED}${os_type} ${os_version} is not supported by this installer.${NORMAL}\\n"
            exit 1
          fi
          ;;
        ubuntu)
          if [ "$os_version" != "16.04" ] && [ "$os_version" != "18.04" ] && [ "$os_version" != "20.04" ]; then
            printf "${RED}${os_type} ${os_version} is not supported by this installer.${NORMAL}\\n"
            exit 1
          fi
          ;;
        debian)
          if [ "$os_major_version" -lt 9 ]; then
            printf "${RED}${os_type} ${os_version} is not supported by this installer.${NORMAL}\\n"
            exit 1
          fi
          ;;
        *)
          printf "${RED}This OS/version is not recognized. Can't continue.${NORMAL}\\n"
          exit 1
          ;;
      esac
      # SELinux interferes with parts of the install; relax it for the
      # duration (permanently disabled later by disable_selinux).
      if [ -x /usr/sbin/setenforce ]; then
        log_debug "Disabling SELinux during installation..."
        if /usr/sbin/setenforce 0; then log_debug " setenforce 0 succeeded"
        else log_debug " setenforce 0 failed: $?"
        fi
      fi
      package_type="rpm"
      # Prefer dnf when available; fall back to yum on older systems.
      if which dnf 1>/dev/null 2>&1; then
        install="dnf -y install"
        install_cmd="dnf"
        install_group="dnf -y --quiet group install --setopt=group_package_types=mandatory,default"
        install_config_manager="dnf config-manager"
        # config-manager lives in dnf-plugins-core; install it if missing.
        if ! $install_config_manager 1>/dev/null 2>&1; then
          run_ok "$install dnf-plugins-core"
        fi
      else
        install="/usr/bin/yum -y install"
        install_cmd="/usr/bin/yum"
        if [ "$os_major_version" -ge 7 ]; then
          run_ok "yum --quiet groups mark convert" "Updating yum Groups"
        fi
        install_group="yum -y --quiet groupinstall --setopt=group_package_types=mandatory,default"
        install_config_manager="yum-config-manager"
      fi
      download "https://${LOGIN}software.virtualmin.com/vm/${vm_version}/${repopath}${os_type}/${os_major_version}/${arch}/virtualmin-release-latest.noarch.rpm"
      run_ok "rpm -U --replacepkgs --quiet virtualmin-release-latest.noarch.rpm" "Installing virtualmin-release package"
      # XXX This weirdly only seems necessary on CentOS 8, but harmless
      # elsewhere.
      rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-webmin
      ;;
    debian | ubuntu)
      package_type="deb"
      # Pick the repo codename(s) and dependency list for this release.
      if [ "$os_type" = "ubuntu" ]; then
        deps="$ubudeps"
        case "$os_version" in
          16.04*)
            repos="virtualmin-xenial virtualmin-universal"
            ;;
          18.04*)
            repos="virtualmin-bionic virtualmin-universal"
            ;;
          20.04*)
            repos="virtualmin-focal virtualmin-universal"
            ;;
        esac
      else
        deps="$debdeps"
        case "$os_version" in
          8*)
            # Debian 8 needs https transport plus third-party PHP7 repo.
            run_ok "apt-get install --assume-yes apt-transport-https lsb-release ca-certificates" "Installing extra dependencies for Debian 8"
            download 'https://packages.sury.org/php/apt.gpg'
            run_ok "cp apt.gpg /etc/apt/trusted.gpg.d/php.gpg" "Adding GPG key for PHP7 packages"
            echo "deb http://packages.sury.org/php/ $(lsb_release -sc) main" > /etc/apt/sources.list.d/php.list
            repos="virtualmin-jessie virtualmin-universal"
            ;;
          9*)
            repos="virtualmin-stretch virtualmin-universal"
            ;;
          10*)
            repos="virtualmin-buster virtualmin-universal"
            ;;
        esac
      fi
      log_debug "apt-get repos: ${repos}"
      if [ -z "$repos" ]; then # Probably unstable with no version number
        log_fatal "No repos available for this OS. Are you running unstable/testing?"
        exit 1
      fi
      # Remove any existing repo config, in case it's a reinstall
      remove_virtualmin_release
      for repo in $repos; do
        printf "deb http://${LOGIN}software.virtualmin.com/vm/${vm_version}/${repopath}apt ${repo} main\\n" >> /etc/apt/sources.list.d/virtualmin.list
      done
      # Install our keys
      log_debug "Installing Webmin and Virtualmin package signing keys..."
      download "https://software.virtualmin.com/lib/RPM-GPG-KEY-virtualmin-6"
      download "https://software.virtualmin.com/lib/RPM-GPG-KEY-webmin"
      run_ok "apt-key add RPM-GPG-KEY-virtualmin-6" "Installing Virtualmin 6 key"
      run_ok "apt-key add RPM-GPG-KEY-webmin" "Installing Webmin key"
      # NOTE(review): apt-get update runs twice back to back; the second
      # call looks redundant — confirm before removing.
      run_ok "apt-get update" "Updating apt metadata"
      run_ok "apt-get update" "Downloading repository metadata"
      # Make sure universe repos are available
      # XXX Test to make sure this run_ok syntax works as expected (with single quotes inside double)
      if [ $os_type = "ubuntu" ]; then
        if [ -x "/bin/add-apt-repository" ] || [ -x "/usr/bin/add-apt-repository" ]; then
          run_ok "add-apt-repository universe" \
            "Enabling universe repositories, if not already available"
        else
          run_ok "sed -ie '/backports/b; s/#*[ ]*deb \\(.*\\) universe$/deb \\1 universe/' /etc/apt/sources.list" \
            "Enabling universe repositories, if not already available"
        fi
      fi
      # XXX Is this still enabled by default on Debian/Ubuntu systems?
      run_ok "sed -ie 's/^deb cdrom:/#deb cdrom:/' /etc/apt/sources.list" "Disabling cdrom: repositories"
      install="DEBIAN_FRONTEND='noninteractive' /usr/bin/apt-get --quiet --assume-yes --install-recommends -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confold' -o Dpkg::Pre-Install-Pkgs::='/usr/sbin/dpkg-preconfigure --apt' install"
      #export DEBIAN_FRONTEND=noninteractive
      install_updates="$install $deps"
      run_ok "apt-get clean" "Cleaning out old metadata"
      # Comment out any local "deb file:" repositories.
      sed -i "s/\\(deb[[:space:]]file.*\\)/#\\1/" /etc/apt/sources.list
      ;;
    *)
      log_error " Your OS is not currently supported by this installer."
      log_error " You can probably run Virtualmin Professional on your system, anyway,"
      log_error " but you'll have to install it using the manual installation process."
      exit 1
      ;;
  esac
  return 0
}
# With --setup-only, configure the repositories and stop. Exit status now
# reflects the setup result; the previous "exit $?" reported the status of
# the last log_* call instead.
if [ -n "$setup_only" ]; then
  if install_virtualmin_release; then
    log_success "Repository configuration successful. You can now install Virtualmin"
    log_success "components using your OS package manager."
    exit 0
  else
    log_error "Errors occurred during setup of Virtualmin software repositories. You may find more"
    log_error "information in ${RUN_LOG}."
    exit 1
  fi
fi
# Install Functions
# Install Webmin/Usermin, pre-dependencies, the chosen stack (LAMP or
# LEMP), and the Virtualmin packages via apt.
# Reads globals: $install, $debpredeps, $bundle, $deps, $debvmpackages,
# $RUN_LOG. Calls fatal() (which exits) if the Virtualmin install fails.
install_with_apt () {
  # Install Webmin first, because it needs to be already done for the deps
  run_ok "$install webmin" "Installing Webmin"
  run_ok "$install usermin" "Installing Usermin"
  for d in $debpredeps; do
    run_ok "$install $d" "Installing $d"
  done
  # "$bundle" quoted (was unquoted): an empty/unset value no longer makes
  # the test a syntax error.
  if [ "$bundle" = 'LEMP' ]; then
    # This is bloody awful. I can't believe how fragile dpkg is here.
    for s in fail2ban ipchains apache2; do
      systemctl stop "$s">>${RUN_LOG} 2>&1
      systemctl disable "$s">>${RUN_LOG} 2>&1
    done
    run_ok 'apt-get remove --assume-yes --purge apache2* php*' 'Removing apache2 (if installed) before LEMP installation.'
    run_ok 'apt-get autoremove --assume-yes' 'Removing unneeded packages that could conflict with LEMP stack.'
    run_ok "$install nginx-common" "Installing nginx-common"
    sed -i 's/listen \[::\]:80 default_server;/#listen \[::\]:80 default_server;/' /etc/nginx/sites-available/default
  else
    # This is bloody awful. I can't believe how fragile dpkg is here.
    for s in fail2ban nginx; do
      systemctl stop "$s">>${RUN_LOG} 2>&1
      systemctl disable "$s">>${RUN_LOG} 2>&1
    done
    run_ok 'apt-get remove --assume-yes --purge nginx* php*' 'Removing nginx (if installed) before LAMP installation.'
    run_ok 'apt-get autoremove --assume-yes' 'Removing unneeded packages that could conflict with LAMP stack.'
  fi
  for d in ${deps}; do
    run_ok "$install ${d}" "Installing $d"
  done
  run_ok "$install ${debvmpackages}" "Installing Virtualmin and plugins"
  # Capture run_ok's status immediately: the old code interpolated "$?"
  # inside the fatal message after further commands had run, so the
  # reported code was always 0.
  rc=$?
  if [ "$rc" -ne 0 ]; then
    log_warning "apt-get seems to have failed. Are you sure your OS and version is supported?"
    log_warning "https://www.virtualmin.com/os-support"
    fatal "Installation failed: $rc"
  fi
  # Make sure the time is set properly
  # Fixed redirect: was "2>/dev/null 2>&1", which silenced nothing useful
  # and left stdout attached to the terminal.
  /usr/sbin/ntpdate-debian 1>/dev/null 2>&1
  return 0
}
# Install dependencies and the Virtualmin package groups via yum/dnf,
# enabling the distro-specific repos (CodeReady, EPEL, SCL, PowerTools)
# first. Reads globals: $install, $install_group, $install_cmd,
# $install_config_manager, $rhgroup, $vmgroup. Calls fatal() on failure.
install_with_yum () {
  # RHEL 8 specific setup
  if [ "$os_major_version" -ge 8 ] && [ "$os_type" = "rhel" ]; then
    # Important Perl packages are now hidden in CodeReady repo
    run_ok "$install_config_manager --set-enabled codeready-builder-for-rhel-$os_major_version-x86_64-rpms" "Enabling Red Hat CodeReady package repository"
    run_ok "$install https://dl.fedoraproject.org/pub/epel/epel-release-latest-$os_major_version.noarch.rpm" "Installing EPEL $os_major_version release package"
  # RHEL 7 specific setup
  elif [ "$os_major_version" -eq 7 ] && [ "$os_type" = "rhel" ]; then
    run_ok "$install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm" "Installing EPEL 7 release package"
  # install extras from EPEL and SCL
  elif [ "$os_type" = "centos" ]; then
    install_epel_release
    if [ "$os_major_version" -lt 8 ]; then
      # No SCL on CentOS 8
      install_scl_php
    fi
  fi
  # Important Perl packages are now hidden in PowerTools repo
  if [ "$os_major_version" -eq 8 ] && [ "$os_type" = "centos" ]; then
    # Detect PowerTools repo name
    powertools="PowerTools"
    centos_stream=$(dnf repolist all | grep "^powertools")
    # -n (was "! -z"): same test, idiomatic spelling.
    if [ -n "$centos_stream" ]; then
      powertools="powertools"
    fi
    run_ok "$install_config_manager --set-enabled $powertools" "Enabling PowerTools package repository"
  fi
  # XXX This is so stupid. Why does yum insist on extra commands?
  if [ "$os_major_version" -eq 7 ]; then
    run_ok "yum --quiet groups mark install $rhgroup" "Marking $rhgroup for install"
    run_ok "yum --quiet groups mark install $vmgroup" "Marking $vmgroup for install"
  fi
  run_ok "$install_group $rhgroup" "Installing dependencies and system packages"
  run_ok "$install_group $vmgroup" "Installing Virtualmin and all related packages"
  # Capture the status immediately: the old "$?" in the message reported
  # the result of the [ ] test (always 0), not the failed install.
  rc=$?
  if [ "$rc" -ne 0 ]; then
    fatal "Installation failed: $rc"
  fi
  run_ok "$install_cmd clean all" "Cleaning up software repo metadata"
  return 0
}
# Dispatch to the package-manager-specific installer and propagate its
# exit status. The previous tail returned the status of a "[" test, which
# collapsed every failure code to 1 and discarded the real status.
install_virtualmin () {
  case "$package_type" in
    rpm)
      install_with_yum
      ;;
    deb)
      install_with_apt
      ;;
    *)
      install_with_tar
      ;;
  esac
  # Implicit return: the function's status is the selected installer's.
}
# Install the EPEL release package, unless the user opted out by setting
# DISABLE_EPEL to any non-empty value.
install_epel_release () {
  if [ -n "$DISABLE_EPEL" ]; then
    return 0
  fi
  run_ok "$install epel-release" "Installing EPEL release package"
}
# Enable Software Collections repositories and install the PHP7
# collection, unless the user opted out by setting DISABLE_SCL.
install_scl_php () {
  if [ -n "$DISABLE_SCL" ]; then
    return 0
  fi
  run_ok "$install yum-utils" "Installing yum-utils"
  run_ok "$install_config_manager --enable extras >/dev/null" "Enabling extras repository"
  run_ok "$install scl-utils" "Installing scl-utils"
  # SCL release package differs between CentOS and RHEL.
  case "${os_type}" in
    centos)
      run_ok "$install centos-release-scl" "Install Software Collections release package"
      ;;
    rhel)
      run_ok "$install_config_manager --enable rhel-server-rhscl-${os_major_version}-rpms" "Enabling Server Software Collection"
      ;;
  esac
  run_ok "$install_group $sclgroup" "Installing PHP7"
}
# virtualmin-release only exists for one platform...but it's as good a function
# name as any, I guess. Should just be "setup_repositories" or something.
errors=$((0))
install_virtualmin_release
echo
log_debug "Phase 2 of 3: Installation"
printf "${GREEN}${YELLOW}${CYAN}${NORMAL} Phase ${YELLOW}2${NORMAL} of ${GREEN}3${NORMAL}: Installation\\n"
# Errors are counted and summarized at the end rather than aborting here.
install_virtualmin
if [ "$?" != "0" ]; then
  errorlist="${errorlist} ${YELLOW}${NORMAL} Package installation returned an error.\\n"
  errors=$((errors + 1))
fi
# We want to make sure we're running our version of packages if we have
# our own version. There's no good way to do this, but we'll
run_ok "$install_updates" "Installing updates to Virtualmin-related packages"
if [ "$?" != "0" ]; then
  errorlist="${errorlist} ${YELLOW}${NORMAL} Installing updates returned an error.\\n"
  errors=$((errors + 1))
fi
# Reap any clingy processes (like spinner forks)
# get the parent pids (as those are the problem)
allpids="$(ps -o pid= --ppid $$) $allpids"
for pid in $allpids; do
  kill "$pid" 1>/dev/null 2>&1
done
# Final step is configuration. Wait here for a moment, hopefully letting any
# apt processes disappear before we start, as they're huge and memory is a
# problem. XXX This is hacky. I'm not sure what's really causing random fails.
sleep 1
echo
log_debug "Phase 3 of 3: Configuration"
printf "${GREEN}▣▣${YELLOW}${NORMAL} Phase ${YELLOW}3${NORMAL} of ${GREEN}3${NORMAL}: Configuration\\n"
# Minimal installs use the "Mini" variant of the chosen bundle.
if [ "$mode" = "minimal" ]; then
  bundle="Mini${bundle}"
fi
virtualmin-config-system --bundle "$bundle"
if [ "$?" != "0" ]; then
  errorlist="${errorlist} ${YELLOW}${NORMAL} Postinstall configuration returned an error.\\n"
  errors=$((errors + 1))
fi
# NOTE(review): virtualmin-config-system ran in the *foreground* above, so
# "$!" here is empty or stale (it holds the PID of the last background
# job). The later `kill "$config_system_pid"` is therefore a no-op —
# confirm whether the config run was meant to be backgrounded with "&".
config_system_pid=$!
# Functions that are used in the OS specific modifications section
# Permanently disable SELinux by rewriting SELINUX=... to "disabled" in
# every SELinux configuration file that exists on this system.
disable_selinux () {
  for se_conf in /etc/selinux/config /etc/sysconfig/selinux; do
    # perl -pi edits in place; skip paths that are absent.
    if [ -e "$se_conf" ]; then
      perl -pi -e 's/^SELINUX=.*/SELINUX=disabled/' "$se_conf"
    fi
  done
}
# Changes that are specific to OS
case "$os_type" in
  "fedora" | "centos" | "rhel" | "amazon" )
    disable_selinux
    ;;
esac
# kill the virtualmin config-system command, if it's still running
kill "$config_system_pid" 1>/dev/null 2>&1
# Make sure the cursor is back (if spinners misbehaved)
tput cnorm
printf "${GREEN}▣▣▣${NORMAL} Cleaning up\\n"
# Cleanup the tmp files
# Guard against an empty or "/" tempdir before deleting recursively.
if [ "$tempdir" != "" ] && [ "$tempdir" != "/" ]; then
  log_debug "Cleaning up temporary files in $tempdir."
  find "$tempdir" -delete
else
  log_error "Could not safely clean up temporary files because TMPDIR set to $tempdir."
fi
if [ -n "$QUOTA_FAILED" ]; then
  log_warning "Quotas were not configurable. A reboot may be required. Or, if this is"
  log_warning "a VM, configuration may be required at the host level."
fi
echo
# Final summary: success banner with access URLs, or the accumulated
# error list built up during phases 2 and 3.
if [ $errors -eq "0" ]; then
  hostname=$(hostname -f)
  detect_ip
  log_success "Installation Complete!"
  log_success "If there were no errors above, Virtualmin should be ready"
  log_success "to configure at https://${hostname}:10000 (or https://${address}:10000)."
  log_success "You'll receive a security warning in your browser on your first visit."
else
  log_warning "The following errors occurred during installation:"
  echo
  printf "${errorlist}"
fi
exit 0

View File

@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDIQ/JtvjjxDTZGh0pdwjuIuGoyuj4QS88+MXTdyFbGUTZ9Kx0OHg2S4e2ZnifPTFQ7OIcRi4D2BWA8yEfAbIk+x5LOOkpTx0P5q0s2L9+ZTepYK++K3AbTZ2mcLzxzn4HSzt6/6076qdUDP6DmHs+OsaO1alS3oz02ST3gHn8cbr+PSZXOBnYUX/1W940OObcmNsb6nxo17AjE2CUlvsqKXrzY6py9Y+AnFe2UwIGjvcvThg44vxBXhkipis7x6f2UisuXN/QmAgx03AXSca+PQ8mmr9peMAY02hLrsF8nnYUpjFlUGD2MDbDz4T5NpfJbQhtIXm3JBh64VWSPmXwYHRMR7qgdsai/+nN0VhgX/mnAoOwXYdQIU4rmDEDK5CBqXKmOlNmfjv5d9sdZz0LblAGBWtwsvswrK8C7HwqvSDLQjoe04kVM2DpHqDjWS7tlF0HsfEFbhEqzD4WHbcmX1VkAq6oYmhqIYpAys+JjQKvosiCPHj4M49e1e5OAbFs= vegetalkiller@pop-os

View File

@ -6,10 +6,18 @@
name: lxd
classic: yes
- register: tmp_file_stat
stat: path="{{lxd_init_template_processed_path}}"
- ansible.builtin.meta: end_batch
when: tmp_file_stat.stat.exists
- register: SSH_KEY
become: true
user:
name: "lxd"
generate_ssh_key: yes
ssh_key_type: rsa
ssh_key_bits: 4096
ssh_key_file: "{{ssh_key_file}}"
force: no
- ansible.builtin.tempfile: state=file
register: temp_lxd_init
- ansible.builtin.debug:
var: hostvars[inventory_hostname]['ansible_default_ipv4']['address']
@ -21,20 +29,19 @@
# content: "{{hostvars[inventory_hostname]}}"
# dest: ./test.json
- delegate_to: localhost
delegate_facts: true
ansible.builtin.template:
src: "{{lxd_init_template_config_path}}"
dest: "{{lxd_init_template_processed_path}}"
- ansible.builtin.template: src="{{lxd_init_template_config_path}}" dest="{{temp_lxd_init.path}}"
delegate_to: localhost
## Run "lxd init" with the rendered preseed parameters piped on stdin.
- name: "Preseeding LXD Init with template: {{lxd_init_template_config_path}}"
  become: true
  # NOTE(review): chmod 775 on the *private* key is very permissive — ssh
  # clients normally require 600 on private keys; confirm this is intended.
  ansible.builtin.shell: |
    chmod 775 {{ssh_key_file}} {{ssh_key_file}}.pub
    cat <<EOF | lxd init --preseed
    {{ lookup('file', temp_lxd_init.path) }}
    EOF
  register: lxd_init_output
- ansible.builtin.debug:
var: lxd_init_output
verbosity: 1

View File

@ -1,16 +1,18 @@
# --
config:
images.auto_update_interval: "15"
user.user-data: |
package_upgrade: true
ssh_authorized_keys: {{ SSH_KEY.ssh_public_key }}
# --
networks:
- name: {{network_default_name}}
project: {{lxd_project_name}}
type: bridge
#config:
# IPv4
# ipv4.nat: "true"
# ipv4.address: {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}
# ipv4.address: hostvars[inventory_hostname]['ansible_default_ipv4']['address']
# IPv6
# ipv6.nat: "true"

View File

@ -1,8 +0,0 @@
---
# defaults file for roles/init_lxd
profile: default
ssh_user: renzo
ssh_key_path: ~/.ssh/lxd_ssh
ssh_key_name: lxd_ssh
ssh_key_passphrase: set_a_password!

View File

@ -1,48 +0,0 @@
---
# profile: default
# lxc
# ssh_user: renzo
# ssh_key_path: ~/.ssh/lxd_ssh
# ssh_key_name: lxd_ssh
# ssh_key_passphrase: set_a_password!
# tasks file for roles/init_lxd
- name: generate SSH key "{{ssh_key_name}}"
register: SSH_KEY
user:
name: "{{ssh_user}}"
generate_ssh_key: yes
ssh_key_type: rsa
ssh_key_bits: 4096
ssh_key_file: "{{ssh_key_path}}"
ssh_key_passphrase: "{{ssh_passphrase}}"
force: no
# Default profile
- shell: "lxc profile show {{profile}}"
become: true
register: profile
# Process of custom profile with my new SSH Key
- set_fact:
lookup_custom_conf: |
config:
user.user-data: |
ssh_authorized_keys:
- {{SSH_KEY.ssh_public_key}}
# Apply merge in LXD
- ansible.builtin.tempfile: {}
register: temp_lxd_config
- set_fact: custom_config="{{ lookup_custom_conf | from_yaml }}"
- set_fact: profile_yaml_path="{{temp_lxd_config.path}}"
- copy:
dest: "{{profile_yaml_path}}"
content: |
{{ profile.stdout | from_yaml | combine(custom_config) | to_yaml }}
- shell: "lxc profile edit {{profile}} < {{profile_yaml_path}}"
become: true
- file: path="{{profile_yaml_path}}" state=absent

2
links Normal file
View File

@ -0,0 +1,2 @@
https://github.com/max1220/lxc-scripts
https://github.com/anti1346/openldap

View File

@ -0,0 +1,150 @@
# Ansible Role: LXD
Installs the latest version of `lxd`.
## Requirements
- Linux: distributions that support `snap`
- Tested on Debian, Ubuntu, Pop!_OS
## Role Variables
#### Settable Variables
```yaml
lxd:
host: # targets a LXD host installation
preseed:
... # the elements found in an LXD preseed configuration
... # see: https://linuxcontainers.org/lxd/docs/master/preseed/
extra_profiles: # list of profile objects
... # see: https://docs.ansible.com/ansible/latest/collections/community/general/lxd_profile_module.html#parameters
instance: # targets a LXD container/VM installation
create_mode: # mandatory, one of ['skip','build']
name: # the name of the lxc instance
dns_address: # DNS or IP address of the instance
type: # see: https://docs.ansible.com/ansible/latest/collections/community/general/lxd_container_module.html#parameter-type
server: # URL of LXC image host, defaults to https://images.linuxcontainers.org
alias: # as listed when running command `lxc image list images:`
protocol: # defaults to 'simplestreams', one of ['simplestreams','lxd']
devices:
... # see: https://linuxcontainers.org/lxd/docs/master/instances/#devices-configuration
... # see: https://docs.ansible.com/ansible/latest/collections/community/general/lxd_container_module.html#parameter-devices
profiles: # list of the names of profile names declared on the host
config:
... # see: https://linuxcontainers.org/lxd/docs/master/instances/#key-value-configuration
... # see: https://docs.ansible.com/ansible/latest/collections/community/general/lxd_container_module.html#parameter-config
  # note that the following fields are mapped onto the above `config` after being converted from a human-intuitive description. Any preexisting `config` value will be overridden.
cpu_mem:
cpus: # list of logical core ids
# becomes: `config.limits.cpu`
# ids increment by logical cores, grouped by physical core
# ie. [P0L0,P0L1,P1L0,P1L1] = [0,1,2,3]
memory: # human friendly amount, eg 4GiB
# becomes: `config.limits.memory`
hugepages: # boolean, source memory from hugepages reservation
# becomes: `config.limits.memory.hugepages`
priority: # 1-10, shared CPU scheduling priority
# becomes: `config.limits.cpu.priority`
host_idmap: # names of user and group IDs to map from host to guest
# becomes: `config.raw.idmap`
both: # only for when user and group ID are same value
users: # for just user names
groups: # for just group names
```
Note: for any pre-existing `lxd.host.preseed.storage_pools` declared, having `driver == 'zfs'`, the entire dataset will be destroyed and recreated in the parent zpool.
Note: for any LXC instance, when `create_mode == 'build'`, any pre-existing instance (vm or container) will be deleted to make way for the new one.
## Dependencies
Ansible collection `community.general` >= 4.1.0
- This role requires `community.general.lxd_container` Ansible module, that is version [4.1.0](https://github.com/ansible-collections/community.general/blob/4.1.0/plugins/modules/cloud/lxd/lxd_container.py#L109) or newer. This is because v4.1.0 enables the `type` option, which allows the module to specify a virtual machine under QEMU.
- Note: The version of `community.general` that comes with Ansible CORE may not be recent, and must be [upgraded](https://github.com/ansible-collections/community.general#using-this-collection). Run the following command to upgrade:
```
ansible-galaxy collection install community.general --upgrade
```
- See relevant documentation using:
```
ansible-doc community.general.lxd_container
```
## Example Playbook
Host example
```yaml
- hosts: servers
roles:
- role: lxd
vars:
lxd:
host:
preseed:
networks: []
storage_pools:
- name: default
description: "default storage pool (zfs)"
driver: zfs
config:
source: tank/lxc
profiles:
- name: default
description: "default profile"
config: {}
devices:
root:
path: /
pool: default
type: disk
eth0:
name: eth0
nictype: bridged
parent: br0
type: nic
extra_profiles:
- name: docker_support
description: basic support for docker
config:
security.nesting: "true"
```
Instance example
```yaml
- hosts: lxd_containers
roles:
- role: lxd
vars:
lxd:
instance:
create_mode: build
name: mylxc
dns_address: mylxc.home.lan
alias: ubuntu/focal/cloud/amd64
devices:
eth0:
name: eth0
nictype: bridged
parent: br0
type: nic
hwaddr: F1-09-CE-07-C0-70
profiles:
- default
- docker_support
config:
boot.autostart: "true"
host_idmap:
both:
- root
```
## License
MIT
## TODO:
* Enhance support for `qemu` virtual machines
* Virtual machine feature parity with `libvirt` role
* Run, backup, snapshot, restore to/from ZFS
* --> update ZFS trim & FSTrim roles

View File

@ -0,0 +1,106 @@
---
## ------------------------------------------------ ##
## Installs LXD (includes LXC), and default profile ##
## ------------------------------------------------ ##
#- name: install snapd
# ansible.builtin.package:
# name: snapd
#- name: install snap core
# community.general.snap:
# name: core
# XXX: simplest way to void a prior LXD installation's configuration
#- name: uninstall prior LXD
# ansible.builtin.command: snap remove lxd --purge
# changed_when: true
- name: clear preseed storage_pools
loop: "{{ lxd.host.preseed.storage_pools | default([]) }}"
loop_control:
loop_var: lxd_storage_pool
ansible.builtin.include_tasks: reset_storage.yml
#- name: install LXD
# become: true
# community.general.snap:
# name: lxd
# Probe for lxd in PATH; failure only selects the wrapper-install path below.
- name: check LXD executable accessible by Ansible
  ansible.builtin.shell: which lxd
  ignore_errors: true
  # read-only probe — never report "changed" (matches the lxc check below)
  changed_when: false
  register: which_lxd
- name: install LXD wrapper
when: which_lxd is failed
vars:
lxd_path: /snap/bin/lxd
block:
- name: verify snap lxd installation exists
ansible.builtin.stat:
path: "{{ lxd_path }}"
get_checksum: false
register: lxd_stat
failed_when: not lxd_stat.stat.exists
- name: create lxd wrapper if lxd is not in PATH
ansible.builtin.template:
src: wrapper.sh.j2
dest: /usr/bin/lxd
owner: root
group: root
mode: u=rx,g=rx,o=rx
vars:
snap_app: lxd
snap_app_path: "{{ lxd_path }}"
- name: check LXC executable accessible by Ansible
ansible.builtin.shell: which lxc
ignore_errors: true
changed_when: false
register: which_lxc
# Mirrors the lxd wrapper install above, but for the lxc client binary.
# Renamed from the duplicated "install LXD wrapper" — this task handles lxc.
- name: install LXC wrapper
  when: which_lxc is failed
  vars:
    lxc_path: /snap/bin/lxc
  block:
    - name: detect if lxc is in PATH
      ansible.builtin.stat:
        path: "{{ lxc_path }}"
        get_checksum: false
      register: lxc_stat
      failed_when: not lxc_stat.stat.exists
    - name: create lxc wrapper if lxc is not in PATH
      ansible.builtin.template:
        src: wrapper.sh.j2
        dest: /usr/bin/lxc
        owner: root
        group: root
        mode: u=rx,g=rx,o=rx
      vars:
        snap_app: lxc
        snap_app_path: "{{ lxc_path }}"
- name: init LXD without preseed
  when: not lxd.host.preseed is defined
  ansible.builtin.command: lxd init --auto
- name: init LXD from preseed
  when: lxd.host.preseed is defined
  # The command module performs no shell redirection, so the stray "< "
  # was passed to lxd as literal arguments; the preseed goes in via stdin.
  ansible.builtin.command: lxd init --preseed
  args:
    # serialize the preseed dict to real YAML — a bare {{ }} would render
    # a Python dict repr instead
    stdin: "{{ lxd.host.preseed | to_nice_yaml }}"
- name: apply extra profiles
loop: "{{ lxd.host.extra_profiles | default([]) }}"
loop_control:
loop_var: lxd_profile
community.general.lxd_profile:
name: "{{ lxd_profile.name | mandatory }}"
description: "{{ lxd_profile.description | default(omit) }}"
state: "{{ lxd_profile.state | default(omit) }}"
config: "{{ lxd_profile.config | default(omit) }}"
devices: "{{ lxd_profile.devices | default(omit) }}"

View File

@ -0,0 +1,58 @@
---
## -------------------------- ##
## Build or Restore Instance ##
## -------------------------- ##
# Guard: `create_mode` must be a supported value before any destructive
# work happens below ('skip' leaves an existing instance alone,
# 'build' destroys and recreates it).
- name: check invalid create mode
  when: lxd.instance.create_mode | mandatory not in ['skip', 'build']
  ansible.builtin.fail:
    msg: "invalid lxd.instance.create_mode: {{ lxd.instance.create_mode }}"
# Destroy-then-create flow; only runs when create_mode == 'build'.
- name: handle create instance
  when: lxd.instance.create_mode == 'build'
  block:
    # Remove any leftover instance of either kind so the build starts
    # clean; `force_stop` stops a running instance before deletion.
    - name: clear pre-existing instance
      with_items:
        - container
        - virtual-machine
      community.general.lxd_container:
        name: "{{ lxd.instance.name | mandatory }}"
        type: "{{ item }}"
        state: absent
        force_stop: true
        # XXX: ansible will always indicate a change when volatile options are present in the LXD config
        ignore_volatile_options: false
    # Builds the `lxd_custom_config` fact (cpu/mem limits, idmaps, ...).
    - name: define custom instance configuration 'lxd_custom_config'
      ansible.builtin.include_tasks: instance_config.yml
    # NOTE(review): computed values win over user-supplied
    # `lxd.instance.config` keys on conflict (combine's right side wins)
    # -- confirm that precedence is intended.
    - name: merge and override instance base config with custom config
      when: lxd.instance.config is defined
      ansible.builtin.set_fact:
        lxd_custom_config: "{{ ( lxd.instance.config ) | combine(lxd_custom_config | default({}), recursive=True) }}"
    - name: create & (re)start instance
      community.general.lxd_container:
        name: "{{ lxd.instance.name }}"
        type: "{{ lxd.instance.type | default(omit) }}"
        state: restarted
        source:
          server: "{{ lxd.instance.server | default('https://images.linuxcontainers.org') }}"
          alias: "{{ lxd.instance.alias | mandatory }}"
          # 'simplestreams' seems more reliable than the default protocol 'lxd'
          protocol: "{{ lxd.instance.protocol | default('simplestreams') }}"
          type: image
          mode: pull
        # XXX: ansible will always indicate a change when volatile options are present in the LXD config
        ignore_volatile_options: false
        config: "{{ lxd_custom_config | default(omit) }}"
        devices: "{{ lxd.instance.devices | default(omit) }}"
        profiles: "{{ lxd.instance.profiles | default(omit) }}"
        # block until the instance holds an IPv4 lease (up to 10 minutes)
        wait_for_ipv4_addresses: true
        timeout: 600
# Reachability gate: later plays connect over SSH, so wait for port 22.
# Assumes `dns_address` resolves from the controller -- TODO confirm.
- name: "wait for {{ lxd.instance.name }} to respond with SSHd"
  ansible.builtin.wait_for:
    host: "{{ lxd.instance.dns_address | mandatory }}"
    port: 22

View File

@ -0,0 +1,128 @@
---
## ----------------------------- ##
## Assemble custom configuration ##
## ----------------------------- ##
# this should build `lxd_custom_config` for use externally
- name: process cpu + memory limit overrides
  when: lxd.instance.cpu_mem is defined
  block:
    - name: apply 'config.limits.cpu' override
      when: lxd.instance.cpu_mem.cpus is defined
      block:
        # raw cpuinfo feeds the `asNative` filter below (project-local
        # filter; presumably maps the requested cpus to native core ids
        # -- TODO confirm against its implementation)
        - name: get /proc/cpuinfo
          ansible.builtin.command: cat /proc/cpuinfo
          changed_when: false
          register: proc_cpuinfo
        # first write of the fact: a sorted, de-duplicated list of ids
        - name: transform 'cpus' into 'config.limits.cpu'
          ansible.builtin.set_fact:
            lxd_custom_config:
              limits.cpu: "{{ lxd.instance.cpu_mem.cpus | asNative(proc_cpuinfo.stdout_lines) | sort | unique }}"
        # normalize to LXD's string syntax: comma-separated ids ...
        - name: handle multiple cores
          when: lxd_custom_config['limits.cpu'] | length > 1
          ansible.builtin.set_fact:
            lxd_custom_config:
              limits.cpu: "{{ lxd_custom_config['limits.cpu'] | join(',') }}"
        # ... or the single-core pin written as the range "n-n"
        - name: handle single core
          when: lxd_custom_config['limits.cpu'] | length == 1
          ansible.builtin.set_fact:
            lxd_custom_config:
              limits.cpu: "{{ lxd_custom_config['limits.cpu'][0] }}-{{ lxd_custom_config['limits.cpu'][0] }}"
    # each override below merges exactly one key into the accumulated fact
    - name: apply 'config.limits.memory' override
      when: lxd.instance.cpu_mem.memory is defined
      vars:
        tmp_lxd_config:
          limits.memory: "{{ lxd.instance.cpu_mem.memory }}"
      ansible.builtin.set_fact:
        lxd_custom_config: "{{ ( lxd_custom_config | default({}) ) | combine(tmp_lxd_config, recursive=True) }}"
    - name: apply 'config.limits.memory.hugepages' override
      when: lxd.instance.cpu_mem.hugepages is defined
      vars:
        tmp_lxd_config:
          limits.memory.hugepages: "{{ lxd.instance.cpu_mem.hugepages | string }}"
      ansible.builtin.set_fact:
        lxd_custom_config: "{{ ( lxd_custom_config | default({}) ) | combine(tmp_lxd_config, recursive=True) }}"
    - name: apply 'config.limits.cpu.priority'
      when: lxd.instance.cpu_mem.cpu_priority is defined
      vars:
        tmp_lxd_config:
          limits.cpu.priority: "{{ lxd.instance.cpu_mem.cpu_priority }}"
      ansible.builtin.set_fact:
        lxd_custom_config: "{{ ( lxd_custom_config | default({}) ) | combine(tmp_lxd_config, recursive=True) }}"
# Translate `lxd.instance.host_idmap` (names) into LXD's raw.idmap text.
- name: process user + group + both idmap
  when: lxd.instance.host_idmap is defined
  block:
    # populate ansible_facts.getent_passwd / getent_group for name->id lookups
    - name: load UID map
      ansible.builtin.getent:
        database: passwd
    - name: load GID map
      ansible.builtin.getent:
        database: group
    # the raw.idmap needs to have its ids appear in ascending order
    - name: handle combined idmaps
      when: lxd.instance.host_idmap.both is defined
      block:
        # NOTE(review): 'both' entries resolve through passwd only, so a
        # user's uid is reused as the gid -- assumes uid == gid for these
        # accounts; confirm.
        - name: get UID from username
          loop: "{{ lxd.instance.host_idmap.both }}"
          loop_control:
            loop_var: lxd_host_id
          ansible.builtin.set_fact:
            lxd_both_idmap: "{{ lxd_both_idmap | default([]) + [ ansible_facts.getent_passwd[lxd_host_id][1] | int ] }}"
        # one "both <id> <id>" line per id, ascending
        - name: build raw "both" mapping
          loop: "{{ lxd_both_idmap | sort }}"
          loop_control:
            loop_var: lxd_host_id
          ansible.builtin.set_fact:
            lxd_raw_idmap: "{{ lxd_raw_idmap | default([]) + [ 'both ' + lxd_host_id | string + ' ' + lxd_host_id | string ] }}"
    - name: handle user idmaps
      when: lxd.instance.host_idmap.users is defined
      block:
        # resolve each username to its uid (getent_passwd index 1)
        - name: uid mapping
          loop: "{{ lxd.instance.host_idmap.users }}"
          loop_control:
            loop_var: lxd_host_id
          ansible.builtin.set_fact:
            lxd_user_idmap: "{{ lxd_user_idmap | default([]) + [ ansible_facts.getent_passwd[lxd_host_id][1] | int ] }}"
        - name: build raw "uid" mapping
          loop: "{{ lxd_user_idmap | sort }}"
          loop_control:
            loop_var: lxd_host_id
          ansible.builtin.set_fact:
            lxd_raw_idmap: "{{ lxd_raw_idmap | default([]) + [ 'uid ' + lxd_host_id | string + ' ' + lxd_host_id | string ] }}"
    - name: handle group idmaps
      when: lxd.instance.host_idmap.groups is defined
      block:
        # resolve each group name to its gid (getent_group index 1)
        - name: gid mapping
          loop: "{{ lxd.instance.host_idmap.groups }}"
          loop_control:
            loop_var: lxd_host_id
          ansible.builtin.set_fact:
            lxd_group_idmap: "{{ lxd_group_idmap | default([]) + [ ansible_facts.getent_group[lxd_host_id][1] | int ] }}"
        - name: build raw "gid" mapping
          loop: "{{ lxd_group_idmap | sort }}"
          loop_control:
            loop_var: lxd_host_id
          ansible.builtin.set_fact:
            lxd_raw_idmap: "{{ lxd_raw_idmap | default([]) + [ 'gid ' + lxd_host_id | string + ' ' + lxd_host_id | string ] }}"
    # join the accumulated lines into LXD's newline-separated raw.idmap
    - name: transform 'host_idmap' into 'config.raw.idmap'
      vars:
        tmp_lxd_config:
          raw.idmap: "{{ lxd_raw_idmap | join('\n') }}"
      ansible.builtin.set_fact:
        lxd_custom_config: "{{ ( lxd_custom_config | default({}) ) | combine(tmp_lxd_config, recursive=True) }}"

View File

@ -0,0 +1,23 @@
---
## --------------------------------------------------------------- ##
## Automates the installation and setup of LXD hosts and instances ##
## --------------------------------------------------------------- ##
# XXX: https://github.com/lxc/lxd/tree/master/doc
# Role entry point: `lxd.host` drives host provisioning, `lxd.instance`
# drives instance provisioning; either key may be omitted independently.
- name: install LXD to the host
  when: lxd.host is defined
  ansible.builtin.include_tasks: host.yml
# XXX: externally add user to 'lxd' group
# TODO: where is the default backing storage located? ZFS dataset?
# TODO: permissions supporting running docker in LXC
- name: setup target as lxd instance
  when: lxd.instance is defined
  ansible.builtin.include_tasks: instance.yml
# TODO: https://lxd.readthedocs.io/en/latest/backup/#container-backup-and-restore
# XXX: adding the user to the user group 'lxd' enables root-like access to LXD
# this is similar in behavior with the `docker` group

View File

@ -0,0 +1,19 @@
---
## ---------------------------------------------------------------------------- ##
## Delete all storage pools that exist outside of the normal LXD uninstallation ##
## ---------------------------------------------------------------------------- ##
# Only ZFS-backed pools need out-of-band cleanup; other drivers are
# removed by the normal LXD teardown.
- name: clear preseed zfs datasets detected
  when: lxd_storage_pool.driver == 'zfs'
  block:
    # BUGFIX: label interpolated `.driver` (always "zfs"); show the
    # dataset actually being destroyed instead.
    - name: "delete ZFS dataset: {{ lxd_storage_pool.config.source }}"
      ansible.builtin.include_role:
        name: zfs
        tasks_from: destroy_dataset
      vars:
        zfs_target_dataset: "{{ lxd_storage_pool.config.source }}"
        # LXD's standard child datasets, consumed by the zfs role's
        # destroy_dataset task -- presumably a safety allowlist; confirm
        zfs_expected_children:
          - images
          - containers
          - virtual-machines
          - custom

View File

@ -0,0 +1,15 @@
#!/bin/sh
# {{ ansible_managed }}
# this file was directly copied from an Ubuntu/PopOS installation process
# Forwarding stub: hand the invocation straight to the snap binary when
# it exists, otherwise explain how to install it and fail.
if [ -x {{ snap_app_path }} ]; then
    exec {{ snap_app_path }} "$@"
fi
echo "" >&2
echo "Command '$0' requires the {{ snap_app }} snap to be installed." >&2
echo "Please install it with:" >&2
echo "" >&2
echo "snap install {{ snap_app }}" >&2
echo "" >&2
exit 1

View File

@ -0,0 +1,54 @@
---
## ---------------------------------- ##
## Adds a user to the list of sudoers ##
## ---------------------------------- ##
# TODO: add to "admin" groups when needed on OSx/BSD
# https://stackoverflow.com/questions/33359404/ansible-best-practice-for-maintaining-list-of-sudoers
# Ensure sudo itself is present before touching sudoers configuration.
- name: install sudo package
  ansible.builtin.package:
    name: sudo
    state: present
# determine default group with sudo privileges
- name: get all groups
  ansible.builtin.shell: set -o pipefail && getent group | awk -F":" '{print $1}'
  args:
    # the shell module expects an absolute path; a bare "bash" is not
    # guaranteed to resolve (and `pipefail` needs bash, not sh)
    executable: /bin/bash
  changed_when: false
  register: getent_groups
- name: find default sudoers group
  # BUGFIX: the conditional referenced `item`, which is undefined once
  # `loop_control.loop_var` renames it -- use `sudoer_group`.
  when: sudoer_group in getent_groups.stdout_lines
  # XXX: in ascending order of precedence
  # see: https://wiki.archlinux.org/index.php/Sudo#Example_entries
  loop:
    - sudo
    - wheel
  loop_control:
    loop_var: sudoer_group
  ansible.builtin.set_fact:
    default_sudoers_group: "{{ sudoer_group }}"
# Sweep away any previously-deployed custom_* drop-ins so the role
# fully owns /etc/sudoers.d/custom_* state.
- name: find preexisting custom sudoers files
  ansible.builtin.find:
    paths: "/etc/sudoers.d"
    patterns: "custom_*"
  register: find_custom_sudoers
- name: reset custom sudoers files
  # explicit boolean instead of a bare list (ansible-lint: conditionals
  # should not be bare variables); the loop no-ops on an empty list anyway
  when: find_custom_sudoers.files | length > 0
  loop: "{{ find_custom_sudoers.files }}"
  loop_control:
    loop_var: custom_sudoer_file
  ansible.builtin.file:
    path: "{{ custom_sudoer_file.path }}"
    state: absent
# Grant passwordless sudo on ALL commands to the detected admin group.
# NOTE(review): `default_sudoers_group` is only set when 'sudo' or
# 'wheel' exists in getent; this task errors if neither does -- confirm
# that hard failure is the intended behavior.
- name: add custom sudoers file
  community.general.sudoers:
    name: custom_nologin
    state: present
    group: "{{ default_sudoers_group }}"
    commands: ALL
    nopassword: true

View File

@ -0,0 +1,25 @@
# ansible-playbook local_lxd/run_terraform.yml -v --become-password-file local_lxd/.sudo_pass
# Local smoke-test playbook: apply the terraform project (needs sudo for
# the LXD socket), then add the resulting instance to the inventory.
- hosts: localhost
  gather_facts: false
  become: true
  roles:
    #- role: lxd # ansible-galaxy collection install rjlasko.ansible
    #  vars:
    #    lxd:
    #      host: {}
  tasks:
    # native YAML args instead of inline key=value strings
    - name: apply the terraform project
      community.general.terraform:
        project_path: "terraform"
        force_init: true
      register: terraform
    - name: Add LXD instance to group 'lxd_instances'
      ansible.builtin.add_host:
        # BUGFIX: each registered terraform output is a mapping
        # ({value, type, ...}); the host name needs `.value`.
        name: "{{ terraform.outputs.container_name.value }}"
        groups: lxd_instances
        ansible_lxd_remote: "localhost"
        # NOTE(review): as a plain hostvar `plugin` is inert -- reaching
        # the container over the LXD API normally requires
        # `ansible_connection: community.general.lxd`; confirm how these
        # hosts are meant to be contacted.
        plugin: community.general.lxd
# Sanity check: the new instance must answer a ping.
- hosts: lxd_instances
  gather_facts: false
  tasks:
    - ansible.builtin.ping: {}
    #- ansible.builtin.include_role: name=sudo

View File

@ -0,0 +1,62 @@
#!/bin/bash
# Install the base package set for the OpenLDAP container.
# All apt operations must run unattended.
aptGet() {
    export DEBIAN_FRONTEND=noninteractive
    export APT_LISTCHANGES_FRONTEND=none
    apt-get -fuy autoremove
    apt-get clean
    apt-get update
    # BUGFIX: upgrade/dist-upgrade prompted for confirmation and hung
    # unattended runs; DEBIAN_FRONTEND alone does not imply --yes.
    apt-get upgrade --yes
    apt-get dist-upgrade --yes
    apt-get install \
        openssh-server \
        cron \
        slapd \
        ldap-utils \
        wget \
        ufw \
        --yes
    # LDAP Account Manager comes from the upstream .deb over plain HTTP
    # -- NOTE(review): consider HTTPS plus a checksum check.
    wget -qO ~/AccountManager.deb http://prdownloads.sourceforge.net/lam/ldap-account-manager_7.7-1_all.deb
    apt install -f ~/AccountManager.deb --yes
}
# Allow SSH through ufw.
# NOTE(review): this script never runs `ufw enable`, so the rule only
# takes effect once the firewall is enabled elsewhere -- confirm.
sshConfig() {
    ufw allow ssh
}
# Generate an LDAP TLS key and create the 'letsencrypt' service user.
# Currently disabled (call site below is commented out).
# NOTE(review): `openssl genrsa -aes128` prompts interactively for a
# passphrase, which would hang an unattended run -- resolve before enabling.
# NOTE(review): hardcoded password committed to VCS, and UID "420"
# collides with adminUser() below -- only one of the two accounts can
# ever be created; also UHOME points at /home/admin, not this user.
SSL() {
    mkdir ~/LDAP-SSL
    cd ~/LDAP-SSL || exit
    openssl genrsa -aes128 -out LDAP.key 4096
    ## Admin user
    U="letsencrypt"
    P="letsencryptPazz.01!!"
    G="sudo"
    UUID="420"
    UHOME="/home/admin"
    adduser \
        --disabled-login \
        --home "$UHOME" \
        --ingroup "$G" \
        --uid "$UUID" \
        --quiet "$U"
    echo "$U:$P" | chpasswd
}
# Create a local 'admin' account (login disabled, password set through
# chpasswd). Currently disabled (call site below is commented out).
# NOTE(review): hardcoded credentials admin/admin -- inject a secret via
# environment or vault before ever enabling this.
adminUser() {
    U="admin"
    P="admin"
    G="sudo"
    UUID="420"
    UHOME="/home/admin"
    adduser \
        --disabled-login \
        --home "$UHOME" \
        --ingroup "$G" \
        --uid "$UUID" \
        --quiet "$U"
    echo "$U:$P" | chpasswd
}
# Entry point: only package installation and the ufw SSH rule run by
# default; user creation and TLS key generation stay opt-in.
aptGet
#adminUser
sshConfig
#SSL
exit 0

View File

@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDIQ/JtvjjxDTZGh0pdwjuIuGoyuj4QS88+MXTdyFbGUTZ9Kx0OHg2S4e2ZnifPTFQ7OIcRi4D2BWA8yEfAbIk+x5LOOkpTx0P5q0s2L9+ZTepYK++K3AbTZ2mcLzxzn4HSzt6/6076qdUDP6DmHs+OsaO1alS3oz02ST3gHn8cbr+PSZXOBnYUX/1W940OObcmNsb6nxo17AjE2CUlvsqKXrzY6py9Y+AnFe2UwIGjvcvThg44vxBXhkipis7x6f2UisuXN/QmAgx03AXSca+PQ8mmr9peMAY02hLrsF8nnYUpjFlUGD2MDbDz4T5NpfJbQhtIXm3JBh64VWSPmXwYHRMR7qgdsai/+nN0VhgX/mnAoOwXYdQIU4rmDEDK5CBqXKmOlNmfjv5d9sdZz0LblAGBWtwsvswrK8C7HwqvSDLQjoe04kVM2DpHqDjWS7tlF0HsfEFbhEqzD4WHbcmX1VkAq6oYmhqIYpAys+JjQKvosiCPHj4M49e1e5OAbFs= vegetalkiller@pop-os

View File

@ -0,0 +1,80 @@
## sudo terraform destroy --auto-approve && sudo terraform apply --auto-approve
locals {
  container = "OpenLDAP"
  image     = "debian/10/amd64"
  # helper prefixes for provisioner commands executed on the LXD host
  lxc_exec   = "lxc exec ${local.container} --"
  lxc_device = "lxc config device add ${local.container}"
  # post-create steps: proxy devices mapping host ports 80/81/82 to the
  # container's 80/443/22, then the in-container setup script
  local_exec = [
    "${local.lxc_device} http proxy listen=tcp:0.0.0.0:80 connect=tcp:127.0.0.1:80",
    "${local.lxc_device} httpS proxy listen=tcp:0.0.0.0:81 connect=tcp:127.0.0.1:443",
    "${local.lxc_device} SSH proxy listen=tcp:0.0.0.0:82 connect=tcp:127.0.0.1:22",
    "${local.lxc_exec} bash /01-Setup.bash",
  ]
}
# The OpenLDAP container itself, built from the cached image and the
# profile defined below.
resource "lxd_container" "c1" {
  name      = local.container
  image     = lxd_cached_image.image.fingerprint
  ephemeral = false
  # modern reference syntax; the quoted "${...}" interpolation-only
  # form is deprecated since Terraform 0.12 (rest of file already uses
  # 0.12+ expressions)
  profiles = [lxd_profile.p1.name]
  # runs on the Terraform host after creation: adds the proxy devices
  # and executes the setup script inside the container
  provisioner "local-exec" {
    command = join(" && ", local.local_exec)
  }
  file {
    source      = "../scripts/01-Setup.bash"
    target_file = "/01-Setup.bash"
  }
  file {
    content            = file("../scripts/id_rsa.pub")
    target_file        = "/root/.ssh/authorized_keys"
    create_directories = true
  }
}
# Per-container profile: NIC, root disk, and CPU limit.
resource "lxd_profile" "p1" {
  name = "${local.container}-profile"
  # macvlan NIC on the physical uplink: the container gets its own
  # MAC/DHCP lease on the LAN. NOTE(review): macvlan blocks direct
  # host <-> container traffic -- confirm that is acceptable here.
  device {
    name = "eth0"
    type = "nic"
    properties = {
      nictype = "macvlan"
      //nictype = "bridged"
      parent = "enp4s0"
    }
  }
  # root disk on the "default" storage pool
  device {
    type = "disk"
    name = "root"
    properties = {
      pool = "default"
      path = "/"
    }
  }
  //device {
  //  type = "disk"
  //  name = "shared"
  //  properties = {
  //    source = "/mnt/containerShared"
  //    path = "/mnt/containerShared"
  //  }
  //}
  config = {
    "limits.cpu" = 3
  }
}
# Pull the base image once from the public "images" remote and cache it
# locally; the container references it by fingerprint.
resource "lxd_cached_image" "image" {
  source_remote = "images"
  source_image  = local.image
}
# Consumed by the Ansible playbook via the registered terraform result
# (`terraform.outputs.container_name`).
output "container_name" {
  value = lxd_container.c1.name
}
terraform {
  required_providers {
    lxd = {
      source = "terraform-lxd/lxd"
    }
  }
}
provider "lxd" {
  # NOTE(review): auto-generating and blindly trusting the remote's
  # certificate is convenient for local testing but disables TLS trust
  # verification -- confirm this never targets a production remote.
  generate_client_certificates = true
  accept_remote_certificate    = true
}

View File

@ -0,0 +1,139 @@
{
"version": 4,
"terraform_version": "1.2.6",
"serial": 79,
"lineage": "fea43a0b-55d0-8487-c919-63e7025f36b1",
"outputs": {
"container_name": {
"value": "OpenLDAP",
"type": "string"
}
},
"resources": [
{
"mode": "managed",
"type": "lxd_cached_image",
"name": "image",
"provider": "provider[\"registry.terraform.io/terraform-lxd/lxd\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"aliases": null,
"architecture": "x86_64",
"copied_aliases": [],
"copy_aliases": false,
"created_at": 1663113600,
"fingerprint": "0e1edaa21525c07820e0549723f198a06784a32d64dc3e23ebdc19169eafc6d2",
"id": "local/0e1edaa21525c07820e0549723f198a06784a32d64dc3e23ebdc19169eafc6d2",
"remote": null,
"source_image": "debian/10/amd64",
"source_remote": "images"
},
"sensitive_attributes": [],
"private": "bnVsbA=="
}
]
},
{
"mode": "managed",
"type": "lxd_container",
"name": "c1",
"provider": "provider[\"registry.terraform.io/terraform-lxd/lxd\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"config": null,
"device": [],
"ephemeral": false,
"file": [
{
"content": "",
"create_directories": false,
"gid": 0,
"mode": "",
"source": "../scripts/01-Setup.bash",
"target_file": "/01-Setup.bash",
"uid": 0
},
{
"content": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDIQ/JtvjjxDTZGh0pdwjuIuGoyuj4QS88+MXTdyFbGUTZ9Kx0OHg2S4e2ZnifPTFQ7OIcRi4D2BWA8yEfAbIk+x5LOOkpTx0P5q0s2L9+ZTepYK++K3AbTZ2mcLzxzn4HSzt6/6076qdUDP6DmHs+OsaO1alS3oz02ST3gHn8cbr+PSZXOBnYUX/1W940OObcmNsb6nxo17AjE2CUlvsqKXrzY6py9Y+AnFe2UwIGjvcvThg44vxBXhkipis7x6f2UisuXN/QmAgx03AXSca+PQ8mmr9peMAY02hLrsF8nnYUpjFlUGD2MDbDz4T5NpfJbQhtIXm3JBh64VWSPmXwYHRMR7qgdsai/+nN0VhgX/mnAoOwXYdQIU4rmDEDK5CBqXKmOlNmfjv5d9sdZz0LblAGBWtwsvswrK8C7HwqvSDLQjoe04kVM2DpHqDjWS7tlF0HsfEFbhEqzD4WHbcmX1VkAq6oYmhqIYpAys+JjQKvosiCPHj4M49e1e5OAbFs= vegetalkiller@pop-os\n",
"create_directories": true,
"gid": 0,
"mode": "",
"source": "",
"target_file": "/root/.ssh/authorized_keys",
"uid": 0
}
],
"id": "OpenLDAP",
"image": "0e1edaa21525c07820e0549723f198a06784a32d64dc3e23ebdc19169eafc6d2",
"ip_address": "192.168.1.9",
"ipv4_address": "192.168.1.9",
"ipv6_address": "2800:a4:2611:d400:f8af:67ff:fe77:f7b6",
"limits": null,
"mac_address": "fa:af:67:77:f7:b6",
"name": "OpenLDAP",
"privileged": false,
"profiles": [
"OpenLDAP-profile"
],
"remote": null,
"start_container": true,
"status": "Running",
"target": "none",
"type": "container",
"wait_for_network": true
},
"sensitive_attributes": [],
"private": "bnVsbA==",
"dependencies": [
"lxd_cached_image.image",
"lxd_profile.p1"
]
}
]
},
{
"mode": "managed",
"type": "lxd_profile",
"name": "p1",
"provider": "provider[\"registry.terraform.io/terraform-lxd/lxd\"]",
"instances": [
{
"schema_version": 0,
"attributes": {
"config": {
"limits.cpu": "3"
},
"description": "",
"device": [
{
"name": "eth0",
"properties": {
"nictype": "macvlan",
"parent": "enp4s0"
},
"type": "nic"
},
{
"name": "root",
"properties": {
"path": "/",
"pool": "default"
},
"type": "disk"
}
],
"id": "OpenLDAP-profile",
"name": "OpenLDAP-profile",
"remote": null
},
"sensitive_attributes": [],
"private": "bnVsbA=="
}
]
}
]
}

View File

@ -0,0 +1,8 @@
{
"version": 4,
"terraform_version": "1.2.6",
"serial": 75,
"lineage": "fea43a0b-55d0-8487-c919-63e7025f36b1",
"outputs": {},
"resources": []
}