diff --git a/.github/workflows/10min-iiab-test-install.yml b/.github/workflows/10min-iiab-test-install.yml
new file mode 100644
index 000000000..d2b8bd056
--- /dev/null
+++ b/.github/workflows/10min-iiab-test-install.yml
@@ -0,0 +1,58 @@
+name: '"10 min" IIAB on Ubuntu 24.04 on x86-64'
+# run-name: ${{ github.actor }} is testing out GitHub Actions 🚀
+
+# https://michaelcurrin.github.io/dev-cheatsheets/cheatsheets/ci-cd/github-actions/triggers.html
+on: [push, pull_request, workflow_dispatch]
+
+# on:
+# push:
+#
+# pull_request:
+#
+# # Allows you to run this workflow manually from the Actions tab
+# workflow_dispatch:
+#
+# # Set your workflow to run every day of the week from Monday to Friday at 6:00 UTC
+# schedule:
+# - cron: "0 6 * * 1-5"
+
+jobs:
+ test-install:
+ runs-on: ubuntu-24.04
+ steps:
+      - run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
+      - run: echo "🔎 The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}."
+ #- name: Dump GitHub context (typically almost 500 lines)
+ # env:
+ # GITHUB_CONTEXT: ${{ toJSON(github) }}
+ # run: echo "$GITHUB_CONTEXT"
+ - name: Check out repository code
+ uses: actions/checkout@v4
+      - run: echo "🍏 This job's status is ${{ job.status }}."
+ - name: GitHub Actions "runner" environment
+ run: |
+ uname -a # uname -srm
+ whoami # Typically 'runner' instead of 'root'
+ pwd # /home/runner/work/iiab/iiab == $GITHUB_WORKSPACE == ${{ github.workspace }}
+ # ls
+ # ls $GITHUB_WORKSPACE
+ # ls ${{ github.workspace }}
+ # ls -la /opt # az, containerd, google, hostedtoolcache, microsoft, mssql-tools, pipx, pipx_bin, post-generation, runner, vsts
+ # apt update
+ # apt dist-upgrade -y
+ # apt autoremove -y
+ - name: Set up /opt/iiab/iiab
+ run: |
+ mkdir /opt/iiab
+ mv $GITHUB_WORKSPACE /opt/iiab
+ mkdir $GITHUB_WORKSPACE # OR SUBSEQUENT STEPS WILL FAIL ('working-directory: /opt/iiab/iiab' hacks NOT worth it!)
+ - name: Set up /etc/iiab/local_vars.yml
+ run: |
+ sudo mkdir /etc/iiab
+ # touch /etc/iiab/local_vars.yml
+ sudo cp /opt/iiab/iiab/vars/local_vars_none.yml /etc/iiab/local_vars.yml
+ - run: sudo /opt/iiab/iiab/scripts/ansible
+ - run: sudo ./iiab-install
+ working-directory: /opt/iiab/iiab
+ - run: iiab-summary
+ - run: cat /etc/iiab/iiab_state.yml
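Reviewer note: the job above boils down to the same few commands a person would type on a fresh Ubuntu 24.04 box; the only CI-specific wrinkle is relocating the checkout to /opt/iiab/iiab and recreating $GITHUB_WORKSPACE so later steps don't fail. A minimal manual sketch (not the supported install path; see download.iiab.io for that):

```bash
# Rough manual equivalent of the workflow steps above, on a scratch
# Ubuntu 24.04 machine (assumes sudo; paths mirror the workflow):
sudo mkdir -p /opt/iiab
sudo git clone https://github.com/iiab/iiab /opt/iiab/iiab
sudo mkdir -p /etc/iiab
sudo cp /opt/iiab/iiab/vars/local_vars_none.yml /etc/iiab/local_vars.yml
sudo /opt/iiab/iiab/scripts/ansible        # install/upgrade Ansible first
cd /opt/iiab/iiab && sudo ./iiab-install   # then inspect: iiab-summary, /etc/iiab/iiab_state.yml
```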
diff --git a/.github/workflows/30min-iiab-test-install-deb12-on-rpi3.yml b/.github/workflows/30min-iiab-test-install-deb12-on-rpi3.yml
new file mode 100644
index 000000000..a8703346e
--- /dev/null
+++ b/.github/workflows/30min-iiab-test-install-deb12-on-rpi3.yml
@@ -0,0 +1,65 @@
+name: '"30 min" IIAB on Debian 12 on RPi 3'
+# run-name: ${{ github.actor }} is testing out GitHub Actions 🚀
+
+# https://michaelcurrin.github.io/dev-cheatsheets/cheatsheets/ci-cd/github-actions/triggers.html
+on: [push, pull_request, workflow_dispatch]
+
+# on:
+# push:
+#
+# pull_request:
+#
+# # Allows you to run this workflow manually from the Actions tab
+# workflow_dispatch:
+#
+# # Set your workflow to run every day of the week from Monday to Friday at 6:00 UTC
+# schedule:
+# - cron: "0 6 * * 1-5"
+
+jobs:
+ test-install:
+ runs-on: ubuntu-22.04
+ strategy:
+ matrix:
+ arch: [debian12]
+ include:
+ - arch: debian12
+ cpu: cortex-a7
+ cpu_info: cpuinfo/raspberrypi_3b
+ base_image: https://raspi.debian.net/daily/raspi_3_bookworm.img.xz
+ # source https://raspi.debian.net/daily-images/
+ steps:
+      #- run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
+      #- run: echo "🔎 The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}."
+ #- name: Dump GitHub context (typically almost 500 lines)
+ # env:
+ # GITHUB_CONTEXT: ${{ toJSON(github) }}
+ # run: echo "$GITHUB_CONTEXT"
+ - name: Dump matrix context
+ env:
+ MATRIX_CONTEXT: ${{ toJSON(matrix) }}
+ run: echo "$MATRIX_CONTEXT"
+ - uses: actions/checkout@v3.1.0
+ - uses: pguyot/arm-runner-action@v2
+ with:
+ image_additional_mb: 1024
+ base_image: ${{ matrix.base_image }}
+ cpu: ${{ matrix.cpu }}
+ cpu_info: ${{ matrix.cpu_info }}
+ copy_repository_path: /opt/iiab/iiab
+ commands: |
+            echo "🍏 This job's status is ${{ job.status }}."
+ grep Model /proc/cpuinfo
+ uname -a # uname -srm
+ whoami # Typically 'root' instead of 'runner'
+ pwd # /home/runner/work/iiab/iiab == $GITHUB_WORKSPACE == ${{ github.workspace }}
+ apt-get update -y --allow-releaseinfo-change
+ apt-get install --no-install-recommends -y git
+ ls /opt/iiab/iiab
+ mkdir /etc/iiab
+ cp /opt/iiab/iiab/vars/local_vars_none.yml /etc/iiab/local_vars.yml
+ /opt/iiab/iiab/scripts/ansible
+ ./iiab-install
+ cd /opt/iiab/iiab
+ iiab-summary
+ cat /etc/iiab/iiab_state.yml
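Reviewer note: because workflow_dispatch is among the triggers, this emulated-RPi job can also be started by hand. A sketch using the GitHub CLI (assumes gh is installed and authenticated against your fork or iiab/iiab):

```bash
# Kick off the Debian-12-on-RPi-3 job manually and watch it:
gh workflow run 30min-iiab-test-install-deb12-on-rpi3.yml
gh run list --workflow=30min-iiab-test-install-deb12-on-rpi3.yml --limit 3
gh run watch    # interactively follow the most recent run
```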
diff --git a/.github/workflows/30min-iiab-test-install-raspios-on-zero2w.yml b/.github/workflows/30min-iiab-test-install-raspios-on-zero2w.yml
new file mode 100644
index 000000000..9b521fee6
--- /dev/null
+++ b/.github/workflows/30min-iiab-test-install-raspios-on-zero2w.yml
@@ -0,0 +1,77 @@
+name: '"30 min" IIAB on RasPiOS on Zero 2 W'
+# run-name: ${{ github.actor }} is testing out GitHub Actions 🚀
+
+# https://michaelcurrin.github.io/dev-cheatsheets/cheatsheets/ci-cd/github-actions/triggers.html
+on: [push, pull_request, workflow_dispatch]
+
+# on:
+# push:
+#
+# pull_request:
+#
+# # Allows you to run this workflow manually from the Actions tab
+# workflow_dispatch:
+#
+# # Set your workflow to run every day of the week from Monday to Friday at 6:00 UTC
+# schedule:
+# - cron: "0 6 * * 1-5"
+
+jobs:
+ test-install:
+ runs-on: ubuntu-22.04
+ strategy:
+ matrix:
+ arch: [aarch64] #[zero_raspbian, zero_raspios, zero2_raspios, aarch64]
+ include:
+ #- arch: zero_raspbian
+ # cpu: arm1176
+ # cpu_info: cpuinfo/raspberrypi_zero_w
+ # base_image: raspbian_lite:latest
+ #- arch: zero_raspios
+ # cpu: arm1176
+ # cpu_info: cpuinfo/raspberrypi_zero_w
+ # base_image: raspios_lite:latest
+ #- arch: zero2_raspios
+ # cpu: cortex-a7
+ # cpu_info: cpuinfo/raspberrypi_zero2_w
+ # base_image: raspios_lite:latest
+ - arch: aarch64
+ cpu: cortex-a53
+ cpu_info: cpuinfo/raspberrypi_zero2_w_arm64
+ base_image: raspios_lite_arm64:latest
+ steps:
+      #- run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
+      #- run: echo "🔎 The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}."
+ #- name: Dump GitHub context (typically almost 500 lines)
+ # env:
+ # GITHUB_CONTEXT: ${{ toJSON(github) }}
+ # run: echo "$GITHUB_CONTEXT"
+ - name: Dump matrix context
+ env:
+ MATRIX_CONTEXT: ${{ toJSON(matrix) }}
+ run: echo "$MATRIX_CONTEXT"
+ - uses: actions/checkout@v3.1.0
+ - uses: pguyot/arm-runner-action@v2
+ with:
+ image_additional_mb: 1024
+ base_image: ${{ matrix.base_image }}
+ cpu: ${{ matrix.cpu }}
+ cpu_info: ${{ matrix.cpu_info }}
+ copy_repository_path: /opt/iiab/iiab
+ commands: |
+            echo "🍏 This job's status is ${{ job.status }}."
+ #test `uname -m` = ${{ matrix.arch }}
+ grep Model /proc/cpuinfo
+ uname -a # uname -srm
+ whoami # Typically 'root' instead of 'runner'
+ pwd # /home/runner/work/iiab/iiab == $GITHUB_WORKSPACE == ${{ github.workspace }}
+ apt-get update -y --allow-releaseinfo-change
+ apt-get install --no-install-recommends -y git
+ ls /opt/iiab/iiab
+ mkdir /etc/iiab
+ cp /opt/iiab/iiab/vars/local_vars_none.yml /etc/iiab/local_vars.yml
+ /opt/iiab/iiab/scripts/ansible
+ ./iiab-install
+ cd /opt/iiab/iiab
+ iiab-summary
+ cat /etc/iiab/iiab_state.yml
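Reviewer note: the commented-out `test` line above hints at a cheap sanity check: inside the raspios_lite_arm64 image the emulated userland should identify as aarch64, and /proc/cpuinfo reflects whatever file was passed via cpu_info. Spelled out as plain shell (illustrative only):

```bash
# Sanity checks one could add to the commands: block above:
test "$(uname -m)" = aarch64 && echo "arm64 userland confirmed"
grep Model /proc/cpuinfo   # shows the Zero 2 W model string supplied via cpu_info
```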
diff --git a/.gitignore b/.gitignore
index 38f8427ed..62c1c7bcf 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,8 +1,13 @@
-xs-config.spec
+# https://git-scm.com/docs/gitignore
+
build
deprecated
.ansible
*.patches
-*.log
+*.log
*.retry
+
+# Lines below for emacs, which generates even more tmp files since 2022
*~
+.#*
+\#*#
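Reviewer note: the three new emacs patterns can be verified with git check-ignore; a quick sketch, run from the repo root (file names are arbitrary):

```bash
# Confirm which new .gitignore rule catches which emacs artifact:
git check-ignore -v 'notes.txt~' '.#notes.txt' '#notes.txt#'
# Expected output (line numbers will vary):
#   .gitignore:N:*~      notes.txt~
#   .gitignore:N:.#*     .#notes.txt
#   .gitignore:N:\#*#    #notes.txt#
```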
diff --git a/.travis.yml b/.travis.yml.unused
similarity index 100%
rename from .travis.yml
rename to .travis.yml.unused
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index caf8b425b..4ddde9b53 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,3 +1,3 @@
-# SEE THE NEW
-[github.com/iiab/iiab/wiki/IIAB-Contributors-Guide](https://github.com/iiab/iiab/wiki/IIAB-Contributors-Guide)
+# SEE THE NEW
+[github.com/iiab/iiab/wiki/Contributors-Guide-(EN)](https://github.com/iiab/iiab/wiki/Contributors-Guide-(EN))
# THANKS!
diff --git a/LICENSING.md b/LICENSING.md
index fac901b1d..77c1ed0b9 100644
--- a/LICENSING.md
+++ b/LICENSING.md
@@ -15,6 +15,6 @@ this is to include the following two lines at the top of the file:
Licensed under the terms of the GNU GPL v2 or later; see LICENSE for details.
All files not containing an explicit copyright notice or terms of license in
-the file are Copyright © 2015-2021, Unleash Kids, and are licensed under the
+the file are Copyright © 2015-2025, Unleash Kids, and are licensed under the
terms of the GPLv2 license in the file named LICENSE in the root of the
repository.
diff --git a/README.md b/README.md
index 2158149d1..fbfd30d35 100644
--- a/README.md
+++ b/README.md
@@ -2,26 +2,26 @@
# Internet-in-a-Box (IIAB)
-[Internet-in-a-Box (IIAB)](https://internet-in-a-box.org) is a "learning hotspot" that brings the Internet's crown jewels
-(Wikipedia in any language, thousands of Khan Academy videos, zoomable OpenStreetMap, electronic books, WordPress journaling, Toys from Trash electronics projects, ETC) to those without Internet.
+[Internet-in-a-Box (IIAB)](https://internet-in-a-box.org) is a “learning hotspot” that brings the Internet's crown jewels
+(Wikipedia in any language, thousands of Khan Academy videos, zoomable OpenStreetMap, electronic books, WordPress journaling, “Toys from Trash” electronics projects, ETC) to those without Internet.
You can build your own tiny, affordable server (an offline digital library) for your school, your medical clinic, your prison, your region and/or your very own family — accessible with any nearby smartphone, tablet or laptop.
Internet-in-a-Box gives you the DIY tools to:
1. Download then drag-and-drop to arrange the [very best of the World’s Free Knowledge](https://internet-in-a-box.org/#quality-content).
-2. Choose among [30+ powerful educational apps](http://FAQ.IIAB.IO#What_services_.28IIAB_apps.29_are_suggested_during_installation.3F) for your school or learning/teaching community, optionally with a complete LMS (learning management system).
+2. Choose among [30+ powerful educational apps](https://wiki.iiab.io/go/FAQ#What_services_%28IIAB_apps%29_are_suggested_during_installation%3F) for your school or learning/teaching community, optionally with a complete LMS (learning management system).
3. Exchange local/indigenous knowledge with nearby communities, using our [Manage Content](https://github.com/iiab/iiab-admin-console/blob/master/roles/console/files/help/InstContent.rst#manage-content) interface and possible mesh networking.
-FYI this [community product](https://en.wikipedia.org/wiki/Internet-in-a-Box) is enabled by professional volunteers working [side-by-side](http://FAQ.IIAB.IO#What_are_the_best_places_for_community_support.3F) with schools, clinics and libraries around the world. *Thank you for being a part of our http://OFF.NETWORK grassroots technology [movement](https://meta.wikimedia.org/wiki/Internet-in-a-Box)!*
+FYI this [community product](https://en.wikipedia.org/wiki/Internet-in-a-Box) is enabled by professional volunteers working [side-by-side](https://wiki.iiab.io/go/FAQ#What_are_the_best_places_for_community_support%3F) with schools, clinics and libraries around the world. *Thank you for being a part of our http://OFF.NETWORK grassroots technology [movement](https://meta.wikimedia.org/wiki/Internet-in-a-Box)!*
## Installation
-Install Internet-in-a-Box (IIAB) from [download.iiab.io](https://download.iiab.io/)
+Install Internet-in-a-Box (IIAB) from: [**download.iiab.io**](https://download.iiab.io/)
-Please see [FAQ.IIAB.IO](http://FAQ.IIAB.IO) which has 40+ questions and answers to help you along the way, as you put together the "local learning hotspot" most suitable for your own teaching/learning community. Here are 2 ways to install IIAB:
+Please see [FAQ.IIAB.IO](https://wiki.iiab.io/go/FAQ) which has 50+ questions and answers to help you along the way (e.g. [“Is a quick installation possible?”](https://wiki.iiab.io/go/FAQ#Is_a_quick_installation_possible%3F)) as you put together the “local learning hotspot” most suitable for your own teaching/learning community. Here are 2 ways to install IIAB:
- Our [1-line installer](https://download.iiab.io/) gets you the very latest, typically within about an hour, on [different Linux distributions](https://github.com/iiab/iiab/wiki/IIAB-Platforms#operating-systems).
-- [Prefab disk images](https://github.com/iiab/iiab/wiki/Raspberry-Pi-Images:-Summary) ([.img files](https://archive.org/search.php?query=iiab%20.img&sort=-publicdate)) are sometimes a few months out of date, but can be flashed directly onto a microSD card, for insertion into Raspberry Pi.
+- [Prefab disk images](https://github.com/iiab/iiab/wiki/Raspberry-Pi-Images-~-Summary#iiab-images-for-raspberry-pi) ([.img files](https://archive.org/search.php?query=iiab%20.img&sort=-publicdate)) are sometimes a few months out of date, but can be flashed directly onto a microSD card, for insertion into Raspberry Pi.
Our [HOW-TO videos](https://www.youtube.com/channel/UC0cBGCxr_WPBPa3IqPVEe3g) can be very helpful and the [Installation](https://github.com/iiab/iiab/wiki/IIAB-Installation) wiki page has more intricate details e.g. if you're trying to install Internet-in-a-Box (IIAB) onto a [another Linux](https://github.com/iiab/iiab/wiki/IIAB-Platforms) that has not yet been tried.
@@ -29,20 +29,22 @@ See our [Tech Docs Wiki](https://github.com/iiab/iiab/wiki) for more about the u
After you've installed the software, you should [add content](https://github.com/iiab/iiab/wiki/IIAB-Installation#add-content), which can of course take time when downloading multi-gigabyte Content Packs!
-Finally, you can [customize your Internet-in-a-Box home page](http://FAQ.IIAB.IO#How_do_I_customize_my_Internet-in-a-Box_home_page.3F) (typically http://box or http://box.lan) using our **drag-and-drop** Admin Console (http://box.lan/admin) — to arrange Content Packs and IIAB Apps (services) for your local community's needs.
+Finally, you can [customize your Internet-in-a-Box home page](https://wiki.iiab.io/go/FAQ#How_do_I_customize_my_Internet-in-a-Box_home_page%3F) (typically http://box or http://box.lan) using our **drag-and-drop** Admin Console (http://box.lan/admin) — to arrange Content Packs and IIAB Apps (services) for your local community's needs.
## Community
-Internet-in-a-Box (IIAB) greatly welcomes contributions from educators, librarians and [IT/UX/QA people](https://github.com/iiab/iiab/wiki/Technical-Contributors-Guide) of all kinds!
+Global community updates and videos are regularly posted to: **[@internet_in_box](https://twitter.com/internet_in_box)**
-If you would like to volunteer, please [make contact](https://internet-in-a-box.org/pages/contributing.html) after looking over "[How can I help?](http://FAQ.IIAB.IO#How_can_I_help.3F)" at: [FAQ.IIAB.IO](http://FAQ.IIAB.IO)
+_Internet-in-a-Box (IIAB) greatly welcomes contributions from educators, librarians and [IT/UX/QA people](https://github.com/iiab/iiab/wiki/Contributors-Guide-(EN)) ([versión en español](https://github.com/iiab/iiab/wiki/Gu%C3%ADa-para-Contribuidores-(ES))) of all kinds!_
+
+If you would like to volunteer, please [make contact](https://internet-in-a-box.org/contributing.html) after looking over [“How can I help?”](https://wiki.iiab.io/go/FAQ#How_can_I_help%3F) at: [FAQ.IIAB.IO](https://wiki.iiab.io/go/FAQ)
-To learn more about our open community architecture for "offline" learning, check out "[What technical documentation exists?](http://FAQ.IIAB.IO#What_technical_documentation_exists.3F)"
-FYI we use [Ansible](http://FAQ.IIAB.IO#What_is_Ansible_and_what_version_should_I_use.3F) to install, deploy, configure and manage the various software components.
+To learn more about our open community architecture for “offline” learning, check out [“What technical documentation exists?”](https://wiki.iiab.io/go/FAQ#What_technical_documentation_exists%3F)
+FYI we use [Ansible](https://wiki.iiab.io/go/FAQ#What_is_Ansible_and_what_version_should_I_use%3F) to install, deploy, configure and manage the various software components.
-*Thank you for helping us enable offline access to the Internet's free/open knowledge jewels, as well as "Sneakernet-of-Alexandria" distribution of local/indigenous content, when mass media channels do not serve grassroots voices.*
+*Thank you for helping us enable offline access to the Internet's free/open knowledge jewels, as well as “Sneakernet-of-Alexandria” distribution of local/indigenous content, when mass media channels do not serve grassroots voices.*
## Versions
@@ -52,4 +54,4 @@ Install our latest pre-release using the 1-line installer at: [**download.iiab.i
You can also consider earlier official releases at: [github.com/iiab/iiab/releases](https://github.com/iiab/iiab/releases)
-For much older versions, see: [github.com/xsce](http://github.com/xsce), [schoolserver.org](http://schoolserver.org)
+For much older versions, see: [github.com/xsce](https://github.com/xsce), [schoolserver.org](http://schoolserver.org)
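Reviewer note: for reference, the "1-line installer" the updated README links to is essentially a curl-pipe of install.txt from download.iiab.io (the same file iiab-install mentions further down). Roughly:

```bash
# Approximate form of the 1-line installer referenced above -- check
# https://download.iiab.io/ for the current, authoritative command:
curl https://download.iiab.io/install.txt | sudo bash
```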
diff --git a/ansible.cfg b/ansible.cfg
index 4030a931e..deb5328ed 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -5,4 +5,4 @@
# Disallowed by Ansible 2.11+ -- see https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_2.7.html#using-a-loop-on-a-package-module-via-squash-actions
#squash_actions = apk, apt, dnf, homebrew, openbsd_pkg, pacman, pkgng, yum, zypper, package
[defaults]
-interpreter_python=/usr/bin/python3
+interpreter_python=/usr/local/ansible/bin/python3
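Reviewer note: the interpreter_python change points Ansible at the python3 inside /usr/local/ansible, presumably the virtualenv that scripts/ansible sets up (an assumption worth verifying on older installs). Two quick checks:

```bash
# Verify the interpreter ansible.cfg now points at actually exists,
# and that the ansible on PATH lives in the same tree:
/usr/local/ansible/bin/python3 --version
ansible --version | grep -Ei 'python version|executable location'
```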
diff --git a/iiab-from-cmdline.yml b/iiab-from-cmdline.yml
index 74b507b56..68956b3ad 100644
--- a/iiab-from-cmdline.yml
+++ b/iiab-from-cmdline.yml
@@ -3,10 +3,10 @@
become: yes
vars_files:
- - vars/default_vars.yml
- - vars/{{ ansible_local.local_facts.os_ver }}.yml
- - /etc/iiab/local_vars.yml
- - /etc/iiab/iiab_state.yml
+ - vars/default_vars.yml
+ - vars/{{ ansible_local.local_facts.os_ver }}.yml
+ - /etc/iiab/local_vars.yml
+ - /etc/iiab/iiab_state.yml
roles:
- { role: 0-init }
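Reviewer note: the vars_files cascade above (and in the other playbooks below) layers defaults, a per-OS file chosen by the os_ver custom fact, the admin's local_vars.yml, and the machine-written iiab_state.yml. A hedged way to see which per-OS file the second entry resolves to:

```bash
# From /opt/iiab/iiab -- query the same custom fact the playbooks use:
sudo ansible -m setup -i ansible_hosts localhost --connection=local | grep '"os_ver"'
ls vars/   # the matching file, e.g. debian-12.yml or ubuntu-2404.yml (names illustrative)
```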
diff --git a/iiab-from-console.yml b/iiab-from-console.yml
index e83aefdb9..fb8282580 100644
--- a/iiab-from-console.yml
+++ b/iiab-from-console.yml
@@ -3,10 +3,10 @@
become: yes
vars_files:
- - vars/default_vars.yml
- - vars/{{ ansible_local.local_facts.os_ver }}.yml
- - /etc/iiab/local_vars.yml
- - /etc/iiab/iiab_state.yml
+ - vars/default_vars.yml
+ - vars/{{ ansible_local.local_facts.os_ver }}.yml
+ - /etc/iiab/local_vars.yml
+ - /etc/iiab/iiab_state.yml
roles:
- { role: 0-init }
diff --git a/iiab-install b/iiab-install
index 037d61b24..45e637c2a 100755
--- a/iiab-install
+++ b/iiab-install
@@ -1,17 +1,57 @@
#!/bin/bash -e
# Running from a git repo
# Add cmdline options for passing to ansible
-# Todo add proper shift to gobble up --debug --reinstall
PLAYBOOK=iiab-stages.yml
INVENTORY=ansible_hosts
IIAB_STATE_FILE=/etc/iiab/iiab_state.yml
-ARGS=""
+ARGS="--extra-vars {" # Needs boolean not string so use JSON list. bash forces {...} to '{...}' for Ansible
+
CWD=`pwd`
OS=`grep ^ID= /etc/os-release | cut -d= -f2`
-OS=${OS//\"/}
-MIN_RPI_KERN=5.4.0 # Do not use 'rpi-update' unless absolutely necessary: https://github.com/iiab/iiab/issues/1993
-MIN_ANSIBLE_VER=2.11.6 # Ansible 2.8.3 and 2.8.6 had serious bugs, preventing their use with IIAB.
+OS=${OS//\"/} # Remove all '"'
+MIN_RPI_KERN=5.4.0 # Do not use 'rpi-update' unless absolutely necessary: https://github.com/iiab/iiab/issues/1993
+MIN_ANSIBLE_VER=2.16.14 # 2024-11-08: ansible-core 2.15 EOL is November 2024 per https://docs.ansible.com/ansible/latest/reference_appendices/release_and_maintenance.html#ansible-core-support-matrix 2022-11-09: Raspberry Pi 3 (and 3 B+ etc?) apparently install (and require?) ansible-core 2.11 for now -- @deldesir can explain more on PR #3419. Historical: Ansible 2.8.3 and 2.8.6 had serious bugs, preventing their use with IIAB.
+
+REINSTALL=false
+DEBUG=false
+SKIP_ROLE_ON_ERROR=false
+
+usage() {
+ echo -e "\n\e[1mUse './iiab-install' for regular installs, or to continue an install."
+ echo -e "Use './iiab-install --risky' to force 'skip_role_on_error: True'"
+ echo -e "Use './iiab-install --reinstall' to force running all Stages 0-9, followed by the Network Role."
+ echo -e "Use './iiab-install --debug' to run Stage 0, followed by Stages 3-9, followed by the Network Role."
+ echo -e "Use './iiab-configure' to run Stage 0, followed by Stages 4-9."
+ echo -e "Use './runrole' to run Stage 0, followed by a single Stage or Role."
+ echo -e "Use './iiab-network' to run Stage 0, followed by the Network Role.\e[0m\n"
+}
+
+# https://stackoverflow.com/questions/192249/how-do-i-parse-command-line-arguments-in-bash/14203146#14203146
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ --reinstall)
+ REINSTALL=true
+ shift
+ ;;
+ --debug)
+ DEBUG=true
+ shift
+ ;;
+ -r|--risky)
+ SKIP_ROLE_ON_ERROR=true
+ shift
+ ;;
+ *)
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+ARGS="$ARGS\"skip_role_on_error\":$SKIP_ROLE_ON_ERROR" # Needs boolean not
+# string so use JSON list. Ansible permits these boolean values: (refresher)
+# https://github.com/iiab/iiab/blob/master/roles/0-init/tasks/validate_vars.yml#L19-L43
if [ ! -f /etc/iiab/local_vars.yml ]; then
@@ -25,13 +65,13 @@ if [ ! -f /etc/iiab/local_vars.yml ]; then
echo -e "ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n" >&2
fi
- echo -e "\nEXITING: /opt/iiab/iiab/iiab-install REQUIRES /etc/iiab/local_vars.yml\n" >&2
+ echo -e "\n\e[1mEXITING: /opt/iiab/iiab/iiab-install REQUIRES /etc/iiab/local_vars.yml\e[0m\n" >&2
- echo -e "(1) Please read http://wiki.laptop.org/go/IIAB/local_vars.yml to learn more" >&2
- echo -e "(2) MIN/MEDIUM/BIG samples are included in /opt/iiab/iiab/vars" >&2
+ echo -e "(1) See http://FAQ.IIAB.IO -> What is local_vars.yml and how do I customize it?" >&2
+ echo -e "(2) SMALL/MEDIUM/LARGE samples are included in /opt/iiab/iiab/vars" >&2
echo -e "(3) NO TIME FOR DETAILS? RUN INTERNET-IN-A-BOX'S FRIENDLY 1-LINE INSTALLER:\n" >&2
- echo -e ' http://download.iiab.io\n' >&2
+ echo -e ' https://download.iiab.io\n' >&2
exit 1
fi
@@ -42,14 +82,15 @@ fi
echo -e "\n\n./iiab-install $* BEGUN IN $CWD\n"
echo -e "local_facts.fact DIAGNOSTICS... (A FEW LINES OF ERRORS/WARNINGS BELOW ARE OK!)\n"
-
scripts/local_facts.fact # Exit & advise, if OS not supported.
-if [ ! -f /etc/ansible/facts.d/local_facts.fact ]; then
- mkdir -p /etc/ansible/facts.d
-fi
+mkdir -p /etc/ansible/facts.d
cp scripts/local_facts.fact /etc/ansible/facts.d/local_facts.fact
-echo -e "\nPlaced /etc/ansible/facts.d/local_facts.fact into position."
+echo -e "\nPlaced /etc/ansible/facts.d/local_facts.fact into position.\n"
+
+mkdir -p /etc/iiab/install-flags # MANDATORY since 2022-07-22
+echo -e "/etc/iiab/install-flags directory created/verified."
+echo -e "(e.g. for PR #3318 netwarn pop-ups, asking you to run iiab-network)\n"
if [ ! -f $PLAYBOOK ]; then
echo "EXITING: IIAB Playbook ""$PLAYBOOK"" not found."
@@ -57,16 +98,6 @@ if [ ! -f $PLAYBOOK ]; then
exit 1
fi
-if [ "$1" != "--debug" ] && [ "$1" != "--reinstall" ] && [ "$1" != "" ]; then
- echo "Use './iiab-install' for regular installs, or to continue an install."
- echo "Use './iiab-install --reinstall' to force running all Stages 0-9, followed by the Network Role."
- echo "Use './iiab-install --debug' to run Stage 0, followed by Stages 3-9, followed by the Network Role."
- echo "Use './iiab-configure' to run Stage 0, followed by Stages 4-9."
- echo "Use './runrole' to run Stage 0, followed by a single Stage or Role."
- echo "Use './iiab-network' to run Stage 0, followed by the Network Role."
- exit 1
-fi
-
# Subroutine compares software version numbers. Generates rare false positives
# like "1.0 > 1" and "2.4.0 > 2.4". Avoid risks by structuring conditionals w/
# a consistent # of decimal points e.g. "if version_gt w.x.y.z a.b.c.d; then"
@@ -93,7 +124,7 @@ CURR_ANSIBLE_VER=0
#if [[ $(command -v ansible) ]]; then # Also Works! $(...) nests more easily than backticks
#if [[ `which ansible` ]]; then # "which" misses built-in commands like cd, and is RISKY per https://stackoverflow.com/questions/592620/check-if-a-program-exists-from-a-bash-script
#if [[ `type -P ansible` ]]; then # "type -P" isn't POSIX compliant; it misses built-in commands like "cd"
-if [[ `command -v ansible` ]]; then # "command -v" is POSIX compliant; it catches built-in commands like "cd"
+if [[ $(command -v ansible) ]]; then # "command -v" is POSIX compliant; it catches built-in commands like "cd"
CURR_ANSIBLE_VER=$(ansible --version | head -1 | cut -f 2- -d " " | sed 's/.* \([^ ]*\)\].*/\1/')
# Above works with 'ansible [core 2.11.0rc2]' -- these old ways do not:
#CURR_ANSIBLE_VER=$(ansible --version | head -1 | awk '{print $2}')
@@ -125,41 +156,38 @@ if [ -f /etc/iiab/iiab.env ]; then
fi
fi
- if [ "$1" == "--reinstall" ]; then
+ if $($REINSTALL); then
STAGE=0
- ARGS="$ARGS"" --extra-vars reinstall=True"
+ #ARGS="$ARGS"" --extra-vars reinstall=True"
+ ARGS="$ARGS,\"reinstall\":True" # Needs boolean not string so use JSON list
sed -i 's/^STAGE=.*/STAGE=0/' /etc/iiab/iiab.env
echo "Wrote STAGE=0 (counter) to /etc/iiab/iiab.env"
- elif [ "$STAGE" -ge 2 ] && [ "$1" == "--debug" ]; then
+ elif [ "$STAGE" -ge 2 ] && $($DEBUG); then
STAGE=2
sed -i 's/^STAGE=.*/STAGE=2/' /etc/iiab/iiab.env
echo "Wrote STAGE=2 (counter) to /etc/iiab/iiab.env"
elif [ "$STAGE" -eq 9 ]; then
- echo -e "\nEXITING: STAGE (counter) in /etc/iiab/iiab.env shows Stage 9 Is Already Done."
- echo -e "Use './iiab-install --reinstall' to force running all Stages 0-9, followed by the Network Role."
- echo -e "Use './iiab-install --debug' to run Stage 0, followed by Stages 3-9, followed by the Network Role."
- echo -e "Use './iiab-configure' to run Stage 0, followed by Stages 4-9."
- echo -e "Use './runrole' to run Stage 0, followed by a single Stage or Role."
- echo -e "Use './iiab-network' to run Stage 0, followed by the Network Role.\n\n"
-
- exit 0 # Allows rerunning http://download.iiab.io/install.txt
+ echo -e "\n\e[1mEXITING: STAGE (counter) in /etc/iiab/iiab.env shows Stage 9 Is Already Done.\e[0m"
+ usage
+ exit 0 # Allows rerunning https://download.iiab.io/install.txt
fi
fi
-if [ "$STAGE" -lt 2 ] && [ "$1" == "--debug" ]; then
+if [ "$STAGE" -lt 2 ] && $($DEBUG); then
echo -e "\n'--debug' *ignored* as STAGE (counter) < 2."
fi
# /etc/iiab/iiab_state.yml is mandatory and must be created here. Background:
# Allow iiab-install to read IIAB_STATE_FILE to not repeat installs of previous
# roles that already completed within the stage.
-if [ ! -f $IIAB_STATE_FILE ]; then
- #touch $IIAB_STATE_FILE
+if [ ! -f $IIAB_STATE_FILE ]; then # touch $IIAB_STATE_FILE
+ echo -e "\nCreating... $IIAB_STATE_FILE"
cat > $IIAB_STATE_FILE << EOF
# DO *NOT* MANUALLY EDIT THIS, THANKS!
# IIAB does NOT currently support uninstalling apps/services.
EOF
fi
+
echo -e "\nTRY TO RERUN './iiab-install' IF IT FAILS DUE TO CONNECTIVITY ISSUES ETC!\n"
echo -e "\e[1mRunning local Ansible playbooks...\n...Stage 0 will now run\n...followed by Stages $(($STAGE + 1))-9\n...and then the Network Role.\e[0m\n"
@@ -168,6 +196,8 @@ export ANSIBLE_LOG_PATH="$CWD""/iiab-install.log"
ansible -m setup -i $INVENTORY localhost --connection=local | grep python
ansible -m setup -i $INVENTORY localhost --connection=local >> /dev/null # So vars are recorded in /opt/iiab/iiab/iiab-install.log
-ansible-playbook -i $INVENTORY $PLAYBOOK ${ARGS} --connection=local
+ARGS="$ARGS}"
+echo -e "\nNOW RUN: ansible-playbook -i $INVENTORY $PLAYBOOK $ARGS --connection=local\n"
+ansible-playbook -i $INVENTORY $PLAYBOOK $ARGS --connection=local
echo -e "./iiab-install $* COMPLETED IN $CWD\n\n"
diff --git a/iiab-network b/iiab-network
index 6ff4b5cd3..c888c27bf 100755
--- a/iiab-network
+++ b/iiab-network
@@ -4,11 +4,14 @@
CWD=`pwd`
export ANSIBLE_LOG_PATH="$CWD/iiab-network.log"
-if [ ! -f iiab-network.yml ]; then
- echo "iiab-network.yml not found in current directory."
- echo "Please rerun this command from the top level of the git repo."
- echo "Exiting."
+exit_error() {
+ echo -e "\nEXITING: "$@ | tee -a /opt/iiab/iiab/iiab-network.log
exit 1
+}
+
+if [ ! -f iiab-network.yml ]; then
+ exit_error "iiab-network.yml not found in current directory." \
+ "Please rerun this command from the top level of the git repo."
fi
OS="unknown" # will be overridden below, if /etc/iiab/iiab.env is legit
@@ -19,32 +22,27 @@ if [ -f /etc/iiab/iiab.env ]; then
if grep -q STAGE= /etc/iiab/iiab.env ; then
echo -e "\nExtracted STAGE=$STAGE (counter) from /etc/iiab/iiab.env"
if ! [ "$STAGE" -eq "$STAGE" ] 2> /dev/null; then
- echo -e "\nEXITING: STAGE (counter) value == ""$STAGE"" is non-integer"
- exit 1
+ exit_error "STAGE (counter) value == ""$STAGE"" is non-integer"
elif [ "$STAGE" -lt 0 ] || [ "$STAGE" -gt 9 ]; then
- echo -e "\nEXITING: STAGE (counter) value == ""$STAGE"" is out-of-range"
- exit 1
+ exit_error "STAGE (counter) value == ""$STAGE"" is out-of-range"
elif [ "$STAGE" -lt 3 ]; then
- echo -e "\nEXITING: STAGE (counter) value == ""$STAGE"
- echo -e "\nIIAB Stage 3 not complete."
- echo -e "\nPlease run: ./iiab-install"
- exit 1
+ exit_error "STAGE (counter) value == ""$STAGE" \
+ "\nIIAB Stage 3 not complete." \
+ "\nPlease run: ./iiab-install"
fi
else
- echo -e "\nEXITING: STAGE (counter) not found"
- echo -e "\nIIAB not installed."
- echo -e "\nPlease run: ./iiab-install"
- exit 1
+ exit_error "STAGE (counter) not found" \
+ "\nIIAB not installed." \
+ "\nPlease run: ./iiab-install"
fi
else
- echo -e "\nEXITING: /etc/iiab/iiab.env not found"
- exit 1
+ exit_error "/etc/iiab/iiab.env not found"
fi
echo "Ansible will now run iiab-network.yml -- log file is iiab-network.log"
Start=`date`
ansible -m setup -i ansible_hosts localhost --connection=local | grep python
-ansible-playbook -i ansible_hosts iiab-network.yml --connection=local
+ansible-playbook -i ansible_hosts iiab-network.yml --extra-vars "{\"skip_role_on_error\":false}" --connection=local
End=`date`
@@ -94,3 +92,4 @@ echo "iiab-network run start: $Start"
echo "iiab-network run end: $End"
echo
echo "Please REBOOT to fully verify your network -- graphical desktops MUST reboot!"
+exit 0
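Reviewer note: the new exit_error helper both prints and appends to iiab-network.log via tee, and accepts several message fragments in one call. A minimal standalone sketch of the pattern (log path shortened for illustration):

```bash
# Simplified copy of the exit_error pattern above:
LOG=./iiab-network.log
exit_error() {
    echo -e "\nEXITING: $*" | tee -a "$LOG"
    exit 1
}
exit_error "STAGE (counter) value == 2" \
           "\nIIAB Stage 3 not complete." \
           "\nPlease run: ./iiab-install"
```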
diff --git a/iiab-network.yml b/iiab-network.yml
index a72678367..2725a78c5 100644
--- a/iiab-network.yml
+++ b/iiab-network.yml
@@ -3,10 +3,10 @@
become: yes
vars_files:
- - vars/default_vars.yml
- - vars/{{ ansible_local.local_facts.os_ver }}.yml
- - /etc/iiab/local_vars.yml
- - /etc/iiab/iiab_state.yml
+ - vars/default_vars.yml
+ - vars/{{ ansible_local.local_facts.os_ver }}.yml
+ - /etc/iiab/local_vars.yml
+ - /etc/iiab/iiab_state.yml
roles:
- { role: 0-init }
diff --git a/iiab-setup b/iiab-setup
new file mode 100755
index 000000000..359967bc2
--- /dev/null
+++ b/iiab-setup
@@ -0,0 +1,20 @@
+#!/bin/bash -e
+# Running from a git repo
+# Assumes iiab repos are downloaded
+
+apt -y update
+apt -y full-upgrade
+
+apt -y install git curl nano gawk wget pastebinit
+
+cd /opt/iiab/iiab
+scripts/ansible
+
+# 2022-09-27: iiab-install now handles this
+#mkdir -p /etc/iiab/install-flags
+
+if [ ! -f /etc/iiab/local_vars.yml ]; then
+ cp /opt/iiab/iiab/vars/local_vars_none.yml /etc/iiab/local_vars.yml
+fi
+
+reboot
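Reviewer note: as its header says, the new iiab-setup assumes /opt/iiab/iiab is already cloned; it upgrades the OS, installs a few base packages plus Ansible, seeds a do-nothing local_vars.yml if none exists, and reboots. A hedged usage sketch:

```bash
# Typical sequence around the new iiab-setup script (run with sudo/root):
cd /opt/iiab/iiab
sudo ./iiab-setup        # apt full-upgrade + scripts/ansible + local_vars.yml, then reboot
# ...after the machine comes back up:
cd /opt/iiab/iiab && sudo ./iiab-install
```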
diff --git a/iiab-stages.yml b/iiab-stages.yml
index 4b0940db6..32a6ca751 100644
--- a/iiab-stages.yml
+++ b/iiab-stages.yml
@@ -3,11 +3,11 @@
become: yes
vars_files:
- - roles/0-init/defaults/main.yml
- - vars/default_vars.yml
- - vars/{{ ansible_local.local_facts.os_ver }}.yml
- - /etc/iiab/local_vars.yml
- - /etc/iiab/iiab_state.yml
+ - roles/0-init/defaults/main.yml
+ - vars/default_vars.yml
+ - vars/{{ ansible_local.local_facts.os_ver }}.yml
+ - /etc/iiab/local_vars.yml
+ - /etc/iiab/iiab_state.yml
tasks:
diff --git a/install-support.yml b/install-support.yml.unused
similarity index 51%
rename from install-support.yml
rename to install-support.yml.unused
index f8e6802f6..f2835214d 100644
--- a/install-support.yml
+++ b/install-support.yml.unused
@@ -2,9 +2,9 @@
become: yes
vars_files:
- - vars/default_vars.yml
- - vars/{{ ansible_local.local_facts.os_ver }}.yml
- - /etc/iiab/local_vars.yml
+ - vars/default_vars.yml
+ - vars/{{ ansible_local.local_facts.os_ver }}.yml
+ - /etc/iiab/local_vars.yml
roles:
- { role: 0-init }
diff --git a/roles/0-DEPRECATED-ROLES/httpd/defaults/main.yml b/roles/0-DEPRECATED-ROLES/httpd/defaults/main.yml
index f728ffca8..a28c2da61 100644
--- a/roles/0-DEPRECATED-ROLES/httpd/defaults/main.yml
+++ b/roles/0-DEPRECATED-ROLES/httpd/defaults/main.yml
@@ -8,7 +8,7 @@
# apache_interface: 127.0.0.1
# Make this False to disable http://box/common/services/power_off.php button:
-# apache_allow_sudo: True
+# allow_www_data_poweroff: False
# All above are set in: github.com/iiab/iiab/blob/master/vars/default_vars.yml
# If nec, change them by editing /etc/iiab/local_vars.yml prior to installing!
diff --git a/roles/openvpn/defaults/main.yml b/roles/0-DEPRECATED-ROLES/openvpn/defaults/main.yml
similarity index 62%
rename from roles/openvpn/defaults/main.yml
rename to roles/0-DEPRECATED-ROLES/openvpn/defaults/main.yml
index adc23ec2b..136e01f5d 100644
--- a/roles/openvpn/defaults/main.yml
+++ b/roles/0-DEPRECATED-ROLES/openvpn/defaults/main.yml
@@ -1,13 +1,17 @@
+# SECURITY WARNING: https://wiki.iiab.io/go/Security
+
# openvpn_install: True
# openvpn_enabled: False
-# For /etc/iiab/openvpn_handle
+# Empty string on purpose since ~2016, for /etc/iiab/uuid
+# SEE https://github.com/iiab/iiab/blob/master/roles/openvpn/tasks/main.yml#L5-L20
# openvpn_handle: ""
# cron seems necessary on CentOS:
# openvpn_cron_enabled: False
# openvpn_server: xscenet.net
+# openvpn_server_real_ip: 3.89.148.185
# openvpn_server_virtual_ip: 10.8.0.1
# openvpn_server_port: 1194
diff --git a/roles/openvpn/tasks/enable-or-disable.yml b/roles/0-DEPRECATED-ROLES/openvpn/tasks/enable-or-disable.yml
similarity index 100%
rename from roles/openvpn/tasks/enable-or-disable.yml
rename to roles/0-DEPRECATED-ROLES/openvpn/tasks/enable-or-disable.yml
diff --git a/roles/openvpn/tasks/install.yml b/roles/0-DEPRECATED-ROLES/openvpn/tasks/install.yml
similarity index 92%
rename from roles/openvpn/tasks/install.yml
rename to roles/0-DEPRECATED-ROLES/openvpn/tasks/install.yml
index cbe2e36af..53f5dc7f2 100644
--- a/roles/openvpn/tasks/install.yml
+++ b/roles/0-DEPRECATED-ROLES/openvpn/tasks/install.yml
@@ -1,3 +1,8 @@
+- name: Record (initial) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df1
+
+
- name: "Install packages: ncat, nmap, openvpn, sudo"
package:
name:
@@ -100,6 +105,17 @@
# RECORD OpenVPN AS INSTALLED
+- name: Record (final) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df2
+
+- name: Add 'openvpn_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
+ ini_file:
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
+ section: openvpn
+ option: openvpn_disk_usage
+ value: "{{ df2.stdout|int - df1.stdout|int }}"
+
- name: "Set 'openvpn_installed: True'"
set_fact:
openvpn_installed: True
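Reviewer note: the df1/df2 pair above is a simple before-and-after disk accounting: sample bytes used on / before the role installs anything, sample again at the end, and record the difference as openvpn_disk_usage in /etc/iiab/iiab.ini. The same idea as plain shell:

```bash
# Shell equivalent of the disk-usage bookkeeping recorded above:
df1=$(df -B1 --output=used / | tail -1)
# ... install packages, copy templates, etc. ...
df2=$(df -B1 --output=used / | tail -1)
echo "openvpn_disk_usage = $((df2 - df1)) bytes"
```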
diff --git a/roles/openvpn/tasks/main.yml b/roles/0-DEPRECATED-ROLES/openvpn/tasks/main.yml
similarity index 76%
rename from roles/openvpn/tasks/main.yml
rename to roles/0-DEPRECATED-ROLES/openvpn/tasks/main.yml
index 8c8577767..6e71e6374 100644
--- a/roles/openvpn/tasks/main.yml
+++ b/roles/0-DEPRECATED-ROLES/openvpn/tasks/main.yml
@@ -56,21 +56,21 @@
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
- - option: name
- value: OpenVPN
- - option: description
- value: '"OpenVPN enables live/remote support by connecting machines anywhere on the Internet, via a middleman server, using Virtual Private Network (VPN) techniques to create secure connections."'
- - option: openvpn_install
- value: "{{ openvpn_install }}"
- - option: openvpn_enabled
- value: "{{ openvpn_enabled }}"
- - option: openvpn_handle
- value: "{{ openvpn_handle }}"
- - option: openvpn_cron_enabled
- value: "{{ openvpn_cron_enabled }}"
- - option: openvpn_server
- value: "{{ openvpn_server }}"
- - option: openvpn_server_virtual_ip
- value: "{{ openvpn_server_virtual_ip }}"
- - option: openvpn_server_port
- value: "{{ openvpn_server_port }}"
+ - option: name
+ value: OpenVPN
+ - option: description
+ value: '"OpenVPN enables live/remote support by connecting machines anywhere on the Internet, via a middleman server, using Virtual Private Network (VPN) techniques to create secure connections."'
+ - option: openvpn_install
+ value: "{{ openvpn_install }}"
+ - option: openvpn_enabled
+ value: "{{ openvpn_enabled }}"
+ - option: openvpn_handle
+ value: "{{ openvpn_handle }}"
+ - option: openvpn_cron_enabled
+ value: "{{ openvpn_cron_enabled }}"
+ - option: openvpn_server
+ value: "{{ openvpn_server }}"
+ - option: openvpn_server_virtual_ip
+ value: "{{ openvpn_server_virtual_ip }}"
+ - option: openvpn_server_port
+ value: "{{ openvpn_server_port }}"
diff --git a/roles/openvpn/templates/15-openvpn.unused b/roles/0-DEPRECATED-ROLES/openvpn/templates/15-openvpn.unused
similarity index 100%
rename from roles/openvpn/templates/15-openvpn.unused
rename to roles/0-DEPRECATED-ROLES/openvpn/templates/15-openvpn.unused
diff --git a/roles/openvpn/templates/announce b/roles/0-DEPRECATED-ROLES/openvpn/templates/announce
similarity index 100%
rename from roles/openvpn/templates/announce
rename to roles/0-DEPRECATED-ROLES/openvpn/templates/announce
diff --git a/roles/openvpn/templates/announcer b/roles/0-DEPRECATED-ROLES/openvpn/templates/announcer
similarity index 100%
rename from roles/openvpn/templates/announcer
rename to roles/0-DEPRECATED-ROLES/openvpn/templates/announcer
diff --git a/roles/openvpn/templates/ca.crt b/roles/0-DEPRECATED-ROLES/openvpn/templates/ca.crt
similarity index 100%
rename from roles/openvpn/templates/ca.crt
rename to roles/0-DEPRECATED-ROLES/openvpn/templates/ca.crt
diff --git a/roles/openvpn/templates/client1.crt b/roles/0-DEPRECATED-ROLES/openvpn/templates/client1.crt
similarity index 100%
rename from roles/openvpn/templates/client1.crt
rename to roles/0-DEPRECATED-ROLES/openvpn/templates/client1.crt
diff --git a/roles/openvpn/templates/client1.key b/roles/0-DEPRECATED-ROLES/openvpn/templates/client1.key
similarity index 100%
rename from roles/openvpn/templates/client1.key
rename to roles/0-DEPRECATED-ROLES/openvpn/templates/client1.key
diff --git a/roles/0-DEPRECATED-ROLES/openvpn/templates/iiab-remote-off b/roles/0-DEPRECATED-ROLES/openvpn/templates/iiab-remote-off
new file mode 100755
index 000000000..6d5003b78
--- /dev/null
+++ b/roles/0-DEPRECATED-ROLES/openvpn/templates/iiab-remote-off
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# /usr/bin/iiab-remote-off should fully turn off multiple remote support
+# services like OpenVPN and others, to reduce risk of remote attacks.
+
+# echo -e '\nWARNING: To disable OpenVPN long-term, it'"'"'s recommended you:\n'
+#
+# echo -e '1) Set this variable in /etc/iiab/local_vars.yml'
+# echo -e ' openvpn_enabled: False\n'
+#
+# echo -e '2) Run:'
+# echo -e ' cd /opt/iiab/iiab'
+# echo -e ' sudo ./runrole openvpn\n'
+
+# Do nothing if OpenVPN not installed
+which openvpn
+if [ $? -ne 0 ]; then
+ echo 'Cannot find the OpenVPN program (openvpn).'
+ exit 1
+fi
+
+if grep -q '^openvpn_enabled:' /etc/iiab/local_vars.yml; then
+ sed -i "s/^openvpn_enabled:.*/openvpn_enabled: False/" /etc/iiab/local_vars.yml
+else
+ echo "openvpn_enabled: False" >> /etc/iiab/local_vars.yml
+fi
+
+systemctl disable openvpn
+systemctl stop openvpn
+
+sleep 5
+ps -e | grep openvpn # 2018-09-05: "ps -e | grep vpn" no longer works (nor would "pgrep vpn") when invoked from iiab-vpn-off (as filename itself causes [multiple] "vpn" instances to appear in process list!)
+if [ $? -eq 0 ]; then
+ echo "OpenVPN failed to stop."
+else
+ echo "OpenVPN's systemd service was successfully stopped and disabled."
+ echo
+ echo "Also, 'openvpn_enabled: False' was set in /etc/iiab/local_vars.yml"
+fi
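Reviewer note: the new iiab-remote-off helper persists `openvpn_enabled: False` into /etc/iiab/local_vars.yml (via sed or append) and then stops and disables the systemd unit. A short post-run check, assuming the script is installed to /usr/bin as its header suggests:

```bash
# After running the helper, confirm both the variable and the service state:
sudo iiab-remote-off
grep '^openvpn_enabled' /etc/iiab/local_vars.yml   # openvpn_enabled: False
systemctl is-enabled openvpn                       # should report: disabled
```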
diff --git a/roles/openvpn/templates/iiab-remote-on.j2 b/roles/0-DEPRECATED-ROLES/openvpn/templates/iiab-remote-on.j2
similarity index 100%
rename from roles/openvpn/templates/iiab-remote-on.j2
rename to roles/0-DEPRECATED-ROLES/openvpn/templates/iiab-remote-on.j2
diff --git a/roles/openvpn/templates/iiab-support b/roles/0-DEPRECATED-ROLES/openvpn/templates/iiab-support
similarity index 98%
rename from roles/openvpn/templates/iiab-support
rename to roles/0-DEPRECATED-ROLES/openvpn/templates/iiab-support
index 352ad1677..1d88a66b4 100755
--- a/roles/openvpn/templates/iiab-support
+++ b/roles/0-DEPRECATED-ROLES/openvpn/templates/iiab-support
@@ -10,11 +10,11 @@ DEBUG=false # Using /usr/bin/true or /usr/bin/false
PLAYBOOK="install-support.yml"
INVENTORY="ansible_hosts"
-# 2021-08-18: bash scripts using default_vars.yml &/or local_vars.yml
+# 2023-02-25: bash scripts using default_vars.yml &/or local_vars.yml
# https://github.com/iiab/iiab-factory/blob/master/iiab
-# https://github.com/iiab/iiab/blob/master/roles/firmware/templates/iiab-check-firmware#L13
+# https://github.com/iiab/iiab/blob/master/roles/firmware/templates/iiab-check-firmware#L10-14
# https://github.com/iiab/iiab/blob/master/roles/network/templates/gateway/iiab-gen-iptables#L48-L52
-# https://github.com/iiab/maps/blob/master/osm-source/pages/viewer/scripts/iiab-install-map-region#L25-L34
+# https://github.com/iiab/maps/blob/master/osm-source/pages/viewer/scripts/iiab-install-map-region#L23-L39
# https://github.com/iiab/iiab/blob/master/roles/openvpn/templates/iiab-support READS AND WRITES, INCL NON-BOOLEAN
# PARSE local_vars.yml JUST AS Ansible & /etc/openvpn/scripts/announcer DO:
diff --git a/roles/openvpn/templates/iiab-support.older b/roles/0-DEPRECATED-ROLES/openvpn/templates/iiab-support.older
similarity index 100%
rename from roles/openvpn/templates/iiab-support.older
rename to roles/0-DEPRECATED-ROLES/openvpn/templates/iiab-support.older
diff --git a/roles/openvpn/templates/openvpn_handle.j2.unused b/roles/0-DEPRECATED-ROLES/openvpn/templates/openvpn_handle.j2.unused
similarity index 100%
rename from roles/openvpn/templates/openvpn_handle.j2.unused
rename to roles/0-DEPRECATED-ROLES/openvpn/templates/openvpn_handle.j2.unused
diff --git a/roles/openvpn/templates/silence b/roles/0-DEPRECATED-ROLES/openvpn/templates/silence
similarity index 100%
rename from roles/openvpn/templates/silence
rename to roles/0-DEPRECATED-ROLES/openvpn/templates/silence
diff --git a/roles/openvpn/templates/xscenet.conf.j2 b/roles/0-DEPRECATED-ROLES/openvpn/templates/xscenet.conf.j2
similarity index 100%
rename from roles/openvpn/templates/xscenet.conf.j2
rename to roles/0-DEPRECATED-ROLES/openvpn/templates/xscenet.conf.j2
diff --git a/roles/0-init/defaults/main.yml b/roles/0-init/defaults/main.yml
index 95cca916b..a07cde5cf 100644
--- a/roles/0-init/defaults/main.yml
+++ b/roles/0-init/defaults/main.yml
@@ -23,14 +23,6 @@
# ...after it is set in 0-init/tasks/main.yml
first_run: False
rpi_model: none # 2021-07-30: Broadly used!
-#xo_model: none # 2021-07-30: No longer used
-# 2021-07-30: Recorded to /etc/iiab/iiab.ini but not used programmatically:
-gw_active: False
-# 2021-07-30: Broadly used, but not in an organized way -- most all IIAB
-# outfitting/provisioning happens online -- in situations where connectivity
-# failures should be reported to the operator, rather than papered over:
-internet_available: False
-discovered_wan_iface: none # 2021-07-30: Very broadly used!
# 2021-07-30: Barely used -- for {named, dhcpd, squid} in
# roles/network/tasks/main.yml -- after being set in 0-init/tasks/network.yml
diff --git a/roles/0-init/tasks/create_iiab_ini.yml b/roles/0-init/tasks/create_iiab_ini.yml
index d29f791c8..75b2b338f 100644
--- a/roles/0-init/tasks/create_iiab_ini.yml
+++ b/roles/0-init/tasks/create_iiab_ini.yml
@@ -1,13 +1,26 @@
-# workaround for fact that auto create does not work on iiab_ini_file (/etc/iiab/iiab.ini)
+- name: Record disk_used_a_priori (permanently, into {{ iiab_ini_file }} below) to later estimate iiab_software_disk_usage
+ shell: df -B1 --output=used / | tail -1
+ register: df1
+
+# workaround for fact that auto create does not work on iiab_ini_file
- name: Create {{ iiab_ini_file }}
file:
- path: "{{ iiab_ini_file }}"
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
state: touch
-- name: Add 'location' variable values to {{ iiab_ini_file }}
+- name: Run command 'dpkg --print-architecture' to identify OS architecture (CPU arch as revealed by ansible_architecture ~= ansible_machine is NOT enough!)
+ command: dpkg --print-architecture
+ register: dpkg_arch
+
+- name: Run command 'dpkg --print-foreign-architectures' (secondary OS arch, if available)
+ command: dpkg --print-foreign-architectures
+ register: dpkg_foreign_arch
+
+
+- name: Add 'summary' variable values to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}"
- section: location
+ section: summary
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
@@ -15,29 +28,39 @@
value: "{{ iiab_base }}"
- option: iiab_dir
value: "{{ iiab_dir }}"
+ - option: disk_used_a_priori
+ value: "{{ df1.stdout }}"
-- name: Add 'version' variable values to {{ iiab_ini_file }}
+- name: Add 'initial' variable values to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}"
- section: version
+ section: initial
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
+ - option: os_ver
+ value: "{{ os_ver }}"
- option: distribution
- value: "{{ ansible_distribution }}"
+ value: "{{ ansible_facts['distribution'] }}"
- option: arch
value: "{{ ansible_architecture }}"
- - option: iiab_base_ver
- value: "{{ iiab_base_ver }}"
- - option: iiab_branch
- value: "{{ ansible_local.local_facts.iiab_branch }}"
- - option: iiab_commit
- value: "{{ ansible_local.local_facts.iiab_commit }}"
- - option: install_date
- value: "{{ ansible_date_time.iso8601 }}"
- #- option: xo_model
- # value: "{{ xo_model }}"
+ - option: dpkg_arch
+ value: "{{ dpkg_arch.stdout }}"
+ - option: dpkg_foreign_arch
+ value: "{{ dpkg_foreign_arch.stdout }}"
- option: rpi_model
value: "{{ rpi_model }}"
- option: devicetree_model
value: "{{ devicetree_model }}"
+ - option: iiab_base_ver
+ value: "{{ iiab_base_ver }}"
+ - option: iiab_remote_url
+ value: "{{ ansible_local.local_facts.iiab_remote_url }}"
+ - option: iiab_branch
+ value: "{{ ansible_local.local_facts.iiab_branch }}"
+ - option: iiab_commit
+ value: "{{ ansible_local.local_facts.iiab_commit }}"
+ - option: iiab_recent_tag
+ value: "{{ ansible_local.local_facts.iiab_recent_tag }}"
+ - option: install_date
+ value: "{{ ansible_date_time.iso8601 }}"
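Reviewer note: the new 'initial' section records both dpkg architectures because the CPU architecture reported by Ansible facts cannot distinguish, say, a 64-bit kernel running a 32-bit userland. What the two probes and the disk baseline return, directly from a shell:

```bash
# The same probes the tasks above record into /etc/iiab/iiab.ini:
dpkg --print-architecture            # e.g. arm64 (the primary userland)
dpkg --print-foreign-architectures   # e.g. armhf, or empty if none enabled
df -B1 --output=used / | tail -1     # becomes disk_used_a_priori (bytes)
```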
diff --git a/roles/0-init/tasks/hostname.yml b/roles/0-init/tasks/hostname.yml
index 3d323b3cc..9e3e22214 100644
--- a/roles/0-init/tasks/hostname.yml
+++ b/roles/0-init/tasks/hostname.yml
@@ -1,3 +1,8 @@
+- name: "Set 'iiab_fqdn: {{ iiab_hostname }}.{{ iiab_domain }}'"
+ set_fact:
+ iiab_fqdn: "{{ iiab_hostname }}.{{ iiab_domain }}"
+ FQDN_changed: False
+
- name: Does /etc/cloud/cloud.cfg exist e.g. is this Ubuntu Server 18+ ?
stat:
path: /etc/cloud/cloud.cfg
@@ -17,24 +22,23 @@
# 2021-08-31: Periods in /etc/hostname fail with some WiFi routers (#2904)
# command: hostnamectl set-hostname "{{ iiab_hostname }}.{{ iiab_domain }}"
-#- name: Install /etc/sysconfig/network from template (redhat)
-# template:
-# src: roles/network/templates/network/sysconfig.network.j2
-# dest: /etc/sysconfig/network
-# owner: root
-# group: root
-# mode: 0644
-# when: is_redhat
-
-# roles/network/tasks/hosts.yml [no longer in use] ALSO did this:
+# 2022-07-11: Should the first entry match just hostname and domain move to
+# after localhost? See PR's #1 & #8 -- with discussion on #3302 -- and also:
+# 1. /etc/hosts -- #1815 solved by PR #1847
+# 2. /etc/hostname -- #2904 solved by PR #2973
- name: 'Put FQDN & hostnames in /etc/hosts: "127.0.0.1 {{ iiab_hostname }}.{{ iiab_domain }} localhost.localdomain localhost {{ iiab_hostname }} box box.lan"'
lineinfile:
path: /etc/hosts
regexp: '^127\.0\.0\.1'
line: '127.0.0.1 {{ iiab_hostname }}.{{ iiab_domain }} localhost.localdomain localhost {{ iiab_hostname }} box box.lan'
- #owner: root
- #group: root
- #mode: 0644
+
+# 2021-07-30: FQDN_changed isn't used as in the past -- its remaining use is
+# for {named, dhcpd, squid} in roles/network/tasks/main.yml -- possibly it
+# should be reconsidered? See PR #2876: roles/network might become optional?
+- name: "Also set 'FQDN_changed: True' -- if iiab_fqdn != ansible_fqdn ({{ ansible_fqdn }})"
+ set_fact:
+ FQDN_changed: True
+ when: iiab_fqdn != ansible_fqdn
#- name: Re-configuring httpd - not initial install
# include_tasks: roles/httpd/tasks/main.yml
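Reviewer note: with the default iiab_hostname 'box' and iiab_domain 'lan' (an assumption; both come from default_vars.yml or local_vars.yml), the lineinfile task above rewrites the loopback entry so every expected name resolves locally. Quick check after a run:

```bash
# Inspect the single 127.0.0.1 line the task manages:
grep '^127\.0\.0\.1' /etc/hosts
# e.g.: 127.0.0.1 box.lan localhost.localdomain localhost box box box.lan
hostname -f   # compare against iiab_fqdn; a mismatch is what sets FQDN_changed: True
```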
diff --git a/roles/0-init/tasks/main.yml b/roles/0-init/tasks/main.yml
index 8d93f2441..8eb1668eb 100644
--- a/roles/0-init/tasks/main.yml
+++ b/roles/0-init/tasks/main.yml
@@ -7,17 +7,14 @@
# Higher-level purpose explained at the bottom of:
# https://github.com/iiab/iiab/blob/master/vars/default_vars.yml
-- name: "Ansible just ran /etc/ansible/facts.d/local_facts.fact to set 11 vars -- here we extract 3 of those -- rpi_model: {{ ansible_local.local_facts.rpi_model }}, devicetree_model: {{ ansible_local.local_facts.devicetree_model }}, iiab_stage: {{ ansible_local.local_facts.stage }}"
+- name: "Ansible just ran /etc/ansible/facts.d/local_facts.fact to set 15 vars -- here we extract 6 of those -- iiab_stage: {{ ansible_local.local_facts.stage }}, rpi_model: {{ ansible_local.local_facts.rpi_model }}, devicetree_model: {{ ansible_local.local_facts.devicetree_model }}, os_ver: {{ ansible_local.local_facts.os_ver }}, python_version: {{ ansible_local.local_facts.python_version }}, php_version: {{ ansible_local.local_facts.php_version }}"
set_fact:
+ iiab_stage: "{{ ansible_local.local_facts.stage }}"
rpi_model: "{{ ansible_local.local_facts.rpi_model }}"
devicetree_model: "{{ ansible_local.local_facts.devicetree_model }}"
- #xo_model: "{{ ansible_local.local_facts.xo_model }}"
- iiab_stage: "{{ ansible_local.local_facts.stage }}"
-
-# 2020-10-29: Appears no longer nec (see 3 above ansible_local.local_facts.*)
-#- name: Re-read local_facts.facts from /etc/ansible/facts.d
-# setup:
-# filter: ansible_local
+ os_ver: "{{ ansible_local.local_facts.os_ver }}"
+ python_version: "{{ ansible_local.local_facts.python_version }}"
+ php_version: "{{ ansible_local.local_facts.php_version }}"
# Initialize /etc/iiab/iiab.ini writing the 'location' and 'version' sections
# once and only once, to preserve the install date and git hash.
@@ -26,10 +23,9 @@
when: not iiab_ini_test.stat.exists
# 2021-07-30: The 'first_run' flag isn't much used anymore. In theory it's
-# still used in these 2 places:
-# (1) roles/1-prep/tasks/main.yml for raspberry_pi.yml
-# (2) roles/network/tasks/named.yml for "Stop named before copying files"
-# In practice however, it's no longer important, and might be reconsidered?
+# still used in 1-prep/tasks/hardware.yml for raspberry_pi.yml
+#
+# This needs to be reworked for 0-init speed, and overall understandability.
- name: Set first_run flag
set_fact:
first_run: True
@@ -38,11 +34,23 @@
# Copies the latest/known version of iiab-diagnostics into /usr/bin (so it can
# be run even if local source tree /opt/iiab/iiab is deleted to conserve disk).
-- name: Copy /opt/iiab/iiab/scripts/iiab-diagnostics to /usr/bin/iiab-diagnostics
+- name: Copy iiab-update & iiab-summary & iiab-diagnostics & iiab-root-login from /opt/iiab/iiab/scripts/ to /usr/bin/
copy:
- src: "{{ iiab_dir }}/scripts/iiab-diagnostics"
+ src: "{{ iiab_dir }}/scripts/{{ item }}"
dest: /usr/bin/
mode: '0755'
+ with_items:
+ - iiab-update
+ - iiab-summary
+ - iiab-diagnostics
+ - iiab-root-login
+
+- name: Symlink /usr/bin/iiab-upgrade -> /usr/bin/iiab-update
+ file:
+ src: /usr/bin/iiab-update
+ path: /usr/bin/iiab-upgrade
+ state: link
+ #force: yes
- name: Create globally-writable directory /etc/iiab/diag (0777) so non-root users can run 'iiab-diagnostics'
file:
@@ -53,13 +61,14 @@
- name: Pre-check that IIAB's "XYZ_install" + "XYZ_enabled" vars (1) are defined, (2) are boolean-not-string variables, and (3) contain plausible values. Also checks that "XYZ_install" is True when "XYZ_installed" is defined.
include_tasks: validate_vars.yml
+  when: not (rpi_model | regex_search('\\bW\\b'))    # Ansible requires double backslashes, e.g. with \b "word boundary" anchors: https://www.regular-expressions.info/wordboundaries.html https://stackoverflow.com/questions/56869119/ansible-regular-expression-to-match-a-string-and-extract-the-line/56869801#56869801
-- name: "Time Zone / TZ: Set symlink /etc/localtime to UTC if it doesn't exist?"
- include_tasks: tz.yml
-
-- name: Test Gateway + Test Internet + Set new hostname/domain (hostname.yml) if nec + Set 'gui_port' to 80 or 443 for Admin Console
- include_tasks: network.yml
+# 2022-12-30: Functionality moved to www_options/tasks/php-settings.yml
+# - name: "Time Zone / TZ: Set symlink /etc/localtime to UTC if it doesn't exist?"
+# include_tasks: tz.yml
+- name: Set hostname / domain (etc) in various places
+ include_tasks: hostname.yml
- name: Add 'runtime' variable values to {{ iiab_ini_file }}
ini_file:
@@ -74,10 +83,14 @@
value: "{{ iiab_base_ver }}"
- option: iiab_revision
value: "{{ iiab_revision }}"
+ - option: iiab_remote_url
+ value: "{{ ansible_local.local_facts.iiab_remote_url }}"
- option: runtime_branch
value: "{{ ansible_local.local_facts.iiab_branch }}"
- option: runtime_commit
value: "{{ ansible_local.local_facts.iiab_commit }}"
+ - option: iiab_recent_tag
+ value: "{{ ansible_local.local_facts.iiab_recent_tag }}"
- option: runtime_date
value: "{{ ansible_date_time.iso8601 }}"
- option: ansible_version
@@ -88,24 +101,22 @@
value: "{{ ansible_memtotal_mb }}"
- option: swap_mb
value: "{{ ansible_swaptotal_mb }}"
- - option: gw_active
- value: "{{ gw_active }}"
- - option: internet_available
- value: "{{ internet_available }}"
- option: rpi_model
value: "{{ rpi_model }}"
- option: devicetree_model
value: "{{ devicetree_model }}"
+ - option: os_ver
+ value: "{{ os_ver }}"
+ - option: python_version
+ value: "{{ python_version }}"
+ - option: php_version
+ value: "{{ php_version }}"
- option: first_run
value: "{{ first_run }}"
- - option: local_tz # e.g. 'EDT' (summer) or 'EST' (winter) after Ansible interprets symlink /etc/localtime -- or 'UTC' if /etc/localtime doesn't exist
- value: "{{ local_tz }}"
- - option: etc_localtime.stdout # e.g. 'America/New_York' direct from symlink /etc/localtime -- or '' if /etc/localtime doesn't exist
- value: "{{ etc_localtime.stdout }}"
- #- option: no_NM_reload
- # value: "{{ no_NM_reload }}"
- #- option: is_F18
- # value: "{{ is_F18 }}"
+ # - option: local_tz # e.g. 'EDT' (summer) or 'EST' (winter) after Ansible interprets symlink /etc/localtime -- or 'UTC' if /etc/localtime doesn't exist
+ # value: "{{ local_tz }}"
+ # - option: etc_localtime.stdout # e.g. 'America/New_York' direct from symlink /etc/localtime -- or '' if /etc/localtime doesn't exist
+ # value: "{{ etc_localtime.stdout }}"
- option: FQDN_changed
value: "{{ FQDN_changed }}"
diff --git a/roles/0-init/tasks/network.yml b/roles/0-init/tasks/network.yml
deleted file mode 100644
index c0d52ba68..000000000
--- a/roles/0-init/tasks/network.yml
+++ /dev/null
@@ -1,74 +0,0 @@
-- name: Do we have a gateway? If 'ip route' specifies a default route, Ansible parses details here...
- debug:
- var: ansible_default_ipv4
-
-- name: "If above ansible_default_ipv4.gateway is defined, set WAN candidate 'discovered_wan_iface: {{ ansible_default_ipv4.alias }}' -- using ansible_default_ipv4.alias"
- set_fact:
- discovered_wan_iface: "{{ ansible_default_ipv4.alias }}"
- when: ansible_default_ipv4.gateway is defined
-
-- name: "Verify gateway active: ping -c4 {{ ansible_default_ipv4.gateway }} -- using ansible_default_ipv4.gateway"
- shell: ping -c4 "{{ ansible_default_ipv4.gateway }}" | grep icmp_seq=4 | wc -l
- register: gw_active_test
- when: discovered_wan_iface != "none"
-
-- name: "If gateway responded, set 'gw_active: True' and 'iiab_wan_iface: {{ discovered_wan_iface }}' -- using discovered_wan_iface"
- set_fact:
- iiab_wan_iface: "{{ discovered_wan_iface }}"
- gw_active: True
- when: discovered_wan_iface != "none" and gw_active_test.stdout == "1"
-
-
-- name: 'Test for Internet access, using: {{ iiab_download_url }}/heart-beat.txt'
- get_url:
- url: "{{ iiab_download_url }}/heart-beat.txt"
- dest: /tmp/heart-beat.txt
- #timeout: "{{ download_timeout }}"
- # @jvonau recommends: 100sec is too much (keep 10sec default)
- ignore_errors: True
- #async: 10
- #poll: 2
- register: internet_access_test
-
-- name: "Set 'internet_available: True' if above download succeeded AND not disregard_network"
- set_fact:
- internet_available: True # Initialized to 'False' in 0-init/defaults/main.yml
- when: not internet_access_test.failed and not disregard_network
-
-- name: Remove downloaded Internet test file /tmp/heart-beat.txt
- file:
- path: /tmp/heart-beat.txt
- state: absent
-
-
-- name: "Set 'iiab_fqdn: {{ iiab_hostname }}.{{ iiab_domain }}'"
- set_fact:
- iiab_fqdn: "{{ iiab_hostname }}.{{ iiab_domain }}"
- FQDN_changed: False
-
-- name: Set hostname / domain (etc) in various places -- if iiab_fqdn != ansible_fqdn ({{ ansible_fqdn }})
- include_tasks: hostname.yml
- when: iiab_fqdn != ansible_fqdn
-
-# 2021-07-30: FQDN_changed isn't used as in the past -- its remaining use is
-# for {named, dhcpd, squid} in roles/network/tasks/main.yml -- possibly it
-# should be reconsidered? See PR #2876: roles/network might become optional?
-- name: "Also set 'FQDN_changed: True' -- if iiab_fqdn != ansible_fqdn ({{ ansible_fqdn }})"
- set_fact:
- FQDN_changed: True
- when: iiab_fqdn != ansible_fqdn
-
-
-# 2021-08-17: (1) iiab-gen-iptables works better if gui_port is set directly in
-# default_vars.yml and/or local_vars.yml (2) Admin Console's iiab-admin.yml
-# and js-menu.yml set 'adm_cons_force_ssl: False'
-
-# - name: "Set 'gui_port: 80' for Admin Console if not adm_cons_force_ssl"
-# set_fact:
-# gui_port: 80
-# when: not adm_cons_force_ssl
-
-# - name: "Set 'gui_port: 443' for Admin Console if adm_cons_force_ssl"
-# set_fact:
-# gui_port: 443
-# when: adm_cons_force_ssl
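For reference, a rough shell equivalent of the gateway and Internet checks that the deleted network.yml performed (a sketch only; the heart-beat URL assumes iiab_download_url is https://download.iiab.io/packages, as noted elsewhere in this diff):

    gw=$(ip route | awk '/^default/ {print $3; exit}')    # what Ansible exposes as ansible_default_ipv4.gateway
    if [ -n "$gw" ] && [ "$(ping -c4 "$gw" | grep icmp_seq=4 | wc -l)" = "1" ]; then
        echo "gw_active: True"                            # the 4th ICMP reply came back
    fi
    if wget -q -O /tmp/heart-beat.txt https://download.iiab.io/packages/heart-beat.txt; then
        echo "internet_available: True"                   # mirrors the get_url heart-beat test
    fi
    rm -f /tmp/heart-beat.txt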
diff --git a/roles/0-init/tasks/tz.yml b/roles/0-init/tasks/tz.yml.unused
similarity index 97%
rename from roles/0-init/tasks/tz.yml
rename to roles/0-init/tasks/tz.yml.unused
index 017947a05..dc07f8bc4 100644
--- a/roles/0-init/tasks/tz.yml
+++ b/roles/0-init/tasks/tz.yml.unused
@@ -1,3 +1,5 @@
+# 2022-12-30: Functionality moved to www_options/tasks/php-settings.yml
+
- name: "'local_tz: {{ local_tz }}' was set by ansible_date_time.tz in /opt/iiab/iiab/vars/default_vars.yml -- e.g. if Ansible finds symlink /etc/localtime -> ../usr/share/zoneinfo/America/New_York -- it will simplify that to 'EDT' (in the summer) or 'EST' (in the winter)"
command: echo
diff --git a/roles/0-init/tasks/validate_vars.yml b/roles/0-init/tasks/validate_vars.yml
index f29525daf..dee75addd 100644
--- a/roles/0-init/tasks/validate_vars.yml
+++ b/roles/0-init/tasks/validate_vars.yml
@@ -63,38 +63,29 @@
#
# 2020-11-04: Fix validation of 5 [now 4] core dependencies, for ./runrole etc
-- name: Set vars_checklist for 44 + 44 + 40 vars ("XYZ_install" + "XYZ_enabled" + "XYZ_installed") to be checked
+
+- name: Set vars_checklist for 45 + 45 + 40 vars ("XYZ_install" + "XYZ_enabled" + "XYZ_installed") to be checked
set_fact:
vars_checklist:
- hostapd
- - dhcpd
- - named
- dnsmasq
- bluetooth
- #- wondershaper # Unmaintained
- sshd
- - openvpn
+ #- openvpn # Deprecated
+ - tailscale
- remoteit
- admin_console
#- nginx # MANDATORY
#- apache # Unmaintained - former dependency
- #- mysql # MANDATORY
- squid
- #- dansguardian # Unmaintained
- cups
- samba
- usb_lib
- #- xo_services # Unmaintained
- #- activity_server # Unmaintained
- #- ejabberd_xs # Unmaintained
- #- idmgr # Unmaintained
- azuracast
- #- dokuwiki # Unmaintained
- #- ejabberd # Unmaintained
- #- elgg # Unmaintained
- gitea
- jupyterhub
- lokole
+ - mysql # Dependency - excluded from _installed check below
- mediawiki
- mosquitto
- nodejs # Dependency - excluded from _installed check below
@@ -111,6 +102,7 @@
- osm_vector_maps
- transmission
- awstats
+ - matomo
- monit
- munin
- phpmyadmin
@@ -122,6 +114,7 @@
- calibreweb
- calibre
- pbx
+ - network
- name: Assert that {{ vars_checklist | length }} "XYZ_install" vars are all... defined
assert:
@@ -163,5 +156,41 @@
that: "{{ item }}_install or {{ item }}_installed is undefined"
fail_msg: "DISALLOWED: '{{ item }}_install: False' (e.g. in /etc/iiab/local_vars.yml) WHEN '{{ item }}_installed' is defined (e.g. in /etc/iiab/iiab_state.yml) -- IIAB DOES NOT SUPPORT UNINSTALLS -- please verify those 2 files especially, and other places variables are defined?"
quiet: yes
- when: item != 'nodejs' and item != 'postgresql' and item != 'mongodb' and item != 'yarn' # Exclude auto-installed dependencies
+ when: item != 'mysql' and item != 'postgresql' and item != 'mongodb' and item != 'nodejs' and item != 'yarn' # Exclude auto-installed dependencies
loop: "{{ vars_checklist }}"
+
+
+- name: Set vars_deprecated_list for 4+ vars ("XYZ_install") to be checked
+ set_fact:
+ vars_deprecated_list:
+ - dhcpd # Deprecated
+ - named # Deprecated
+ - wondershaper # Deprecated
+ - dansguardian # Deprecated
+ #- xo_services # Unmaintained
+ #- activity_server # Unmaintained
+ #- ejabberd_xs # Unmaintained
+ #- idmgr # Unmaintained
+ #- dokuwiki # Unmaintained
+ #- ejabberd # Unmaintained
+ #- elgg # Unmaintained
+
+- name: 'DISALLOW "XYZ_install: True" if deprecated'
+ assert:
+ that: "{{ item }}_install is undefined or not {{ item }}_install"
+ fail_msg: "DISALLOWED: '{{ item }}_install: True' (e.g. in /etc/iiab/local_vars.yml)"
+ quiet: yes
+ loop: "{{ vars_deprecated_list }}"
+ # 2023-12-04: ansible-core 2.16.1 suddenly no longer allows 'assert' with
+ # 'with_items' below (whereas 'loop' construct above works!) BACKGROUND:
+ #
+ # 'due to mitigation of security issue CVE-2023-5764 in ansible-core 2.16.1,
+ # conditional expressions with embedded template blocks can fail with the
+ # message "Conditional is marked as unsafe, and cannot be evaluated."'
+ # https://docs.ansible.com/ansible-core/2.16/porting_guides/porting_guide_core_2.16.html#playbook
+ #
+ # with_items:
+ # - dhcpd # Deprecated
+ # - named # Deprecated
+ # - wondershaper # Deprecated
+ # - dansguardian # Deprecated
diff --git a/roles/1-prep/README.adoc b/roles/1-prep/README.adoc
index cdbd75142..997ec812b 100644
--- a/roles/1-prep/README.adoc
+++ b/roles/1-prep/README.adoc
@@ -6,7 +6,7 @@ https://github.com/iiab/iiab/wiki/IIAB-Contributors-Guide#ansible[stage]
hardware, low-level OS quirks, and basic security:
* SSHD
-* OpenVPN if/as needed later for remote support
+* Tailscale if/as needed later for remote support
* https://github.com/iiab/iiab/tree/master/roles/iiab-admin#iiab-admin-readme[iiab-admin]
username and group, to log into Admin Console
* dnsmasq (install now, configure later!)
@@ -14,17 +14,16 @@ username and group, to log into Admin Console
* Ubermix (distro) needs /etc/tmpfiles.d/iiab.conf to create essential
/var/log subdirs on each boot
* *_Hardware actions:_*
+ ** link:tasks/install-expand-rootfs.yml[*_install-expand-rootfs.yml_*]:
+ *** Install https://en.wikipedia.org/wiki/APT_(software)[apt] packages parted (reveals last partition) and cloud-guest-utils (for growpart)
+ *** Install link:templates/iiab-expand-rootfs[/usr/sbin/iiab-expand-rootfs] that acts on flag `/.expand-rootfs`
+ *** Enable iiab-expand-rootfs.service so this can happen during any future boot-up
** link:tasks/raspberry_pi.yml[*_raspberry_pi.yml_*]:
*** RTC (real-time clock): install udev rule, configure, enable
- *** *_Install packages related to:_*
- **** growpart
- **** swapfile
- **** fake-hwclock (as RTC is often missing or dead!)
- **** Wi-Fi
- *** Increase swap file size
- *** https://github.com/iiab/iiab/blob/master/roles/1-prep/templates/iiab-rpi-max-rootfs.sh[rootfs
- auto-resizing]
+ *** Install apt packages fake-hwclock (as above RTC is often missing or dead!) and dphys-swapfile (for swap file below)
+ *** Increase swap file size (to `pi_swap_file_size`)
** NUC 6 Wi-Fi firmware
+ ** Check for WiFi devices (if so, set `has_wifi_device`)
Recap: Similar to 0-init, 2-common, 3-base-server, 4 server-options and
5-xo-services -- this 1st stage installs core server infra (that is not
diff --git a/roles/1-prep/tasks/hardware.yml b/roles/1-prep/tasks/hardware.yml
index 2650c7217..52ef34a97 100644
--- a/roles/1-prep/tasks/hardware.yml
+++ b/roles/1-prep/tasks/hardware.yml
@@ -1,17 +1,37 @@
-## DISCOVER PLATFORMS ######
-# Put conditional actions for hardware platforms here
+- include_tasks: install-expand-rootfs.yml
+
+
+# Conditional hardware actions below:
- include_tasks: raspberry_pi.yml
when: first_run and rpi_model != "none"
-- name: Check if the identifier for Intel's NUC6 built-in WiFi is present
- shell: "lsusb | grep 8087:0a2b | wc | awk '{print $1}'"
- register: usb_NUC6
- ignore_errors: True
-- name: Download {{ iiab_download_url }}/iwlwifi-8000C-13.ucode to /lib/firmware for built-in WiFi on NUC6 # iiab_download_url is http://download.iiab.io/packages
- get_url:
- url: "{{ iiab_download_url }}/iwlwifi-8000C-13.ucode"
- dest: /lib/firmware
- timeout: "{{ download_timeout }}"
- when: usb_NUC6.stdout|int > 0
+# 2024-02-09: Code below appears stale for Shanti's #3707 hardware
+#- name: Check if the identifier for Intel's NUC6 built-in WiFi is present
+# shell: "lsusb | grep 8087:0a2b | wc | awk '{print $1}'"
+# register: usb_NUC6
+# ignore_errors: True
+#
+#- name: Download {{ iiab_download_url }}/iwlwifi-8000C-13.ucode to /lib/firmware for built-in WiFi on NUC6
+# get_url:
+# url: "{{ iiab_download_url }}/Old/iwlwifi-8000C-13.ucode" # https://download.iiab.io/packages
+# dest: /lib/firmware
+# timeout: "{{ download_timeout }}"
+# when: usb_NUC6.stdout|int > 0
+
+
+- name: "Look for any WiFi devices present: ls -la /sys/class/net/*/phy80211 | cut -d/ -f5"
+ shell: ls -la /sys/class/net/*/phy80211 | cut -d/ -f5
+ register: wifi_devices
+ ignore_errors: True
+ changed_when: False
+
+- name: "Set has_wifi_device: True, if output (from above) shows device(s) here: {{ wifi_devices.stdout_lines }}"
+ set_fact:
+ has_wifi_device: True
+ when: wifi_devices is defined and wifi_devices.stdout_lines | length > 0
+ # when: wifi_devices is defined and wifi_devices.stdout | trim != ""
+
+- debug:
+ var: has_wifi_device
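Roughly what the detection above sees on a box with one onboard WiFi adapter ('wlan0' is just an illustrative device name):

    ls -la /sys/class/net/*/phy80211 | cut -d/ -f5    # e.g. prints "wlan0" when one WiFi adapter is present

Each phy80211 symlink belongs to a wireless interface, so any output at all is enough for has_wifi_device to be set True.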
diff --git a/roles/1-prep/tasks/install-expand-rootfs.yml b/roles/1-prep/tasks/install-expand-rootfs.yml
new file mode 100644
index 000000000..b5296e5c1
--- /dev/null
+++ b/roles/1-prep/tasks/install-expand-rootfs.yml
@@ -0,0 +1,20 @@
+- name: Install packages 'parted' and 'cloud-guest-utils' (for /usr/bin/growpart, though raspi-config uses fdisk)
+ package:
+ name:
+ - parted # 2022-03-15: RasPiOS and Ubuntu install this regardless -- so rarely nec, but just in case.
+ - cloud-guest-utils # 2022-04-02: For growpart command -- whereas RasPiOS's 'raspi-config --expand-rootfs' instead uses fdisk (requiring a reboot, see do_expand_rootfs() in https://github.com/RPi-Distro/raspi-config/blob/master/raspi-config). FYI Ubuntu pre-installs cloud-guest-utils, for use with cloud-init.
+ state: present
+
+- name: "Install from templates: /usr/sbin/iiab-expand-rootfs, /etc/systemd/system/iiab-expand-rootfs.service"
+ template:
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ mode: "{{ item.mode }}"
+ with_items:
+ - { src: 'iiab-expand-rootfs', dest: '/usr/sbin/', mode: '0755' }
+ - { src: 'iiab-expand-rootfs.service', dest: '/etc/systemd/system/', mode: '0644' }
+
+- name: Enable iiab-expand-rootfs.service
+ systemd:
+ name: iiab-expand-rootfs
+ enabled: yes
diff --git a/roles/1-prep/tasks/main.yml b/roles/1-prep/tasks/main.yml
index 0732c6dd8..16cf5976e 100644
--- a/roles/1-prep/tasks/main.yml
+++ b/roles/1-prep/tasks/main.yml
@@ -3,33 +3,47 @@
- name: ...IS BEGINNING ============================================
meta: noop
-- name: SSHD -- required by OpenVPN below -- also run by roles/4-server-options/tasks/main.yml
+- name: SSHD
include_role:
name: sshd
when: sshd_install
-- name: OPENVPN
+- name: TAILSCALE (VPN)
include_role:
- name: openvpn
- when: openvpn_install
+ name: tailscale
+ when: tailscale_install
- name: REMOTE.IT
include_role:
name: remoteit
when: remoteit_install
-- name: IIAB-ADMIN -- includes roles/iiab-admin/tasks/access.yml
+- name: IIAB-ADMIN -- includes {lynx, screen, sudo-prereqs.yml, admin-user.yml, pwd-warnings.yml}
include_role:
name: iiab-admin
#when: iiab_admin_install # Flag might be created in future?
-- name: Install dnsmasq -- configure LATER in 'network', after Stage 9
- include_tasks: roles/network/tasks/dnsmasq.yml
- #when: dnsmasq_install # Flag might be used in future?
+- name: Copy iiab-apps-to-be-installed from {{ iiab_dir }}/scripts to /usr/bin/
+ copy:
+ src: "{{ iiab_dir }}/scripts/iiab-apps-to-be-installed" # /opt/iiab/iiab
+ dest: /usr/bin/
+ mode: '0755'
+
+- name: Copy iiab-network from {{ iiab_dir }}/scripts to /usr/local/bin/
+ copy:
+ src: "{{ iiab_dir }}/scripts/iiab-network"
+ dest: /usr/local/bin/
+ mode: '0755'
+
+- name: Install ~12 network/wifi/related packages + Squid if necessary + configure /etc/sysctl.conf -- full configuration LATER in 'network', after Stage 9
+ include_tasks: roles/network/tasks/install.yml
+ when: network_install and network_installed is undefined
- include_tasks: uuid.yml
- include_tasks: ubermix.yml
-- include_tasks: hardware.yml # Can run raspberry_pi.yml
+
+- name: install-expand-rootfs.yml, raspberry_pi.yml, NUC6 WiFi firmware, check for WiFi devices
+ include_tasks: hardware.yml
# Debian 10 "Buster" is apparently enabling AppArmor in 2019:
@@ -60,7 +74,10 @@
# when: not is_debuntu and selinux_disabled is defined and selinux_disabled.changed
-- name: Recording STAGE 1 HAS COMPLETED ============================
+- name: Install {{ iiab_env_file }} from template -- FYI this file can be run as a script if absolutely nec -- e.g. 'source /etc/iiab/iiab.env && echo $WWWROOT'
template:
src: roles/1-prep/templates/iiab.env.j2
- dest: "{{ iiab_env_file }}" # Can also be run as a script if absolutely nec, e.g. 'source /etc/iiab/iiab.env && echo $WWWROOT'
+ dest: "{{ iiab_env_file }}"
+
+- name: Recording STAGE 1 HAS COMPLETED ============================
+ meta: noop
diff --git a/roles/1-prep/tasks/raspberry_pi.yml b/roles/1-prep/tasks/raspberry_pi.yml
index bc54f58af..fbc8cc784 100644
--- a/roles/1-prep/tasks/raspberry_pi.yml
+++ b/roles/1-prep/tasks/raspberry_pi.yml
@@ -4,9 +4,6 @@
template:
src: 92-rtc-i2c.rules
dest: /etc/udev/rules.d/92-rtc-i2c.rules
- #owner: root
- #group: root
- #mode: 0644
when: rtc_id is defined and rtc_id != "none"
# RTC requires a change to the device tree (and reboot)
@@ -24,39 +21,14 @@
state: present
when: rtc_id is defined and rtc_id != "none" and is_ubuntu # CLARIF: Ubuntu runs increasingly well on RPi hardware, starting in 2020 especially
-#- name: Enable bluetooth in /boot/firmware/syscfg.txt on Ubuntu (needs reboot)
-# lineinfile:
-# path: /boot/firmware/syscfg.txt
-# regexp: '^include*'
-# line: 'include btcfg.txt'
-# when: is_ubuntu
-
-- name: '2021-07-27: SEE ALSO ~4 networking packages LATER installed by https://github.com/iiab/iiab/blob/master/roles/2-common/tasks/packages.yml'
- meta: noop
-
-- name: '2021-07-27: SEE ALSO 4-5 networking packages LATER installed by https://github.com/iiab/iiab/blob/master/roles/2-common/tasks/network.yml'
- meta: noop
-
-# 2021-07-27 explanation from @jvonau: The 3 BELOW (iw, rfkill, wireless-tools)
-# are provided by RaspiOS. Ubuntu|Debian on the other hand are hit or miss:
-# desktops might have some/all 3 preinstalled, while servers tend not to have
-# these present at all, but are needed to be installed if you want to take full
-# advantage of WiFi on Ubuntu and friends -- but it's only enforced on RPi
-# hardware where we know in advance of the likelihood of WiFi being present.
-
-- name: 'Install packages: cloud-guest-utils, dphys-swapfile, fake-hwclock, iw, rfkill, wireless-tools'
+- name: 'Install packages: fake-hwclock, dphys-swapfile'
package:
name:
- - cloud-guest-utils # Contains 'growpart' for resizing a partition during boot, which is normally done with the aid of cloud-init
- - dphys-swapfile # 2021-07-27: RaspiOS installs this regardless -- autogenerate and use a swap file
- - fake-hwclock # 2021-07-27: RaspiOS installs this regardless -- save/restore system clock on machines without working RTC hardware
- - iw # 2021-07-27: RaspiOS installs this regardless -- configure Linux wireless devices -- hard dependence for ap0 creation, SEE https://github.com/iiab/iiab/blob/master/roles/network/templates/hostapd/iiab-clone-wifi.service.j2
- - rfkill # 2021-07-27: RaspiOS installs this regardless -- enable & disable wireless devices
- - wireless-tools # 2021-07-27: RaspiOS installs this regardless -- manipulate Linux Wireless Extensions
+ - fake-hwclock # 2021-03-15: Missing on Ubuntu etc. RasPiOS installs this regardless -- to save/restore system clock on machines w/o working RTC (above).
+ - dphys-swapfile # 2021-03-15: Missing on Ubuntu etc. RasPiOS installs this regardless -- to autogenerate and use a swap file (below).
state: present
-
- name: Increase swap file size (to CONF_SWAPSIZE={{ pi_swap_file_size }} in /etc/dphys-swapfile) as kalite pip download fails
lineinfile:
path: /etc/dphys-swapfile
@@ -70,18 +42,9 @@
state: restarted
-- name: Install RPi rootfs resizing (/usr/sbin/iiab-rpi-max-rootfs.sh) and its systemd service (/etc/systemd/system/iiab-rpi-root-resize.service), from templates (root:root by default)
- template:
- src: "{{ item.src }}"
- dest: "{{ item.dest }}"
- #owner: root
- #group: root
- mode: "{{ item.mode }}"
- with_items:
- - { src: 'iiab-rpi-max-rootfs.sh', dest: '/usr/sbin/', mode: '0755' }
- - { src: 'iiab-rpi-root-resize.service', dest: '/etc/systemd/system/', mode: '0644' }
-
-- name: Enable RPi rootfs resizing (systemd service iiab-rpi-root-resize.service)
- systemd:
- name: iiab-rpi-root-resize
- enabled: yes
+#- name: Enable bluetooth in /boot/firmware/syscfg.txt on Ubuntu (needs reboot)
+# lineinfile:
+# path: /boot/firmware/syscfg.txt
+# regexp: '^include*'
+# line: 'include btcfg.txt'
+# when: is_ubuntu
diff --git a/roles/1-prep/templates/iiab-expand-rootfs b/roles/1-prep/templates/iiab-expand-rootfs
new file mode 100644
index 000000000..89d2bd552
--- /dev/null
+++ b/roles/1-prep/templates/iiab-expand-rootfs
@@ -0,0 +1,72 @@
+#!/bin/bash -xe
+
+# Expand rootfs partition to its maximum size, if /.expand-rootfs exists.
+# Used by /etc/systemd/system/iiab-expand-rootfs.service on IIAB boot.
+
+# Should work with all Linux OS's boot disks -- regardless whether Raspberry Pi
+# microSD cards, external USB drives, internal spinning disks or SSD's, etc.
+
+# Verifies that rootfs is the last partition.
+
+# RELATED:
+# 1. https://github.com/iiab/iiab-factory/blob/master/box/rpi/min-sd
+# 2. https://github.com/iiab/iiab-factory/blob/master/box/rpi/cp-sd
+# 3. https://github.com/iiab/iiab-factory/blob/master/box/rpi/xz-json-sd
+# OR https://github.com/iiab/iiab-factory/blob/master/box/rpi/exp-sd
+
+if [ -f /.expand-rootfs ] || [ -f /.resize-rootfs ]; then
+ echo "$0: Expanding rootfs partition"
+
+ if [ -x /usr/bin/raspi-config ]; then # Raspberry Pi OS -- WARNING: their fdisk-centric approach of course FAILS with "Hybrid MBR" or GPT partition tables, as required by any drive > 2TB :/
+ # 2022-02-17: Uses do_expand_rootfs() from:
+ # https://github.com/RPi-Distro/raspi-config/blob/master/raspi-config
+ # 2023-10-05: Official new RPi instructions:
+ # sudo raspi-config nonint do_expand_rootfs
+ # https://www.raspberrypi.com/documentation/computers/configuration.html#expand-filesystem-nonint
+ raspi-config --expand-rootfs # REQUIRES A REBOOT
+ rm -f /.expand-rootfs /.resize-rootfs
+ reboot # In future, we might warn interactive users that a reboot is coming?
+ else # REQUIRES NO REBOOT; BEWARE iiab-expand-rootfs.service RACE CONDITION WITH fsck (PR #2522 & #3325)
+ # 2022-03-15: Borrows from above raspi-config URL's do_expand_rootfs()
+ ROOT_PART="$(findmnt / -o SOURCE -n)" # e.g. /dev/sda2 or /dev/mmcblk0p2
+ ROOT_DEV="/dev/$(lsblk -no pkname "$ROOT_PART")" # e.g. /dev/sda or /dev/mmcblk0
+
+ ROOT_PART_NUM="$(echo "$ROOT_PART" | grep -o "[[:digit:]]*$")" # e.g. 2
+ # SLOW (~10 seconds) but it works!
+ LAST_PART_NUM=$(parted "$ROOT_DEV" -ms unit s p | tail -n 1 | cut -f 1 -d:)
+
+ if [ $ROOT_PART_NUM -ne $LAST_PART_NUM ]; then
+ echo "ERROR: $ROOT_PART partition ($ROOT_PART_NUM) is not the last partition ($LAST_PART_NUM). Don't know how to expand."
+ exit 1
+ fi
+
+ # Expand partition
+ growpart $ROOT_DEV $ROOT_PART_NUM || true # raspi-config instead uses fdisk (assuming MBR). They really should transition to gdisk, as required by any drive > 2TB. WARNING: growpart RC 2 is more severe than RC 1, and should possibly be handled separately in future?
+ rc=$? # Make Return Code visible, for 'bash -x'
+ resize2fs $ROOT_PART
+ rc=$? # Make RC visible (as above)
+
+ # 2022-03-15: Legacy code below worked with Raspberry Pi microSD cards
+ # but *not* with USB boot drives, internal spinning disks/SSD's, etc.
+
+ # # ASSUMES SD CARD STYLE PARTITION NAME LIKE p
+ # # e.g. /dev/mmcblk0p2 mounts at / (typical RasPiOS microSD)
+ # # BUT /dev/sda2 mounts at /media/usb1 (RasPiOS USB boot disk...
+ # # ...WON'T WORK BELOW; recap @ PR #3121)
+
+ # # Calculate root partition
+ # root_part=`lsblk -aP -o NAME,MOUNTPOINT | grep 'MOUNTPOINT="/"' | awk -F\" '{ print $2 }'` # e.g. mmcblk0p2
+ # root_dev=${root_part:0:-2} # e.g. mmcblk0
+ # # bash substring expansion: "negative offset [below, but not above]
+ # # must be separated from the colon by at least one space to avoid
+ # # being confused with the β:-β expansion"
+ # # https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html
+ # root_part_no=${root_part: -1} # e.g. 2
+
+ # # Resize partition
+ # growpart /dev/$root_dev $root_part_no
+ # resize2fs /dev/$root_part
+
+ rm -f /.expand-rootfs /.resize-rootfs
+ fi
+fi
diff --git a/roles/1-prep/templates/iiab-expand-rootfs.service b/roles/1-prep/templates/iiab-expand-rootfs.service
new file mode 100644
index 000000000..91de4cc5b
--- /dev/null
+++ b/roles/1-prep/templates/iiab-expand-rootfs.service
@@ -0,0 +1,24 @@
+[Unit]
+Description=Root Filesystem Auto-Expander
+DefaultDependencies=no
+# 2022-08-08: IIAB's 4 core OS's have 'After=systemd-fsck-root.service' WITHIN
+# systemd-remount-fs.service, allowing us to avoid #3325 race condition w/ fsck
+After=systemd-remount-fs.service
+# 2022-08-08: While dphys-swapfile.service doesn't exist on Ubuntu, Mint
+# and pure Debian, the following line may still serve a purpose on RasPiOS:
+Before=dphys-swapfile.service
+
+[Service]
+Environment=TERM=linux
+Type=oneshot
+ExecStart=/usr/sbin/iiab-expand-rootfs
+# 2022-08-08: By default, systemd dangerously kills rootfs expansion after just
+# 90s (1TB microSD cards take ~8 min to expand). Let's remove the time limit:
+TimeoutSec=infinity
+# "Standard output type syslog is obsolete"
+# StandardError=syslog
+# WHEREAS StandardError=journal is the default, per https://www.freedesktop.org/software/systemd/man/systemd.exec.html#StandardOutput=
+RemainAfterExit=yes
+
+[Install]
+WantedBy=local-fs.target
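A minimal usage sketch, assuming the script and unit above are installed by install-expand-rootfs.yml: drop the flag file and reboot, then confirm the service ran and consumed the flag.

    sudo touch /.expand-rootfs             # flag checked by /usr/sbin/iiab-expand-rootfs
    sudo reboot                            # iiab-expand-rootfs.service runs early in the next boot
    # ...after the reboot:
    systemctl status iiab-expand-rootfs    # RemainAfterExit=yes, so success stays visible
    ls /.expand-rootfs                     # should now report "No such file or directory"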
diff --git a/roles/1-prep/templates/iiab-rpi-max-rootfs.sh b/roles/1-prep/templates/iiab-rpi-max-rootfs.sh
deleted file mode 100644
index d24788602..000000000
--- a/roles/1-prep/templates/iiab-rpi-max-rootfs.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash -x
-# Resize rootfs and its partition on the rpi SD card to maximum size
-# To be used by systemd service on boot
-# Only resizes if /.resize-rootfs exists
-# Assumes root is last partition
-# Only works on F22 + where resizepart command exists
-# Assumes sd card style partition name like p
-
-if [ -f /.resize-rootfs ];then
- echo "$0: maximizing rootfs partion"
- # Calculate root partition
- root_part=`lsblk -aP -o NAME,MOUNTPOINT|grep 'MOUNTPOINT="/"' |awk -F\" '{ print $2 }'`
- root_dev=${root_part:0:-2}
- root_part_no=${root_part: (-1)}
-
- # Resize partition
- growpart /dev/$root_dev $root_part_no
- resize2fs /dev/$root_part
- rm /.resize-rootfs
-fi
diff --git a/roles/1-prep/templates/iiab-rpi-root-resize.service b/roles/1-prep/templates/iiab-rpi-root-resize.service
deleted file mode 100644
index 2cd33ec11..000000000
--- a/roles/1-prep/templates/iiab-rpi-root-resize.service
+++ /dev/null
@@ -1,12 +0,0 @@
-[Unit]
-Description=Root Filesystem Auto-Resizer
-
-[Service]
-Environment=TERM=linux
-Type=oneshot
-ExecStart=/usr/sbin/iiab-rpi-max-rootfs.sh
-StandardError=syslog
-RemainAfterExit=no
-
-[Install]
-WantedBy=multi-user.target
diff --git a/roles/2-common/README.adoc b/roles/2-common/README.adoc
index 83943e761..3397bf2a3 100644
--- a/roles/2-common/README.adoc
+++ b/roles/2-common/README.adoc
@@ -9,10 +9,8 @@ https://internet-in-a-box.org/[Internet-in-a-Box (IIAB)] server.
These are (partially) put in place:
* IIAB directory structure (link:tasks/fl.yml[file layout])
-* Common https://en.wikipedia.org/wiki/APT_(software)[apt] software
-packages
-* Networking (including the
-https://en.wikipedia.org/wiki/Iptables[iptables] firewall)
+* Common https://en.wikipedia.org/wiki/APT_(software)[apt] software packages
+* Networking apt packages (including many WiFi tools, and also iptables-persistent for the https://en.wikipedia.org/wiki/Iptables[iptables] firewall)
* link:tasks/iiab-startup.yml[/usr/libexec/iiab-startup.sh] similar to
AUTOEXEC.BAT and /etc/rc.local, in order to run jobs on boot
diff --git a/roles/2-common/tasks/fl.yml b/roles/2-common/tasks/fl.yml
index 0235c2522..829b8dfbf 100644
--- a/roles/2-common/tasks/fl.yml
+++ b/roles/2-common/tasks/fl.yml
@@ -1,6 +1,6 @@
# fl.yml signifies "file layout"
-- name: "File Layout - Create directories: 1 in /etc, 1 in {{ py3_dist_path }}, 3 in {{ iiab_base }}, 17 in {{ content_base }}" # iiab_base: /opt/iiab
+- name: "File Layout - Create directories: 1 in {{ py3_dist_path }}, 2 in {{ iiab_base }}, 17 in {{ content_base }}" # iiab_base: /opt/iiab
file:
path: "{{ item }}"
# owner: root
@@ -8,11 +8,11 @@
# mode: '0755'
state: directory
with_items:
- - /etc/sysconfig/olpc-scripts/setup.d/installed/
+ #- /etc/sysconfig/olpc-scripts/setup.d/installed/
- "{{ py3_dist_path }}/iiab" # /usr/lib/python3/dist-packages
- - "{{ yum_packages_dir }}" # /opt/iiab/yum-packages
+ #- "{{ yum_packages_dir }}" # /opt/iiab/yum-packages
- "{{ pip_packages_dir }}" # /opt/iiab/pip-packages
- - "{{ downloads_dir }}" # /opt/iiab/downloads -- generally already done by Stage 1's roles/remoteit/tasks/install.yml
+ - "{{ downloads_dir }}" # /opt/iiab/downloads
#- "{{ content_base }}/downloads" # /library/downloads auto-created just below
- "{{ content_base }}/downloads/zims"
- "{{ content_base }}/downloads/maps"
diff --git a/roles/2-common/tasks/main.yml b/roles/2-common/tasks/main.yml
index a89928992..c6fb3f21c 100644
--- a/roles/2-common/tasks/main.yml
+++ b/roles/2-common/tasks/main.yml
@@ -8,8 +8,14 @@
- include_tasks: packages.yml
-- name: "Network prep, including partial setup of iptables (firewall) -- SEE ALSO: 1-prep/tasks/raspberry_pi.yml"
- include_tasks: network.yml
+- name: "Use 'sysctl' to set 'kernel.core_uses_pid: 1' in /etc/sysctl.conf"
+ sysctl: # Places these settings in /etc/sysctl.conf, to survive reboot
+ name: "{{ item.name }}"
+ value: "{{ item.value }}"
+ with_items:
+ #- { name: 'kernel.sysrq', value: '1' } # OS values differ, Ok?
+ - { name: 'kernel.core_uses_pid', value: '1' }
+ #- { name: 'kernel.shmmax', value: '268435456' } # OS values differ, Ok?
- include_tasks: iiab-startup.yml
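The sysctl module above persists the setting in /etc/sysctl.conf; one way to verify it (or apply it by hand) could be:

    sysctl kernel.core_uses_pid              # read the running value
    sudo sysctl -w kernel.core_uses_pid=1    # set it immediately
    grep core_uses_pid /etc/sysctl.conf      # confirm it will survive a reboot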
diff --git a/roles/2-common/tasks/network.yml b/roles/2-common/tasks/network.yml
deleted file mode 100644
index 435c0bb1f..000000000
--- a/roles/2-common/tasks/network.yml
+++ /dev/null
@@ -1,45 +0,0 @@
-- name: '2021-07-27: SEE ALSO ~3 networking packages EARLIER installed by https://github.com/iiab/iiab/blob/master/roles/1-prep/tasks/raspberry_pi.yml'
- meta: noop
-
-- name: '2021-07-27: SEE ALSO ~4 networking packages EARLIER installed by https://github.com/iiab/iiab/blob/master/roles/2-common/tasks/packages.yml'
- meta: noop
-
-- name: Install package networkd-dispatcher (OS's other than RaspiOS)
- package:
- name: networkd-dispatcher # Dispatcher service for systemd-networkd connection status changes
- state: present
- when: not is_raspbian
-
-- name: 'Install network packages: hostapd, iproute2, iptables-persistent, netmask -- later used by https://github.com/iiab/iiab/tree/master/roles/network'
- package:
- name:
- - hostapd # IEEE 802.11 AP and IEEE 802.1X/WPA/WPA2/EAP Authenticator -- has its service masked out of the box, and only used when IIAB's network roles detects the presence of WiFi and an AP is desired
- - iproute2 # 2021-07-27: RaspiOS installs this regardless -- the new networking and traffic control tools, meant to replace net-tools
- - iptables-persistent # Boot-time loader for netfilter rules, iptables (firewall) plugin -- however Netfilter / nftables is ever moving forward so keep an eye on it!
- - netmask # Handy utility -- helps determine network masks
- state: present
-
-# 2021-08-17: Debian ignores this, according to 2013 post:
-# https://serverfault.com/questions/511099/debian-ignores-etc-network-if-pre-up-d-iptables
-# - name: Install /etc/network/if-pre-up.d/iptables from template (0755)
-# template:
-# src: iptables
-# dest: /etc/network/if-pre-up.d/iptables
-# mode: '0755'
-
-# Ongoing rework (e.g. PR #2652) arising from ansible.posix collection changes:
-- name: "Use 'sysctl' to set 'kernel.core_uses_pid: 1' + 4 network settings in /etc/sysctl.conf -- e.g. disabling IPv6 (this might be overkill, as IPv6 should really only be disabled on the LAN side, i.e. br0)"
- sysctl: # Places these settings in /etc/sysctl.conf, to survive reboot
- name: "{{ item.name }}"
- value: "{{ item.value }}"
- with_items:
- - { name: 'net.ipv4.ip_forward', value: '1' } # Masquerading LAN->Internet
- - { name: 'net.ipv4.conf.default.rp_filter', value: '1' }
- - { name: 'net.ipv4.conf.default.accept_source_route', value: '0' }
- #- { name: 'kernel.sysrq', value: '1' } # OS values differ, Ok?
- - { name: 'kernel.core_uses_pid', value: '1' }
- #- { name: 'net.ipv4.tcp_syncookies', value: '1' } # Very standard in 2020
- #- { name: 'kernel.shmmax', value: '268435456' } # OS values differ, Ok?
- - { name: 'net.ipv6.conf.all.disable_ipv6', value: '1' } # IPv6 disabled
- #- { name: 'net.ipv6.conf.default.disable_ipv6', value: '1' } # AUTO-SET
- #- { name: 'net.ipv6.conf.lo.disable_ipv6', value: '1' } # BY ABOVE
diff --git a/roles/2-common/tasks/packages.yml b/roles/2-common/tasks/packages.yml
index 81bc35c94..681d8b83f 100644
--- a/roles/2-common/tasks/packages.yml
+++ b/roles/2-common/tasks/packages.yml
@@ -1,47 +1,39 @@
-- name: '2021-07-27: SEE ALSO ~3 networking packages EARLIER installed by https://github.com/iiab/iiab/blob/master/roles/1-prep/tasks/raspberry_pi.yml'
- meta: noop
+# 2022-03-16: 'apt show | grep Size' revealed download sizes, on 64-bit RasPiOS with desktop.
-- name: '2021-07-27: SEE ALSO 4-5 networking packages LATER installed by https://github.com/iiab/iiab/blob/master/roles/2-common/tasks/network.yml'
- meta: noop
-
-- name: "Install 20 common packages: acpid, avahi-daemon, bzip2, curl, gawk, htop, i2c-tools, libnss-mdns, logrotate, mlocate, net-tools, pandoc, pastebinit, rsync, sqlite3, tar, unzip, usbutils, wget, wpasupplicant"
+- name: "Install 19 common packages: acpid, bzip2, cron, curl, gawk, gpg, htop, i2c-tools, logrotate, lshw, pandoc, pastebinit, plocate, rsync, sqlite3, tar, unzip, usbutils, wget"
package:
name:
- - acpid # Daemon for ACPI (power mgmt) events
- - avahi-daemon # 2021-07-27: RaspiOS (and package libnss-mnds, below) install this regardless -- holdover from the XO days and used to advertise ssh/admin-console being available via avahi-daemon -- used with https://github.com/iiab/iiab/blob/master/roles/network/tasks/avahi.yml
- #- avahi-discover # 2021-07-27: Commented out long ago
- - bzip2 # 2021-04-26: Prob not used, but can't hurt?
- - curl # Used to install roles/nodejs and roles/nodered
- #- etckeeper # "nobody is really using etckeeper and it's bloating the filesystem every time apt runs" per @jvonau at https://github.com/iiab/iiab/issues/1146
- #- exfat-fuse # 2021-07-27: Should no longer be nec with 5.4+ kernels, so let's try commenting it out
- #- exfat-utils # Ditto! See also 'ntfs-3g' below
- - gawk
- - htop
- - i2c-tools # Low-level bus/chip/register/EEPROM tools e.g. for RTC
- #- inetutils-syslogd # 2021-07-27: Error logging facility -- holdover from the XO days, journalctl has replaced this in newer distros
- #- iproute2 # Installed by roles/2-common/tasks/network.yml
- - logrotate
- - libnss-mdns # 2021-07-27: RaspiOS (and package avahi-daemon, above) install this regardless -- client-side library -- provides name resolution via mDNS (Multicast DNS) using Zeroconf/Bonjour e.g. Avahi
- #- lynx # Installed by 1-prep's roles/iiab-admin/tasks/access.yml
- #- make # 2021-07-27: Currently used by roles/pbx and no other roles
- - mlocate
- - net-tools # 2021-04-26: @jvonau suggests possibly deleting this...unless oldtimers really want these older commands in iiab-diagnostics output?
- #- ntfs-3g # 2021-07-31: RaspiOS installs this regardless -- but this should no longer be nec with 5.4+ kernels, similar to exfat packages above -- however, see also this symlink warning: https://superuser.com/questions/1050544/mount-with-kernel-ntfs-and-not-ntfs-3g -- and upcoming kernel 5.15 improvements: https://www.phoronix.com/scan.php?page=news_item&px=New-NTFS-Likely-For-Linux-5.15
- #- openssh-server # ssh (Raspbian) or openssh-server (other OS's) already installed by 1-prep's roles/sshd/tasks/main.yml
- - pandoc # For /usr/bin/iiab-refresh-wiki-docs
- - pastebinit # For /usr/bin/iiab-diagnostics
- #- python3-pip # 2021-07-29: Already installed by /opt/iiab/iiab/scripts/ansible -- this auto-installs 'python3-setuptools' and 'python3' etc
- #- python3-venv # 2021-07-30: For Ansible module 'pip' used in roles like {calibre-web, jupyterhub, lokole} -- whereas roles/kalite uses (virtual) package 'virtualenv' for Python 2 -- all these 3+1 IIAB roles install 'python3-venv' for themselves. FYI: Debian 11 auto-installs 'python3-venv' when you install 'python3' -- whereas Ubuntu (e.g. 20.04 & 21.10) and RaspiOS 10 do not.
- - rsync
- #- screen # Installed by 1-prep's roles/iiab-admin/tasks/access.yml
- - sqlite3
- #- sudo # (1) Should be installed prior to installing IIAB, (2) Can also be installed by roles/1-prep's roles/openvpn/tasks/install.yml, (3) Is definitely installed by 1-prep's roles/iiab-admin/tasks/sudo-prereqs.yml
- - tar
- - unzip
- #- usbmount # Moved to roles/usb_lib/tasks/install.yml
- - usbutils # 2021-07-27: RaspiOS installs this regardless -- move to roles/usb_lib/tasks/install.yml ?
- - wget
- - wpasupplicant # 2021-07-27: RaspiOS installs this regardless -- client library for connections to a WiFi AP
+ - acpid # 55kB download: Daemon for ACPI (power mgmt) events
+ - bzip2 # 47kB download: RasPiOS installs this regardless -- 2021-04-26: Prob not used, but can't hurt?
+ - cron # 98kB download: RasPiOS installs this regardless -- 2022-10-13: Debian 12 needs this added (for now?)
+ - curl # 254kB download: RasPiOS installs this regardless -- Used to install roles/nodejs and roles/nodered
+ #- etckeeper # 54kB download: "nobody is really using etckeeper and it's bloating the filesystem every time apt runs" per @jvonau at https://github.com/iiab/iiab/issues/1146
+ #- exfat-fuse # 28kB download: 2021-07-27: Should no longer be nec with 5.4+ kernels, so let's try commenting it out
+ #- exfat-utils # 41kB download: Ditto! See also 'ntfs-3g' below
+ - gawk # 533kB download
+ - gpg # 884kB download: Debian 12+ (especially!) require this for apt installs of gitea, kolibri, mongodb, yarn
+ - htop # 109kB download: RasPiOS installs this regardless
+ - i2c-tools # 78kB download: Low-level bus/chip/register/EEPROM tools e.g. for RTC
+ - logrotate # 67kB download: RasPiOS installs this regardless
+ - lshw # 257kB download: For 'lshw -C network' in iiab-diagnostics
+ #- lynx # 505kB download: Installed by 1-prep's roles/iiab-admin/tasks/main.yml
+ #- make # 376kB download: 2021-07-27: Currently used by roles/pbx and no other roles
+ #- ntfs-3g # 379kB download: RasPiOS installs this regardless -- 2021-07-31: But this should no longer be nec with 5.4+ kernels, similar to exfat packages above -- however, see also this symlink warning: https://superuser.com/questions/1050544/mount-with-kernel-ntfs-and-not-ntfs-3g -- and upcoming kernel 5.15 improvements: https://www.phoronix.com/scan.php?page=news_item&px=New-NTFS-Likely-For-Linux-5.15
+ #- openssh-server # 318kB download: RasPiOS installs this regardless -- this is also installed by 1-prep's roles/sshd/tasks/main.yml to cover all OS's
+ - pandoc # 19kB download: For /usr/bin/iiab-refresh-wiki-docs
+ - pastebinit # 47kB download: For /usr/bin/iiab-diagnostics
+ #- mlocate # 92kB download
+ - plocate # 97kB download: Faster & smaller than locate & mlocate
+ #- python3-pip # 337kB download: 2023-03-22: Used to be installed by /opt/iiab/iiab/scripts/ansible -- which would auto-install 'python3-setuptools' and 'python3' etc
+ #- python3-venv # 1188kB download: 2023-03-22: Already installed by /opt/iiab/iiab/scripts/ansible -- used by roles like {calibre-web, jupyterhub, lokole} -- whereas roles/kalite uses (virtual) package 'virtualenv' for Python 2 -- all these 3+1 IIAB roles install 'python3-venv' for themselves. FYI: Debian 11 no longer auto-installs 'python3-venv' when you install 'python3'
+ - rsync # 351kB download: RasPiOS installs this regardless
+ #- screen # 551kB download: Installed by 1-prep's roles/iiab-admin/tasks/main.yml
+ - sqlite3 # 1054kB download
+ - tar # 799kB download: RasPiOS installs this regardless
+ - unzip # 151kB download: RasPiOS installs this regardless
+ #- usbmount # 18kB download: Moved to roles/usb_lib/tasks/install.yml
+ - usbutils # 67kB download: RasPiOS installs this regardless -- 2021-07-27: move to roles/usb_lib/tasks/install.yml ?
+ - wget # 922kB download: RasPiOS installs this regardless
state: present
#- name: "Install 10 yum/dnf packages: avahi, avahi-tools, createrepo, linux-firmware, nss-mdns, openssl, syslog, wpa_supplicant, xml-common, yum-utils (redhat)"
diff --git a/roles/3-base-server/README.rst b/roles/3-base-server/README.rst
index e458d7be0..1f22db3a2 100644
--- a/roles/3-base-server/README.rst
+++ b/roles/3-base-server/README.rst
@@ -1,10 +1,21 @@
+.. |ss| raw:: html
+
+ <s>
+
+.. |se| raw:: html
+
+ </s>
+
+.. |nbsp| unicode:: 0xA0
+ :trim:
+
====================
3-base-server README
====================
This 3rd `stage `_ installs base server infra that `Internet-in-a-Box (IIAB) `_ requires, including:
-- `MySQL `_ (database underlying many/most user-facing apps). This IIAB role also installs apt package:
+- |ss| `MySQL `_ (database underlying many/most user-facing apps). |se| |nbsp| *As of 2023-11-05, MySQL / MariaDB is NO LONGER INSTALLED by 3-base-server -- instead it's installed on-demand -- as a dependency of Matomo, MediaWiki, Nextcloud, PBX (for FreePBX), WordPress &/or Admin Console.* This IIAB role (roles/mysql) also installs apt package:
- **php{{ php_version }}-mysql** -- which forcibly installs **php{{ php_version }}-common**
- `NGINX `_ web server (with Apache in some lingering cases). This IIAB role also installs apt package:
- **php{{ php_version }}-fpm** -- which forcibly installs **php{{ php_version }}-cli**, **php{{ php_version }}-common** and **libsodium23**
diff --git a/roles/3-base-server/tasks/main.yml b/roles/3-base-server/tasks/main.yml
index 5e2e7355d..efe1c93e2 100644
--- a/roles/3-base-server/tasks/main.yml
+++ b/roles/3-base-server/tasks/main.yml
@@ -3,10 +3,13 @@
- name: ...IS BEGINNING =====================================
meta: noop
-- name: MYSQL + CORE PHP
- include_role:
- name: mysql
- #when: mysql_install
+# 2023-11-05: MySQL (actually MariaDB) had been mandatory, installed on every
+# IIAB by 3-base-server. Now installed on demand -- as a dependency of Matomo,
+# MediaWiki, Nextcloud, PBX (for FreePBX), WordPress &/or Admin Console.
+# - name: MYSQL + CORE PHP
+# include_role:
+# name: mysql
+# #when: mysql_install
# 2021-05-21: Apache role 'httpd' is installed as nec by any of these 6 roles:
#
diff --git a/roles/4-server-options/README.rst b/roles/4-server-options/README.rst
index 6355f85e7..11458d97e 100644
--- a/roles/4-server-options/README.rst
+++ b/roles/4-server-options/README.rst
@@ -2,7 +2,7 @@
4-server-options README
=======================
-Whereas 3-base-server installs critical packages needed by all, this 4th `stage `_ installs a broad array of *options* -- depending on which server apps will be installed in later stages -- as specified in `/etc/iiab/local_vars.yml `_
+Whereas 3-base-server installs critical packages needed by all, this 4th `stage `_ installs a broad array of *options* -- depending on which server apps will be installed in later stages -- as specified in `/etc/iiab/local_vars.yml `_
This includes more networking fundamentals, that may further be configured later on.
@@ -11,7 +11,7 @@ Specifically, these might be installed:
- Python libraries
- SSH daemon
- Bluetooth for Raspberry Pi
-- Instant-sharing of `USB stick content `_
+- Instant-sharing of `USB stick content `_
- CUPS Printing
- Samba for Windows filesystems
- `www_options `_
diff --git a/roles/4-server-options/tasks/main.yml b/roles/4-server-options/tasks/main.yml
index 8ccf6b88b..583cb763d 100644
--- a/roles/4-server-options/tasks/main.yml
+++ b/roles/4-server-options/tasks/main.yml
@@ -19,28 +19,6 @@
#when: pylibs_installed is undefined
#when: pylibs_install # Flag might be created in future?
-- name: SSHD -- also run by roles/1-prep/tasks/main.yml as required by OpenVPN
- include_role:
- name: sshd
- when: sshd_install
-
-
-# UNMAINTAINED
-- name: Install named / BIND
- include_tasks: roles/network/tasks/named.yml
- when: named_install is defined and named_install
-
-# UNMAINTAINED
-- name: Install dhcpd
- include_tasks: roles/network/tasks/dhcpd.yml
- when: dhcpd_install is defined and dhcpd_install
-
-# LESS MAINTAINED
-- name: Install Squid
- include_tasks: roles/network/tasks/squid.yml
- when: squid_install and squid_installed is undefined
-
-
- name: Install Bluetooth - only on Raspberry Pi
include_role:
name: bluetooth
diff --git a/roles/6-generic-apps/tasks/main.yml b/roles/6-generic-apps/tasks/main.yml
index f241095f6..f43e878f3 100644
--- a/roles/6-generic-apps/tasks/main.yml
+++ b/roles/6-generic-apps/tasks/main.yml
@@ -3,11 +3,6 @@
- name: ...IS BEGINNING ====================================
meta: noop
-- name: AZURACAST
- include_role:
- name: azuracast
- when: azuracast_install is defined and azuracast_install
-
# UNMAINTAINED
- name: DOKUWIKI
include_role:
@@ -36,10 +31,11 @@
name: jupyterhub
when: jupyterhub_install
+# UNMAINTAINED
- name: LOKOLE
include_role:
name: lokole
- when: lokole_install
+ when: lokole_install is defined and lokole_install
- name: MEDIAWIKI
include_role:
diff --git a/roles/7-edu-apps/tasks/main.yml b/roles/7-edu-apps/tasks/main.yml
index 8f223b1fd..69d1b0788 100644
--- a/roles/7-edu-apps/tasks/main.yml
+++ b/roles/7-edu-apps/tasks/main.yml
@@ -6,12 +6,13 @@
- name: KALITE
include_role:
name: kalite
- when: kalite_install
+ when: kalite_install and (is_ubuntu_2204 or is_ubuntu_2310 or is_debian_12) # Also covers is_linuxmint_21 and is_raspbian_12
- name: KOLIBRI
include_role:
name: kolibri
when: kolibri_install
+ #when: kolibri_install and python_version is version('3.12', '<') # Debian 13 still uses Python 3.11 (for now!) so really this just avoids Ubuntu 24.04 and 24.10 pre-releases during initial iiab-install. CLARIF: This is all TEMPORARY until learningequality/kolibri#11316 brings Python 3.12 support to Kolibri 0.17 pre-releases (expected very soon).
- name: KIWIX
include_role:
@@ -40,10 +41,23 @@
name: pathagar
when: pathagar_install is defined and pathagar_install
+# WARNING: Since March 2023, 32-bit RasPiOS can act as 64-bit on RPi 4 and
+# RPi 400 (unlike RPi 3!) SEE: https://github.com/iiab/iiab/pull/3422 and #3516
+- name: Run command 'dpkg --print-architecture' to identify OS architecture (CPU arch as revealed by ansible_architecture ~= ansible_machine is NO LONGER enough!)
+ command: dpkg --print-architecture
+ register: dpkg_arch
+ when: sugarizer_install
+
+- name: Explain bypassing of Sugarizer install if 32-bit OS
+ fail: # FORCE IT RED THIS ONCE!
+ msg: "BYPASSING SUGARIZER INSTALL ATTEMPT, as Sugarizer Server 1.5.0 requires MongoDB 3.2+ which is NO LONGER SUPPORTED on 32-bit Raspberry Pi OS. 'dpkg --print-architecture' output for your OS: {{ dpkg_arch.stdout }}"
+ when: sugarizer_install and not dpkg_arch.stdout is search("64")
+ ignore_errors: True
+
- name: SUGARIZER
include_role:
name: sugarizer
- when: sugarizer_install
+ when: sugarizer_install and dpkg_arch.stdout is search("64")
- name: Recording STAGE 7 HAS COMPLETED ========================
lineinfile:
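The dpkg check above exists because uname / ansible_machine report the kernel's architecture, while dpkg reports the userland the OS was actually built for; a sketch of the same test in shell:

    uname -m                    # may say aarch64 even on 32-bit RasPiOS (RPi 4 / 400, since March 2023)
    dpkg --print-architecture   # userland arch, e.g. armhf (32-bit) vs arm64 (64-bit)
    if dpkg --print-architecture | grep -q 64; then
        echo "64-bit OS: Sugarizer (needing MongoDB 3.2+) install can proceed"
    fi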
diff --git a/roles/8-mgmt-tools/tasks/main.yml b/roles/8-mgmt-tools/tasks/main.yml
index e75f97e23..c6d497f15 100644
--- a/roles/8-mgmt-tools/tasks/main.yml
+++ b/roles/8-mgmt-tools/tasks/main.yml
@@ -6,23 +6,23 @@
- name: TRANSMISSION
include_role:
name: transmission
- when: transmission_install
+ when: transmission_install and not (is_ubuntu_2404 or is_ubuntu_2410 or is_ubuntu_2504) # Also excludes is_linuxmint_22, for #3756 (whereas Debian 13 works great!)
- name: AWSTATS
include_role:
name: awstats
when: awstats_install
-
+
+- name: MATOMO
+ include_role:
+ name: matomo
+ when: matomo_install
+
- name: MONIT
include_role:
name: monit
when: monit_install
-- name: MUNIN
- include_role:
- name: munin
- when: munin_install
-
- name: PHPMYADMIN
include_role:
name: phpmyadmin
diff --git a/roles/9-local-addons/tasks/main.yml b/roles/9-local-addons/tasks/main.yml
index 54420a743..337a74445 100644
--- a/roles/9-local-addons/tasks/main.yml
+++ b/roles/9-local-addons/tasks/main.yml
@@ -3,16 +3,34 @@
- name: ...IS BEGINNING ====================================
meta: noop
-# Is porting to Python 3 complete, and if so does this belong elsewhere?
+- name: AZURACAST
+ include_role:
+ name: azuracast
+ when: azuracast_install
+
+# Porting to Python 3 is complete: does this belong elsewhere?
- name: CAPTIVE PORTAL
include_role:
name: captiveportal
when: captiveportal_install
+# WARNING: Since March 2023, 32-bit RasPiOS can act as 64-bit on RPi 4 and
+# RPi 400 (unlike RPi 3!) SEE: https://github.com/iiab/iiab/pull/3516
+- name: Run command 'dpkg --print-architecture' to identify OS architecture (CPU arch as revealed by ansible_architecture ~= ansible_machine is NO LONGER enough!)
+ command: dpkg --print-architecture
+ register: dpkg_arch
+ when: internetarchive_install
+
+- name: Explain bypassing of Internet Archive install if 32-bit OS
+ fail: # FORCE IT RED THIS ONCE!
+ msg: "BYPASSING INTERNET ARCHIVE PER https://github.com/iiab/iiab/issues/3641 -- 'dpkg --print-architecture' output for your OS: {{ dpkg_arch.stdout }}"
+ when: internetarchive_install and not dpkg_arch.stdout is search("64")
+ ignore_errors: True
+
- name: INTERNETARCHIVE
include_role:
name: internetarchive
- when: internetarchive_install
+ when: internetarchive_install and dpkg_arch.stdout is search("64")
- name: MINETEST
include_role:
@@ -37,12 +55,46 @@
name: pbx
when: pbx_install
-- name: "2021-06-27 TEMPORARY CODE TO INSTALL 'php-pear' UNTIL ADMIN CONSOLE DECLARES ITS OWN DEPENDENCY FOR: https://github.com/iiab/iiab-admin-console/blob/master/roles/cmdsrv/tasks/main.yml#L19"
- package:
- name: php-pear # WARNING: this also drags in 'php{{ php_version }}-xml' (also installed by MediaWiki, Nextcloud, roles/pbx's FreePBX, WordPress) AND 'php{{ php_version }}-cgi' (also installed by roles/pbx's FreePBX)
- state: present
+
+- name: '2023-11-05 / TEMPORARY UNTIL ADMIN CONSOLE DECLARES ITS DEPENDENCY: Install MySQL (MariaDB) if admin_console_install (for setup-feedback and record_feedback.php)'
+ set_fact:
+ mysql_install: True
+ mysql_enabled: True
when: admin_console_install
+- name: '2023-11-05 / TEMPORARY UNTIL ADMIN CONSOLE DECLARES ITS DEPENDENCY: Install MySQL (MariaDB) if admin_console_install (for setup-feedback and record_feedback.php)'
+ include_role:
+ name: mysql
+ when: admin_console_install
+
+- name: '2023-11-05 / TEMPORARY UNTIL ADMIN CONSOLE DECLARES ITS DEPENDENCY: Install MySQL (MariaDB) if admin_console_install (for setup-feedback and record_feedback.php)'
+ fail:
+ msg: "Admin Console install cannot proceed, as MySQL / MariaDB is not installed."
+ when: admin_console_install and mysql_installed is undefined
+
+
+# 2023-11-05: Moved from Stage 8, as it acts on mysql_installed (that might be set just above!)
+- name: MUNIN
+ include_role:
+ name: munin
+ when: munin_install
+
+
+- name: Read 'disk_used_a_priori' from /etc/iiab/iiab.ini
+ set_fact:
+ df1: "{{ lookup('ansible.builtin.ini', 'disk_used_a_priori', section='summary', file=iiab_ini_file) }}"
+
+- name: Record currently used disk space, to compare with original 'disk_used_a_priori'
+ shell: df -B1 --output=used / | tail -1
+ register: df2
+
+- name: Add ESTIMATED 'iiab_software_disk_usage = {{ df2.stdout|int - df1|int }}' to {{ iiab_ini_file }}
+ ini_file:
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
+ section: summary
+ option: iiab_software_disk_usage
+ value: "{{ df2.stdout|int - df1|int }}"
+
- name: Recording STAGE 9 HAS COMPLETED ====================
lineinfile:
path: "{{ iiab_env_file }}"
diff --git a/roles/awstats/tasks/install.yml b/roles/awstats/tasks/install.yml
index 905ef68e7..24a005c74 100644
--- a/roles/awstats/tasks/install.yml
+++ b/roles/awstats/tasks/install.yml
@@ -1,3 +1,8 @@
+- name: Record (initial) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df1
+
+
- name: 'Install package: awstats'
package:
name: awstats
@@ -83,7 +88,7 @@
# when: awstats_enabled and not is_debuntu
- name: "Summarize logs up to now: /usr/bin/perl /usr/lib/cgi-bin/awstats.pl -config=schoolserver -update"
- shell: /usr/bin/perl /usr/lib/cgi-bin/awstats.pl -config=schoolserver -update
+ command: /usr/bin/perl /usr/lib/cgi-bin/awstats.pl -config=schoolserver -update
- name: Install /etc/nginx/cgi-bin.php from template
template:
@@ -93,6 +98,17 @@
# RECORD AWStats AS INSTALLED
+- name: Record (final) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df2
+
+- name: Add 'awstats_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
+ ini_file:
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
+ section: awstats
+ option: awstats_disk_usage
+ value: "{{ df2.stdout|int - df1.stdout|int }}"
+
- name: "Set 'awstats_installed: True'"
set_fact:
awstats_installed: True
diff --git a/roles/awstats/tasks/main.yml b/roles/awstats/tasks/main.yml
index 79615d71a..47ae7b247 100644
--- a/roles/awstats/tasks/main.yml
+++ b/roles/awstats/tasks/main.yml
@@ -19,27 +19,34 @@
quiet: yes
-- name: Install AWStats if 'awstats_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
- include_tasks: install.yml
- when: awstats_installed is undefined
+- block:
+ - name: Install AWStats if 'awstats_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
+ include_tasks: install.yml
+ when: awstats_installed is undefined
-- name: Enable/Disable/Restart NGINX
- include_tasks: nginx.yml
+ - name: Enable/Disable/Restart NGINX
+ include_tasks: nginx.yml
+ - name: Add 'awstats' variable values to {{ iiab_ini_file }}
+ ini_file:
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
+ section: awstats
+ option: "{{ item.option }}"
+ value: "{{ item.value | string }}"
+ with_items:
+ - option: name
+ value: AWStats
+ - option: description
+ value: '"AWStats (originally known as Advanced Web Statistics) is a package written in Perl which generates static or dynamic html summaries based upon web server logs."'
+ - option: awstats_install
+ value: "{{ awstats_install }}"
+ - option: awstats_enabled
+ value: "{{ awstats_enabled }}"
-- name: Add 'awstats' variable values to {{ iiab_ini_file }}
- ini_file:
- path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
- section: awstats
- option: "{{ item.option }}"
- value: "{{ item.value | string }}"
- with_items:
- - option: name
- value: AWStats
- - option: description
- value: '"AWStats (originally known as Advanced Web Statistics) is a package written in Perl which generates static or dynamic html summaries based upon web server logs."'
- - option: awstats_install
- value: "{{ awstats_install }}"
- - option: awstats_enabled
- value: "{{ awstats_enabled }}"
+ rescue:
+
+ - name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
+ fail:
+ msg: ""
+ when: not skip_role_on_error
diff --git a/roles/awstats/templates/awstats.schoolserver.conf.j2 b/roles/awstats/templates/awstats.schoolserver.conf.j2
index 40816fef5..2a5e8802d 100644
--- a/roles/awstats/templates/awstats.schoolserver.conf.j2
+++ b/roles/awstats/templates/awstats.schoolserver.conf.j2
@@ -261,7 +261,7 @@ AllowToUpdateStatsFromBrowser=1
# 3 - Possible on CLI and CGI
# Default: 2
#
-AllowFullYearView=2
+AllowFullYearView=3
diff --git a/roles/azuracast/README.rst b/roles/azuracast/README.rst
index 420bfb92d..24d1277da 100644
--- a/roles/azuracast/README.rst
+++ b/roles/azuracast/README.rst
@@ -1,19 +1,47 @@
-==========
+================
AzuraCast README
-==========
+================
-This playbook adds `AzuraCast `_ to Internet-in-a-Box (IIAB) for network radio station functionality. With 'AzuraCast' you and your community can schedule podcasts, music, and even do live streaming of audio content. A variety of streaming formats are supported.
+Install `AzuraCast `_ with your `Internet-in-a-Box (IIAB) `_ if you want a simple, self-hosted "web radio station" with a modern web UI/UX. You and your community can then schedule newscasts, podcasts, music, and even do live streaming of audio content (video streaming might also be possible in future!)
-Please see AzuraCast's `screenshots `_.
+As soon as you install AzuraCast with IIAB, it can stream MP3 files (and similar files) using `LiquidSoap `_ to help you schedule or randomize playback of MP3 songs (and similar).
-As of 2019-08-04, this will only run on Ubuntu 18.04, and tentatively on Debian 10 "Buster" (`#1766 `_). Support for Raspberry Pi remains a goal for now -- please if you can, consider helping us solve this critical challenge (`#1772 `_, `AzuraCast/AzuraCast#332 `_).
+Please see AzuraCast's `screenshots `_ and `docs <./README.rst#azuracast-docs>`_. Community implementation examples:
+
+* https://twitter.com/internet_in_box/status/1564986581664014342
+* https://youtu.be/XfiFiOi46mk
+
+Optionally, live-streaming can also be made to work, e.g. if you install `Mixxx or BUTT `_ on your own. (If so, you have many options to configure streaming with `Icecast `_, `Shoutcast `_, etc.)
+
+Requirements
+------------
+
+AzuraCast recommends `2-to-4 GB RAM minimum `_.
+
+As of 2022-08-31, AzuraCast should run on Ubuntu 22.04 and **64-bit** Raspberry Pi OS: `#1772 `_, `AzuraCast/AzuraCast#332 `_, `PR #2946 `_
+
+Other Linux distributions may also work, at your own risk, especially if Docker runs smoothly.
+
+NOTE: AzuraCast was designed to be installed *just once* on a fresh OS. So ``./runrole --reinstall azuracast`` is not supported in general. However, if you accidentally damage your AzuraCast software, IIAB has posted `technical tips <./tasks/install.yml>`_ *(use at your own risk!)* in case of emergency.
Using It
--------
-* Do a normal IIAB install (http://download.iiab.io), making sure to set both variables ``azuracast_install`` and ``azuracast_enabled`` to ``True`` when it prompts you to edit `/etc/iiab/local_vars.yml `_, as you begin the installation.
-* When the IIAB software install completes, it will ask you to reboot, and AzuraCast's console will then be available at http://box.lan:10080
-* This console site will prompt you to complete AzuraCast's initial setup: user accounts, managing stations, radio streams, etc.
+* Do a normal IIAB install (https://download.iiab.io), making sure to set both variables ``azuracast_install`` and ``azuracast_enabled`` to ``True`` when IIAB's installer prompts you to edit `/etc/iiab/local_vars.yml `_
+* When the IIAB software install completes, it will ask you to reboot, and AzuraCast's console will then be available at http://box.lan:12080
+* That console site will prompt you to complete AzuraCast's initial setup: user accounts, managing stations, radio streams, etc.
* Finally, check out some `how-to videos `_ to learn to manage your own radio station!
-Note: When creating a station using AzuraCast's console, its default streaming ports for ``station`` and ``autodj`` need to be in the `port range 10000-10100 `_.
+NOTE: When creating a station using AzuraCast's console, its default streaming ports for ``station`` and ``autodj`` need to be in the `port range 10000-10499 `_ (ports 12080 and 12443 may also be required!)
+
+AzuraCast Docs
+--------------
+
+- https://docs.azuracast.com
+- https://docs.azuracast.com/en/getting-started/installation/post-installation-steps
+- https://docs.azuracast.com/en/getting-started/settings
+- https://docs.azuracast.com/en/getting-started/updates (can *DAMAGE* AzuraCast as of 2022-09-28)
+- https://docs.azuracast.com/en/user-guide/streaming-software
+- https://docs.azuracast.com/en/user-guide/troubleshooting
+- https://docs.azuracast.com/en/user-guide/logs
+- https://docs.azuracast.com/en/administration/docker
diff --git a/roles/azuracast/defaults/main.yml b/roles/azuracast/defaults/main.yml
index 6e505649f..a3cd24908 100644
--- a/roles/azuracast/defaults/main.yml
+++ b/roles/azuracast/defaults/main.yml
@@ -1,15 +1,15 @@
-# A full-featured online radio station suite.
-# Works on Ubuntu 18.04, Debian 9, 10. Uses docker
+# A full-featured online radio station suite. Uses Docker.
+# README: https://github.com/iiab/iiab/tree/master/roles/azuracast#readme
# azuracast_install: False
-# azuracast_enabled: False
+# azuracast_enabled: False # This var is currently IGNORED
-# azuracast_http_port: 10080
-# azuracast_https_port: 10443
+# azuracast_http_port: 12080
+# azuracast_https_port: 12443
-## AzuraCast needs many ports in the 8000:8100 range by default, but IIAB services
-## conflict with those ports so this variable below sets a sane prefix.
-## e.g. setting the below variable to 10 will result in port ranges 10000-10100
+## AzuraCast needs many ports in the 8000:8496 range by default, but IIAB
+## services conflict with some of them, so the variable below sets a sane prefix.
+## e.g. setting the below variable to 10 will result in port range 10000-10499
## being reserved for AzuraCast:
# azuracast_port_range_prefix: 10
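+
+## (Illustration of the mechanism only, not a tested configuration: by the same
+## logic, 'azuracast_port_range_prefix: 11' would reserve 11000-11499 instead.)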
diff --git a/roles/azuracast/templates/docker-compose.override.yml.j2 b/roles/azuracast/docker-compose.override.yml.j2.unused
similarity index 100%
rename from roles/azuracast/templates/docker-compose.override.yml.j2
rename to roles/azuracast/docker-compose.override.yml.j2.unused
diff --git a/roles/azuracast/templates/env.j2 b/roles/azuracast/env.j2.unused
similarity index 100%
rename from roles/azuracast/templates/env.j2
rename to roles/azuracast/env.j2.unused
diff --git a/roles/azuracast/tasks/install.yml b/roles/azuracast/tasks/install.yml
index 9f1b1e2ab..bd25d96fb 100644
--- a/roles/azuracast/tasks/install.yml
+++ b/roles/azuracast/tasks/install.yml
@@ -1,23 +1,49 @@
+# 2022-09-29: './runrole --reinstall azuracast' is NOT supported!
+#
+# 1. But if you must, first completely uninstall Docker + WIPE AzuraCast data:
+#
+# apt purge docker-ce docker-ce-cli containerd.io docker-compose-plugin docker-scan-plugin
+# rm -rf /library/docker /var/lib/docker /var/lib/containerd
+#
+# Per https://docs.docker.com/engine/install/ubuntu/#uninstall-docker-engine
+#
+# 2. REBOOT to avoid later problems with 'systemctl status docker' -- if you
+# don't reboot, Ansible will fail below when 'docker.sh install' fails to
+# start docker.service -- likewise if you run './docker.sh install-docker'
+# manually in /opt/azuracast. Either way, 'systemctl restart docker' won't
+# work for ~2 minutes. (Rebooting avoids all these hassles!)
+#
+# 3. Just FYI, as 'docker.sh install' begins, the Docker install process will
+#    rebuild its 11 core directories in /var/lib/docker -> /library/docker:
+#
+# buildkit containers image network overlay2 plugins runtimes swarm tmp trust volumes
+#
+# 4. Just FYI both MySQL passwords (MYSQL_PASSWORD & MYSQL_ROOT_PASSWORD) will
+# be WIPED from /opt/azuracast/azuracast.env (and new passwords
+# auto-generated below, for use inside AzuraCast's Docker container).
+#
+# 5. Run './runrole --reinstall azuracast' in /opt/iiab/iiab
+
+
+- name: Record (initial) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df1
+
+
- name: AzuraCast - Make config directory {{ azuracast_host_dir }}
- file:
+ file:
path: "{{ azuracast_host_dir }}"
state: directory
- name: AzuraCast - Install {{ azuracast_host_dir }}/.env from template
template:
- src: env.j2
+ src: prod.env.j2
dest: "{{ azuracast_host_dir }}/.env"
- #owner: root
- #group: root
- mode: 0644
-- name: AzuraCast - Install {{ azuracast_host_dir }}/docker-compose.override.yml from template
+- name: AzuraCast - Install {{ azuracast_host_dir }}/azuracast.env for altered ports
template:
- src: docker-compose.override.yml.j2
- dest: "{{ azuracast_host_dir }}/docker-compose.override.yml"
- #owner: root
- #group: root
- mode: 0644
+ src: azuracast.env.j2
+ dest: "{{ azuracast_host_dir }}/azuracast.env"
- name: AzuraCast - Download {{ docker_sh_url }} to {{ azuracast_host_dir }}
get_url:
@@ -26,13 +52,6 @@
mode: 0755
timeout: "{{ download_timeout }}"
-- name: AzuraCast - Download AzuraCast's docker-compose.yml sample from GitHub to {{ azuracast_host_dir }}
- get_url:
- url: "{{ docker_compose_url }}"
- dest: "{{ azuracast_host_dir }}/docker-compose.yml"
- mode: 0755
- timeout: "{{ download_timeout }}"
-
#- name: AzuraCast - Make changes to docker.sh script so it runs headless
# lineinfile:
# path: "{{ azuracast_host_dir }}/docker.sh"
@@ -40,27 +59,45 @@
# line: "\\1reply='Y'"
# backrefs: yes
+# 2022-09-28: https://docs.azuracast.com/en/getting-started/installation/docker
+# (& testing) confirm this is done automatically by 'docker.sh install' below.
+#
+# - name: AzuraCast - Download AzuraCast's docker-compose.yml sample from GitHub to {{ azuracast_host_dir }}
+# get_url:
+# url: "{{ docker_compose_url }}"
+# dest: "{{ azuracast_host_dir }}/docker-compose.yml"
+# timeout: "{{ download_timeout }}"
+
+#- name: AzuraCast - Install {{ azuracast_host_dir }}/docker-compose.override.yml from template
+# template:
+# src: docker-compose.override.yml.j2
+# dest: "{{ azuracast_host_dir }}/docker-compose.override.yml"
+
+#- name: Change default port number range 8xxx:8xxx to {{ azuracast_port_range_prefix }}xxx:{{ azuracast_port_range_prefix }}xxx icecast-stations in docker-compose.yml
+# replace:
+# path: "{{ azuracast_host_dir }}/docker-compose.yml"
+# regexp: "^( *- \\')8([0-9]{3})\\:8([0-9]{3}\\'.*)$"
+# replace: "\\g<1>{{ azuracast_port_range_prefix }}\\g<2>:{{ azuracast_port_range_prefix }}\\g<3>"
+
- name: AzuraCast - Make directory {{ docker_container_dir }}
- file:
+ file:
path: "{{ docker_container_dir }}"
state: directory
-
+
- name: AzuraCast - Symlink /var/lib/docker -> {{ docker_container_dir }}
file:
src: "{{ docker_container_dir }}"
path: /var/lib/docker
- state: link
+ state: link
-- name: Change default port number range 8xxx:8xxx to {{ azuracast_port_range_prefix }}xxx:{{ azuracast_port_range_prefix }}xxx icecast-stations in docker-compose.yml
- replace:
- path: "{{ azuracast_host_dir }}/docker-compose.yml"
- regexp: "^( *- \\')8([0-9]{3})\\:8([0-9]{3}\\'.*)$"
- replace: "\\g<1>{{ azuracast_port_range_prefix }}\\g<2>:{{ azuracast_port_range_prefix }}\\g<3>"
-
-- name: AzuraCast - Setup for stable channel install
- shell: "yes 'Y' | /bin/bash docker.sh setup-release"
- args:
- chdir: "{{ azuracast_host_dir }}"
+# 2022-09-28: "yes 'Y'" toggled whatever it found in /opt/azuracast/.env (e.g.
+# AZURACAST_VERSION=stable from templates/prod.env.j2) to the opposite (e.g.
+# AZURACAST_VERSION=latest). Let's not modify /opt/azuracast/.env unless necessary!
+#
+# - name: AzuraCast - Setup for stable channel install
+# shell: "yes 'Y' | /bin/bash docker.sh setup-release"
+# args:
+# chdir: "{{ azuracast_host_dir }}"
- name: AzuraCast - Run the installer
shell: "yes '' | /bin/bash docker.sh install"
@@ -70,6 +107,17 @@
# RECORD AzuraCast AS INSTALLED
+- name: Record (final) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df2
+
+- name: Add 'azuracast_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
+ ini_file:
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
+ section: azuracast
+ option: azuracast_disk_usage
+ value: "{{ df2.stdout|int - df1.stdout|int }}"
+
- name: "Set 'azuracast_installed: True'"
set_fact:
azuracast_installed: True
diff --git a/roles/azuracast/tasks/main.yml b/roles/azuracast/tasks/main.yml
index ef9c28914..6bc11b878 100644
--- a/roles/azuracast/tasks/main.yml
+++ b/roles/azuracast/tasks/main.yml
@@ -19,25 +19,32 @@
quiet: yes
-- name: Install AzuraCast if 'azuracast_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
- include_tasks: install.yml
- when: azuracast_installed is undefined
+- block:
+ - name: Install AzuraCast if 'azuracast_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
+ include_tasks: install.yml
+ when: azuracast_installed is undefined
-# TODO figure out what to turn off/on for AzuraCast
-# - include_tasks: enable-or-disable.yml
+ # TODO figure out what to turn off/on for AzuraCast
+ # - include_tasks: enable-or-disable.yml
+ - name: Add 'azuracast' variable values to {{ iiab_ini_file }}
+ ini_file:
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
+ section: azuracast
+ option: "{{ item.option }}"
+ value: "{{ item.value | string }}"
+ with_items:
+ - option: name
+ value: azuracast
+ - option: description
+ value: '"AzuraCast is simple, self-hosted web radio. Use it to schedule student newscasts, podcasts, music (e.g. MP3''s and similar) and even do live-streaming."'
+ - option: enabled
+ value: "{{ azuracast_enabled }}"
-- name: Add 'azuracast' variable values to {{ iiab_ini_file }}
- ini_file:
- path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
- section: azuracast
- option: "{{ item.option }}"
- value: "{{ item.value | string }}"
- with_items:
- - option: name
- value: azuracast
- - option: description
- value: '"AzuraCast is a self-hosted, all-in-one radio station platform. Use AzuraCast to schedule podcasts, music, and even do live streaming of audio content. A variety of streaming formats are supported."'
- - option: enabled
- value: "{{ azuracast_enabled }}"
+ rescue:
+
+ - name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
+ fail:
+ msg: ""
+ when: not skip_role_on_error
diff --git a/roles/azuracast/templates/azuracast-nginx.conf.j2.unused b/roles/azuracast/templates/azuracast-nginx.conf.j2.unused
new file mode 100644
index 000000000..33ae25476
--- /dev/null
+++ b/roles/azuracast/templates/azuracast-nginx.conf.j2.unused
@@ -0,0 +1,16 @@
+# Work in progress; might never be ready, as the web interface has settings that would need to match.
+location /azuracast/ {
+ proxy_set_header Host $http_host;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Scheme $scheme;
+ proxy_set_header X-Script-Name /azuracast;
+ proxy_pass http://127.0.0.1:{{ azuracast_http_port }};
+}
+
+location /radio/ {
+ proxy_set_header Host $http_host;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Scheme $scheme;
+ proxy_set_header X-Script-Name /radio;
+ proxy_pass http://127.0.0.1:{{ azuracast_http_port }};
+}
diff --git a/roles/azuracast/templates/azuracast.env.j2 b/roles/azuracast/templates/azuracast.env.j2
new file mode 100644
index 000000000..313fffc59
--- /dev/null
+++ b/roles/azuracast/templates/azuracast.env.j2
@@ -0,0 +1,155 @@
+# IIAB version for altered ports
+#
+# AzuraCast Customization
+#
+
+# The application environment.
+# Valid options: production, development, testing
+APPLICATION_ENV=production
+
+# Manually modify the logging level.
+# This allows you to log debug-level errors temporarily (for problem-solving) or reduce
+# the volume of logs that are produced by your installation, without needing to modify
+# whether your installation is a production or development instance.
+# Valid options: debug, info, notice, warning, error, critical, alert, emergency
+# LOG_LEVEL=notice
+
+# Enable the composer "merge" functionality to combine the main application's
+# composer.json file with any plugins' composer files.
+# This can have performance implications, so you should only use it if
+# you use one or more plugins with their own Composer dependencies.
+# Valid options: true, false
+COMPOSER_PLUGIN_MODE=false
+
+# The minimum port number to use when automatically assigning ports to a station.
+# By default, this matches the first forwarded port on the "stations" container.
+# You can modify this variable if your station port range is different.
+# Be sure to also forward the necessary ports via `docker-compose.yml`
+# (and nginx, if you want to use the built-in port-80/443 proxy)!
+AUTO_ASSIGN_PORT_MIN="{{ azuracast_port_range_prefix }}000"
+
+# The maximum port number to use when automatically assigning ports to a station.
+# See AUTO_ASSIGN_PORT_MIN.
+AUTO_ASSIGN_PORT_MAX="{{ azuracast_port_range_prefix }}499"
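+
+# (IIAB note, purely illustrative: with IIAB's default azuracast_port_range_prefix
+# of 10, the two settings above render to AUTO_ASSIGN_PORT_MIN=10000 and
+# AUTO_ASSIGN_PORT_MAX=10499.)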
+
+#
+# Database Configuration
+# --
+# Once the database has been installed, DO NOT CHANGE these values!
+#
+
+# The host to connect to. Leave this as the default value unless you're connecting
+# to an external database server.
+# Default: mariadb
+MYSQL_HOST=mariadb
+
+# The port to connect to. Leave this as the default value unless you're connecting
+# to an external database server.
+# Default: 3306
+MYSQL_PORT=3306
+
+# The username AzuraCast will use to connect to the database.
+# Default: azuracast
+MYSQL_USER=azuracast
+
+# The password AzuraCast will use to connect to the database.
+# By default, the database is not exposed to the Internet at all and this is only
+# an internal password used by the service itself.
+# Default: azur4c457
+MYSQL_PASSWORD=azur4c457
+
+# The name of the AzuraCast database.
+# Default: azuracast
+MYSQL_DATABASE=azuracast
+
+# Automatically generate a random root password upon the first database spin-up.
+# This password will be visible in the mariadb container's logs.
+# Default: yes
+MYSQL_RANDOM_ROOT_PASSWORD=yes
+
+# Log slower queries for the purpose of diagnosing issues. Only turn this on when
+# you need to, by uncommenting this and switching it to 1.
+# To read the slow query log once enabled, run:
+# docker-compose exec mariadb slow_queries
+# Default: 0
+MYSQL_SLOW_QUERY_LOG=0
+
+# Set the amount of allowed connections to the database. This value should be increased
+# if you are seeing the `Too many connections` error in the logs.
+# Default: 100
+MYSQL_MAX_CONNECTIONS=100
+
+#
+# Redis Configuration
+#
+# Uncomment these fields if you are using a third-party Redis host instead of the one provided with AzuraCast.
+# Do not modify these fields if you are using the standard AzuraCast Redis host.
+#
+
+# Whether to use the Redis cache; if set to false, will disable Redis and use flatfile cache instead.
+# Default: true
+# ENABLE_REDIS=true
+
+# Name of the Redis host.
+# Default: redis
+# REDIS_HOST=redis
+
+# Port to connect to on the Redis host.
+# Default: 6379
+# REDIS_PORT=6379
+
+# Database index to use on the Redis host.
+# Default: 1
+# REDIS_DB=1
+
+#
+# Advanced Configuration
+#
+
+# PHP's maximum POST body size and max upload filesize.
+# PHP_MAX_FILE_SIZE=25M
+
+# PHP's maximum memory limit.
+# PHP_MEMORY_LIMIT=128M
+
+# PHP's maximum script execution time (in seconds).
+# PHP_MAX_EXECUTION_TIME=30
+
+# The maximum execution time (and lock timeout) for the 15-second, 1-minute and 5-minute synchronization tasks.
+# SYNC_SHORT_EXECUTION_TIME=600
+
+# The maximum execution time (and lock timeout) for the 1-hour synchronization task.
+# SYNC_LONG_EXECUTION_TIME=1800
+
+# Maximum number of PHP-FPM worker processes to spawn.
+# PHP_FPM_MAX_CHILDREN=5
+
+#
+# PHP-SPX profiling extension Configuration
+#
+# These environment variables allow you to enable and configure the PHP-SPX profiling extension
+# which can be helpful when debugging resource issues in AzuraCast.
+#
+# The profiling dashboard can be accessed by visiting https://yourdomain.com/?SPX_KEY=dev&SPX_UI_URI=/
+# If you change the PROFILING_EXTENSION_HTTP_KEY variable, change the value for SPX_KEY accordingly.
+#
+
+# Enable the profiling extension.
+# Profiling data can be viewed by visiting http://your-azuracast-site/?SPX_KEY=dev&SPX_UI_URI=/
+# Default: 0
+# PROFILING_EXTENSION_ENABLED=0
+
+# Profile ALL requests made to this account.
+# This will have significant performance impact on your installation and should only be used in test circumstances.
+# Default: 0
+# PROFILING_EXTENSION_ALWAYS_ON=0
+
+# Configure the value for the SPX_KEY parameter needed to access the profiling dashboard
+# Default: dev
+# PROFILING_EXTENSION_HTTP_KEY=dev
+
+# Configure the IP whitelist for the profiling dashboard
+# By default only localhost is allowed to access this page.
+# Uncomment this line to enable access for you.
+# Default: 127.0.0.1
+# PROFILING_EXTENSION_HTTP_IP_WHITELIST=*
diff --git a/roles/azuracast/templates/azuracast.sample.env b/roles/azuracast/templates/azuracast.sample.env
new file mode 100644
index 000000000..bc9ffc204
--- /dev/null
+++ b/roles/azuracast/templates/azuracast.sample.env
@@ -0,0 +1,155 @@
+# https://github.com/AzuraCast/AzuraCast/blob/main/azuracast.sample.env
+#
+# AzuraCast Customization
+#
+
+# The application environment.
+# Valid options: production, development, testing
+APPLICATION_ENV=production
+
+# Manually modify the logging level.
+# This allows you to log debug-level errors temporarily (for problem-solving) or reduce
+# the volume of logs that are produced by your installation, without needing to modify
+# whether your installation is a production or development instance.
+# Valid options: debug, info, notice, warning, error, critical, alert, emergency
+# LOG_LEVEL=notice
+
+# Enable the composer "merge" functionality to combine the main application's
+# composer.json file with any plugins' composer files.
+# This can have performance implications, so you should only use it if
+# you use one or more plugins with their own Composer dependencies.
+# Valid options: true, false
+COMPOSER_PLUGIN_MODE=false
+
+# The minimum port number to use when automatically assigning ports to a station.
+# By default, this matches the first forwarded port on the "stations" container.
+# You can modify this variable if your station port range is different.
+# Be sure to also forward the necessary ports via `docker-compose.yml`
+# (and nginx, if you want to use the built-in port-80/443 proxy)!
+AUTO_ASSIGN_PORT_MIN=8000
+
+# The maximum port number to use when automatically assigning ports to a station.
+# See AUTO_ASSIGN_PORT_MIN.
+AUTO_ASSIGN_PORT_MAX=8499
+
+#
+# Database Configuration
+# --
+# Once the database has been installed, DO NOT CHANGE these values!
+#
+
+# The host to connect to. Leave this as the default value unless you're connecting
+# to an external database server.
+# Default: mariadb
+MYSQL_HOST=mariadb
+
+# The port to connect to. Leave this as the default value unless you're connecting
+# to an external database server.
+# Default: 3306
+MYSQL_PORT=3306
+
+# The username AzuraCast will use to connect to the database.
+# Default: azuracast
+MYSQL_USER=azuracast
+
+# The password AzuraCast will use to connect to the database.
+# By default, the database is not exposed to the Internet at all and this is only
+# an internal password used by the service itself.
+# Default: azur4c457
+MYSQL_PASSWORD=azur4c457
+
+# The name of the AzuraCast database.
+# Default: azuracast
+MYSQL_DATABASE=azuracast
+
+# Automatically generate a random root password upon the first database spin-up.
+# This password will be visible in the mariadb container's logs.
+# Default: yes
+MYSQL_RANDOM_ROOT_PASSWORD=yes
+
+# Log slower queries for the purpose of diagnosing issues. Only turn this on when
+# you need to, by uncommenting this and switching it to 1.
+# To read the slow query log once enabled, run:
+# docker-compose exec mariadb slow_queries
+# Default: 0
+MYSQL_SLOW_QUERY_LOG=0
+
+# Set the amount of allowed connections to the database. This value should be increased
+# if you are seeing the `Too many connections` error in the logs.
+# Default: 100
+MYSQL_MAX_CONNECTIONS=100
+
+#
+# Redis Configuration
+#
+# Uncomment these fields if you are using a third-party Redis host instead of the one provided with AzuraCast.
+# Do not modify these fields if you are using the standard AzuraCast Redis host.
+#
+
+# Whether to use the Redis cache; if set to false, will disable Redis and use flatfile cache instead.
+# Default: true
+# ENABLE_REDIS=true
+
+# Name of the Redis host.
+# Default: redis
+# REDIS_HOST=redis
+
+# Port to connect to on the Redis host.
+# Default: 6379
+# REDIS_PORT=6379
+
+# Database index to use on the Redis host.
+# Default: 1
+# REDIS_DB=1
+
+#
+# Advanced Configuration
+#
+
+# PHP's maximum POST body size and max upload filesize.
+# PHP_MAX_FILE_SIZE=25M
+
+# PHP's maximum memory limit.
+# PHP_MEMORY_LIMIT=128M
+
+# PHP's maximum script execution time (in seconds).
+# PHP_MAX_EXECUTION_TIME=30
+
+# The maximum execution time (and lock timeout) for the 15-second, 1-minute and 5-minute synchronization tasks.
+# SYNC_SHORT_EXECUTION_TIME=600
+
+# The maximum execution time (and lock timeout) for the 1-hour synchronization task.
+# SYNC_LONG_EXECUTION_TIME=1800
+
+# Maximum number of PHP-FPM worker processes to spawn.
+# PHP_FPM_MAX_CHILDREN=5
+
+#
+# PHP-SPX profiling extension Configuration
+#
+# These environment variables allow you to enable and configure the PHP-SPX profiling extension
+# which can be helpful when debugging resource issues in AzuraCast.
+#
+# The profiling dashboard can be accessed by visiting https://yourdomain.com/?SPX_KEY=dev&SPX_UI_URI=/
+# If you change the PROFILING_EXTENSION_HTTP_KEY variable, change the value for SPX_KEY accordingly.
+#
+
+# Enable the profiling extension.
+# Profiling data can be viewed by visiting http://your-azuracast-site/?SPX_KEY=dev&SPX_UI_URI=/
+# Default: 0
+# PROFILING_EXTENSION_ENABLED=0
+
+# Profile ALL requests made to this account.
+# This will have significant performance impact on your installation and should only be used in test circumstances.
+# Default: 0
+# PROFILING_EXTENSION_ALWAYS_ON=0
+
+# Configure the value for the SPX_KEY parameter needed to access the profiling dashboard
+# Default: dev
+# PROFILING_EXTENSION_HTTP_KEY=dev
+
+# Configure the IP whitelist for the profiling dashboard
+# By default only localhost is allowed to access this page.
+# Uncomment this line to enable access for you.
+# Default: 127.0.0.1
+# PROFILING_EXTENSION_HTTP_IP_WHITELIST=*
diff --git a/roles/azuracast/templates/prod.env.j2 b/roles/azuracast/templates/prod.env.j2
new file mode 100644
index 000000000..58fc11d3e
--- /dev/null
+++ b/roles/azuracast/templates/prod.env.j2
@@ -0,0 +1,54 @@
+# This file was automatically generated by AzuraCast and modified for IIAB
+# You can modify it as necessary. To apply changes, restart the Docker containers.
+# Remove the leading "#" symbol from lines to uncomment them.
+
+# (Docker Compose) All Docker containers are prefixed by this name. Do not change this after installation.
+# Default: azuracast
+COMPOSE_PROJECT_NAME=azuracast
+
+# (Docker Compose) The amount of time to wait before a Docker Compose operation fails. Increase this on lower performance computers.
+# Default: 300
+COMPOSE_HTTP_TIMEOUT=300
+
+# Release Channel
+# Valid options: latest, stable
+# Default: latest
+AZURACAST_VERSION=stable
+
+NGINX_TIMEOUT=1800
+
+# HTTP Port
+# The main port AzuraCast listens to for insecure HTTP connections.
+# Default: 80
+AZURACAST_HTTP_PORT={{ azuracast_http_port }}
+
+# HTTPS Port
+# The main port AzuraCast listens to for secure HTTPS connections.
+# Default: 443
+AZURACAST_HTTPS_PORT={{ azuracast_https_port }}
+
+# SFTP Port
+# The port AzuraCast listens to for SFTP file management connections.
+# Default: 2022
+AZURACAST_SFTP_PORT=2022
+
+# Station Ports
+# The ports AzuraCast should listen to for station broadcasts and incoming DJ
+# connections.
+# Default: 8000,8005,8006,8010,8015,8016,8020,8025,8026,8030,8035,8036,8040,8045,8046,8050,8055,8056,8060,8065,8066,8070,8075,8076,8090,8095,8096,8100,8105,8106,8110,8115,8116,8120,8125,8126,8130,8135,8136,8140,8145,8146,8150,8155,8156,8160,8165,8166,8170,8175,8176,8180,8185,8186,8190,8195,8196,8200,8205,8206,8210,8215,8216,8220,8225,8226,8230,8235,8236,8240,8245,8246,8250,8255,8256,8260,8265,8266,8270,8275,8276,8280,8285,8286,8290,8295,8296,8300,8305,8306,8310,8315,8316,8320,8325,8326,8330,8335,8336,8340,8345,8346,8350,8355,8356,8360,8365,8366,8370,8375,8376,8380,8385,8386,8390,8395,8396,8400,8405,8406,8410,8415,8416,8420,8425,8426,8430,8435,8436,8440,8445,8446,8450,8455,8456,8460,8465,8466,8470,8475,8476,8480,8485,8486,8490,8495,8496
+AZURACAST_STATION_PORTS=10000,10005,10006,10010,10015,10016,10020,10025,10026,10030,10035,10036,10040,10045,10046,10050,10055,10056,10060,10065,10066,10070,10075,10076,10080,10085,10086,10090,10095,10096,10100,10105,10106,10110,10115,10116,10120,10125,10126,10130,10135,10136,10140,10145,10146,10150,10155,10156,10160,10165,10166,10170,10175,10176,10180,10185,10186,10190,10195,10196,10200,10205,10206,10210,10215,10216,10220,10225,10226,10230,10235,10236,10240,10245,10246,10250,10255,10256,10260,10265,10266,10270,10275,10276,10280,10285,10286,10290,10295,10296,10300,10305,10306,10310,10315,10316,10320,10325,10326,10330,10335,10336,10340,10345,10346,10350,10355,10356,10360,10365,10366,10370,10375,10376,10380,10385,10386,10390,10395,10396,10400,10405,10406,10410,10415,10416,10420,10425,10426,10430,10435,10436,10440,10445,10446,10450,10455,10456,10460,10465,10466,10470,10475,10476,10480,10485,10486,10490,10495,10496
+
+# Docker User UID
+# Set the UID of the user running inside the Docker containers. Matching this
+# with your host UID can fix permission issues.
+# Default: 1000
+AZURACAST_PUID=1000
+
+# Docker User GID
+# Set the GID of the user running inside the Docker containers. Matching this
+# with your host GID can fix permission issues.
+# Default: 1000
+AZURACAST_PGID=1000
+
+# Advanced: Use Privileged Docker Settings
+# Default: true
+AZURACAST_COMPOSE_PRIVILEGED=true
diff --git a/roles/azuracast/upstream/azuracast.sample.env b/roles/azuracast/upstream/azuracast.sample.env
new file mode 100644
index 000000000..225b0fb4e
--- /dev/null
+++ b/roles/azuracast/upstream/azuracast.sample.env
@@ -0,0 +1,161 @@
+#
+# AzuraCast Customization
+#
+
+# The application environment.
+# Valid options: production, development, testing
+APPLICATION_ENV=production
+
+# Manually modify the logging level.
+# This allows you to log debug-level errors temporarily (for problem-solving) or reduce
+# the volume of logs that are produced by your installation, without needing to modify
+# whether your installation is a production or development instance.
+# Valid options: debug, info, notice, warning, error, critical, alert, emergency
+# LOG_LEVEL=notice
+
+# Enable the composer "merge" functionality to combine the main application's
+# composer.json file with any plugins' composer files.
+# This can have performance implications, so you should only use it if
+# you use one or more plugins with their own Composer dependencies.
+# Valid options: true, false
+COMPOSER_PLUGIN_MODE=false
+
+# The minimum port number to use when automatically assigning ports to a station.
+# By default, this matches the first forwarded port on the "stations" container.
+# You can modify this variable if your station port range is different.
+# Be sure to also forward the necessary ports via `docker-compose.yml`
+# (and nginx, if you want to use the built-in port-80/443 proxy)!
+AUTO_ASSIGN_PORT_MIN=8000
+
+# The maximum port number to use when automatically assigning ports to a station.
+# See AUTO_ASSIGN_PORT_MIN.
+AUTO_ASSIGN_PORT_MAX=8499
+
+# This allows you to debug Slim Application Errors you may encounter
+# By default, this is disabled to prevent users from seeing privileged information
+# Please report any Slim Application Error logs to the development team on GitHub
+# Valid options: true, false
+SHOW_DETAILED_ERRORS=false
+
+
+#
+# Database Configuration
+# --
+# Once the database has been installed, DO NOT CHANGE these values!
+#
+
+# The host to connect to. Leave this as the default value unless you're connecting
+# to an external database server.
+# Default: localhost
+# MYSQL_HOST=localhost
+
+# The port to connect to. Leave this as the default value unless you're connecting
+# to an external database server.
+# Default: 3306
+# MYSQL_PORT=3306
+
+# The username AzuraCast will use to connect to the database.
+# Default: azuracast
+# MYSQL_USER=azuracast
+
+# The password AzuraCast will use to connect to the database.
+# By default, the database is not exposed to the Internet at all and this is only
+# an internal password used by the service itself.
+# Default: azur4c457
+MYSQL_PASSWORD=azur4c457
+
+# The name of the AzuraCast database.
+# Default: azuracast
+# MYSQL_DATABASE=azuracast
+
+# Automatically generate a random root password upon the first database spin-up.
+# This password will be visible in the mariadb container's logs.
+# Default: yes
+MYSQL_RANDOM_ROOT_PASSWORD=yes
+
+# Log slower queries for the purpose of diagnosing issues. Only turn this on when
+# you need to, by uncommenting this and switching it to 1.
+# To read the slow query log once enabled, run:
+# docker-compose exec mariadb slow_queries
+# Default: 0
+# MYSQL_SLOW_QUERY_LOG=0
+
+# Set the amount of allowed connections to the database. This value should be increased
+# if you are seeing the `Too many connections` error in the logs.
+# Default: 100
+# MYSQL_MAX_CONNECTIONS=100
+
+#
+# Redis Configuration
+#
+# Uncomment these fields if you are using a third-party Redis host instead of the one provided with AzuraCast.
+# Do not modify these fields if you are using the standard AzuraCast Redis host.
+#
+
+# Whether to use the Redis cache; if set to false, will disable Redis and use flatfile cache instead.
+# Default: true
+# ENABLE_REDIS=true
+
+# Name of the Redis host.
+# Default: localhost
+# REDIS_HOST=localhost
+
+# Port to connect to on the Redis host.
+# Default: 6379
+# REDIS_PORT=6379
+
+# Database index to use on the Redis host.
+# Default: 1
+# REDIS_DB=1
+
+#
+# Advanced Configuration
+#
+
+# PHP's maximum POST body size and max upload filesize.
+# PHP_MAX_FILE_SIZE=25M
+
+# PHP's maximum memory limit.
+# PHP_MEMORY_LIMIT=128M
+
+# PHP's maximum script execution time (in seconds).
+# PHP_MAX_EXECUTION_TIME=30
+
+# The maximum execution time (and lock timeout) for the 15-second, 1-minute and 5-minute synchronization tasks.
+# SYNC_SHORT_EXECUTION_TIME=600
+
+# The maximum execution time (and lock timeout) for the 1-hour synchronization task.
+# SYNC_LONG_EXECUTION_TIME=1800
+
+# Maximum number of PHP-FPM worker processes to spawn.
+# PHP_FPM_MAX_CHILDREN=5
+
+#
+# PHP-SPX profiling extension Configuration
+#
+# These environment variables allow you to enable and configure the PHP-SPX profiling extension
+# which can be helpful when debugging resource issues in AzuraCast.
+#
+# The profiling dashboard can be accessed by visiting https://yourdomain.com/?SPX_KEY=dev&SPX_UI_URI=/
+# If you change the PROFILING_EXTENSION_HTTP_KEY variable, change the value for SPX_KEY accordingly.
+#
+
+# Enable the profiling extension.
+# Profiling data can be viewed by visiting http://your-azuracast-site/?SPX_KEY=dev&SPX_UI_URI=/
+# Default: 0
+# PROFILING_EXTENSION_ENABLED=0
+
+# Profile ALL requests made to this account.
+# This will have significant performance impact on your installation and should only be used in test circumstances.
+# Default: 0
+# PROFILING_EXTENSION_ALWAYS_ON=0
+
+# Configure the value for the SPX_KEY parameter needed to access the profiling dashboard
+# Default: dev
+# PROFILING_EXTENSION_HTTP_KEY=dev
+
+# Configure the IP whitelist for the profiling dashboard
+# By default only localhost is allowed to access this page.
+# Uncomment this line to enable access for you.
+# Default: 127.0.0.1
+# PROFILING_EXTENSION_HTTP_IP_WHITELIST=*
diff --git a/roles/azuracast/upstream/docker-compose.sample.yml b/roles/azuracast/upstream/docker-compose.sample.yml
new file mode 100644
index 000000000..3c3759957
--- /dev/null
+++ b/roles/azuracast/upstream/docker-compose.sample.yml
@@ -0,0 +1,214 @@
+#
+# AzuraCast Docker Compose Configuration File
+#
+# When updating, you will be prompted to replace this file with a new
+# version; you should do this whenever possible to take advantage of
+# new updates.
+#
+# If you need to customize this file, you can create a new file named:
+# docker-compose.override.yml
+# with any changes you need to make.
+#
+
+services:
+ web:
+ container_name: azuracast
+ image: "ghcr.io/azuracast/azuracast:${AZURACAST_VERSION:-latest}"
+ # Want to customize the HTTP/S ports? Follow the instructions here:
+ # https://docs.azuracast.com/en/administration/docker#using-non-standard-ports
+ ports:
+ - '${AZURACAST_HTTP_PORT:-80}:80'
+ - '${AZURACAST_HTTPS_PORT:-443}:443'
+ - '${AZURACAST_SFTP_PORT:-2022}:2022'
+ # This default mapping is the outgoing and incoming ports for the first 50 stations.
+ # You can override this port mapping in your own docker-compose.override.yml file.
+ # For instructions, see:
+ # https://docs.azuracast.com/en/administration/docker#expanding-the-station-port-range
+ - '8000:8000'
+ - '8005:8005'
+ - '8006:8006'
+ - '8010:8010'
+ - '8015:8015'
+ - '8016:8016'
+ - '8020:8020'
+ - '8025:8025'
+ - '8026:8026'
+ - '8030:8030'
+ - '8035:8035'
+ - '8036:8036'
+ - '8040:8040'
+ - '8045:8045'
+ - '8046:8046'
+ - '8050:8050'
+ - '8055:8055'
+ - '8056:8056'
+ - '8060:8060'
+ - '8065:8065'
+ - '8066:8066'
+ - '8070:8070'
+ - '8075:8075'
+ - '8076:8076'
+ - '8090:8090'
+ - '8095:8095'
+ - '8096:8096'
+ - '8100:8100'
+ - '8105:8105'
+ - '8106:8106'
+ - '8110:8110'
+ - '8115:8115'
+ - '8116:8116'
+ - '8120:8120'
+ - '8125:8125'
+ - '8126:8126'
+ - '8130:8130'
+ - '8135:8135'
+ - '8136:8136'
+ - '8140:8140'
+ - '8145:8145'
+ - '8146:8146'
+ - '8150:8150'
+ - '8155:8155'
+ - '8156:8156'
+ - '8160:8160'
+ - '8165:8165'
+ - '8166:8166'
+ - '8170:8170'
+ - '8175:8175'
+ - '8176:8176'
+ - '8180:8180'
+ - '8185:8185'
+ - '8186:8186'
+ - '8190:8190'
+ - '8195:8195'
+ - '8196:8196'
+ - '8200:8200'
+ - '8205:8205'
+ - '8206:8206'
+ - '8210:8210'
+ - '8215:8215'
+ - '8216:8216'
+ - '8220:8220'
+ - '8225:8225'
+ - '8226:8226'
+ - '8230:8230'
+ - '8235:8235'
+ - '8236:8236'
+ - '8240:8240'
+ - '8245:8245'
+ - '8246:8246'
+ - '8250:8250'
+ - '8255:8255'
+ - '8256:8256'
+ - '8260:8260'
+ - '8265:8265'
+ - '8266:8266'
+ - '8270:8270'
+ - '8275:8275'
+ - '8276:8276'
+ - '8280:8280'
+ - '8285:8285'
+ - '8286:8286'
+ - '8290:8290'
+ - '8295:8295'
+ - '8296:8296'
+ - '8300:8300'
+ - '8305:8305'
+ - '8306:8306'
+ - '8310:8310'
+ - '8315:8315'
+ - '8316:8316'
+ - '8320:8320'
+ - '8325:8325'
+ - '8326:8326'
+ - '8330:8330'
+ - '8335:8335'
+ - '8336:8336'
+ - '8340:8340'
+ - '8345:8345'
+ - '8346:8346'
+ - '8350:8350'
+ - '8355:8355'
+ - '8356:8356'
+ - '8360:8360'
+ - '8365:8365'
+ - '8366:8366'
+ - '8370:8370'
+ - '8375:8375'
+ - '8376:8376'
+ - '8380:8380'
+ - '8385:8385'
+ - '8386:8386'
+ - '8390:8390'
+ - '8395:8395'
+ - '8396:8396'
+ - '8400:8400'
+ - '8405:8405'
+ - '8406:8406'
+ - '8410:8410'
+ - '8415:8415'
+ - '8416:8416'
+ - '8420:8420'
+ - '8425:8425'
+ - '8426:8426'
+ - '8430:8430'
+ - '8435:8435'
+ - '8436:8436'
+ - '8440:8440'
+ - '8445:8445'
+ - '8446:8446'
+ - '8450:8450'
+ - '8455:8455'
+ - '8456:8456'
+ - '8460:8460'
+ - '8465:8465'
+ - '8466:8466'
+ - '8470:8470'
+ - '8475:8475'
+ - '8476:8476'
+ - '8480:8480'
+ - '8485:8485'
+ - '8486:8486'
+ - '8490:8490'
+ - '8495:8495'
+ - '8496:8496'
+ env_file: azuracast.env
+ environment:
+ LANG: ${LANG:-en_US.UTF-8}
+ AZURACAST_DC_REVISION: 14
+ AZURACAST_VERSION: ${AZURACAST_VERSION:-latest}
+ AZURACAST_SFTP_PORT: ${AZURACAST_SFTP_PORT:-2022}
+ NGINX_TIMEOUT: ${NGINX_TIMEOUT:-1800}
+ LETSENCRYPT_HOST: ${LETSENCRYPT_HOST:-}
+ LETSENCRYPT_EMAIL: ${LETSENCRYPT_EMAIL:-}
+ PUID: ${AZURACAST_PUID:-1000}
+ PGID: ${AZURACAST_PGID:-1000}
+ volumes:
+ - www_uploads:/var/azuracast/uploads
+ - station_data:/var/azuracast/stations
+ - shoutcast2_install:/var/azuracast/servers/shoutcast2
+ - stereo_tool_install:/var/azuracast/servers/stereo_tool
+ - geolite_install:/var/azuracast/geoip
+ - sftpgo_data:/var/azuracast/sftpgo/persist
+ - backups:/var/azuracast/backups
+ - acme:/var/azuracast/acme
+ - db_data:/var/lib/mysql
+ restart: unless-stopped
+ ulimits: &default-ulimits
+ nofile:
+ soft: 65536
+ hard: 65536
+ logging: &default-logging
+ options:
+ max-size: "1m"
+ max-file: "5"
+
+volumes:
+ db_data: { }
+ acme: { }
+ shoutcast2_install: { }
+ stereo_tool_install: { }
+ geolite_install: { }
+ sftpgo_data: { }
+ station_data: { }
+ www_uploads: { }
+ backups: { }
diff --git a/roles/azuracast/upstream/docker.sh b/roles/azuracast/upstream/docker.sh
new file mode 100755
index 000000000..dfa1c2b81
--- /dev/null
+++ b/roles/azuracast/upstream/docker.sh
@@ -0,0 +1,840 @@
+#!/usr/bin/env bash
+# shellcheck disable=SC2145,SC2178,SC2120,SC2162
+
+# Functions to manage .env files
+__dotenv=
+__dotenv_file=
+__dotenv_cmd=.env
+
+.env() {
+ REPLY=()
+ [[ $__dotenv_file || ${1-} == -* ]] || .env.--file .env || return
+ if declare -F -- ".env.${1-}" >/dev/null; then
+ .env."$@"
+ return
+ fi
+ return 64
+}
+
+.env.-f() { .env.--file "$@"; }
+
+.env.get() {
+ .env::arg "get requires a key" "$@" &&
+ [[ "$__dotenv" =~ ^(.*(^|$'\n'))([ ]*)"$1="(.*)$ ]] &&
+ REPLY=${BASH_REMATCH[4]%%$'\n'*} && REPLY=${REPLY%"${REPLY##*[![:space:]]}"}
+}
+
+.env.parse() {
+ local line key
+ while IFS= read -r line; do
+ line=${line#"${line%%[![:space:]]*}"} # trim leading whitespace
+ line=${line%"${line##*[![:space:]]}"} # trim trailing whitespace
+ if [[ ! "$line" || "$line" == '#'* ]]; then continue; fi
+ if (($#)); then
+ for key; do
+ if [[ $key == "${line%%=*}" ]]; then
+ REPLY+=("$line")
+ break
+ fi
+ done
+ else
+ REPLY+=("$line")
+ fi
+ done <<<"$__dotenv"
+ ((${#REPLY[@]}))
+}
+
+.env.export() { ! .env.parse "$@" || export "${REPLY[@]}"; }
+
+.env.set() {
+ .env::file load || return
+ local key saved=$__dotenv
+ while (($#)); do
+ key=${1#+}
+ key=${key%%=*}
+ if .env.get "$key"; then
+ REPLY=()
+ if [[ $1 == +* ]]; then
+ shift
+ continue # skip if already found
+ elif [[ $1 == *=* ]]; then
+ __dotenv=${BASH_REMATCH[1]}${BASH_REMATCH[3]}$1$'\n'${BASH_REMATCH[4]#*$'\n'}
+ else
+ __dotenv=${BASH_REMATCH[1]}${BASH_REMATCH[4]#*$'\n'}
+ continue # delete all occurrences
+ fi
+ elif [[ $1 == *=* ]]; then
+ __dotenv+="${1#+}"$'\n'
+ fi
+ shift
+ done
+ [[ $__dotenv == "$saved" ]] || .env::file save
+}
+
+.env.puts() { echo "${1-}" >>"$__dotenv_file" && __dotenv+="$1"$'\n'; }
+
+.env.generate() {
+ .env::arg "key required for generate" "$@" || return
+ .env.get "$1" && return || REPLY=$("${@:2}") || return
+ .env::one "generate: ouptut of '${*:2}' has more than one line" "$REPLY" || return
+ .env.puts "$1=$REPLY"
+}
+
+.env.--file() {
+ .env::arg "filename required for --file" "$@" || return
+ __dotenv_file=$1
+ .env::file load || return
+ (($# < 2)) || .env "${@:2}"
+}
+
+.env::arg() { [[ "${2-}" ]] || {
+ echo "$__dotenv_cmd: $1" >&2
+ return 64
+}; }
+
+.env::one() { [[ "$2" != *$'\n'* ]] || .env::arg "$1"; }
+
+.env::file() {
+ local REPLY=$__dotenv_file
+ case "$1" in
+ load)
+ __dotenv=
+ ! [[ -f "$REPLY" ]] || __dotenv="$(<"$REPLY")"$'\n' || return
+ ;;
+ save)
+ if [[ -L "$REPLY" ]] && declare -F -- realpath.resolved >/dev/null; then
+ realpath.resolved "$REPLY"
+ fi
+ { [[ ! -f "$REPLY" ]] || cp -p "$REPLY" "$REPLY.bak"; } &&
+ printf %s "$__dotenv" >"$REPLY.bak" && mv "$REPLY.bak" "$REPLY"
+ ;;
+ esac
+}
+
+# Shortcut to convert semver version (x.yyy.zzz) into a comparable number.
+version-number() {
+ echo "$@" | awk -F. '{ printf("%03d%03d%03d\n", $1,$2,$3); }'
+}
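+# (e.g. 'version-number 3.4.1' prints 003004001, so two semver strings can be
+# compared as plain integers.)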
+
+# Get the current release channel for AzuraCast
+get-release-channel() {
+ local AZURACAST_VERSION="latest"
+ if [[ -f .env ]]; then
+ .env --file .env get AZURACAST_VERSION
+ AZURACAST_VERSION="${REPLY:-latest}"
+ fi
+
+ echo "$AZURACAST_VERSION"
+}
+
+get-release-branch-name() {
+ if [[ $(get-release-channel) == "stable" ]]; then
+ echo "stable"
+ else
+ echo "main"
+ fi
+}
+
+# This is a general-purpose function to ask Yes/No questions in Bash, either
+# with or without a default answer. It keeps repeating the question until it
+# gets a valid answer.
+ask() {
+ # https://djm.me/ask
+ local prompt default reply
+
+ while true; do
+
+ if [[ "${2:-}" == "Y" ]]; then
+ prompt="Y/n"
+ default=Y
+ elif [[ "${2:-}" == "N" ]]; then
+ prompt="y/N"
+ default=N
+ else
+ prompt="y/n"
+ default=
+ fi
+
+ # Ask the question (not using "read -p" as it uses stderr not stdout)
+ echo -n "$1 [$prompt] "
+
+ read reply
+
+ # Default?
+ if [[ -z "$reply" ]]; then
+ reply=${default}
+ fi
+
+ # Check if the reply is valid
+ case "$reply" in
+ Y* | y*) return 0 ;;
+ N* | n*) return 1 ;;
+ esac
+
+ done
+}
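+# (Usage sketch: 'ask "Install Docker now?" Y' returns 0 if the user answers yes
+# or just presses Enter to accept the default, and 1 if they answer no.)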
+
+# Generate a prompt to set an environment file value.
+envfile-set() {
+ local VALUE INPUT
+
+ .env --file .env
+
+ .env get "$1"
+ VALUE=${REPLY:-$2}
+
+ echo -n "$3 [$VALUE]: "
+ read INPUT
+
+ VALUE=${INPUT:-$VALUE}
+
+ .env set "${1}=${VALUE}"
+}
+
+#
+# Configure the ports used by AzuraCast.
+#
+setup-ports() {
+ envfile-set "AZURACAST_HTTP_PORT" "80" "Port to use for HTTP connections"
+ envfile-set "AZURACAST_HTTPS_PORT" "443" "Port to use for HTTPS connections"
+ envfile-set "AZURACAST_SFTP_PORT" "2022" "Port to use for SFTP connections"
+}
+
+#
+# Configure release mode settings.
+#
+setup-release() {
+ if [[ ! -f .env ]]; then
+ curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/main/sample.env -o .env
+ fi
+
+ local OLD_RELEASE_CHANNEL
+ .env --file .env get AZURACAST_VERSION
+ OLD_RELEASE_CHANNEL="${REPLY:-latest}"
+
+ local AZURACAST_VERSION="${OLD_RELEASE_CHANNEL}"
+
+ if [[ $AZURACAST_VERSION == "latest" ]]; then
+ if ask "Your current release channel is 'Rolling Release'. Switch to 'Stable' release channel?" N; then
+ AZURACAST_VERSION="stable"
+ fi
+ elif [[ $AZURACAST_VERSION == "stable" ]]; then
+ if ask "Your current release channel is 'Stable'. Switch to 'Rolling Release' release channel?" N; then
+ AZURACAST_VERSION="latest"
+ fi
+ fi
+
+ .env --file .env set AZURACAST_VERSION=${AZURACAST_VERSION}
+
+ if [[ $AZURACAST_VERSION != $OLD_RELEASE_CHANNEL ]]; then
+ if ask "You should update the Docker Utility Script after changing release channels. Automatically update it now?" Y; then
+ update-self
+ fi
+ fi
+}
+
+check-install-requirements() {
+ local CURRENT_OS CURRENT_ARCH REQUIRED_COMMANDS SCRIPT_DIR
+
+ set -e
+
+ echo "Checking installation requirements for AzuraCast..."
+
+ CURRENT_OS=$(uname -s)
+ if [[ $CURRENT_OS == "Linux" ]]; then
+ echo -en "\e[32m[PASS]\e[0m Operating System: ${CURRENT_OS}\n"
+ else
+ echo -en "\e[41m[FAIL]\e[0m Operating System: ${CURRENT_OS}\n"
+
+ echo " You are running an unsupported operating system."
+ echo " Automated AzuraCast installation is not currently supported on this"
+ echo " operating system."
+ exit 1
+ fi
+
+ CURRENT_ARCH=$(uname -m)
+ if [[ $CURRENT_ARCH == "x86_64" ]]; then
+ echo -en "\e[32m[PASS]\e[0m Architecture: ${CURRENT_ARCH}\n"
+ elif [[ $CURRENT_ARCH == "aarch64" ]]; then
+ echo -en "\e[32m[PASS]\e[0m Architecture: ${CURRENT_ARCH}\n"
+ else
+ echo -en "\e[41m[FAIL]\e[0m Architecture: ${CURRENT_ARCH}\n"
+
+ echo " You are running an unsupported processor architecture."
+ echo " Automated AzuraCast installation is not currently supported on this "
+ echo " operating system."
+ exit 1
+ fi
+
+ REQUIRED_COMMANDS=(curl awk)
+ for COMMAND in "${REQUIRED_COMMANDS[@]}" ; do
+ if [[ $(command -v "$COMMAND") ]]; then
+ echo -en "\e[32m[PASS]\e[0m Command Present: ${COMMAND}\n"
+ else
+ echo -en "\e[41m[FAIL]\e[0m Command Present: ${COMMAND}\n"
+
+ echo " ${COMMAND} does not appear to be installed."
+ echo " Install ${COMMAND} using your host's package manager,"
+ echo " then continue installing using this script."
+ exit 1
+ fi
+ done
+
+ if [[ $EUID -ne 0 ]]; then
+ if [[ $(command -v sudo) ]]; then
+ echo -en "\e[32m[PASS]\e[0m User Permissions\n"
+ else
+ echo -en "\e[41m[FAIL]\e[0m User Permissions\n"
+
+ echo " You are not currently the root user, and "
+ echo " 'sudo' does not appear to be installed."
+ echo " Install sudo using your host's package manager,"
+ echo " then continue installing using this script."
+ exit 1
+ fi
+ else
+ echo -en "\e[32m[PASS]\e[0m User Permissions\n"
+ fi
+
+ SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+ if [[ $SCRIPT_DIR == "/var/azuracast" ]]; then
+ echo -en "\e[32m[PASS]\e[0m Installation Directory\n"
+ else
+ echo -en "\e[93m[WARN]\e[0m Installation Directory\n"
+ echo " AzuraCast is not installed in /var/azuracast, as is recommended"
+ echo " for most installations. This will not prevent AzuraCast from"
+ echo " working, but you will need to update any instructions in our"
+ echo " documentation to reflect your current directory:"
+ echo " $SCRIPT_DIR"
+ fi
+
+ echo -en "\e[32m[PASS]\e[0m All requirements met!\n"
+
+ set +e
+}
+
+install-docker() {
+ set -e
+
+ curl -fsSL get.docker.com -o get-docker.sh
+ sh get-docker.sh
+ rm get-docker.sh
+
+ if [[ $EUID -ne 0 ]]; then
+ sudo usermod -aG docker "$(whoami)"
+
+ echo "You must log out or restart to apply necessary Docker permissions changes."
+ echo "Restart, then continue installing using this script."
+ exit 1
+ fi
+
+ set +e
+}
+
+install-docker-compose() {
+ set -e
+ echo "Installing Docker Compose..."
+
+ curl -fsSL -o docker-compose https://github.com/docker/compose/releases/download/v2.4.1/docker-compose-linux-$(uname -m)
+
+ ARCHITECTURE=amd64
+ if [ "$(uname -m)" = "aarch64" ]; then
+ ARCHITECTURE=arm64
+ fi
+ curl -fsSL -o docker-compose-switch https://github.com/docker/compose-switch/releases/download/v1.0.4/docker-compose-linux-${ARCHITECTURE}
+
+ if [[ $EUID -ne 0 ]]; then
+ sudo chmod a+x ./docker-compose
+ sudo chmod a+x ./docker-compose-switch
+
+ sudo mv ./docker-compose /usr/libexec/docker/cli-plugins/docker-compose
+ sudo mv ./docker-compose-switch /usr/local/bin/docker-compose
+ else
+ chmod a+x ./docker-compose
+ chmod a+x ./docker-compose-switch
+
+ mv ./docker-compose /usr/libexec/docker/cli-plugins/docker-compose
+ mv ./docker-compose-switch /usr/local/bin/docker-compose
+ fi
+
+ echo "Docker Compose updated!"
+ set +e
+}
+
+run-installer() {
+ local AZURACAST_RELEASE_BRANCH
+ AZURACAST_RELEASE_BRANCH=$(get-release-branch-name)
+
+ if [[ ! -f .env ]]; then
+ curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/$AZURACAST_RELEASE_BRANCH/sample.env -o .env
+ fi
+ if [[ ! -f azuracast.env ]]; then
+ curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/$AZURACAST_RELEASE_BRANCH/azuracast.sample.env -o azuracast.env
+ fi
+ if [[ ! -f docker-compose.yml ]]; then
+ curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/$AZURACAST_RELEASE_BRANCH/docker-compose.sample.yml -o docker-compose.yml
+ fi
+
+ touch docker-compose.new.yml
+
+ local dc_config_test=$(docker-compose -f docker-compose.new.yml config 2>/dev/null)
+ if [ $? -ne 0 ]; then
+ if ask "Docker Compose needs to be updated to continue. Update to latest version?" Y; then
+ install-docker-compose
+ fi
+ fi
+
+ curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/$AZURACAST_RELEASE_BRANCH/docker-compose.installer.yml -o docker-compose.installer.yml
+
+ docker-compose -p azuracast_installer -f docker-compose.installer.yml pull
+ docker-compose -p azuracast_installer -f docker-compose.installer.yml run --rm installer install "$@"
+
+ rm docker-compose.installer.yml
+}
+
+#
+# Run the initial installer of Docker and AzuraCast.
+# Usage: ./docker.sh install
+#
+install() {
+ check-install-requirements
+
+ if [[ $(command -v docker) && $(docker --version) ]]; then
+ echo "Docker is already installed! Continuing..."
+ else
+ if ask "Docker does not appear to be installed. Install Docker now?" Y; then
+ install-docker
+ fi
+ fi
+
+ if [[ $(command -v docker-compose) ]]; then
+ echo "Docker Compose is already installed. Continuing..."
+ else
+ if ask "Docker Compose does not appear to be installed. Install Docker Compose now?" Y; then
+ install-docker-compose
+ fi
+ fi
+
+ setup-release
+
+ run-installer "$@"
+
+ # Installer creates a file at docker-compose.new.yml; copy it to the main spot.
+ if [[ -s docker-compose.new.yml ]]; then
+ if [[ -f docker-compose.yml ]]; then
+ rm docker-compose.yml
+ fi
+
+ mv docker-compose.new.yml docker-compose.yml
+ fi
+
+ # If this script is running as a non-root user, set the PUID/PGID in the environment vars appropriately.
+ if [[ $EUID -ne 0 ]]; then
+ .env --file .env set AZURACAST_PUID="$(id -u)"
+ .env --file .env set AZURACAST_PGID="$(id -g)"
+ fi
+
+ docker-compose pull
+
+ docker-compose run --rm web -- azuracast_install "$@"
+ docker-compose up -d
+ exit
+}
+
+install-dev() {
+ if [[ $(command -v docker) && $(docker --version) ]]; then
+ echo "Docker is already installed! Continuing..."
+ else
+ if ask "Docker does not appear to be installed. Install Docker now?" Y; then
+ install-docker
+ fi
+ fi
+
+ if [[ $(command -v docker-compose) ]]; then
+ echo "Docker Compose is already installed. Continuing..."
+ else
+ if ask "Docker Compose does not appear to be installed. Install Docker Compose now?" Y; then
+ install-docker-compose
+ fi
+ fi
+
+ if [[ ! -f docker-compose.yml ]]; then
+ cp docker-compose.sample.yml docker-compose.yml
+ fi
+ if [[ ! -f docker-compose.override.yml ]]; then
+ cp docker-compose.dev.yml docker-compose.override.yml
+ fi
+ if [[ ! -f .env ]]; then
+ cp dev.env .env
+ fi
+ if [[ ! -f azuracast.env ]]; then
+ cp azuracast.dev.env azuracast.env
+
+ echo "Customize azuracast.env file now before continuing. Re-run this command to continue installation."
+ exit
+ fi
+
+ # If this script is running as a non-root user, set the PUID/PGID in the environment vars appropriately.
+ if [[ $EUID -ne 0 ]]; then
+ .env --file .env set AZURACAST_PUID="$(id -u)"
+ .env --file .env set AZURACAST_PGID="$(id -g)"
+ fi
+
+ chmod 777 ./frontend/ ./web/ ./vendor/ \
+ ./web/static/ ./web/static/api/ \
+ ./web/static/dist/ ./web/static/img/
+
+ docker-compose build
+ docker-compose run --rm web -- azuracast_install "$@"
+
+ docker-compose -p azuracast_frontend -f docker-compose.frontend.yml build
+ docker-compose -p azuracast_frontend -f docker-compose.frontend.yml run --rm frontend npm run build
+
+ docker-compose up -d
+ exit
+}
+
+#
+# Update the Docker images and codebase.
+# Usage: ./docker.sh update
+#
+update() {
+ echo "[NOTICE] Before you continue, please make sure you have a recent snapshot of your system and or backed it up."
+ if ask "Are you ready to continue with the update?" Y; then
+
+ # Check for a new Docker Utility Script.
+ local AZURACAST_RELEASE_BRANCH
+ AZURACAST_RELEASE_BRANCH=$(get-release-branch-name)
+
+ curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/$AZURACAST_RELEASE_BRANCH/docker.sh -o docker.new.sh
+
+ local UTILITY_FILES_MATCH
+ UTILITY_FILES_MATCH="$(
+ cmp --silent docker.sh docker.new.sh
+ echo $?
+ )"
+
+ local UPDATE_UTILITY=0
+ if [[ ${UTILITY_FILES_MATCH} -ne 0 ]]; then
+ if ask "The Docker Utility Script has changed since your version. Update to latest version?" Y; then
+ UPDATE_UTILITY=1
+ fi
+ fi
+
+ if [[ ${UPDATE_UTILITY} -ne 0 ]]; then
+ mv docker.new.sh docker.sh
+ chmod a+x docker.sh
+
+ echo "A new Docker Utility Script has been downloaded."
+ echo "Please re-run the update process to continue."
+ exit
+ else
+ rm docker.new.sh
+ fi
+
+ run-installer --update "$@"
+
+ # Check for updated Docker Compose config.
+ local COMPOSE_FILES_MATCH
+
+ if [[ ! -s docker-compose.new.yml ]]; then
+ curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/$AZURACAST_RELEASE_BRANCH/docker-compose.sample.yml -o docker-compose.new.yml
+ fi
+
+ COMPOSE_FILES_MATCH="$(
+ cmp --silent docker-compose.yml docker-compose.new.yml
+ echo $?
+ )"
+
+ if [[ ${COMPOSE_FILES_MATCH} -ne 0 ]]; then
+ docker-compose -f docker-compose.new.yml pull
+ docker-compose down
+
+ cp docker-compose.yml docker-compose.backup.yml
+ mv docker-compose.new.yml docker-compose.yml
+ else
+ rm docker-compose.new.yml
+
+ docker-compose pull
+ docker-compose down
+ fi
+
+ docker-compose run --rm web -- azuracast_update "$@"
+ docker-compose up -d
+
+ if ask "Clean up all stopped Docker containers and images to save space?" Y; then
+ docker system prune -f
+ fi
+
+ echo "Update complete!"
+ fi
+ exit
+}
+
+#
+# Update this Docker utility script.
+# Usage: ./docker.sh update-self
+#
+update-self() {
+ local AZURACAST_RELEASE_BRANCH
+ AZURACAST_RELEASE_BRANCH=$(get-release-branch-name)
+
+ curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/$AZURACAST_RELEASE_BRANCH/docker.sh -o docker.sh
+ chmod a+x docker.sh
+
+ echo "New Docker utility script downloaded."
+ exit
+}
+
+#
+# Run a CLI command inside the Docker container.
+# Usage: ./docker.sh cli [command]
+#
+cli() {
+ docker-compose exec --user="azuracast" web azuracast_cli "$@"
+ exit
+}
+
+#
+# Enter the bash terminal of the running web container.
+# Usage: ./docker.sh bash
+#
+bash() {
+ docker-compose exec --user="azuracast" web bash
+ exit
+}
+
+#
+# Enter the MariaDB database management terminal with the correct credentials.
+#
+db() {
+ local MYSQL_HOST MYSQL_PORT MYSQL_USER MYSQL_PASSWORD MYSQL_DATABASE
+
+ .env --file azuracast.env get MYSQL_HOST
+ MYSQL_HOST="${REPLY:-localhost}"
+
+ .env --file azuracast.env get MYSQL_PORT
+ MYSQL_PORT="${REPLY:-3306}"
+
+ .env --file azuracast.env get MYSQL_USER
+ MYSQL_USER="${REPLY:-azuracast}"
+
+ .env --file azuracast.env get MYSQL_PASSWORD
+ MYSQL_PASSWORD="${REPLY:-azur4c457}"
+
+ .env --file azuracast.env get MYSQL_DATABASE
+ MYSQL_DATABASE="${REPLY:-azuracast}"
+
+ docker-compose exec --user="mysql" web mysql --user=${MYSQL_USER} --password=${MYSQL_PASSWORD} \
+ --host=${MYSQL_HOST} --port=${MYSQL_PORT} --database=${MYSQL_DATABASE}
+
+ exit
+}
+
+#
+# Back up the Docker volumes to a .tar.gz file.
+# Usage:
+# ./docker.sh backup [/custom/backup/dir/custombackupname.zip]
+#
+backup() {
+ local BACKUP_PATH BACKUP_DIR BACKUP_FILENAME BACKUP_EXT
+ BACKUP_PATH=$(readlink -f ${1:-"./backup.tar.gz"})
+ BACKUP_DIR=$(dirname -- "$BACKUP_PATH")
+ BACKUP_FILENAME=$(basename -- "$BACKUP_PATH")
+ BACKUP_EXT="${BACKUP_FILENAME##*.}"
+ shift
+
+ # Prepare permissions
+ if [[ $EUID -ne 0 ]]; then
+ .env --file .env set AZURACAST_PUID="$(id -u)"
+ .env --file .env set AZURACAST_PGID="$(id -g)"
+ fi
+
+ docker-compose exec --user="azuracast" web azuracast_cli azuracast:backup "/var/azuracast/backups/${BACKUP_FILENAME}" "$@"
+
+ # Move from Docker volume to local filesystem
+ docker run --rm -v "azuracast_backups:/backup_src" \
+ -v "$BACKUP_DIR:/backup_dest" \
+ busybox mv "/backup_src/${BACKUP_FILENAME}" "/backup_dest/${BACKUP_FILENAME}"
+}
+
+#
+# Restore an AzuraCast backup into Docker.
+# Usage:
+# ./docker.sh restore [/custom/backup/dir/custombackupname.zip]
+#
+restore() {
+ if [[ ! -f .env ]] || [[ ! -f azuracast.env ]]; then
+ echo "AzuraCast hasn't been installed yet on this server."
+ echo "You should run './docker.sh install' first before restoring."
+ exit 1
+ fi
+
+ if ask "Restoring will remove any existing AzuraCast installation data, replacing it with your backup. Continue?" Y; then
+ if [[ $1 != "" ]]; then
+ local BACKUP_PATH BACKUP_DIR BACKUP_FILENAME BACKUP_EXT
+ BACKUP_PATH=$(readlink -f ${1:-"./backup.tar.gz"})
+ BACKUP_DIR=$(dirname -- "$BACKUP_PATH")
+ BACKUP_FILENAME=$(basename -- "$BACKUP_PATH")
+ BACKUP_EXT="${BACKUP_FILENAME##*.}"
+ shift
+
+ if [[ ! -f ${BACKUP_PATH} ]]; then
+ echo "File '${BACKUP_PATH}' does not exist. Nothing to restore."
+ exit 1
+ fi
+
+ docker-compose down -v
+ docker volume create azuracast_backups
+
+ # Move from local filesystem to Docker volume
+ docker run --rm -v "$BACKUP_DIR:/backup_src" \
+ -v "azuracast_backups:/backup_dest" \
+ busybox mv "/backup_src/${BACKUP_FILENAME}" "/backup_dest/${BACKUP_FILENAME}"
+
+ # Prepare permissions
+ if [[ $EUID -ne 0 ]]; then
+ .env --file .env set AZURACAST_PUID="$(id -u)"
+ .env --file .env set AZURACAST_PGID="$(id -g)"
+ fi
+
+ docker-compose run --rm web -- azuracast_restore "/var/azuracast/backups/${BACKUP_FILENAME}" "$@"
+
+ # Move file back from volume to local filesystem
+ docker run --rm -v "azuracast_backups:/backup_src" \
+ -v "$BACKUP_DIR:/backup_dest" \
+ busybox mv "/backup_src/${BACKUP_FILENAME}" "/backup_dest/${BACKUP_FILENAME}"
+
+ docker-compose down
+ docker-compose up -d
+ else
+ docker-compose down
+
+ # Remove all volumes except the backup volume.
+ docker volume rm -f $(docker volume ls | grep -v "azuracast_backups" | awk 'NR>1 {print $2}')
+
+ docker-compose run --rm web -- azuracast_restore "$@"
+
+ docker-compose down
+ docker-compose up -d
+ fi
+ fi
+ exit
+}
+
+#
+# Restore the Docker volumes from a legacy backup format .tar.gz file.
+# Usage:
+# ./docker.sh restore [/custom/backup/dir/custombackupname.tar.gz]
+#
+restore-legacy() {
+ local APP_BASE_DIR BACKUP_PATH BACKUP_DIR BACKUP_FILENAME
+
+ APP_BASE_DIR=$(pwd)
+
+ BACKUP_PATH=${1:-"./backup.tar.gz"}
+ BACKUP_DIR=$(cd "$(dirname "$BACKUP_PATH")" && pwd)
+ BACKUP_FILENAME=$(basename "$BACKUP_PATH")
+
+ cd "$APP_BASE_DIR" || exit
+
+ if [ -f "$BACKUP_PATH" ]; then
+ docker-compose down
+
+ docker volume rm azuracast_db_data azuracast_station_data
+ docker volume create azuracast_db_data
+ docker volume create azuracast_station_data
+
+ docker run --rm -v "$BACKUP_DIR:/backup" \
+ -v azuracast_db_data:/azuracast/db \
+ -v azuracast_station_data:/azuracast/stations \
+ busybox tar zxvf "/backup/$BACKUP_FILENAME"
+
+ docker-compose up -d
+ else
+ echo "File $BACKUP_PATH does not exist in this directory. Nothing to restore."
+ exit 1
+ fi
+
+ exit
+}
+
+#
+# DEVELOPER TOOL:
+# Access the static console as a developer.
+# Usage: ./docker.sh static [static_container_command]
+#
+static() {
+ docker-compose -f docker-compose.frontend.yml down -v
+ docker-compose -f docker-compose.frontend.yml build
+ docker-compose --env-file=.env -f docker-compose.frontend.yml run --rm frontend "$@"
+ exit
+}
+
+#
+# Stop all Docker containers and remove related volumes.
+# Usage: ./docker.sh uninstall
+#
+uninstall() {
+ if ask "This operation is destructive and will wipe your existing Docker containers. Continue?" N; then
+
+ docker-compose down -v
+ docker-compose rm -f
+ docker volume prune -f
+
+ echo "All AzuraCast Docker containers and volumes were removed."
+ echo "To remove *all* Docker containers and volumes, run:"
+ echo " docker stop \$(docker ps -a -q)"
+ echo " docker rm \$(docker ps -a -q)"
+ echo " docker volume prune -f"
+ echo ""
+ fi
+
+ exit
+}
+
+#
+# LetsEncrypt: Now managed via the Web UI.
+#
+setup-letsencrypt() {
+ echo "LetsEncrypt is now managed from within the web interface."
+}
+
+letsencrypt-create() {
+ setup-letsencrypt
+ exit
+}
+
+#
+# Utility script to facilitate switching ports.
+# Usage: ./docker.sh change-ports
+#
+change-ports() {
+ setup-ports
+
+ docker-compose down
+ docker-compose up -d
+}
+
+#
+# Helper scripts for basic Docker Compose functions
+#
+up() {
+ echo "Starting up AzuraCast services..."
+ docker-compose up -d
+}
+
+down() {
+ echo "Shutting down AzuraCast services..."
+ docker-compose down
+}
+
+restart() {
+ down
+ up
+}
+
+# Ensure we're in the same directory as this script.
+cd "$( dirname "${BASH_SOURCE[0]}" )" || exit
+
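+# "$@" dispatches to the shell function named by the first argument, passing any
+# remaining arguments along -- e.g. `./docker.sh backup /tmp/site.tar.gz` runs
+# `backup /tmp/site.tar.gz`.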
+"$@"
diff --git a/roles/azuracast/upstream/notes.txt b/roles/azuracast/upstream/notes.txt
new file mode 100644
index 000000000..47be401bc
--- /dev/null
+++ b/roles/azuracast/upstream/notes.txt
@@ -0,0 +1 @@
+The three files found here are mentioned in docker.sh's run-installer(), with 'sample.' added to each filename
diff --git a/roles/azuracast/upstream/sample.env b/roles/azuracast/upstream/sample.env
new file mode 100644
index 000000000..303905de1
--- /dev/null
+++ b/roles/azuracast/upstream/sample.env
@@ -0,0 +1,11 @@
+COMPOSE_PROJECT_NAME=azuracast
+
+AZURACAST_HTTP_PORT=80
+AZURACAST_HTTPS_PORT=443
+
+AZURACAST_SFTP_PORT=2022
+
+AZURACAST_PUID=1000
+AZURACAST_PGID=1000
+
+NGINX_TIMEOUT=1800
diff --git a/roles/calibre-web/README.rst b/roles/calibre-web/README.rst
index 84b5cadae..a29896573 100644
--- a/roles/calibre-web/README.rst
+++ b/roles/calibre-web/README.rst
@@ -13,101 +13,179 @@
Calibre-Web README
==================
-Calibre-Web provides a clean interface for browsing, reading and downloading
-e-books using an existing Calibre database. Teachers can upload e-books,
-adjust e-book metadata, and create custom e-book collections ("bookshelves"):
-https://github.com/janeczku/calibre-web#about
+This Ansible role installs
+`Calibre-Web `_ as a modern
+client-server alternative to Calibre, for your
+`Internet-in-a-Box (IIAB) `_.
-This Ansible role installs Calibre-Web as part of your Internet-in-a-Box (IIAB)
-as a possible alternative to Calibre.
+Calibre-Web provides a clean web interface for students to browse, read and
+download e-books using a
+`Calibre-compatible database `_.
-*WARNING: Calibre-Web depends on Calibre's own /usr/bin/ebook-convert program,
-so we strongly recommend you also install Calibre during your IIAB
-installation!*
+Teachers upload e-books, adjust e-book metadata, and create custom "bookshelf"
+collections β to help students build the best local community library!
-Please note Calibre-Web's Ansible playbook is ``/opt/iiab/iiab/roles/calibre-web``
-whereas its Ansible variables ``calibreweb_*`` do **not** include the dash,
-per Ansible recommendations.
+**NEW AS OF JANUARY 2024:** `IIAB's experimental new version of Calibre-Web `_
+**also lets you add YouTube and Vimeo videos (and local videos, e.g. from
+teachers' phones) to expand your indigenous/local/family learning library!**
+
+.. image:: https://www.yankodesign.com/images/design_news/2019/05/221758/luo_beetle_library_8.jpg
+
+π GURU TIPS π
+
+* Calibre-Web takes advantage of Calibre's own `/usr/bin/ebook-convert
+ `_ program
+ if that's installed β so consider also installing
+ `Calibre `_ during your IIAB
+ installation β *if you tolerate the weighty ~1 GB (of graphical OS libraries)
+ that Calibre mandates!*
+
+* If you choose to also install Calibre (e.g. by running
+ ``sudo apt install calibre``) then you'll get useful e-book
+ importing/organizing tools like
+ `/usr/bin/calibredb `_.
+
+Install It
+----------
+
+Install Calibre-Web by setting these 2 variables in
+`/etc/iiab/local_vars.yml `_::
+
+ calibreweb_install: True
+ calibreweb_enabled: True
+
+Then install IIAB (`download.iiab.io `_). Or if
+IIAB's already installed, run::
+
+ cd /opt/iiab/iiab
+ sudo ./runrole calibre-web
+
+NOTE: Calibre-Web's Ansible role (playbook) in
+`/opt/iiab/iiab/roles `_ is
+``calibre-web`` which contains a hyphen β *whereas its Ansible variables*
+``calibreweb_*`` *do NOT contain a hyphen!*
Using It
--------
-After installation, try out Calibre-Web at http://box/books (or box.lan/books).
+Try Calibre-Web on your own IIAB by browsing to http://box/books (or
+http://box.lan/books).
-Typically students access it without a password (to read and download books)
-whereas teachers add books using an administrative account, as follows::
+*Students* access it without a password (to read and download books).
+
+*Teachers* add and arrange books using an administrative account, by clicking
+**Guest** then logging in with::
Username: Admin
Password: changeme
-If the default configuration is not found, the Calibre-Web server creates a
-new settings file with calibre-web's own default administrative account::
+π GURU TIPS π
- Username: admin
- Password: admin123
+* If Calibre-Web's configuration file (app.db) goes missing, the administrative
+ account will revert to::
-Backend
--------
+ Username: admin
+ Password: admin123
-You can manage the backend Calibre-Web server with these systemd commands::
-
- systemctl enable calibre-web
- systemctl restart calibre-web
- systemctl status calibre-web
- systemctl stop calibre-web
+* If you lose your password, you can change it with the
+ ``-s [username]:[newpassword]`` command-line option:
+ https://github.com/janeczku/calibre-web/wiki/FAQ#what-do-i-do-if-i-lose-my-admin-password
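+
+ For example, a minimal sketch using this role's default paths (stop the
+ service first; the exact invocation may vary by install)::
+
+   sudo systemctl stop calibre-web
+   cd /usr/local/calibre-web-py3
+   sudo ./bin/python3 cps.py -s Admin:newpassword
+   sudo systemctl start calibre-web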
Configuration
-------------
-To configure Calibre-Web, log in as user 'Admin' then click 'Admin' on top.
-Check 'Configuration' options near the bottom of the page.
+To configure Calibre-Web, browse to http://box/books, then click **Guest** to
+log in as user **Admin** (default passwords above!)
-Critical settings are stored in::
+Then click the leftmost **Admin** button to administer β considering all 3
+**Configuration** buttons further below.
+
+These critical settings are stored in::
/library/calibre-web/config/app.db
-Your e-book metadata is stored in a Calibre-style database::
+Whereas your e-book metadata is stored in a Calibre-style database::
/library/calibre-web/metadata.db
+Videos' metadata is stored in a separate database::
+
+ /library/calibre-web/xklb-metadata.db
+
See also::
/library/calibre-web/metadata_db_prefs_backup.json
-See the official docs on Calibre-Web's `Runtime Configuration Options `_.
+Finally, take note of Calibre-Web's
+`FAQ `_ and official docs on
+its
+`Runtime Configuration Options `_
+and
+`Command Line Interface `_.
+
+Backend
+-------
+
+You can manage the backend Calibre-Web server with systemd commands like::
+
+ systemctl status calibre-web
+ systemctl stop calibre-web
+ systemctl restart calibre-web
+
+Run all commands
+`as root `_.
+
+Errors and warnings can be seen if you run::
+
+ journalctl -u calibre-web
+
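+To follow new log lines live (e.g. while reproducing a problem), add ``-f``::
+
+ journalctl -u calibre-web -f
+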
+Log verbosity level can be
+`adjusted `_
+within Calibre-Web's **Configuration > Basic Configuration > Logfile
+Configuration**.
+
+Finally, http://box/live/stats (Calibre-Web's **About** page) can be a very
+useful list of ~42 `Calibre-Web dependencies `_
+(mostly Python packages, and the version number of each that's installed).
Back Up Everything
------------------
Please back up the entire folder ``/library/calibre-web`` before upgrading β
-as it contains your Calibre-Web content **and** settings!
+as it contains your Calibre-Web content **and** configuration settings!
Upgrading
---------
-Reinstalling Calibre-Web automatically upgrades to the latest version if your
-Internet-in-a-Box (IIAB) is online.
+Please see our `new/automated upgrade technique (iiab-update) `_
+introduced in July 2024.
-But first: back up your content **and** settings, as explained above.
+But first: back up your content **and** configuration settings, as outlined
+above!
-**Then move your /library/calibre-web/metadata.db out of the way, if you're
-sure you want to (re)install bare/minimal metadata, and force all Calibre-Web
-settings to the default. Then run**::
+**Conversely if you're sure you want to fully reset your Calibre-Web settings,
+and remove all existing e-book/video/media metadata β then move your
+/library/calibre-web/config/app.db, /library/calibre-web/metadata.db and
+/library/calibre-web/xklb-metadata.db out of the way.**
+
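+For example (nothing is deleted; these renames are easily reversed, and you can
+skip any file that doesn't exist)::
+
+ cd /library/calibre-web
+ sudo mv config/app.db config/app.db.$(date +%F)
+ sudo mv metadata.db metadata.db.$(date +%F)
+ sudo mv xklb-metadata.db xklb-metadata.db.$(date +%F)
+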
+RECAP: Either way, "reinstalling" Calibre-Web automatically installs the latest
+version β so long as your Internet-in-a-Box (IIAB) is online. Most people
+should stick with the new ``iiab-update`` technique above. However if you must
+use the older/manual approach, you would need to run, as root::
cd /opt/iiab/iiab
- ./runrole calibre-web
-
-Or, to reinstall all of IIAB::
+ ./runrole --reinstall calibre-web
- cd /opt/iiab/iiab
- ./iiab-install --reinstall
+Or, if there's a need to try updating Calibre-Web's code alone::
-Or, if you just want to upgrade Calibre-Web code alone, prior to proceeding
-manually::
-
- cd /opt/iiab/calibre-web
+ cd /usr/local/calibre-web-py3
git pull
+Finally, this much older way is *no longer recommended*::
+
+ cd /opt/iiab/iiab
+ ./iiab-install --reinstall # OR: ./iiab-configure
+
Known Issues
------------
@@ -153,7 +231,7 @@ Known Issues
* |ss| Imagemagick policy prevents generating thumbnails for PDF's during upload: `#1530 `_ `janeczku/calibre-web#827 `_ |se|
-* Upload of not supported file formats gives no feedback to the user: `janeczku/calibre-web#828 `_
+* |ss| Upload of not supported file formats gives no feedback to the user: `janeczku/calibre-web#828 `_ |se| |nbsp| Fixed by `361a124 `_ on 2019-02-27.
-* *Please assist us in reporting serious issues here:*
- https://github.com/janeczku/calibre-web/issues
+* *Please report serious issues here:*
+ https://github.com/iiab/calibre-web/issues
diff --git a/roles/calibre-web/defaults/main.yml b/roles/calibre-web/defaults/main.yml
index 903b3b6e5..3e99725a9 100644
--- a/roles/calibre-web/defaults/main.yml
+++ b/roles/calibre-web/defaults/main.yml
@@ -14,23 +14,26 @@
# All above are set in: github.com/iiab/iiab/blob/master/vars/default_vars.yml
# If nec, change them by editing /etc/iiab/local_vars.yml prior to installing!
+calibreweb_repo_url: https://github.com/iiab/calibre-web # Or use upstream: https://github.com/janeczku/calibre-web
calibreweb_version: master # WAS: master, 0.6.4, 0.6.5, 0.6.6, 0.6.7, 0.6.8, 0.6.9
+calibreweb_venv_wipe: False # 2023-12-04: NEW default TDD (Test-Driven Dev!)
calibreweb_venv_path: /usr/local/calibre-web-py3
calibreweb_exec_path: "{{ calibreweb_venv_path }}/cps.py"
-# Config files put in:
+# Config files (in reality just app.db) put in:
calibreweb_config: "{{ calibreweb_home }}/config"
-# Calibre-Web will be provisioned with default administrative account,
-# metadata.db and language if /library/calibre-web/metadata.db does not exist.
-# NOT CURRENTLY IN USE: calibreweb_provision: True
-calibreweb_settings_database: app.db
-calibreweb_database: metadata.db
+# 2022-03-07: Calibre-Web will be reset to default settings if (re)installed
+# when /library/calibre-web/config/app.db doesn't exist:
+calibreweb_settings_database: app.db # /library/calibre-web/config/app.db
+
+# UNUSED var as of 2022-03-07:
+# calibreweb_database: metadata.db # /library/calibre-web/metadata.db
# Files owned by:
calibreweb_user: root
-# UNUSED variables, as of March 2019:
+# UNUSED vars, as of March 2019:
# calibreweb_admin_user: Admin
# calibreweb_admin_password: changeme
diff --git a/roles/calibre-web/files/app.db b/roles/calibre-web/files/app.db
index 31a8b716a..3183544da 100644
Binary files a/roles/calibre-web/files/app.db and b/roles/calibre-web/files/app.db differ
diff --git a/roles/calibre-web/tasks/enable-or-disable.yml b/roles/calibre-web/tasks/enable-or-disable.yml
new file mode 100644
index 000000000..493703dc7
--- /dev/null
+++ b/roles/calibre-web/tasks/enable-or-disable.yml
@@ -0,0 +1,52 @@
+- name: Enable & Restart 'calibre-web' systemd service, if calibreweb_enabled
+ systemd:
+ name: calibre-web
+ daemon_reload: yes
+ enabled: yes
+ state: restarted
+ when: calibreweb_enabled
+
+- name: Disable & Stop 'calibre-web' systemd service, if not calibreweb_enabled
+ systemd:
+ name: calibre-web
+ enabled: no
+ state: stopped
+ when: not calibreweb_enabled
+
+
+# TO DO: restore http://box/libros & http://box/livres etc, alongside English (#2195)
+# RELATED: https://github.com/janeczku/calibre-web/wiki/Setup-Reverse-Proxy
+
+- name: Enable http://box{{ calibreweb_url1 }} via NGINX, by installing {{ nginx_conf_dir }}/calibre-web-nginx.conf from template # http://box/books
+ template:
+ src: calibre-web-nginx.conf.j2
+ dest: "{{ nginx_conf_dir }}/calibre-web-nginx.conf" # /etc/nginx/conf.d
+ when: calibreweb_enabled
+
+- name: If enabling with Calibre-Web enhanced for large audio/video "books" too, also append onto calibre-web-nginx.conf AND symlink /library/www/html/calibre-web -> /library/calibre-web (WIP)
+ shell: |
+ if [ -f {{ calibreweb_venv_path }}/scripts/calibre-web-nginx.conf ]; then
+ cat {{ calibreweb_venv_path }}/scripts/calibre-web-nginx.conf >> {{ nginx_conf_dir }}/calibre-web-nginx.conf
+ # 2023-12-05: Not needed as a result of PR iiab/calibre-web#57
+ # ln -sf {{ calibreweb_home }} {{ doc_root }}/calibre-web
+ fi
+ when: calibreweb_enabled
+
+
+- name: Disable http://box{{ calibreweb_url1 }} via NGINX, by removing {{ nginx_conf_dir }}/calibre-web-nginx.conf
+ file:
+ path: "{{ nginx_conf_dir }}/calibre-web-nginx.conf"
+ state: absent
+ when: not calibreweb_enabled
+
+- name: If disabling, also remove symlink /library/www/html/calibre-web (WIP)
+ file:
+ path: "{{ doc_root }}/calibre-web" # /library/www/html
+ state: absent
+ when: not calibreweb_enabled
+
+
+- name: Restart 'nginx' systemd service
+ systemd:
+ name: nginx
+ state: restarted
diff --git a/roles/calibre-web/tasks/install.yml b/roles/calibre-web/tasks/install.yml
index 7d82f6677..d293afe23 100644
--- a/roles/calibre-web/tasks/install.yml
+++ b/roles/calibre-web/tasks/install.yml
@@ -1,10 +1,50 @@
-- name: "Install packages: imagemagick, python3-venv"
+# Or try 'iiab-update -f' for a more rapid upgrade of IIAB Calibre-Web:
+#
+# https://wiki.iiab.io/go/FAQ#Can_I_upgrade_IIAB_software%3F
+# https://github.com/iiab/calibre-web/wiki#upgrading
+# https://github.com/iiab/iiab/blob/master/scripts/iiab-update
+# https://github.com/iiab/iiab/tree/master/roles/calibre-web#upgrading
+
+
+- name: Record (initial) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df1
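+# 'df -B1 --output=used /' prints a 'Used' header plus one byte count for the
+# root filesystem; 'tail -1' keeps just the number. The same command runs again
+# at the end of this playbook, and the difference (df2 - df1) is recorded as
+# calibreweb_disk_usage in /etc/iiab/iiab.ini.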
+
+
+- name: Stop 'calibre-web' systemd service for safety (RED ERROR CAN BE IGNORED!)
+ systemd:
+ name: calibre-web
+ state: stopped
+ ignore_errors: True # Shows red errors, and continue...
+ #failed_when: False # Hides red errors, and continue...
+
+# Official upstream instructions:
+# apt install python3-pip python3-venv
+# https://github.com/janeczku/calibre-web/wiki/Manual-installation
+- name: "Install package: imagemagick"
package:
name:
- imagemagick
- - python3-venv
+ #- python3-cryptography # Was needed on Raspberry Pi OS (SEE iiab/calibre-web#260, janeczku/calibre-web#3183)
+ #- python3-netifaces
state: present
+# https://github.com/iiab/iiab/pull/3496#issuecomment-1475094542
+#- name: "Install packages: python3-dev, gcc to compile 'netifaces'"
+# package:
+# name:
+# - python3-dev # header files
+# - gcc # compiler
+# state: present
+# when: python_version is version('3.10', '>=')
+
+- name: Does /etc/ImageMagick-6/policy.xml exist?
+ stat:
+ path: /etc/ImageMagick-6/policy.xml
+ register: imagemagick6_policy_xml
+
+# 2024-12-16: Debian 13 uses /etc/ImageMagick-7/policy.xml instead, which doesn't need this lineinfile surgery:
+# https://stackoverflow.com/questions/52998331/imagemagick-security-policy-pdf-blocking-conversion
- name: Allow ImageMagick to read PDFs, per /etc/ImageMagick-6/policy.xml, to create book cover thumbnails
lineinfile:
path: /etc/ImageMagick-6/policy.xml
@@ -12,43 +52,111 @@
backrefs: yes
line: ' '
state: present
+ when: imagemagick6_policy_xml.stat.exists
-- name: "Create 3 Calibre-Web folders to store data and config files: {{ calibreweb_home }}, {{ calibreweb_venv_path }}, {{ calibreweb_config }} (all set to {{ calibreweb_user }}:{{ apache_user }}) (default to 0755)"
+- name: "Create 2 Calibre-Web folders to store data and config files: {{ calibreweb_home }}, {{ calibreweb_config }} (each set to {{ calibreweb_user }}:{{ apache_user }}, default to 0755)"
file:
state: directory
path: "{{ item }}"
owner: "{{ calibreweb_user }}" # root
group: "{{ apache_user }}" # www-data on debuntu
- #mode: '0755'
with_items:
- "{{ calibreweb_home }}" # /library/calibre-web
- "{{ calibreweb_config }}" # /library/calibre-web/config
- - "{{ calibreweb_venv_path }}" # /usr/local/calibre-web-py3
-## TODO: Calibre-web future release might get into pypi https://github.com/janeczku/calibre-web/issues/456
-- name: Clone i.e. download Calibre-Web ({{ calibreweb_version }}) from https://github.com/janeczku/calibre-web.git to {{ calibreweb_venv_path }} (~94 MB initially, ~115+ MB later)
+# FYI since May 2021, Calibre-Web (major releases) can be installed with pip:
+# https://pypi.org/project/calibreweb/
+# https://github.com/janeczku/calibre-web/issues/456
+# https://github.com/janeczku/calibre-web/issues/677
+# https://github.com/janeczku/calibre-web/pull/927
+# https://github.com/janeczku/calibre-web/pull/1459
+
+- name: "Remove previous virtual environment {{ calibreweb_venv_path }} -- if 'calibreweb_venv_wipe: True'"
+ file:
+ path: "{{ calibreweb_venv_path }}" # /usr/local/calibre-web-py3
+ state: absent
+ when: calibreweb_venv_wipe
+
+- name: Does {{ calibreweb_venv_path }} exist?
+ stat:
+ path: "{{ calibreweb_venv_path }}"
+ register: calibreweb_venv
+
+- name: git clone Calibre-Web ({{ calibreweb_version }}) from {{ calibreweb_repo_url }} to {{ calibreweb_venv_path }} (~122 MB initially, ~191+ or ~203+ MB later) -- if {{ calibreweb_venv_path }} doesn't exist
git:
- repo: https://github.com/janeczku/calibre-web.git
- dest: "{{ calibreweb_venv_path }}" # /usr/local/calibre-web
- force: yes
- depth: 1
- version: "{{ calibreweb_version }}" # e.g. master, 0.6.5
+ repo: "{{ calibreweb_repo_url }}" # e.g. https://github.com/iiab/calibre-web or https://github.com/janeczku/calibre-web
+ dest: "{{ calibreweb_venv_path }}"
+ #force: True # CLAIM: "If true, any modified files in the working repository will be discarded" -- REALITY: even if `force: no`, Ansible destructively reclones (also removing all test branch commits etc!) -- unless a git credential is provided to Ansible?
+ #depth: 1 # 2023-11-04: Full clone for now, to help @deldesir & wider community testing
+ version: "{{ calibreweb_version }}" # e.g. master, 0.6.22
+ when: not calibreweb_venv.stat.exists
-## Ansible Pip Bug: Cannot use 'chdir' with 'env' https://github.com/ansible/ansible/issues/37912 (Patch landed)
-#- name: Download calibre-web dependencies into vendor subdirectory.
-# pip:
-# requirements: "{{ calibreweb_path }}/requirements.txt"
-# chdir: "{{ calibreweb_path }}"
-# extra_args: '--target vendor'
-# ignore_errors: True
-##
-# Implementing this with Ansible command module for now.
-- name: Download Calibre-Web dependencies (using pip) into python3 virtual environment {{ calibreweb_venv_path }}
+- name: cd {{ calibreweb_venv_path }} ; git pull {{ calibreweb_repo_url }} {{ calibreweb_version }} --no-rebase --no-edit -- if {{ calibreweb_venv_path }} exists
+ command: git pull "{{ calibreweb_repo_url }}" "{{ calibreweb_version }}" --no-rebase --no-edit
+ args:
+ chdir: "{{ calibreweb_venv_path }}"
+ when: calibreweb_venv.stat.exists
+
+- debug:
+ msg:
+ - "NEED BETTER/EXPERIMENTAL YouTube SCRAPING? RUN THE NEXT LINE -- for the latest yt-dlp 'nightly' release:"
+ - sudo pipx inject --pip-args='--upgrade --pre' -f library yt-dlp[default]
+
+- name: If Calibre-Web is being enhanced with audio/video "books" too, install/upgrade additional prereqs -- SEE https://github.com/iiab/calibre-web/wiki
+ shell: |
+ if [ -f {{ calibreweb_venv_path }}/scripts/lb-wrapper ]; then
+ apt install ffmpeg pipx -y
+ if lb --version; then
+ if pipx list | grep -q 'xklb'; then
+ pipx uninstall xklb
+ pipx install library
+ else
+ pipx reinstall library
+ fi
+ else
+ pipx install library
+ fi
+ ln -sf /root/.local/bin/lb /usr/local/bin/lb
+ if [ -f /root/.local/share/pipx/venvs/library/bin/yt-dlp ]; then
+ ln -sf /root/.local/share/pipx/venvs/library/bin/yt-dlp /usr/local/bin/yt-dlp
+ elif [ -f /root/.local/pipx/venvs/library/bin/yt-dlp ]; then
+ ln -sf /root/.local/pipx/venvs/library/bin/yt-dlp /usr/local/bin/yt-dlp
+ else
+ echo "ERROR: yt-dlp NOT FOUND"
+ fi
+ # NEED BETTER/EXPERIMENTAL YouTube SCRAPING? UNCOMMENT THE NEXT LINE -- for the latest yt-dlp "nightly" release:
+ # pipx inject --pip-args="--upgrade --pre" -f library yt-dlp[default]
+ #
+ # https://github.com/yt-dlp/yt-dlp-nightly-builds/releases
+ # https://pypi.org/project/yt-dlp/#history
+ cp {{ calibreweb_venv_path }}/scripts/lb-wrapper /usr/local/bin/
+ chmod a+x /usr/local/bin/lb-wrapper
+ fi
+
+- name: Download Calibre-Web dependencies from 'requirements.txt' into python3 virtual environment {{ calibreweb_venv_path }}
pip:
requirements: "{{ calibreweb_venv_path }}/requirements.txt"
virtualenv: "{{ calibreweb_venv_path }}" # /usr/local/calibre-web-py3
- virtualenv_site_packages: no
+ #virtualenv_site_packages: no
+ #virtualenv_command: python3 -m venv --system-site-packages {{ calibreweb_venv_path }}
virtualenv_command: python3 -m venv {{ calibreweb_venv_path }}
+ extra_args: --prefer-binary # 2023-10-01: Lifesaver when recent wheels (e.g. piwheels.org) are inevitably not yet built! SEE #3560
+
+# 2023-10-11: RasPiOS Bookworm doc for Python with venv (PEP 668 now enforced!)
+# https://www.raspberrypi.com/documentation/computers/os.html#use-python-on-a-raspberry-pi
+# https://www.raspberrypi.com/documentation/computers/os.html#install-python-packages-using-apt
+# https://www.raspberrypi.com/documentation/computers/os.html#install-python-libraries-using-pip
+
+# VIRTUALENV EXAMPLE COMMANDS:
+# python3 -m venv /usr/local/calibre-web-py3 (create venv)
+# cd /usr/local/calibre-web-py3
+# . bin/activate (or 'source bin/activate' -- this prepends '/usr/local/calibre-web-py3/bin' to yr PATH)
+# python3 -m pip list ('pip list' sufficient *IF* path set above!)
+# python3 -m pip freeze > /tmp/requirements.txt
+# python3 -m pip install -r requirements.txt
+# deactivate
+# https://pip.pypa.io/en/stable/user_guide/#requirements-files
+# https://pip.pypa.io/en/latest/reference/requirements-file-format/
- name: Install /etc/systemd/system/calibre-web.service from template
template:
@@ -66,28 +174,40 @@
dest: "{{ calibreweb_home }}" # /library/calibre-web
owner: "{{ calibreweb_user }}" # root
group: "{{ apache_user }}" # www-data on debuntu
- #mode: '0644'
backup: yes
with_items:
- roles/calibre-web/files/metadata.db
- roles/calibre-web/files/metadata_db_prefs_backup.json
when: not metadatadb.stat.exists
- #when: calibreweb_provision
-- name: Provision/Copy default admin settings to {{ calibreweb_config }}/app.db IF metadata.db did not exist
+- name: Does /library/calibre-web/config/app.db exist?
+ stat:
+ path: /library/calibre-web/config/app.db
+ register: appdb
+
+- name: Provision/Copy default admin settings to {{ calibreweb_config }}/app.db IF it did not exist
copy:
src: roles/calibre-web/files/app.db
dest: "{{ calibreweb_config }}" # /library/calibre-web/config
owner: "{{ calibreweb_user }}" # root
group: "{{ apache_user }}" # www-data on debuntu
- #mode: '0644'
backup: yes
- when: not metadatadb.stat.exists
- #when: calibreweb_provision
+ when: not appdb.stat.exists
# RECORD Calibre-Web AS INSTALLED
+- name: Record (final) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df2
+
+- name: Add 'calibreweb_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
+ ini_file:
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
+ section: calibre-web
+ option: calibreweb_disk_usage
+ value: "{{ df2.stdout|int - df1.stdout|int }}"
+
- name: "Set 'calibreweb_installed: True'"
set_fact:
calibreweb_installed: True
diff --git a/roles/calibre-web/tasks/main.yml b/roles/calibre-web/tasks/main.yml
index 40515eb57..cc0e89850 100644
--- a/roles/calibre-web/tasks/main.yml
+++ b/roles/calibre-web/tasks/main.yml
@@ -19,56 +19,47 @@
quiet: yes
-- name: Install Calibre-Web if 'calibreweb_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
- include_tasks: install.yml
- when: calibreweb_installed is undefined
+- block:
+ - name: Install Calibre-Web if 'calibreweb_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
+ include_tasks: install.yml
+ when: calibreweb_installed is undefined
-- name: Enable & Restart 'calibre-web' systemd service, if calibreweb_enabled
- systemd:
- name: calibre-web
- daemon_reload: yes
- enabled: yes
- state: restarted
- when: calibreweb_enabled
+ - include_tasks: enable-or-disable.yml
-- name: Disable & Stop 'calibre-web' systemd service, if not calibreweb_enabled
- systemd:
- name: calibre-web
- enabled: no
- state: stopped
- when: not calibreweb_enabled
+ - name: Add 'calibre-web' variable values to {{ iiab_ini_file }}
+ ini_file:
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
+ section: calibre-web
+ option: "{{ item.option }}"
+ value: "{{ item.value | string }}"
+ with_items:
+ - option: name
+ value: Calibre-Web
+ - option: description
+ value: '"Calibre-Web is a web app providing a clean interface for browsing, reading and downloading e-books."'
+ - option: calibreweb_install
+ value: "{{ calibreweb_install }}"
+ - option: calibreweb_enabled
+ value: "{{ calibreweb_enabled }}"
+ - option: calibreweb_url1
+ value: "{{ calibreweb_url1 }}"
+ - option: calibreweb_url2
+ value: "{{ calibreweb_url2 }}"
+ - option: calibreweb_url3
+ value: "{{ calibreweb_url3 }}"
+ - option: calibreweb_path
+ value: "{{ calibreweb_venv_path }}"
+ - option: calibreweb_home
+ value: "{{ calibreweb_home }}"
+ - option: calibreweb_port
+ value: "{{ calibreweb_port }}"
+ - option: calibreweb_settings_database
+ value: "{{ calibreweb_settings_database }}"
-- name: Enable/Disable/Restart NGINX
- include_tasks: nginx.yml
+ rescue:
-
-- name: Add 'calibre-web' variable values to {{ iiab_ini_file }}
- ini_file:
- path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
- section: calibre-web
- option: "{{ item.option }}"
- value: "{{ item.value | string }}"
- with_items:
- - option: name
- value: Calibre-Web
- - option: description
- value: '"Calibre-Web is a web app providing a clean interface for browsing, reading and downloading e-books."'
- - option: calibreweb_install
- value: "{{ calibreweb_install }}"
- - option: calibreweb_enabled
- value: "{{ calibreweb_enabled }}"
- - option: calibreweb_url1
- value: "{{ calibreweb_url1 }}"
- - option: calibreweb_url2
- value: "{{ calibreweb_url2 }}"
- - option: calibreweb_url3
- value: "{{ calibreweb_url3 }}"
- - option: calibreweb_path
- value: "{{ calibreweb_venv_path }}"
- - option: calibreweb_home
- value: "{{ calibreweb_home }}"
- - option: calibreweb_port
- value: "{{ calibreweb_port }}"
- - option: calibreweb_database
- value: "{{ calibreweb_database }}"
+ - name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
+ fail:
+ msg: ""
+ when: not skip_role_on_error
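+# NOTE: with this block/rescue layout, a failure anywhere in the block above
+# jumps to 'rescue'. If 'skip_role_on_error: True' the 'fail' task is skipped
+# and the playbook moves on to the next role; otherwise it stops the run here.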
diff --git a/roles/calibre-web/tasks/nginx.yml b/roles/calibre-web/tasks/nginx.yml
deleted file mode 100644
index 046bc65b8..000000000
--- a/roles/calibre-web/tasks/nginx.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# TO DO: restore http://box/libros & http://box/livres etc, alongside English (#2195)
-# RELATED: https://github.com/janeczku/calibre-web/wiki/Setup-Reverse-Proxy
-
-- name: Enable http://box{{ calibreweb_url1 }} via NGINX, by installing {{ nginx_conf_dir }}/calibre-web-nginx.conf from template # http://box/books
- template:
- src: calibre-web-nginx.conf.j2
- dest: "{{ nginx_conf_dir }}/calibre-web-nginx.conf" # /etc/nginx/conf.d
- when: calibreweb_enabled
-
-- name: Disable http://box{{ calibreweb_url1 }} via NGINX, by removing {{ nginx_conf_dir }}/calibre-web-nginx.conf
- file:
- path: "{{ nginx_conf_dir }}/calibre-web-nginx.conf" # /etc/nginx/conf.d
- state: absent
- when: not calibreweb_enabled
-
-- name: Restart 'nginx' systemd service
- systemd:
- name: nginx
- state: restarted
diff --git a/roles/calibre-web/templates/calibre-web-nginx.conf.j2 b/roles/calibre-web/templates/calibre-web-nginx.conf.j2
index d1f2da25b..2ebfe47fe 100644
--- a/roles/calibre-web/templates/calibre-web-nginx.conf.j2
+++ b/roles/calibre-web/templates/calibre-web-nginx.conf.j2
@@ -5,7 +5,7 @@ location {{ calibreweb_url1 }}/ {
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
- proxy_set_header X-Script-Name {{ calibreweb_url1 }};
+ proxy_set_header X-Script-Name "{{ calibreweb_url1 }}";
proxy_pass http://127.0.0.1:8083;
}
@@ -14,7 +14,7 @@ location {{ calibreweb_url2 }}/ {
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
- proxy_set_header X-Script-Name {{ calibreweb_url2 }};
+ proxy_set_header X-Script-Name "{{ calibreweb_url2 }}";
proxy_pass http://127.0.0.1:8083;
}
@@ -23,6 +23,6 @@ location {{ calibreweb_url3 }}/ {
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
- proxy_set_header X-Script-Name {{ calibreweb_url3 }};
+ proxy_set_header X-Script-Name "{{ calibreweb_url3 }}";
proxy_pass http://127.0.0.1:8083;
}
diff --git a/roles/calibre/defaults/main.yml b/roles/calibre/defaults/main.yml
index b41475e23..0f3643735 100644
--- a/roles/calibre/defaults/main.yml
+++ b/roles/calibre/defaults/main.yml
@@ -34,11 +34,11 @@ calibre_userdb: "{{ calibre_dbpath }}/users.sqlite"
# calibre-server --manage-users --userdb /library/calibre/users.sqlite
calibre_sample_book: "Metamorphosis-jackson.epub"
-# Must be downloadable from http://download.iiab.io/packages
+# Must be downloadable from https://download.iiab.io/packages
calibre_src_url: "https://raw.githubusercontent.com/kovidgoyal/calibre/master/setup/linux-installer.py"
-calibre_deb_url: "{{ iiab_download_url }}" # http://download.iiab.io/packages
+calibre_deb_url: "{{ iiab_download_url }}" # https://download.iiab.io/packages
# Above URL must offer both .deb files below: (for scripts/calibre-install-pinned-rpi.sh to run)
calibre_deb_pin_version: 3.33.1+dfsg-1 # for calibre_3.33.1+dfsg-1_all.deb (24M, 2018-10-21)
calibre_bin_deb_pin_version: "{{ calibre_deb_pin_version }}" # for calibre-bin_3.33.1+dfsg-1_armhf.deb (706K, 2018-10-23)
diff --git a/roles/calibre/tasks/enable-or-disable.yml b/roles/calibre/tasks/enable-or-disable.yml
new file mode 100644
index 000000000..07a0cc911
--- /dev/null
+++ b/roles/calibre/tasks/enable-or-disable.yml
@@ -0,0 +1,29 @@
+# http://box:8080 & http://box:8080/mobile WORK BUT OTHER URL'S LIKE http://box/calibre ARE A MESS (BOOKS RARELY DISPLAY)
+#
+# 2018-08-27 POSSIBLE FIX...CONSIDER THIS ProxyPass / ProxyPassReverse TECHNIQUE:
+# https://github.com/iiab/iiab/tree/master/roles/calibre-web/templates/calibre-web.conf.j2
+# (anyway this works great for calibre-web, allowing http://box/books
+# to work even better than http://box:8083 when box == 192.168.0.x !)
+#
+#- name: Attempt to enable http://box/calibre via Apache (UNTESTED)
+# command: a2ensite calibre.conf
+# when: apache_installed and calibre_enabled
+#
+#- name: Attempt to disable http://box/calibre via Apache (UNTESTED)
+# command: a2dissite calibre.conf
+# when: apache_installed and not calibre_enabled
+
+- name: Enable & (Re)Start 'calibre-serve' service, if calibre_enabled
+ systemd:
+ daemon_reload: yes
+ name: calibre-serve
+ enabled: yes
+ state: restarted
+ when: calibre_enabled
+
+- name: Disable & Stop 'calibre-serve' service, if not calibre_enabled
+ systemd:
+ name: calibre-serve
+ enabled: no
+ state: stopped
+ when: not calibre_enabled
diff --git a/roles/calibre/tasks/install.yml b/roles/calibre/tasks/install.yml
index 840440583..82218ada0 100644
--- a/roles/calibre/tasks/install.yml
+++ b/roles/calibre/tasks/install.yml
@@ -1,4 +1,9 @@
-# 1. INSTALL CALIBRE 3.39.1+ or 4.12+ (calibre, calibredb, calibre-server etc) ON ALL OS'S
+- name: Record (initial) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df1
+
+
+# 1. APT INSTALL CALIBRE 4.12+ or 5.12+ (calibre, calibredb, calibre-server etc) ON ALL OS'S
- name: "Install OS's latest packages: calibre, calibre-bin"
package:
@@ -79,6 +84,17 @@
# 5. RECORD Calibre AS INSTALLED
+- name: Record (final) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df2
+
+- name: Add 'calibre_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
+ ini_file:
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
+ section: calibre
+ option: calibre_disk_usage
+ value: "{{ df2.stdout|int - df1.stdout|int }}"
+
- name: "Set 'calibre_installed: True'"
set_fact:
calibre_installed: True
diff --git a/roles/calibre/tasks/main.yml b/roles/calibre/tasks/main.yml
index 2c05b42de..a6504b658 100644
--- a/roles/calibre/tasks/main.yml
+++ b/roles/calibre/tasks/main.yml
@@ -19,65 +19,37 @@
quiet: yes
-- name: Install Calibre if 'calibre_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
- include_tasks: install.yml
- when: calibre_installed is undefined
+- block:
-# http://box:8080 & http://box:8080/mobile WORK BUT OTHER URL'S LIKE http://box/calibre ARE A MESS (BOOKS RARELY DISPLAY)
-#
-# 2018-08-27 POSSIBLE FIX...CONSIDER THIS ProxyPass / ProxyPassReverse TECHNIQUE:
-# https://github.com/iiab/iiab/tree/master/roles/calibre-web/templates/calibre-web.conf.j2
-# (anyway this works great for calibre-web, allowing http://box/books
-# to work even better than http://box:8083 when box == 192.168.0.x !)
-#
-#- name: Attempt to enable http://box/calibre via Apache (UNTESTED)
-# command: a2ensite calibre.conf
-# when: apache_installed and calibre_enabled
-#
-#- name: Attempt to disable http://box/calibre via Apache (UNTESTED)
-# command: a2dissite calibre.conf
-# when: apache_installed and not calibre_enabled
+ - name: Install Calibre if 'calibre_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
+ include_tasks: install.yml
+ when: calibre_installed is undefined
-- name: Enable & (Re)Start 'calibre-serve' service, if calibre_enabled
- systemd:
- daemon_reload: yes
- name: calibre-serve
- enabled: yes
- state: restarted
- when: calibre_enabled
+ - include_tasks: enable-or-disable.yml
-- name: Disable & Stop 'calibre-serve' service, if not calibre_enabled
- systemd:
- name: calibre-serve
- enabled: no
- state: stopped
- when: not calibre_enabled
+ - name: Add 'calibre' variable values to {{ iiab_ini_file }}
+ ini_file:
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
+ section: calibre
+ option: "{{ item.option }}"
+ value: "{{ item.value | string }}"
+ with_items:
+ - option: name
+ value: Calibre
+ - option: description
+ value: '"Calibre is an extremely popular personal library system for e-books."'
+ - option: calibre_src_url
+ value: "{{ calibre_src_url }}"
+ - option: calibre_dbpath
+ value: "{{ calibre_dbpath }}"
+ - option: calibre_port
+ value: "{{ calibre_port }}"
+ - option: calibre_enabled
+ value: "{{ calibre_enabled }}"
-#- name: Enable/Disable/Restart Apache if primary
-# include_tasks: apache.yml
-# when: not nginx_enabled
-#
-#- name: Enable/Disable/Restart NGINX if primary
-# include_tasks: nginx.yml
-# when: nginx_enabled
+ rescue:
-
-- name: Add 'calibre' variable values to {{ iiab_ini_file }}
- ini_file:
- path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
- section: calibre
- option: "{{ item.option }}"
- value: "{{ item.value | string }}"
- with_items:
- - option: name
- value: Calibre
- - option: description
- value: '"Calibre is an extremely popular personal library system for e-books."'
- - option: calibre_src_url
- value: "{{ calibre_src_url }}"
- - option: calibre_dbpath
- value: "{{ calibre_dbpath }}"
- - option: calibre_port
- value: "{{ calibre_port }}"
- - option: calibre_enabled
- value: "{{ calibre_enabled }}"
+ - name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
+ fail:
+ msg: ""
+ when: not skip_role_on_error
diff --git a/roles/captiveportal/README.md b/roles/captiveportal/README.md
index 11736aadd..ddacbe311 100644
--- a/roles/captiveportal/README.md
+++ b/roles/captiveportal/README.md
@@ -1,4 +1,4 @@
-_Please Also See: http://FAQ.IIAB.IO > ["Captive Portal Administration: What tips & tricks exist?"](http://wiki.laptop.org/go/IIAB/FAQ#Captive_Portal_Administration:_What_tips_.26_tricks_exist.3F)_
+_Please Also See: http://FAQ.IIAB.IO > ["Captive Portal Administration: What tips & tricks exist?"](https://wiki.iiab.io/go/FAQ#Captive_Portal_Administration:_What_tips_&_tricks_exist%3F)_
## Theory of Operation
diff --git a/roles/captiveportal/tasks/install.yml b/roles/captiveportal/tasks/install.yml
index af022f0e9..b76ad39b0 100644
--- a/roles/captiveportal/tasks/install.yml
+++ b/roles/captiveportal/tasks/install.yml
@@ -1,3 +1,8 @@
+- name: Record (initial) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df1
+
+
- name: "Install packages: python3-dateutil, python3-jinja2"
package:
name:
@@ -26,7 +31,7 @@
mode: "{{ item.mode }}"
with_items:
- { src: roles/captiveportal/templates/checkurls, dest: /opt/iiab/captiveportal/, mode: '0644' }
- - { src: roles/captiveportal/templates/iiab-divert-to-nginx, dest: /usr/sbin/, mode: '0755' }
+ - { src: roles/captiveportal/templates/iiab-divert-to-nginx.j2, dest: /usr/sbin/iiab-divert-to-nginx, mode: '0755' }
- { src: roles/captiveportal/templates/iiab-make-cp-servers.py, dest: /usr/sbin/, mode: '0755' }
- name: Install /opt/iiab/captiveportal/capture-wsgi.py from template, mode '0755' (creates the server)
@@ -51,6 +56,17 @@
# RECORD Captive Portal AS INSTALLED
+- name: Record (final) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df2
+
+- name: Add 'captiveportal_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
+ ini_file:
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
+ section: captiveportal
+ option: captiveportal_disk_usage
+ value: "{{ df2.stdout|int - df1.stdout|int }}"
+
- name: "Set 'captiveportal_installed: True'"
set_fact:
captiveportal_installed: True
diff --git a/roles/captiveportal/tasks/main.yml b/roles/captiveportal/tasks/main.yml
index 0b3408b75..bd24b7186 100644
--- a/roles/captiveportal/tasks/main.yml
+++ b/roles/captiveportal/tasks/main.yml
@@ -19,27 +19,33 @@
quiet: yes
-- name: Install Captive Portal if 'captiveportal_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
- include_tasks: install.yml
- when: captiveportal_installed is undefined
+- block:
+ - name: Install Captive Portal if 'captiveportal_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
+ include_tasks: install.yml
+ when: captiveportal_installed is undefined
-- name: Enable or Disable Captive Portal
- include_tasks: enable-or-disable.yml
+ - include_tasks: enable-or-disable.yml
+ - name: Add 'captiveportal' variable values to {{ iiab_ini_file }}
+ ini_file:
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
+ section: captiveportal
+ option: "{{ item.option }}"
+ value: "{{ item.value | string }}"
+ with_items:
+ - option: name
+ value: Captive Portal
+ - option: description
+ value: '"Captive Portal tries to open the browser automatically, so users don''t have to type in URL''s like http://box.lan in support of kiosk-like situations, in multilingual and less literate communities."'
+ - option: captiveportal_install
+ value: "{{ captiveportal_install }}"
+ - option: captiveportal_enabled
+ value: "{{ captiveportal_enabled }}"
-- name: Add 'captiveportal' variable values to {{ iiab_ini_file }}
- ini_file:
- path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
- section: captiveportal
- option: "{{ item.option }}"
- value: "{{ item.value | string }}"
- with_items:
- - option: name
- value: Captive Portal
- - option: description
- value: '"Captive Portal tries to open the browser automatically, so users don''t have to type in URL''s like http://box.lan in support of kiosk-like situations, in multilingual and less literate communities."'
- - option: captiveportal_install
- value: "{{ captiveportal_install }}"
- - option: captiveportal_enabled
- value: "{{ captiveportal_enabled }}"
+ rescue:
+
+ - name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
+ fail:
+ msg: ""
+ when: not skip_role_on_error
diff --git a/roles/captiveportal/templates/iiab-divert-to-nginx b/roles/captiveportal/templates/iiab-divert-to-nginx.j2
similarity index 63%
rename from roles/captiveportal/templates/iiab-divert-to-nginx
rename to roles/captiveportal/templates/iiab-divert-to-nginx.j2
index c708de87a..8d6d06b2e 100755
--- a/roles/captiveportal/templates/iiab-divert-to-nginx
+++ b/roles/captiveportal/templates/iiab-divert-to-nginx.j2
@@ -1,4 +1,4 @@
#!/bin/bash -x
-awk '{print("address=/" $1 "/172.18.96.1")}' /opt/iiab/captiveportal/checkurls > /etc/dnsmasq.d/capture
+awk '{print("address=/" $1 "/{{ lan_ip }}")}' /opt/iiab/captiveportal/checkurls > /etc/dnsmasq.d/capture
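+# e.g. a checkurls entry "example.com" (hypothetical) with lan_ip 10.10.10.10 becomes:
+#   address=/example.com/10.10.10.10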
echo "#following tells windows 7 that captive portal is active" >> /etc/dnsmasq.d/capture
echo "address=/dns.msftncsi.com/131.107.255.255" >> /etc/dnsmasq.d/capture
diff --git a/roles/cups/README.md b/roles/cups/README.md
index 493673715..cd727be5f 100644
--- a/roles/cups/README.md
+++ b/roles/cups/README.md
@@ -2,13 +2,13 @@
[CUPS](https://en.wikipedia.org/wiki/CUPS) (also known as the "Common UNIX Printing System") is the standards-based, open source printing system for Linux and macOS.
-It allows your [Internet-in-a-Box (IIAB)](http://internet-in-a-box.org) to act as a print server.
+It allows your [Internet-in-a-Box (IIAB)](https://internet-in-a-box.org) to act as a print server.
This can be useful if a printer is attached to your IIAB — so student/teacher print jobs from client computers and phones can be processed — and then sent to the appropriate printer.
## Using it
-Make sure your IIAB was installed with these 2 lines in [/etc/iiab/local_vars.yml](http://faq.iiab.io/#What_is_local_vars.yml_and_how_do_I_customize_it.3F) :
+Make sure your IIAB was installed with these 2 lines in [/etc/iiab/local_vars.yml](http://faq.iiab.io/#What_is_local_vars.yml_and_how_do_I_customize_it%3F) :
```
cups_install: True
diff --git a/roles/cups/tasks/install.yml b/roles/cups/tasks/install.yml
index ea073de16..55209a50b 100644
--- a/roles/cups/tasks/install.yml
+++ b/roles/cups/tasks/install.yml
@@ -2,6 +2,11 @@
# (OR ANY MEMBER OF LINUX GROUP 'lpadmin') AS SET UP BELOW...
+- name: Record (initial) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df1
+
+
- name: Install 'cups' package
package:
name: cups
@@ -49,19 +54,34 @@
blockinfile:
path: /etc/cups/cupsd.conf
insertafter: '^$'
- block: |2 # Indent with 2 spaces, and surround block with 2 comment lines: "# BEGIN ANSIBLE MANAGED BLOCK", "# END ANSIBLE MANAGED BLOCK"
+ block: |2 # |n MEANS: Set the block's left edge n CHARACTERS TO THE RIGHT of *this line's* indentation -- where n is {1..9} -- instead of setting its left edge to the 1st non-blank line's indentation below. Also surround block with comment lines: "# BEGIN ANSIBLE MANAGED BLOCK", "# END ANSIBLE MANAGED BLOCK"
AuthType Default
Require user @SYSTEM
-- name: "CUPS web administration: Create Linux username 'Admin' with password 'changeme' in Linux group 'lpadmin' (shell: /usr/sbin/nologin, create_home: no)"
+- name: "CUPS web administration: Create Linux username 'Admin' in Linux group 'lpadmin' (shell: /usr/sbin/nologin, create_home: no)"
user:
name: Admin
append: yes # Don't clobber other groups, that other IIAB Apps might need.
groups: lpadmin
- password: "{{ 'changeme' | password_hash('sha512') }}" # Random salt. Presumably runs 5000 rounds of SHA-512 per /etc/login.defs & /etc/pam.d/common-password -- https://docs.ansible.com/ansible/latest/user_guide/playbooks_filters.html#encrypting-and-checksumming-strings-and-passwords
+ #password: "{{ 'changeme' | password_hash('sha512') }}" # Random salt. Presumably runs 5000 rounds of SHA-512 per /etc/login.defs & /etc/pam.d/common-password -- https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_filters.html#hashing-and-encrypting-strings-and-passwords
create_home: no
shell: /usr/sbin/nologin # Debian/Ubuntu norm -- instead of /sbin/nologin, /bin/false
+# 2024-05-01: Above password-setting approach no longer works w/ Ansible 2.17 RC1 (#3727).
+# Ansible STOPS with this error...
+#
+# "[DEPRECATION WARNING]: Encryption using the Python crypt module is deprecated. The Python crypt module is
+# deprecated and will be removed from Python 3.13. Install the passlib library for continued encryption
+# functionality. This feature will be removed in version 2.17. Deprecation warnings can be disabled by
+# setting deprecation_warnings=False in ansible.cfg."
+#
+# ...so we instead use Linux's "chpasswd" command (below!)
+
+- name: Use chpasswd to set Linux username 'Admin' password to 'changeme'
+ command: chpasswd
+ args:
+ stdin: Admin:changeme
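+ # (Same idea from a shell, should the password ever need changing by hand:
+ #  echo 'Admin:newpassword' | sudo chpasswd )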
+
# - name: Add user '{{ iiab_admin_user }}' to Linux group 'lpadmin' -- for CUPS web administration (or modify default 'SystemGroup lpadmin' in /etc/cups/cups-files.conf -- in coordination with ~14 -> ~15 '@SYSTEM' lines in /etc/cups/cupsd.conf)
# #command: "gpasswd -a {{ iiab_admin_user | quote }} lpadmin"
# #command: "gpasswd -d {{ iiab_admin_user | quote }} lpadmin"
@@ -76,14 +96,14 @@
name: cups
state: started
-# - name: "Authorize Nearby IP Addresses: Run 'cupsctl --remote-admin --share-printers --user-cancel-any' to enable http://192.168.0.x:631 AND http://172.18.96.1:631 (if cups_enabled) -- REPEATED USE OF 'cupsctl' COMMANDS CAN *DAMAGE* /etc/cups/cupsd.conf BY ADDING DUPLICATE LINES (AND WORSE!) -- SO PLEASE ALSO MANUALLY RUN 'sudo cupsctl' AND 'sudo cupsd -t' TO VERIFY /etc/cups/cupsd.conf"
+# - name: "Authorize Nearby IP Addresses: Run 'cupsctl --remote-admin --share-printers --user-cancel-any' to enable http://192.168.0.x:631 AND http://{{ lan_ip }}:631 (if cups_enabled) -- REPEATED USE OF 'cupsctl' COMMANDS CAN *DAMAGE* /etc/cups/cupsd.conf BY ADDING DUPLICATE LINES (AND WORSE!) -- SO PLEASE ALSO MANUALLY RUN 'sudo cupsctl' AND 'sudo cupsd -t' TO VERIFY /etc/cups/cupsd.conf"
# command: cupsctl --remote-admin --share-printers --user-cancel-any
# 2021-07-11: BOTH FLAGS *CANNOT* BE USED TOGETHER -- CHOOSE ONE OR THE OTHER:
# (1) '--remote-admin' AS ABOVE, OR (2) '--remote-any' AS BELOW.
# (RUN 'cupsctl' WITHOUT PARAMETERS TO CONFIRM THIS!)
-- name: "Authorize All IP Addresses: Run 'cupsctl --remote-any --share-printers --user-cancel-any' to enable http://192.168.0.x:631 AND http://172.18.96.1:631 AND http://10.8.0.y:631 (if cups_enabled) -- REPEATED USE OF 'cupsctl' COMMANDS CAN *DAMAGE* /etc/cups/cupsd.conf BY ADDING DUPLICATE LINES (AND WORSE!) -- SO PLEASE ALSO MANUALLY RUN 'sudo cupsctl' AND 'sudo cupsd -t' TO VERIFY /etc/cups/cupsd.conf"
+- name: "Authorize All IP Addresses: Run 'cupsctl --remote-any --share-printers --user-cancel-any' to enable http://192.168.0.x:631 AND http://{{ lan_ip }}:631 AND http://10.8.0.y:631 (if cups_enabled) -- REPEATED USE OF 'cupsctl' COMMANDS CAN *DAMAGE* /etc/cups/cupsd.conf BY ADDING DUPLICATE LINES (AND WORSE!) -- SO PLEASE ALSO MANUALLY RUN 'sudo cupsctl' AND 'sudo cupsd -t' TO VERIFY /etc/cups/cupsd.conf"
command: cupsctl --remote-any --share-printers --user-cancel-any
# 2021-07-11: In theory 'cupsctl' stanzas could be put in enable-or-disable.yml
@@ -96,7 +116,7 @@
# command: cupsctl --no-remote-admin --no-remote-any --no-share-printers --no-user-cancel-any --no-debug-logging
# when: not cups_enabled
-# - name: "2021-07-14: EXPERIMENTALLY ADD DIRECTIVES TO /etc/cups/cupsd.conf followed by 'systemctl restart cups'. As should no longer be nec thanks to NEW cups/templates/cups.conf for /etc/nginx/conf.d/cups.conf (followed by 'systemctl restart nginx'). Which FIXED URL'S LIKE: http://box/print, http://box.lan/print, http://192.168.0.x/print, http://172.18.96.1/print and http://10.8.0.x/print (WITH OR WITHOUT THE TRAILING SLASH!) RECAP: (1) So be it that these 2 URL'S STILL DON'T WORK: http://box:631, http://box.lan:631 (due to CUPS' internal web server's overly stringent hostname checks, i.e. '400 Bad Request' and 'Request from \"localhost\" using invalid Host: field \"box[.lan]:631\".' in /var/log/cups/error_log) -- (2) While these 2 URL'S STILL DO WORK: http://localhost:631, http://127.0.0.1:631 -- (3) Whereas these 3 URL'S MAY WORK, DEPENDING ON 'cupsctl' COMMAND(S) ABOVE: http://192.168.0.x:631, http://172.18.96.1:631, http://10.8.0.x:631"
+# - name: "2021-07-14: EXPERIMENTALLY ADD DIRECTIVES TO /etc/cups/cupsd.conf followed by 'systemctl restart cups'. As should no longer be nec thanks to NEW cups/templates/cups.conf for /etc/nginx/conf.d/cups.conf (followed by 'systemctl restart nginx'). Which FIXED URL'S LIKE: http://box/print, http://box.lan/print, http://192.168.0.x/print, http://{{ lan_ip }}/print and http://10.8.0.x/print (WITH OR WITHOUT THE TRAILING SLASH!) RECAP: (1) So be it that these 2 URL'S STILL DON'T WORK: http://box:631, http://box.lan:631 (due to CUPS' internal web server's overly stringent hostname checks, i.e. '400 Bad Request' and 'Request from \"localhost\" using invalid Host: field \"box[.lan]:631\".' in /var/log/cups/error_log) -- (2) While these 2 URL'S STILL DO WORK: http://localhost:631, http://127.0.0.1:631 -- (3) Whereas these 3 URL'S MAY WORK, DEPENDING ON 'cupsctl' COMMAND(S) ABOVE: http://192.168.0.x:631, http://{{ lan_ip }}:631, http://10.8.0.x:631"
# lineinfile:
# path: /etc/cups/cupsd.conf
# line: "{{ item }}"
@@ -105,7 +125,7 @@
# - "HostNameLookups On" # More False Leads: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=530027
# - "ServerAlias *"
# - "#ServerName {{ iiab_hostname }}.{{ iiab_domain }}" # box.lan
-# - "#Listen {{ lan_ip }}:631" # 172.18.96.1
+# - "#Listen {{ lan_ip }}:631" # e.g. 10.10.10.10
# - "#Listen 127.0.0.1:631"
# - "#Listen 0.0.0.0:631"
# - "#Listen *:631"
@@ -124,6 +144,17 @@
# RECORD CUPS AS INSTALLED
+- name: Record (final) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df2
+
+- name: Add 'cups_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
+ ini_file:
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
+ section: cups
+ option: cups_disk_usage
+ value: "{{ df2.stdout|int - df1.stdout|int }}"
+
- name: "Set 'cups_installed: True'"
set_fact:
cups_installed: True
diff --git a/roles/cups/tasks/main.yml b/roles/cups/tasks/main.yml
index 2c9531814..a709ac090 100644
--- a/roles/cups/tasks/main.yml
+++ b/roles/cups/tasks/main.yml
@@ -23,26 +23,33 @@
quiet: yes
-- name: Install CUPS if 'cups_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
- include_tasks: install.yml
- when: cups_installed is undefined
+- block:
+ - name: Install CUPS if 'cups_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
+ include_tasks: install.yml
+ when: cups_installed is undefined
-- include_tasks: enable-or-disable.yml
+ - include_tasks: enable-or-disable.yml
+ - name: Add 'cups' variable values to {{ iiab_ini_file }}
+ ini_file:
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
+ section: cups
+ option: "{{ item.option }}"
+ value: "{{ item.value | string }}"
+ with_items:
+ - option: name
+ value: CUPS
+ - option: description
+ value: '"CUPS (Common UNIX Printing System) is a modular printing system that allows a computer to act as a print server. A computer running CUPS is a host that can accept print jobs from client computers, process them, and send them to the appropriate printer."'
+ - option: cups_install
+ value: "{{ cups_install }}"
+ - option: cups_enabled
+ value: "{{ cups_enabled }}"
-- name: Add 'cups' variable values to {{ iiab_ini_file }}
- ini_file:
- path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
- section: cups
- option: "{{ item.option }}"
- value: "{{ item.value | string }}"
- with_items:
- - option: name
- value: CUPS
- - option: description
- value: '"CUPS (Common UNIX Printing System) is a modular printing system that allows a computer to act as a print server. A computer running CUPS is a host that can accept print jobs from client computers, process them, and send them to the appropriate printer."'
- - option: cups_install
- value: "{{ cups_install }}"
- - option: cups_enabled
- value: "{{ cups_enabled }}"
+ rescue:
+
+ - name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
+ fail:
+ msg: ""
+ when: not skip_role_on_error
diff --git a/roles/cups/templates/cups.conf.j2 b/roles/cups/templates/cups.conf.j2
index abec5152d..3d4f4f53f 100644
--- a/roles/cups/templates/cups.conf.j2
+++ b/roles/cups/templates/cups.conf.j2
@@ -21,7 +21,7 @@ location ~ ^/print(|/.*)$ { # '~' -> '~*' for case-insensitive regex
return 301 http://localhost:631;
}
- return 301 http://$host:631; # For 192.168.0.x, 172.18.96.1, 10.8.0.y ETC
+ return 301 http://$host:631; # For 192.168.0.x, 10.10.10.10, 172.18.96.1, 10.8.0.y ETC
}
diff --git a/roles/firmware/tasks/download.yml b/roles/firmware/tasks/download.yml
index d35147013..abda2e6b4 100644
--- a/roles/firmware/tasks/download.yml
+++ b/roles/firmware/tasks/download.yml
@@ -1,22 +1,48 @@
-- name: Back up original e.g. OS-provided firmware (for RPi internal WiFi)
- copy:
- src: "/lib/firmware/brcm/{{ item }}"
- dest: "/lib/firmware/brcm/{{ item }}.orig"
- with_items:
- - brcmfmac43430-sdio.bin
- - brcmfmac43455-sdio.bin
- - brcmfmac43455-sdio.clm_blob
+# 2023-02-25: MONITOR FIRMWARE UPDATES in 3 places especially...
+#
+# 1. apt changelog firmware-brcm80211
+# https://github.com/RPi-Distro/firmware-nonfree -> debian/config/brcm80211 (brcm, cypress)
+# https://archive.raspberrypi.org/debian/dists/bullseye/main/binary-arm64/Packages (1.1MB text file, look inside for summary of latest firmware-brcm80211)
+# https://archive.raspberrypi.org/debian/pool/main/f/firmware-nonfree/ -> firmware-brcm80211_* e.g.:
+# https://archive.raspberrypi.org/debian/pool/main/f/firmware-nonfree/firmware-brcm80211_20190114-1+rpt11_all.deb from 2021-01-25
+# https://archive.raspberrypi.org/debian/pool/main/f/firmware-nonfree/firmware-brcm80211_20210315-3+rpt4_all.deb from 2021-12-06
+# https://archive.raspberrypi.org/debian/pool/main/f/firmware-nonfree/firmware-brcm80211_20221012-1~bpo11+1+rpt1_all.deb from 2022-11-17
+# 2. apt changelog linux-firmware-raspi
+# https://packages.ubuntu.com/search?keywords=linux-firmware-raspi
+# 3. https://github.com/moodlebox/moodlebox/blob/main/roles/accesspoint/tasks/main.yml
-- name: Download high-capacity older firmware (for RPi internal WiFi, per https://github.com/iiab/iiab/issues/823#issuecomment-662285202)
+#- name: Back up 4 OS-provided WiFi firmware files (incl symlink contents) to /lib/firmware/cypress/*.orig
+- name: Back up 4 OS-provided WiFi firmware files (replicate any symlinks) to /lib/firmware/cypress/*.orig -- /usr/bin/iiab-check-firmware will later do similar (e.g. as firmware install completes) -- moving 2-or-4 of these to .YYYY-MM-DD-HH:MM:SS ("doubly timestamping" to preserve BOTH last-modif & moving date)
+ # copy:
+ # src: /lib/firmware/cypress/{{ item }}
+ # dest: /lib/firmware/cypress/{{ item }}.orig
+ # #local_follow: False # FAILS TO PRESERVE LINKS (ansible/ansible#74777) e.g. /lib/firmware/cypress/cyfmac43455-sdio.bin -> /etc/alternatives/cyfmac43455-sdio.bin -> ...
+ # 2023-05-01 CLARIF OF BELOW:
+ # 1) Even if 'mv' fails, no matter: it'll continue to 'cp' below
+ # 2) 'cp -P' == 'cp --no-dereference' sufficient to replicate these symlinks and files ('cp -d' & 'cp -a' are incrementally stronger, and so probably can't hurt)
+ shell: |
+ mv /lib/firmware/cypress/{{ item }}.orig /lib/firmware/cypress/{{ item }}.orig.$(date +%F-%T)
+ cp -a /lib/firmware/cypress/{{ item }} /lib/firmware/cypress/{{ item }}.orig
+ with_items:
+ - cyfmac43430-sdio.bin
+ - cyfmac43430-sdio.clm_blob
+ - cyfmac43455-sdio.bin
+ - cyfmac43455-sdio.clm_blob
+ #ignore_errors: yes # 2023-02-25: Let's INTENTIONALLY surface any errors, e.g. if any future RasPiOS or Ubuntu-on-Rpi lack some of the above 4 files/links?
+
+- name: Download higher-capacity firmwares (for RPi internal WiFi, per https://github.com/iiab/iiab/issues/823#issuecomment-662285202 and https://github.com/iiab/iiab/issues/2853)
get_url:
- url: "{{ item.url }}"
- dest: "{{ item.dest }}"
+ url: "{{ iiab_download_url }}/{{ item }}"
+ dest: /lib/firmware/cypress/
timeout: "{{ download_timeout }}"
with_items:
- - { url: 'http://d.iiab.io/packages/brcmfmac43430-sdio.bin_2018-09-11_7.45.98.65', dest: '/lib/firmware/brcm/brcmfmac43430-sdio.bin.iiab' }
- - { url: 'http://d.iiab.io/packages/brcmfmac43430-sdio.clm_blob_2018-09-11_7.45.98.65', dest: '/lib/firmware/brcm/brcmfmac43430-sdio.clm_blob.iiab' }
- - { url: 'http://d.iiab.io/packages/brcmfmac43455-sdio.bin_2015-03-01_7.45.18.0_ub19.10.1', dest: '/lib/firmware/brcm/brcmfmac43455-sdio.bin.iiab' }
- - { url: 'http://d.iiab.io/packages/brcmfmac43455-sdio.clm_blob_2018-02-26_rpi', dest: '/lib/firmware/brcm/brcmfmac43455-sdio.clm_blob.iiab' }
+ - brcmfmac43455-sdio.bin_2021-11-30_minimal # 19 -- SAME AS RASPIOS & UBUNTU'S https://github.com/RPi-Distro/firmware-nonfree/blob/feeeda21e930c2e182484e8e1269b61cca2a8451/debian/config/brcm80211/cypress/cyfmac43455-sdio-minimal.bin
+ - brcmfmac43455-sdio.bin_2021-10-05_3rd-trial-minimal # 24 -- from https://github.com/iiab/iiab/issues/2853#issuecomment-934293015
+ - brcmfmac43455-sdio.clm_blob_2021-11-17_rpi # Works w/ both above -- SAME AS RASPIOS & UBUNTU'S https://github.com/RPi-Distro/firmware-nonfree/blob/dc406650e840705957f8403efeacf71d2d7543b3/debian/config/brcm80211/cypress/cyfmac43455-sdio.clm_blob
+ - brcmfmac43455-sdio.bin_2015-03-01_7.45.18.0_ub19.10.1 # 32 -- from https://github.com/iiab/iiab/issues/823#issuecomment-662285202
+ - brcmfmac43455-sdio.clm_blob_2018-02-26_rpi
+ - brcmfmac43430-sdio.bin_2018-09-11_7.45.98.65 # 30 -- from https://github.com/iiab/iiab/issues/823#issuecomment-662285202
+ - brcmfmac43430-sdio.clm_blob_2018-09-11_7.45.98.65
# RECORD firmware AS DOWNLOADED
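The backup task above pairs 'mv' with 'cp -a' so that symlinks under /lib/firmware/cypress are replicated as symlinks rather than flattened into copies of their targets. A minimal sketch of that behavior, using a throwaway directory instead of the real firmware paths:

    #!/bin/bash
    # Sketch only: contrast plain 'cp' (dereferences the link) with 'cp -a' (replicates it).
    d=$(mktemp -d)
    echo "firmware bytes" > "$d/real.bin"
    ln -s real.bin "$d/alias.bin"                       # alias.bin -> real.bin (like the cyfmac* symlinks)
    cp    "$d/alias.bin" "$d/alias.copy"                # regular file: the link is lost
    cp -a "$d/alias.bin" "$d/alias.orig"                # still a symlink, as the task above relies on
    mv "$d/alias.orig" "$d/alias.orig.$(date +%F-%T)"   # timestamped rename, as the task does for any pre-existing *.orig
    ls -l "$d"
    rm -rf "$d"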
diff --git a/roles/firmware/tasks/install.yml b/roles/firmware/tasks/install.yml
index ce7004ea2..4f323ca72 100644
--- a/roles/firmware/tasks/install.yml
+++ b/roles/firmware/tasks/install.yml
@@ -2,6 +2,75 @@
include_tasks: download.yml
when: firmware_downloaded is undefined # SEE ALSO firmware_installed below
+
+# Set 2 symlinks for RPi 3 B+ and 4 (43455)
+# COMPARE: update-alternatives --display cyfmac43455-sdio.bin
+# https://github.com/moodlebox/moodlebox/blob/main/roles/accesspoint/tasks/main.yml#L3-L6
+
+- name: Populate rpi3bplus_rpi4_wifi_firmwares dictionary (lookup table for operator-chosen .bin and .clm_blob files in /lib/firmware/cypress)
+ set_fact:
+ rpi3bplus_rpi4_wifi_firmwares: # Dictionary keys (left side) are always strings, e.g. "19"
+ os:
+ - cyfmac43455-sdio.bin.orig # 2023-02-25: 7.45.241 from 2021-11-01 on Ubuntu 22.04.2 too (cyfmac43455-sdio-standard.bin)
+ - cyfmac43455-sdio.clm_blob.orig # On Ubuntu 22.04.2 too (brcmfmac43455-sdio.clm_blob_2021-11-17_rpi)
+ ub:
+ - cyfmac43455-sdio.bin.distrib # 2023-02-25: STALE 7.45.234 from 2021-04-15; on Ubuntu 22.04.2 NOT RasPiOS
+ - cyfmac43455-sdio.clm_blob.distrib # 4.7K instead of 2.7K w/ above "os"
+ 19:
+ - brcmfmac43455-sdio.bin_2021-11-30_minimal # On Ubuntu 22.04.2 too (cyfmac43455-sdio-minimal.bin)
+ - brcmfmac43455-sdio.clm_blob_2021-11-17_rpi # On Ubuntu 22.04.2 too (cyfmac43455-sdio.clm_blob)
+ 24:
+ - brcmfmac43455-sdio.bin_2021-10-05_3rd-trial-minimal
+ - brcmfmac43455-sdio.clm_blob_2021-11-17_rpi # On Ubuntu 22.04.2 too (cyfmac43455-sdio.clm_blob)
+ 32:
+ - brcmfmac43455-sdio.bin_2015-03-01_7.45.18.0_ub19.10.1
+ - brcmfmac43455-sdio.clm_blob_2018-02-26_rpi # 14K instead of 2.7K w/ above "os"
+
+- name: Symlink /lib/firmware/cypress/cyfmac43455-sdio.bin.iiab -> {{ rpi3bplus_rpi4_wifi_firmwares[rpi3bplus_rpi4_wifi_firmware][0] }} (as rpi3bplus_rpi4_wifi_firmware is "{{ rpi3bplus_rpi4_wifi_firmware }}")
+ file:
+ src: "{{ rpi3bplus_rpi4_wifi_firmwares[rpi3bplus_rpi4_wifi_firmware][0] }}"
+ path: /lib/firmware/cypress/cyfmac43455-sdio.bin.iiab
+ state: link
+ force: yes
+
+- name: Symlink /lib/firmware/cypress/cyfmac43455-sdio.clm_blob.iiab -> {{ rpi3bplus_rpi4_wifi_firmwares[rpi3bplus_rpi4_wifi_firmware][1] }} (as rpi3bplus_rpi4_wifi_firmware is "{{ rpi3bplus_rpi4_wifi_firmware }}")
+ file:
+ src: "{{ rpi3bplus_rpi4_wifi_firmwares[rpi3bplus_rpi4_wifi_firmware][1] }}"
+ path: /lib/firmware/cypress/cyfmac43455-sdio.clm_blob.iiab
+ state: link
+ force: yes
+
+
+# Set 2 symlinks for RPi Zero W and 3 (43430)
+
+- name: Populate rpizerow_rpi3_wifi_firmwares dictionary (lookup table for operator-chosen .bin and .clm_blob files in /lib/firmware/cypress)
+ set_fact:
+ rpizerow_rpi3_wifi_firmwares:
+ os:
+ - cyfmac43430-sdio.bin.orig # 2023-02-25: 7.45.98 from 2021-07-19 on Ubuntu 22.04.2 too
+ - cyfmac43430-sdio.clm_blob.orig # On Ubuntu 22.04.2 too
+ ub:
+ - cyfmac43430-sdio.bin.distrib # 2023-02-25: STALE 7.45.98.118 from 2021-03-30; on Ubuntu 22.04.2 NOT RasPiOS
+ - cyfmac43430-sdio.clm_blob.distrib # Identical to above 4.7K cyfmac43430-sdio.clm_blob
+ 30:
+ - brcmfmac43430-sdio.bin_2018-09-11_7.45.98.65
+ - brcmfmac43430-sdio.clm_blob_2018-09-11_7.45.98.65 # 14K instead of 4.7K w/ above "os" & "ub"
+
+- name: Symlink /lib/firmware/cypress/cyfmac43430-sdio.bin.iiab -> {{ rpizerow_rpi3_wifi_firmwares[rpizerow_rpi3_wifi_firmware][0] }} (as rpizerow_rpi3_wifi_firmware is "{{ rpizerow_rpi3_wifi_firmware }}")
+ file:
+ src: "{{ rpizerow_rpi3_wifi_firmwares[rpizerow_rpi3_wifi_firmware][0] }}"
+ path: /lib/firmware/cypress/cyfmac43430-sdio.bin.iiab
+ state: link
+ force: yes
+
+- name: Symlink /lib/firmware/cypress/cyfmac43430-sdio.clm_blob.iiab -> {{ rpizerow_rpi3_wifi_firmwares[rpizerow_rpi3_wifi_firmware][1] }} (as rpizerow_rpi3_wifi_firmware is "{{ rpizerow_rpi3_wifi_firmware }}")
+ file:
+ src: "{{ rpizerow_rpi3_wifi_firmwares[rpizerow_rpi3_wifi_firmware][1] }}"
+ path: /lib/firmware/cypress/cyfmac43430-sdio.clm_blob.iiab
+ state: link
+ force: yes
+
+
- name: 'Install from template: /usr/bin/iiab-check-firmware, /etc/systemd/system/iiab-check-firmware.service & /etc/profile.d/iiab-firmware-warn.sh'
template:
src: "{{ item.src }}"
@@ -12,7 +81,7 @@
- { src: 'iiab-check-firmware.service', dest: '/etc/systemd/system/', mode: '0644' }
- { src: 'iiab-firmware-warn.sh', dest: '/etc/profile.d/', mode: '0644' }
-- name: Enable & (Re)Start iiab-check-firmware.service (also runs on each boot)
+- name: Enable & (Re)Start iiab-check-firmware.service (also runs on each boot) -- finalizing 2-or-4 symlink chains e.g. /lib/firmware/cypress/X.{bin|blob} -> /lib/firmware/cypress/X.{bin|blob}.iiab -> CHOSEN-FIRMWARE-FILE-OR-LINK
systemd:
name: iiab-check-firmware.service
daemon_reload: yes
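After the iiab-check-firmware.service restart above finalizes the chains, each managed firmware name should resolve through two links to the operator-chosen file. A read-only way to confirm what was actually selected -- a sketch; swap in the cyfmac43430-* names for RPi Zero W / 3:

    #!/bin/bash
    # Sketch: print each hop of the 43455 symlink chains, then the fully-resolved target.
    for f in cyfmac43455-sdio.bin cyfmac43455-sdio.clm_blob; do
        echo "== $f =="
        ls -l /lib/firmware/cypress/$f /lib/firmware/cypress/$f.iiab
        readlink -f /lib/firmware/cypress/$f
    done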
diff --git a/roles/firmware/tasks/main.yml b/roles/firmware/tasks/main.yml
index a199f2630..dfd094acb 100644
--- a/roles/firmware/tasks/main.yml
+++ b/roles/firmware/tasks/main.yml
@@ -1,14 +1,30 @@
-# Please set 'wifi_hotspot_capacity_rpi_fix: True' in /etc/iiab/local_vars.yml
-# to restore support for 30-32 WiFi client devices on most Raspberry Pis that
-# have internal WiFi. This installs firmware 7.45.98.65 for Zero W and RPi 3
-# and firmware 7.45.18.0 for RPi 3 B+ and RPi 4. Capacity testing writeup:
-# https://github.com/iiab/iiab/issues/823#issuecomment-662285202
+# Plz set 'rpi3bplus_rpi4_wifi_firmware' and 'rpizerow_rpi3_wifi_firmware' in
+# /etc/iiab/local_vars.yml to increase (or modify) the number of student WiFi
+# client devices that can access your Raspberry Pi's internal WiFi hotspot.
+
+# If IIAB's already installed, you should then run 'cd /opt/iiab/iiab' and
+# then 'sudo ./runrole firmware' (DO RUN iiab-check-firmware FOR MORE TIPS!)
+
+# 2018-2023 Background & Progress:
+#
+# Raspberry Pi 3 used to support 32 WiFi connections but is now limited to [4-10]
+# https://github.com/iiab/iiab/issues/823#issuecomment-662285202
+# Opinions about Pi 4B/3B+ WiFi features [practical AP firmware for schools!]
+# https://github.com/iiab/iiab/issues/2853#issuecomment-957836892
+# RPi WiFi hotspot firmware reliability fix, incl new/better choices for 3B+ & 4
+# https://github.com/iiab/iiab/pull/3103
+# Set WiFi firmware in /lib/firmware/cypress due to RasPiOS & Ubuntu changes
+# https://github.com/iiab/iiab/pull/3482
+# RISK: What USB 3.0 stick/drive patterns degrade a Raspberry Pi's 2.4GHz WiFi?
+# https://github.com/iiab/iiab/issues/2638
+
+# SEE "MONITOR FIRMWARE UPDATES in 3 places especially" in tasks/download.yml
- name: Install firmware (for RPi internal WiFi)
include_tasks: install.yml
- #when: firmware_installed is undefined
+ when: firmware_installed is undefined
-# Two variable are placed in /etc/iiab/iiab_state.yml:
+# Two variables are placed in /etc/iiab/iiab_state.yml:
#
# - firmware_downloaded (set in download.yml) is used in install.yml
#
diff --git a/roles/firmware/templates/iiab-check-firmware b/roles/firmware/templates/iiab-check-firmware
index aeda2366e..f10cd752b 100644
--- a/roles/firmware/templates/iiab-check-firmware
+++ b/roles/firmware/templates/iiab-check-firmware
@@ -1,66 +1,71 @@
#!/bin/bash
-WARN=0
-DATE=$(date +%F-%T)
+# The 1st time /usr/bin/iiab-check-firmware runs (at the end of
+# firmware/tasks/install.yml) 2-4 lynchpin top links are put in place,
+# finalizing symlink chains like:
+#
+# /lib/firmware/cypress/X.{bin|blob} ->
+# /lib/firmware/cypress/X.{bin|blob}.iiab ->
+# CHOSEN-FIRMWARE-FILE-OR-LINK
+#
+# Also backing up top-of-chain originals (file or link!) by moving these to:
+#
+# /lib/firmware/cypress/X.{bin|blob}.YYYY-MM-DD-HH:MM:SS
+#
+# NOTE these are "doubly timestamped" to preserve BOTH last-modif & moving date.
-# 2021-08-18: bash scripts using default_vars.yml &/or local_vars.yml
+# 2023-02-25: bash scripts using default_vars.yml &/or local_vars.yml
# https://github.com/iiab/iiab-factory/blob/master/iiab
-# https://github.com/iiab/iiab/blob/master/roles/firmware/templates/iiab-check-firmware#L13
+# https://github.com/iiab/iiab/blob/master/roles/firmware/templates/iiab-check-firmware#L10-14
# https://github.com/iiab/iiab/blob/master/roles/network/templates/gateway/iiab-gen-iptables#L48-L52
-# https://github.com/iiab/maps/blob/master/osm-source/pages/viewer/scripts/iiab-install-map-region#L25-L34
-# https://github.com/iiab/iiab/blob/master/roles/openvpn/templates/iiab-support READS AND WRITES, INCL NON-BOOLEAN
+# https://github.com/iiab/maps/blob/master/osm-source/pages/viewer/scripts/iiab-install-map-region#L23-L39
+# https://github.com/iiab/iiab/blob/master/roles/0-DEPRECATED-ROLES/openvpn/templates/iiab-support READS AND WRITES, INCL NON-BOOLEAN
-if grep -q '^wifi_hotspot_capacity_rpi_fix:\s\+[fF]alse\b' /etc/iiab/local_vars.yml ; then
- echo "'wifi_hotspot_capacity_rpi_fix: False' found in /etc/iiab/local_vars.yml"
- echo "...so WiFi firmware will NOT be checked or replaced."
+iiab_var_value() {
+ v1=$(grep "^$1:\s" /opt/iiab/iiab/vars/default_vars.yml | tail -1 | sed "s/^$1:\s\+//; s/#.*//; s/\s*$//; s/^\(['\"]\)\(.*\)\1$/\2/")
+ v2=$(grep "^$1:\s" /etc/iiab/local_vars.yml | tail -1 | sed "s/^$1:\s\+//; s/#.*//; s/\s*$//; s/^\(['\"]\)\(.*\)\1$/\2/")
+ [ "$v2" != "" ] && echo $v2 || echo $v1 # [ "$v2" ] ALSO WORKS
+}
- exit 0
-fi
-
-echo -e "'wifi_hotspot_capacity_rpi_fix: True' presumed..."
-echo -e "...in /etc/iiab/local_vars.yml (or /opt/iiab/iiab/vars/default_vars.yml ?)\n"
-
-if ! $(diff -q /lib/firmware/brcm/brcmfmac43455-sdio.bin.iiab /lib/firmware/brcm/brcmfmac43455-sdio.bin); then
- mv /lib/firmware/brcm/brcmfmac43455-sdio.bin /lib/firmware/brcm/brcmfmac43455-sdio.bin.$DATE
- cp /lib/firmware/brcm/brcmfmac43455-sdio.bin.iiab /lib/firmware/brcm/brcmfmac43455-sdio.bin
- echo "Replacing /lib/firmware/brcm/brcmfmac43455-sdio.bin"
- WARN=1
-fi
-
-if ! $(diff -q /lib/firmware/brcm/brcmfmac43455-sdio.clm_blob.iiab /lib/firmware/brcm/brcmfmac43455-sdio.clm_blob); then
- mv /lib/firmware/brcm/brcmfmac43455-sdio.clm_blob /lib/firmware/brcm/brcmfmac43455-sdio.clm_blob.$DATE
- cp /lib/firmware/brcm/brcmfmac43455-sdio.clm_blob.iiab /lib/firmware/brcm/brcmfmac43455-sdio.clm_blob
- echo "Replacing /lib/firmware/brcm/brcmfmac43455-sdio.clm_blob"
- WARN=1
-fi
-
-if ! $(diff -q /lib/firmware/brcm/brcmfmac43430-sdio.bin.iiab /lib/firmware/brcm/brcmfmac43430-sdio.bin); then
- mv /lib/firmware/brcm/brcmfmac43430-sdio.bin /lib/firmware/brcm/brcmfmac43430-sdio.bin.$DATE
- cp /lib/firmware/brcm/brcmfmac43430-sdio.bin.iiab /lib/firmware/brcm/brcmfmac43430-sdio.bin
- cp /lib/firmware/brcm/brcmfmac43430-sdio.clm_blob.iiab /lib/firmware/brcm/brcmfmac43430-sdio.clm_blob
- echo "Replacing /lib/firmware/brcm/brcmfmac43430-sdio.bin"
- WARN=1
-fi
-
-if ! $(diff -q /lib/firmware/brcm/brcmfmac43430-sdio.clm_blob.iiab /lib/firmware/brcm/brcmfmac43430-sdio.clm_blob); then
- mv /lib/firmware/brcm/brcmfmac43430-sdio.clm_blob /lib/firmware/brcm/brcmfmac43430-sdio.clm_blob.$DATE
- cp /lib/firmware/brcm/brcmfmac43430-sdio.clm_blob.iiab /lib/firmware/brcm/brcmfmac43430-sdio.clm_blob
- echo "Replacing /lib/firmware/brcm/brcmfmac43430-sdio.clm_blob"
- WARN=1
-fi
-
-if [ "$WARN" = "1" ]; then
- echo -e "\n \e[41;1mWiFi Firmware has been replaced, per iiab/iiab#823.\e[0m"
- echo -e " \e[41;1mReboot is required to activate.\e[0m\n"
- touch /.fw_replaced
- #echo "rebooting..."
- #reboot
-else
- echo -e " WiFi Firmware check \e[42;1mPASSED\e[0m, per iiab/iiab#823." # Or \e[92m for green on black
- echo -e " (Assuming you've rebooted since it was replaced!)\n"
- if [ -f /.fw_replaced ]; then
- rm /.fw_replaced
+link_fw() {
+ if [[ $(readlink /lib/firmware/cypress/$1) != $1.iiab ]] ; then
+ echo
+ mv /lib/firmware/cypress/$1 /lib/firmware/cypress/$1.$(date +%F-%T)
+ ln -s $1.iiab /lib/firmware/cypress/$1
+ echo -e "\e[1mSymlinked /lib/firmware/cypress/$1 -> $1.iiab\e[0m"
+ touch /tmp/.fw_modified
fi
+}
+
+if [[ $(iiab_var_value rpi3bplus_rpi4_wifi_firmware) != "os" ]] ; then
+ link_fw cyfmac43455-sdio.bin
+ link_fw cyfmac43455-sdio.clm_blob
fi
-# exit 0
+if [[ $(iiab_var_value rpizerow_rpi3_wifi_firmware) != "os" ]] ; then
+ link_fw cyfmac43430-sdio.bin
+ link_fw cyfmac43430-sdio.clm_blob
+fi
+
+if [ -f /tmp/.fw_modified ]; then
+ bash /etc/profile.d/iiab-firmware-warn.sh
+else
+ echo -e "\n\e[1mWiFi Firmware links in /lib/firmware/cypress appear \e[92mCORRECT\e[0m\e[1m, per iiab/iiab#3482\e[0m"
+ echo
+ echo -e "\e[100;1m(No reboot appears necessary!)\e[0m"
+ echo
+ echo -e "NOTE: If you change rpi3bplus_rpi4_wifi_firmware or rpizerow_rpi3_wifi_firmware"
+ echo -e "settings in /etc/iiab/local_vars.yml, please then run:"
+ echo
+ echo -e " cd /opt/iiab/iiab"
+ echo -e " sudo iiab-hotspot-off # NO LONGER NEC? eg to restore 'wifi_up_down: True'"
+ echo -e " sudo ./runrole --reinstall firmware"
+ echo -e " sudo iiab-network # SOMETIMES NECESSARY"
+ echo -e " sudo iiab-hotspot-on # NO LONGER NEC? eg to restore 'wifi_up_down: True'"
+ echo -e " sudo reboot\n"
+ #echo
+ #echo -e "Disconnect your power cord before rebooting, for better WiFi firmware results.\n"
+fi
+
+# \e[1m = bright white \e[100;1m = bright white, on gray \n\e[41;1m = bright white, on red
+# \e[42;1m = bright white, on bright green \e[92m = green on black
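The iiab_var_value() helper above gives /etc/iiab/local_vars.yml the last word over /opt/iiab/iiab/vars/default_vars.yml, stripping comments and surrounding quotes along the way. A self-contained sketch of that precedence, run against two temporary files rather than the real IIAB paths:

    #!/bin/bash
    # Sketch: last matching line wins within each file; a local value (if any) overrides the default.
    defaults=$(mktemp); locals=$(mktemp)
    echo 'rpi3bplus_rpi4_wifi_firmware: os    # shipped default'  >  "$defaults"
    echo 'rpizerow_rpi3_wifi_firmware: os'                        >> "$defaults"
    echo 'rpi3bplus_rpi4_wifi_firmware: "19"  # operator choice'  >  "$locals"

    var_value() {
        v1=$(grep "^$1:\s" "$defaults" | tail -1 | sed "s/^$1:\s\+//; s/#.*//; s/\s*$//; s/^\(['\"]\)\(.*\)\1$/\2/")
        v2=$(grep "^$1:\s" "$locals"   | tail -1 | sed "s/^$1:\s\+//; s/#.*//; s/\s*$//; s/^\(['\"]\)\(.*\)\1$/\2/")
        [ "$v2" != "" ] && echo "$v2" || echo "$v1"
    }

    var_value rpi3bplus_rpi4_wifi_firmware   # prints 19 (local override, quotes stripped)
    var_value rpizerow_rpi3_wifi_firmware    # prints os (default)
    rm -f "$defaults" "$locals"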
diff --git a/roles/firmware/templates/iiab-firmware-warn.sh b/roles/firmware/templates/iiab-firmware-warn.sh
index 55120ffbd..f9507496b 100644
--- a/roles/firmware/templates/iiab-firmware-warn.sh
+++ b/roles/firmware/templates/iiab-firmware-warn.sh
@@ -1,12 +1,9 @@
#!/bin/bash
-if [ -f /.fw_replaced ]; then
- echo -e "\n \e[41;1mWiFi Firmware has been replaced, per iiab/iiab#823.\e[0m"
- if grep -q '^wifi_hotspot_capacity_rpi_fix:\s\+[fF]alse\b' /etc/iiab/local_vars.yml ; then
- echo -e " \e[100;1mIf you want these warnings to stop, run:\e[0m"
- echo
- echo -e " \e[100;1msudo rm /.fw_replaced\e[0m\n"
- else
- echo -e " \e[41;1mReboot is required to activate.\e[0m\n"
- fi
+if [ -f /tmp/.fw_modified ]; then
+ echo -e "\n\e[41;1mWiFi Firmware link(s) modified, per iiab/iiab#3482: PLEASE REBOOT!\e[0m"
+ echo
+ echo -e "If you want this warning to stop, reboot to remove /tmp/.fw_modified\n"
fi
+
+# \e[1m = bright white \e[100;1m = bright white, on gray \n\e[41;1m = bright white, on red
diff --git a/roles/gitea/defaults/main.yml b/roles/gitea/defaults/main.yml
index 20164beb2..0032f1d3d 100644
--- a/roles/gitea/defaults/main.yml
+++ b/roles/gitea/defaults/main.yml
@@ -9,7 +9,7 @@
# Info needed to install Gitea:
-gitea_version: 1.15 # 2021-03-07: Grabs latest point release from this branch. Rather than hardcoding (e.g. 1.14.5) every few weeks.
+gitea_version: "1.22" # 2022-01-30: Grabs latest from this MAJOR/MINOR release branch. Rather than exhaustively hard-coding point releases (e.g. 1.14.5) every few weeks. Quotes nec if trailing zero.
iset_suffixes:
i386: 386
x86_64: amd64
@@ -17,9 +17,9 @@ iset_suffixes:
armv6l: arm-6
armv7l: arm-6 # "arm-7" used to work, but no longer since 2019-04-20's Gitea 1.8.0: https://github.com/iiab/iiab/issues/1673 https://github.com/iiab/iiab/pull/1713 -- 2019-07-31: ARM7 support will return at some point, according to: https://github.com/go-gitea/gitea/pull/7037#issuecomment-516735216 (what about ARM8 support for RPi 4?)
-gitea_iset_suffix: "{{ iset_suffixes[ansible_architecture] | default('unknown') }}"
+gitea_iset_suffix: "{{ iset_suffixes[ansible_machine] | default('unknown') }}" # A bit safer than ansible_architecture (see kiwix/defaults/main.yml)
-gitea_download_url: "https://dl.gitea.io/gitea/{{ gitea_version }}/gitea-{{ gitea_version }}-linux-{{ gitea_iset_suffix }}"
+gitea_download_url: "https://dl.gitea.com/gitea/{{ gitea_version }}/gitea-{{ gitea_version }}-linux-{{ gitea_iset_suffix }}"
gitea_integrity_url: "{{ gitea_download_url }}.asc"
gitea_root_directory: "{{ content_base }}/gitea" # /library/gitea
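For reference, gitea_download_url above is just the MAJOR.MINOR branch plus a CPU-suffix lookup. A rough command-line equivalent of that composition (a sketch for manual testing; the role itself uses ansible_machine and the iset_suffixes table, which also covers architectures not shown in this excerpt):

    #!/bin/bash
    # Sketch: rebuild the Gitea download URL by hand, e.g. to test reachability with curl -I.
    gitea_version="1.22"
    case "$(uname -m)" in
        i386)          suffix=386    ;;
        x86_64)        suffix=amd64  ;;
        armv6l|armv7l) suffix=arm-6  ;;   # arm-7 broke with Gitea 1.8.0 (iiab/iiab#1673)
        *)             suffix=unknown ;;  # other mappings live in iset_suffixes
    esac
    echo "https://dl.gitea.com/gitea/$gitea_version/gitea-$gitea_version-linux-$suffix"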
diff --git a/roles/gitea/tasks/nginx.yml b/roles/gitea/tasks/enable-or-disable.yml
similarity index 62%
rename from roles/gitea/tasks/nginx.yml
rename to roles/gitea/tasks/enable-or-disable.yml
index 2014a0d03..3401c3fdd 100644
--- a/roles/gitea/tasks/nginx.yml
+++ b/roles/gitea/tasks/enable-or-disable.yml
@@ -1,3 +1,19 @@
+- name: Enable & Restart 'gitea' systemd service, if gitea_enabled
+ systemd:
+ name: gitea
+ daemon_reload: yes
+ enabled: yes
+ state: restarted
+ when: gitea_enabled
+
+- name: Disable & Stop 'gitea' systemd service, if not gitea_enabled
+ systemd:
+ name: gitea
+ enabled: no
+ state: stopped
+ when: not gitea_enabled
+
+
- name: Enable http://box{{ gitea_url }} via NGINX, by installing {{ nginx_conf_dir }}/gitea-nginx.conf from template
template:
src: gitea-nginx.conf.j2
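The new enable-or-disable.yml above reduces to a familiar systemctl toggle, driven by the gitea_enabled variable. Roughly (a sketch of the manual equivalent, with gitea_enabled standing in as a shell variable):

    #!/bin/bash
    # Sketch: what the two systemd tasks above amount to at the command line.
    if [ "$gitea_enabled" = "True" ]; then
        sudo systemctl daemon-reload
        sudo systemctl enable gitea
        sudo systemctl restart gitea
    else
        sudo systemctl disable gitea
        sudo systemctl stop gitea
    fi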
diff --git a/roles/gitea/tasks/install.yml b/roles/gitea/tasks/install.yml
index fa934c71b..eed1559f8 100644
--- a/roles/gitea/tasks/install.yml
+++ b/roles/gitea/tasks/install.yml
@@ -1,3 +1,8 @@
+- name: Record (initial) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df1
+
+
# 1. Prepare to install Gitea: create user and directory structure
- name: Shut down existing Gitea instance (if we're reinstalling)
@@ -43,10 +48,10 @@
msg: "Could not find a binary for the CPU architecture \"{{ ansible_architecture }}\""
when: gitea_iset_suffix == "unknown"
-- name: Download Gitea binary {{ gitea_download_url }} to {{ gitea_install_path }} (0775, ~104 MB)
+- name: Download Gitea binary {{ gitea_download_url }} to {{ gitea_install_path }} (0775, ~134 MB, SLOW DOWNLOAD CAN TAKE ~15 MIN)
get_url:
url: "{{ gitea_download_url }}"
- dest: "{{ gitea_install_path }}" # e.g. /library/gitea/bin/gitea-1.14
+ dest: "{{ gitea_install_path }}" # e.g. /library/gitea/bin/gitea-1.21
mode: 0775
timeout: "{{ download_timeout }}"
@@ -56,16 +61,16 @@
dest: "{{ gitea_checksum_path }}"
timeout: "{{ download_timeout }}"
-- name: Verify Gitea binary with GPG signature
+- name: Verify Gitea binary with GPG signature ("BAD signature" FALSE ALARMS continue as of 2023-07-16, despite their claims at https://docs.gitea.com/installation/install-from-binary#verify-gpg-signature)
shell: |
- gpg --keyserver pgp.mit.edu --recv {{ gitea_gpg_key }}
+ gpg --keyserver keys.openpgp.org --recv {{ gitea_gpg_key }}
gpg --verify {{ gitea_checksum_path }} {{ gitea_install_path }}
ignore_errors: yes
- name: Symlink {{ gitea_link_path }} -> {{ gitea_install_path }}
file:
src: "{{ gitea_install_path }}"
- path: "{{ gitea_link_path }}"
+ path: "{{ gitea_link_path }}" # /library/gitea/gitea
owner: gitea
group: gitea
state: link
@@ -105,6 +110,17 @@
# 5. RECORD Gitea AS INSTALLED
+- name: Record (final) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df2
+
+- name: Add 'gitea_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
+ ini_file:
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
+ section: gitea
+ option: gitea_disk_usage
+ value: "{{ df2.stdout|int - df1.stdout|int }}"
+
- name: "Set 'gitea_installed: True'"
set_fact:
gitea_installed: True
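The two 'Record disk space used' tasks above bracket the install and store the byte difference as gitea_disk_usage in /etc/iiab/iiab.ini -- the same df1/df2 bookend reappears below for iiab-admin, internetarchive and jupyterhub. The measurement itself is just (a sketch, with a throwaway file standing in for the install step):

    #!/bin/bash
    # Sketch: bytes consumed on / across an arbitrary step (assumes /var/tmp is on the root filesystem).
    df1=$(df -B1 --output=used / | tail -1)
    fallocate -l 100M /var/tmp/iiab-df-demo        # stand-in for the real install work
    df2=$(df -B1 --output=used / | tail -1)
    echo "disk_usage = $(( df2 - df1 )) bytes"     # the value the ini_file task records
    rm -f /var/tmp/iiab-df-demo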
diff --git a/roles/gitea/tasks/main.yml b/roles/gitea/tasks/main.yml
index 265532558..335911c96 100644
--- a/roles/gitea/tasks/main.yml
+++ b/roles/gitea/tasks/main.yml
@@ -19,46 +19,37 @@
quiet: yes
-- name: Install Gitea {{ gitea_version }} if 'gitea_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
- include_tasks: install.yml
- when: gitea_installed is undefined
+- block:
+ - name: Install Gitea {{ gitea_version }} if 'gitea_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
+ include_tasks: install.yml
+ when: gitea_installed is undefined
-- name: Enable & Restart 'gitea' systemd service, if gitea_enabled
- systemd:
- name: gitea
- daemon_reload: yes
- enabled: yes
- state: restarted
- when: gitea_enabled
+ - include_tasks: enable-or-disable.yml
-- name: Disable & Stop 'gitea' systemd service, if not gitea_enabled
- systemd:
- name: gitea
- enabled: no
- state: stopped
- when: not gitea_enabled
+ - name: Add 'gitea' variable values to {{ iiab_ini_file }}
+ ini_file:
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
+ section: gitea
+ option: "{{ item.option }}"
+ value: "{{ item.value | string }}"
+ with_items:
+ - option: name
+ value: Gitea
+ - option: description
+ value: '"Gitea is like GitHub for more offline communities: Git with a cup of tea"'
+ - option: gitea_install
+ value: "{{ gitea_install }}"
+ - option: gitea_enabled
+ value: "{{ gitea_enabled }}"
+ - option: gitea_run_directory
+ value: "{{ gitea_run_directory }}"
+ - option: gitea_url
+ value: "{{ gitea_url }}"
-- name: Enable/Disable/Restart NGINX
- include_tasks: nginx.yml
+ rescue:
-
-- name: Add 'gitea' to list of services at {{ iiab_ini_file }}
- ini_file:
- path: "{{ iiab_ini_file }}" # /etc/iiab/iiab_state.yml
- section: gitea
- option: "{{ item.option }}"
- value: "{{ item.value | string }}"
- with_items:
- - option: name
- value: Gitea
- - option: description
- value: '"Gitea is like GitHub for more offline communities: Git with a cup of tea"'
- - option: gitea_install
- value: "{{ gitea_install }}"
- - option: gitea_enabled
- value: "{{ gitea_enabled }}"
- - option: gitea_run_directory
- value: "{{ gitea_run_directory }}"
- - option: gitea_url
- value: "{{ gitea_url }}"
+ - name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
+ fail:
+ msg: ""
+ when: not skip_role_on_error
diff --git a/roles/gitea/templates/app.ini.j2 b/roles/gitea/templates/app.ini.j2
index 00d503fdb..98add8b15 100644
--- a/roles/gitea/templates/app.ini.j2
+++ b/roles/gitea/templates/app.ini.j2
@@ -2,7 +2,8 @@
; Copy required sections to your own app.ini (default is custom/conf/app.ini)
; and modify as needed.
-; see https://docs.gitea.io/en-us/config-cheat-sheet/ for additional documentation.
+; see https://docs.gitea.com/administration/config-cheat-sheet for additional documentation.
+; https://docs.gitea.com/next/administration/config-cheat-sheet
; App name that shows in every page title
APP_NAME = {{ gitea_display_name }}
@@ -23,9 +24,11 @@ DEFAULT_PRIVATE = last
; Global limit of repositories per user, applied at creation time. -1 means no limit
MAX_CREATION_LIMIT = -1
; Mirror sync queue length, increase if mirror syncing starts hanging
-MIRROR_QUEUE_LENGTH = 1000
+; 2023-07-16 ERROR: MIRROR_QUEUE_LENGTH = 1000
+; `[repository].MIRROR_QUEUE_LENGTH`. Use new options in `[queue.mirror]`
; Patch test queue length, increase if pull request patch testing starts hanging
-PULL_REQUEST_QUEUE_LENGTH = 1000
+; 2023-07-16 ERROR: PULL_REQUEST_QUEUE_LENGTH = 1000
+; `[repository].PULL_REQUEST_QUEUE_LENGTH`. Use new options in `[queue.pr_patch_checker]`
; Preferred Licenses to place at the top of the List
; The name here must match the filename in conf/license or custom/conf/license
PREFERRED_LICENSES = Apache License 2.0,MIT License
@@ -201,13 +204,22 @@ PPROF_DATA_PATH = data/tmp/pprof
LANDING_PAGE = home
; Enables git-lfs support. true or false, default is false.
LFS_START_SERVER = false
-; Where your lfs files reside, default is data/lfs.
-LFS_CONTENT_PATH = {{ gitea_lfs_root }}
; LFS authentication secret, change this yourself
LFS_JWT_SECRET =
; LFS authentication validity period (in time.Duration), pushes taking longer than this may fail.
LFS_HTTP_AUTH_EXPIRY = 20m
+; lfs [Large File Storage] storage will override storage
+;
+[lfs]
+;STORAGE_TYPE = local
+;
+; Where your lfs files reside, default is data/lfs.
+PATH = {{ gitea_lfs_root }}
+;
+; override the minio base path if storage type is minio
+;MINIO_BASE_PATH = lfs/
+
; Define allowed algorithms and their minimum key length (use -1 to disable a type)
[ssh.minimum_key_sizes]
ED25519 = 256
@@ -240,7 +252,8 @@ ISSUE_INDEXER_PATH = indexers/issues.bleve
; repo indexer by default disabled, since it uses a lot of disk space
REPO_INDEXER_ENABLED = false
REPO_INDEXER_PATH = indexers/repos.bleve
-UPDATE_BUFFER_LEN = 20
+; 2023-07-16 ERROR: UPDATE_BUFFER_LEN = 20
+; `[indexer].UPDATE_BUFFER_LEN`. Use new options in `[queue.issue_indexer]`
MAX_FILE_SIZE = 1048576
[admin]
@@ -360,7 +373,8 @@ PAGING_NUM = 10
[mailer]
ENABLED = false
; Buffer length of channel, keep it as it is if you don't know what it is.
-SEND_BUFFER_LEN = 100
+; 2023-07-16 ERROR: SEND_BUFFER_LEN = 100
+; `[mailer].SEND_BUFFER_LEN`. Use new options in `[queue.mailer]`
; Name displayed in mail title
SUBJECT = %(APP_NAME)s
; Mail server
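If you carry a customized app.ini forward from an older Gitea, a quick grep for the settings commented out above (newer Gitea rejects them or relocates them into [queue.*] and [lfs] sections) can save a failed startup. The path below is only an assumption -- point it at wherever your app.ini actually lives:

    # Sketch: flag options that newer Gitea no longer accepts in their old locations.
    grep -nE 'MIRROR_QUEUE_LENGTH|PULL_REQUEST_QUEUE_LENGTH|UPDATE_BUFFER_LEN|SEND_BUFFER_LEN|LFS_CONTENT_PATH' \
        /path/to/your/app.ini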
diff --git a/roles/iiab-admin/README.rst b/roles/iiab-admin/README.rst
index 55def7bde..2dbadaa62 100644
--- a/roles/iiab-admin/README.rst
+++ b/roles/iiab-admin/README.rst
@@ -13,7 +13,7 @@
iiab-admin README
=================
-`Internet-in-a-Box `_ (IIAB) encourages you to pay attention to the security of your learning community.
+`Internet-in-a-Box `_ (IIAB) encourages you to pay attention to the security of your learning community.
This Ansible playbook is one of the very first that runs when you install IIAB, and we hope reading this helps you understand your choices:
@@ -21,11 +21,11 @@ Configure user 'iiab-admin'
---------------------------
* `admin-user.yml `_ configures a Linux user that will give you access to IIAB's Admin Console (http://box.lan/admin) after IIAB is installed -- and can also help you at the command-line with IIAB community support commands like {iiab-diagnostics, iiab-hotspot-on, iiab-check-firmware, etc}.
- * If initial creation of the user and password was somehow not already taken care of by IIAB's 1-line installer (http://download.iiab.io) or by your underlying OS, that too will be taken care of here.
+ * If initial creation of the user and password was somehow not already taken care of by IIAB's 1-line installer (https://download.iiab.io) or by your underlying OS, that too will be taken care of here.
* By default this user is ``iiab-admin`` with password ``g0adm1n``
* *Do change the default password if you haven't yet, by running:* **sudo passwd iiab-admin**
* After IIAB is installed, you can also change the password by logging into Admin Console (http://box.lan/admin) > Utilities > Change Password.
-* If you prefer to use a pre-existing user like ``pi`` or ``ubuntu`` (or any other username) customize the variable ``iiab_admin_user`` in your `/etc/iiab/local_vars.yml `_ (preferably do this prior to installing IIAB!)
+* If you prefer to use a pre-existing user like ``pi`` or ``ubuntu`` (or any other username) customize the variable ``iiab_admin_user`` in your `/etc/iiab/local_vars.yml `_ (preferably do this prior to installing IIAB!)
* You can set ``iiab_admin_can_sudo: False`` if you want a strict security lockdown (if you're really sure you won't need IIAB community support commands like `/usr/bin/iiab-diagnostics <../../scripts/iiab-diagnostics.README.md>`_, `/usr/bin/iiab-hotspot-on <../network/templates/network/iiab-hotspot-on>`_, `iiab-check-firmware <../firmware/templates/iiab-check-firmware>`_, etc!)
* You can also set ``iiab_admin_user_install: False`` if you're sure you know how to do all this `account and sudo configuration `_ manually.
@@ -36,14 +36,14 @@ Security
#. ``iiab-admin`` (specified by ``admin_console_group`` in `/opt/iiab/iiab/vars/default_vars.yml <../../vars/default_vars.yml>`_ and `/opt/iiab/iiab-admin-console/vars/default_vars.yml `_)
#. ``sudo``
* Please read much more about what escalated (root) actions are authorized when you log into IIAB's Admin Console, and how this works: https://github.com/iiab/iiab-admin-console/blob/master/Authentication.md
-* If your IIAB includes OpenVPN, ``/root/.ssh/authorized_keys`` should be installed by `roles/openvpn/tasks/install.yml <../openvpn/tasks/install.yml>`_ to facilitate remote community support. Feel free to remove this as mentioned here: http://wiki.laptop.org/go/IIAB/Security
+* If your IIAB includes Tailscale (VPN), ``/root/.ssh/authorized_keys`` should be installed by `roles/tailscale/tasks/install.yml <../tailscale/tasks/install.yml>`_ to facilitate remote community support. Feel free to remove this as mentioned here: https://wiki.iiab.io/go/Security
* Auto-checking for the default/published password (as specified by ``iiab_admin_published_pwd`` in `/opt/iiab/iiab/vars/default_vars.yml <../../vars/default_vars.yml>`_) is implemented in `/etc/profile.d `_ (and `/etc/xdg/lxsession/LXDE-pi `_ when it exists, i.e. on Raspberry Pi OS with desktop).
Example
=======
* If you later change your mind about ``sudo`` privileges for user 'iiab-admin' (as specified by ``iiab_admin_user``) then do this:
- #. Go ahead and change the value of ``iiab_admin_can_sudo`` (to either True or False) in `/etc/iiab/local_vars.yml `_
+ #. Go ahead and change the value of ``iiab_admin_can_sudo`` (to either True or False) in `/etc/iiab/local_vars.yml `_
#. Make sure that ``iiab_admin_user_install: True`` is also set.
#. Then re-run this Ansible playbook, by running ``cd /opt/iiab/iiab`` followed by ``sudo ./runrole --reinstall iiab-admin``
@@ -56,16 +56,16 @@ Historical Notes
Remote Support Tools
--------------------
-The `iiab-diagnostics <../../scripts/iiab-diagnostics.README.md>`_ and `OpenVPN `_ options mentioned above can greatly help you empower your community, typically during the implementation phase of your project, even if Linux is new to you.
+The `iiab-diagnostics <../../scripts/iiab-diagnostics.README.md>`_ and `Tailscale (VPN) `_ options mentioned above can greatly help you empower your community, typically during the implementation phase of your project, even if Linux is new to you.
-Similarly, `access.yml `_ adds a couple text mode tools -- extremely helpful over expensive / low-bandwidth connections:
+Similarly, `tasks/main.yml `_ adds a couple of text-mode tools -- extremely helpful over expensive / low-bandwidth connections:
* `lynx `_
* `screen `_
*More great tools to help you jumpstart community action at a distance:*
-* http://FAQ.IIAB.IO > "How can I remotely manage my Internet-in-a-Box?"
+* `FAQ.IIAB.IO `_ > "How can I remotely manage my Internet-in-a-Box?"
Admin Console
-------------
diff --git a/roles/iiab-admin/tasks/access.yml b/roles/iiab-admin/tasks/access.yml
deleted file mode 100644
index e7281c4dc..000000000
--- a/roles/iiab-admin/tasks/access.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-- name: "Install text mode packages, useful during remote access: screen, lynx"
- package:
- name:
- - lynx
- - screen
- state: present
diff --git a/roles/iiab-admin/tasks/access.yml.unused b/roles/iiab-admin/tasks/access.yml.unused
new file mode 100644
index 000000000..639a3d8a6
--- /dev/null
+++ b/roles/iiab-admin/tasks/access.yml.unused
@@ -0,0 +1,6 @@
+- name: "Install text-mode packages, useful during remote access: lynx, screen"
+ package:
+ name:
+ - lynx
+ - screen
+ state: present
diff --git a/roles/iiab-admin/tasks/main.yml b/roles/iiab-admin/tasks/main.yml
index ce4451003..fabe0bffe 100644
--- a/roles/iiab-admin/tasks/main.yml
+++ b/roles/iiab-admin/tasks/main.yml
@@ -2,8 +2,17 @@
# https://github.com/iiab/iiab/blob/master/roles/iiab-admin/README.rst
-- name: Install lynx, screen
- include_tasks: access.yml
+- name: Record (initial) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df1
+
+
+- name: "Install text-mode packages, useful during remote access: lynx, screen"
+ package:
+ name:
+ - lynx
+ - screen
+ state: present
- name: Install sudo & /etc/sudoers with logging to /var/log/sudo.log
include_tasks: sudo-prereqs.yml
@@ -19,7 +28,7 @@
# (1) by the OS installer
# (2) by the OS's graphical desktop tools
# (3) at the command-line: sudo passwd iiab-admin
-# (4) by IIAB's 1-line installer: http://download.iiab.io
+# (4) by IIAB's 1-line installer: https://download.iiab.io
# (5) by this role: roles/iiab-admin/tasks/admin-user.yml
# (6) by IIAB's Admin Console during installation
# ...and/or...
@@ -31,6 +40,17 @@
# RECORD iiab-admin AS INSTALLED
+- name: Record (final) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df2
+
+- name: Add 'iiab_admin_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
+ ini_file:
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
+ section: iiab-admin
+ option: iiab_admin_disk_usage
+ value: "{{ df2.stdout|int - df1.stdout|int }}"
+
- name: "Set 'iiab_admin_installed: True'"
set_fact:
iiab_admin_installed: True
diff --git a/roles/iiab-admin/tasks/pwd-warnings.yml b/roles/iiab-admin/tasks/pwd-warnings.yml
index d1379b3fb..600a935bb 100644
--- a/roles/iiab-admin/tasks/pwd-warnings.yml
+++ b/roles/iiab-admin/tasks/pwd-warnings.yml
@@ -1,31 +1,36 @@
-- name: Install /etc/profile.d/sshpwd-profile-iiab.sh from template, to issue warnings (during shell/ssh logins) if iiab-admin password is the default
+# 2022-07-22: SIMILAR TO roles/www_options/tasks/main.yml FOR browser
+# AND roles/network/tasks/netwarn.yml FOR iiab-network
+
+
+- name: Install /etc/profile.d/iiab-pwdwarn-profile.sh from template, to issue warnings (during shell/ssh logins) if iiab-admin password is the default
template:
- src: sshpwd-profile-iiab.sh.j2
- dest: /etc/profile.d/sshpwd-profile-iiab.sh
+ src: iiab-pwdwarn-profile.sh.j2
+ dest: /etc/profile.d/iiab-pwdwarn-profile.sh
mode: '0644'
-- name: Is /etc/xdg/lxsession/LXDE-pi a directory?
+- name: Does directory /home/{{ iiab_admin_user }}/.config/labwc/ exist?
stat:
- path: /etc/xdg/lxsession/LXDE-pi
- register: lx
+ path: /home/{{ iiab_admin_user }}/.config/labwc/
+ register: labwc_dir
-- name: "If so, install from template: /etc/xdg/lxsession/LXDE-pi/sshpwd-lxde-iiab.sh"
+- name: "If so, install from template: /usr/local/sbin/iiab-pwdwarn-labwc"
template:
- src: sshpwd-lxde-iiab.sh.j2
- dest: /etc/xdg/lxsession/LXDE-pi/sshpwd-lxde-iiab.sh
+ src: iiab-pwdwarn-labwc.j2
+ dest: /usr/local/sbin/iiab-pwdwarn-labwc
mode: '0755'
- when: lx.stat.isdir is defined and lx.stat.isdir # and is_raspbian
+ when: labwc_dir.stat.exists and labwc_dir.stat.isdir
-# 2019-03-07: This popup (/etc/xdg/lxsession/LXDE-pi/sshpwd-lxde-iiab.sh) does
+# 2019-03-07: This pop-up (/etc/xdg/lxsession/LXDE-pi/sshpwd-lxde-iiab.sh) did
# not actually appear when triggered by /etc/xdg/autostart/pprompt-iiab.desktop
# (or pprompt.desktop as Raspbian has working since 2018-11-13!) Too bad as it
-# would be really nice to standardize this popup across Ubermix & all distros..
+# would be really nice to standardize pop-ups across Ubermix & all distros...
# Is this a permissions/security issue presumably? Official autostart spec is:
# https://specifications.freedesktop.org/autostart-spec/autostart-spec-latest.html
# Raspbian's 2016-2018 evolution here: https://github.com/iiab/iiab/issues/1537
-- name: ...and put a line in /etc/xdg/lxsession/LXDE-pi/autostart to trigger popups
+- name: ...and put a line in /home/{{ iiab_admin_user }}/.config/labwc/autostart to trigger iiab-pwdwarn-labwc (& pop-up as nec)
lineinfile:
- path: /etc/xdg/lxsession/LXDE-pi/autostart
- line: "@/etc/xdg/lxsession/LXDE-pi/sshpwd-lxde-iiab.sh"
- when: lx.stat.isdir is defined and lx.stat.isdir # and is_raspbian
+ path: /home/{{ iiab_admin_user }}/.config/labwc/autostart # iiab-admin
+ create: yes
+ line: '/usr/local/sbin/iiab-pwdwarn-labwc &'
+ when: labwc_dir.stat.exists and labwc_dir.stat.isdir
diff --git a/roles/iiab-admin/tasks/sudo-prereqs.yml b/roles/iiab-admin/tasks/sudo-prereqs.yml
index 1b608fef1..9370666b2 100644
--- a/roles/iiab-admin/tasks/sudo-prereqs.yml
+++ b/roles/iiab-admin/tasks/sudo-prereqs.yml
@@ -1,6 +1,6 @@
- name: 'Install package: sudo'
package:
- name: sudo # (1) Should be installed prior to installing IIAB, (2) Can also be installed by roles/1-prep's roles/openvpn/tasks/install.yml, (3) Is definitely installed by 1-prep here, (4) Used to be installed by roles/2-common/tasks/packages.yml (but that's too late!)
+ name: sudo # (1) Should be installed prior to installing IIAB, (2) Can be installed by 1-prep's roles/tailscale/tasks/install.yml, (3) Can be installed by 1-prep's roles/iiab-admin/tasks/sudo-prereqs.yml here, (4) Used to be installed by roles/2-common/tasks/packages.yml (but that's too late!)
- name: Temporarily make file /etc/sudoers editable (0640)
file:
diff --git a/roles/iiab-admin/templates/sshpwd-lxde-iiab.sh.j2 b/roles/iiab-admin/templates/iiab-pwdwarn-labwc.j2
similarity index 85%
rename from roles/iiab-admin/templates/sshpwd-lxde-iiab.sh.j2
rename to roles/iiab-admin/templates/iiab-pwdwarn-labwc.j2
index fe7e8ae1b..373d3888d 100755
--- a/roles/iiab-admin/templates/sshpwd-lxde-iiab.sh.j2
+++ b/roles/iiab-admin/templates/iiab-pwdwarn-labwc.j2
@@ -19,14 +19,18 @@ check_user_pwd() {
# enough when user does not exist. Or uncomment to FORCE ERROR CODE 2.
# Either way, overall bash script still returns exit code 0 ("success")
- # sudo works below (unlike in sshpwd-profile-iiab.sh) b/c RaspiOS ships w/
+ # sudo works below (unlike in sshpwd-profile-iiab.sh) b/c RasPiOS ships w/
# /etc/sudoers.d/010_pi-nopasswd containing "pi ALL=(ALL) NOPASSWD: ALL"
# (read access to /etc/shadow is otherwise restricted to just root and
# group www-data i.e. Apache, NGINX get special access). SEE: #2431, #2561
# 2021-08-28: New OS's use 'yescrypt' so use Perl instead of Python (#2949)
# This also helps avoid parsing the (NEW) 4th sub-field in $y$j9T$SALT$HASH
- field2=$(grep "^$1:" /etc/shadow | cut -d: -f2)
+
+ # 2022-09-21 #3368: Sets field2 to "" if sudo -n fails to read /etc/shadow
+ # 2022-10-18 #3404: Redirect stderr to /dev/null, as RasPiOS might one day
+ # force an annoying pop-up, as Mint did (due to sshpwd-profile-iiab.sh.j2)
+ field2=$(sudo -n grep "^$1:" /etc/shadow 2>/dev/null | cut -d: -f2)
[[ $(perl -e "print crypt('$2', '$field2')") == $field2 ]]
# # $meth (hashing method) is typically '6' which implies 5000 rounds
@@ -37,8 +41,8 @@ check_user_pwd() {
# [ $(python3 -c "import crypt; print(crypt.crypt('$2', '\$$meth\$$salt'))") == "\$$meth\$$salt\$$hash" ]
}
-#grep -q "^PasswordAuthentication\s\+no\b" /etc/ssh/sshd_config && return
-#systemctl is-active {{ sshd_service }} || return
+# grep -q "^PasswordAuthentication\s\+no\b" /etc/ssh/sshd_config && return
+# systemctl is-active ssh || return # #3444: Or use Ansible var sshd_service
if check_user_pwd "{{ iiab_admin_user }}" "{{ iiab_admin_published_pwd }}" ; then # iiab-admin g0adm1n
zenity --warning --width=600 --text="Published password in use by user '{{ iiab_admin_user }}'.\n\nTHIS IS A SECURITY RISK - please change its password using IIAB's Admin Console (http://box.lan/admin) -> Utilities -> Change Password.\n\nSee 'What are the default passwords?' at http://FAQ.IIAB.IO"
diff --git a/roles/iiab-admin/templates/sshpwd-profile-iiab.sh.j2 b/roles/iiab-admin/templates/iiab-pwdwarn-profile.sh.j2
similarity index 91%
rename from roles/iiab-admin/templates/sshpwd-profile-iiab.sh.j2
rename to roles/iiab-admin/templates/iiab-pwdwarn-profile.sh.j2
index 24d87886c..9d18eece1 100755
--- a/roles/iiab-admin/templates/sshpwd-profile-iiab.sh.j2
+++ b/roles/iiab-admin/templates/iiab-pwdwarn-profile.sh.j2
@@ -16,7 +16,8 @@
check_user_pwd() {
#[ $(id -un) = "root" ] || return 2
#[ $(id -un) = "root" ] || [ $(id -un) = "iiab-admin" ] || return 2
- [ -r /etc/shadow ] || return 2 # FORCE ERROR if /etc/shadow not readable
+
+ #[ -r /etc/shadow ] || return 2 # FORCE ERROR if /etc/shadow not readable
# *BUT* overall bash script still returns exit code 0 ("success").
#id -u $1 > /dev/null 2>&1 || return 2 # Not needed if return 1 is good
@@ -25,7 +26,10 @@ check_user_pwd() {
# 2021-08-28: New OS's use 'yescrypt' so use Perl instead of Python (#2949)
# This also helps avoid parsing the (NEW) 4th sub-field in $y$j9T$SALT$HASH
- field2=$(grep "^$1:" /etc/shadow | cut -d: -f2)
+
+ # 2022-09-21 #3368: Sets field2 to "" if sudo -n fails to read /etc/shadow
+ # 2022-10-18 #3404: Redirect stderr to /dev/null, to avoid Mint pop-up
+ field2=$(sudo -n grep "^$1:" /etc/shadow 2> /dev/null | cut -d: -f2)
[[ $(perl -e "print crypt('$2', '$field2')") == $field2 ]]
# # $meth (hashing method) is typically '6' which implies 5000 rounds
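Both pwd-warning templates above share the same core test: read field 2 of /etc/shadow via passwordless 'sudo -n' (errors silenced), then re-hash the candidate password with Perl's crypt(), which handles yescrypt's full salt string. Condensed into a standalone sketch (the filename is hypothetical):

    #!/bin/bash
    # Sketch (save as e.g. check-pwd.sh): report whether user $1 still uses password $2.
    # Usage: ./check-pwd.sh iiab-admin g0adm1n     (needs passwordless sudo, as on RasPiOS)
    user=$1; candidate=$2
    field2=$(sudo -n grep "^$user:" /etc/shadow 2>/dev/null | cut -d: -f2)
    [ -n "$field2" ] || { echo "Could not read /etc/shadow for '$user'"; exit 2; }
    if [[ $(perl -e "print crypt('$candidate', '$field2')") == $field2 ]]; then
        echo "WARNING: published/default password still in use by '$user'"
    else
        echo "Password for '$user' differs from '$candidate'"
    fi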
diff --git a/roles/internetarchive/README.md b/roles/internetarchive/README.md
index 49d8d3f0f..bd32849c3 100644
--- a/roles/internetarchive/README.md
+++ b/roles/internetarchive/README.md
@@ -8,7 +8,7 @@ Access to our library of millions of books, journals, audio and video recordings
This Ansible role installs the Internet Archive's dweb-mirror project on
Internet-in-a-Box (IIAB). Use this to build up a dynamic offline library
-arising from the materials you can explore at http://dweb.archive.org
+arising from the materials you can explore at https://dweb.archive.org
The Offline Internet Archive server:
@@ -248,7 +248,7 @@ and just checks the content is up to date.
## Managing collections on Internet Archive
-You can create and manage your own collections on the [Internet Archive site](http://www.archive.org).
+You can create and manage your own collections on the [Internet Archive site](https://www.archive.org).
Other people can then crawl those collections.
First get in touch with Mitra Ardron at `mitra@archive.org`, as processes may have changed since this is written.
diff --git a/roles/internetarchive/tasks/nginx.yml b/roles/internetarchive/tasks/enable-or-disable.yml
similarity index 62%
rename from roles/internetarchive/tasks/nginx.yml
rename to roles/internetarchive/tasks/enable-or-disable.yml
index 0469e58c4..47cebe214 100644
--- a/roles/internetarchive/tasks/nginx.yml
+++ b/roles/internetarchive/tasks/enable-or-disable.yml
@@ -1,3 +1,19 @@
+- name: Enable & Restart 'internetarchive' systemd service, if internetarchive_enabled
+ systemd:
+ name: internetarchive
+ daemon_reload: yes
+ enabled: yes
+ state: restarted
+ when: internetarchive_enabled
+
+- name: Disable & Stop 'internetarchive' systemd service, if not internetarchive_enabled
+ systemd:
+ name: internetarchive
+ enabled: no
+ state: stopped
+ when: not internetarchive_enabled
+
+
- name: Enable http://box/archive via NGINX, by installing {{ nginx_conf_dir }}/internetarchive-nginx.conf from template
template:
src: internetarchive-nginx.conf.j2 # TO DO: roles/internetarchive/templates/internetarchive-nginx.conf.j2
diff --git a/roles/internetarchive/tasks/install.yml b/roles/internetarchive/tasks/install.yml
index d44586042..2821a9ffd 100644
--- a/roles/internetarchive/tasks/install.yml
+++ b/roles/internetarchive/tasks/install.yml
@@ -9,10 +9,10 @@
include_role:
name: nodejs
-- name: Assert that 10.x <= nodejs_version ({{ nodejs_version }}) <= 16.x
+- name: Assert that 10.x <= nodejs_version ({{ nodejs_version }}) <= 22.x
assert:
- that: nodejs_version is version('10.x', '>=') and nodejs_version is version('16.x', '<=')
- fail_msg: "Internet Archive install cannot proceed, as it currently requires Node.js 10.x - 16.x, and your nodejs_version is set to {{ nodejs_version }}. Please check the value of nodejs_version in /opt/iiab/iiab/vars/default_vars.yml and possibly also /etc/iiab/local_vars.yml"
+ that: nodejs_version is version('10.x', '>=') and nodejs_version is version('22.x', '<=')
+ fail_msg: "Internet Archive install cannot proceed, as it currently requires Node.js 10.x - 22.x, and your nodejs_version is set to {{ nodejs_version }}. Please check the value of nodejs_version in /opt/iiab/iiab/vars/default_vars.yml and possibly also /etc/iiab/local_vars.yml"
quiet: yes
- name: "Set 'yarn_install: True' and 'yarn_enabled: True'"
@@ -30,6 +30,11 @@
state: present
+- name: Record (initial) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df1
+
+
# 2. CREATE 2 DIRS, WIPE /opt/iiab/internetarchive/node_modules & RUN YARN
- name: mkdir {{ internetarchive_dir }}
@@ -42,8 +47,8 @@
state: absent
path: "{{ internetarchive_dir }}/node_modules"
-- name: Run 'yarn add @internetarchive/dweb-mirror' to download/populate {{ internetarchive_dir }}/node_modules (CAN TAKE ~5 MINUTES)
- shell: yarn config set child-concurrency 1 && yarn add @internetarchive/dweb-mirror
+- name: Run 'yarn add https://github.com/internetarchive/dweb-mirror' to download/populate {{ internetarchive_dir }}/node_modules (CAN TAKE ~5 MINUTES)
+ shell: yarn config set child-concurrency 1 && yarn add https://github.com/internetarchive/dweb-mirror
args:
chdir: "{{ internetarchive_dir }}"
creates: "{{ internetarchive_dir }}/node_modules/@internetarchive/dweb-mirror/internetarchive"
@@ -64,6 +69,17 @@
# 4. RECORD Internet Archive AS INSTALLED
+- name: Record (final) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df2
+
+- name: Add 'internetarchive_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
+ ini_file:
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
+ section: internetarchive
+ option: internetarchive_disk_usage
+ value: "{{ df2.stdout|int - df1.stdout|int }}"
+
- name: "Set 'internetarchive_installed: True'"
set_fact:
internetarchive_installed: True
diff --git a/roles/internetarchive/tasks/main.yml b/roles/internetarchive/tasks/main.yml
index c878287a3..7e3a8a2dd 100644
--- a/roles/internetarchive/tasks/main.yml
+++ b/roles/internetarchive/tasks/main.yml
@@ -19,76 +19,60 @@
quiet: yes
-# 2020-02-11: @mitra42 & @holta agree (#2247) that the following 2-stanza
-# "UPDATE internetarchive" block should run whenever one isn't installing
-# (or reinstalling) internetarchive, for now. We're aware this means slowness
-# during "./runrole internetarchive" but that's very intentional for now -- as
-# it leads to more testing of more recent versions of internetarchive, which
-# is strongly desired. Finally, these current norms can and probably will be
-# changed in future, when broader IIAB norms develop around "./runrole
-# --upgrade internetarchive" or "./runrole --update internetarchive" or such,
-# as may evolve @ https://github.com/iiab/iiab/pull/2238#discussion_r376168178
+- block:
-- block: # BEGIN 2-STANZA BLOCK
+ # 2020-02-11: @mitra42 & @holta agree (#2247) that the following 2-stanza
+ # "UPDATE internetarchive" portion should run whenever one isn't installing
+ # (or reinstalling) internetarchive, for now. We're aware this means slowness
+ # during "./runrole internetarchive" but that's very intentional for now -- as
+ # it leads to more testing of more recent versions of internetarchive, which
+ # is strongly desired. Finally, these current norms can and probably will be
+ # changed in future, when broader IIAB norms develop around "./runrole
+ # --upgrade internetarchive" or "./runrole --update internetarchive" or such,
+ # as may evolve @ https://github.com/iiab/iiab/pull/2238#discussion_r376168178
- name: "UPGRADE: Stop 'internetarchive' systemd service, if internetarchive_installed is defined"
systemd:
name: internetarchive
daemon_reload: yes
state: stopped
+ when: internetarchive_installed is defined
- name: "UPGRADE: Run 'yarn upgrade' in {{ internetarchive_dir }}, if internetarchive_installed is defined"
shell: yarn config set child-concurrency 1 && yarn install && yarn upgrade
args:
chdir: "{{ internetarchive_dir }}"
+ when: internetarchive_installed is defined
- when: internetarchive_installed is defined # END 2-STANZA BLOCK
+ # "ELSE" INSTALL...
-# "ELSE" INSTALL...
-
-- name: Install Internet Archive if 'internetarchive_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
- include_tasks: install.yml
- when: internetarchive_installed is undefined
+ - name: Install Internet Archive if 'internetarchive_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
+ include_tasks: install.yml
+ when: internetarchive_installed is undefined
-# ENABLE/DISABLE/RESTART SYSTEMD SERVICE & WEB SERVERS AS NEC ?
-
-- name: Enable & Restart 'internetarchive' systemd service, if internetarchive_enabled
- systemd:
- name: internetarchive
- daemon_reload: yes
- enabled: yes
- state: restarted
- when: internetarchive_enabled
-
-- name: Disable & Stop 'internetarchive' systemd service, if not internetarchive_enabled
- systemd:
- name: internetarchive
- enabled: no
- state: stopped
- when: not internetarchive_enabled
-
-# - name: Enable/Disable/Restart Apache if primary
-# include_tasks: apache.yml
-# when: apache_installed is defined and not nginx_enabled
-
-- name: Enable/Disable/Restart NGINX if primary
- include_tasks: nginx.yml
- #when: nginx_enabled
+ - include_tasks: enable-or-disable.yml
-- name: Add 'internetarchive' variable values to {{ iiab_ini_file }}
- ini_file:
- path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
- section: internetarchive
- option: "{{ item.option }}"
- value: "{{ item.value | string }}"
- with_items:
- - option: name
- value: Internet Archive
- - option: description
- value: '"Take the Internet Archive experience and materials offline, in a decentralized way!"'
- - option: internetarchive_install
- value: "{{ internetarchive_install }}"
- - option: internetarchive_enabled
- value: "{{ internetarchive_enabled }}"
+ - name: Add 'internetarchive' variable values to {{ iiab_ini_file }}
+ ini_file:
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
+ section: internetarchive
+ option: "{{ item.option }}"
+ value: "{{ item.value | string }}"
+ with_items:
+ - option: name
+ value: Internet Archive
+ - option: description
+ value: '"Take the Internet Archive experience and materials offline, in a decentralized way!"'
+ - option: internetarchive_install
+ value: "{{ internetarchive_install }}"
+ - option: internetarchive_enabled
+ value: "{{ internetarchive_enabled }}"
+
+ rescue:
+
+ - name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
+ fail:
+ msg: ""
+ when: not skip_role_on_error
diff --git a/roles/jupyterhub/README.md b/roles/jupyterhub/README.md
index be6fc5719..6a48cacf5 100644
--- a/roles/jupyterhub/README.md
+++ b/roles/jupyterhub/README.md
@@ -1,5 +1,7 @@
## JupyterHub programming environment with student Notebooks
+### CAUTION: Internet-in-a-Box (IIAB) does not support JupyterHub on 32-bit OS's, where installation will likely fail ([#3639](https://github.com/iiab/iiab/issues/3639)).
+
#### Secondary schools may want to consider JupyterHub to integrate coding with dynamic interactive graphing -- A New Way to Think About Programming -- allowing students to integrate science experiment results and program output within their own blog-like "Jupyter Notebooks."
* Jupyter Notebooks are widely used in the scientific community:
@@ -9,10 +11,11 @@
* [JupyterHub changelog](https://jupyterhub.readthedocs.io/en/stable/changelog.html#changelog)
* Students create their own accounts on first use -- e.g. at http://box.lan/jupyterhub -- just as if they're logging in regularly (unfortunately the login screen doesn't make that clear, but the teacher _does not_ need to be involved!)
* A student can then sign in with their username and password, to gain access to their files (Jupyter Notebooks).
- * The teacher should set and protect JupyterHub's overall `Admin` password, just in case. As with student accounts, the login screen doesn't make that clear -- so just log in with username `Admin` -- using any password that you want to become permanent.
-* Individual student folders are created in `/var/lib/private/` on the Internet-in-a-Box (IIAB) server:
+ * The teacher should set and protect JupyterHub's overall `Admin` password, just in case. As with student accounts, the login screen unfortunately doesn't make that clear -- so just log in with username `Admin` -- using any password that you want to become permanent.
+* Individual student folders are created in `/var/lib/private/` on your Internet-in-a-Box (IIAB) server:
* A student will only be able to see their own work β they do not have privileges outside of their own folder.
* Students may upload Jupyter Notebooks to the IIAB server, and download the current state of their work via a normal browser.
+ * Linux administrators can read more about JupyterHub's [Local Users](https://github.com/jupyterhub/systemdspawner#local-users) and [c.SystemdSpawner.dynamic_users = True](https://github.com/jupyterhub/systemdspawner#dynamic_users)
### Settings
@@ -26,10 +29,11 @@ In some rare circumstances, it may be necessary to restart JupyterHub's systemd
sudo systemctl restart jupyterhub
```
-FYI `/opt/iiab/jupyterhub` is a Python 3 virtual environment, that can be activated with the usual formula:
+FYI `/opt/iiab/jupyterhub` is a Python 3 virtual environment that can be activated (and deactivated) with the usual:
```
source /opt/iiab/jupyterhub/bin/activate
+(jupyterhub) root@box:~# deactivate
```
Passwords are hashed using 4096 rounds of the latest Blowfish (bcrypt's $2b$ algorithm) and stored in:
@@ -42,19 +46,19 @@ Passwords are hashed using 4096 rounds of the latest Blowfish (bcrypt's $2b$ alg
Users can change their password by logging in, and then visiting URL: http://box.lan/jupyterhub/auth/change-password
-NOTE: This is the only way to change the password for user 'Admin', because Control Panel > Admin (below) does not permit deletion of this account.
+NOTE: This is the only way to change the password for user `Admin`, because **File > Hub Control Panel > Admin** (below) does not permit deletion of this account.
-### Control Panel > Admin page, to manage other accounts
+### File > Hub Control Panel > Admin, to manage accounts
The `Admin` user (and any users given `Admin` privilege) can reset user passwords by deleting the user from JupyterHub's **Admin** page (below). This logs the user out, but does not remove any of their data or home directories. The user can then set a new password in the usual way -- simply by logging in. Example:
-1. As a user with `Admin` privilege, click **Control Panel** in the top right of your JupyterHub:
+1. As a user with `Admin` privilege, click **File > Hub Control Panel** in your JupyterHub:
- 
+ 
-2. In the Control Panel, open the **Admin** link in the top left:
+2. At the top of the Control Panel, click **Admin**:
- 
+ 
This opens up the JupyterHub Admin page, where you can add / delete users, start / stop people's servers and see who is online.
@@ -70,8 +74,22 @@ The `Admin` user (and any users given `Admin` privilege) can reset user password
_WARNING: If on login users see "500 : Internal Server Error", you may need to remove ALL files of the form_ `/run/jupyter-johndoe-singleuser`
+### Logging
+
+To see JupyterHub's (typically very long!) log, run:
+
+```
+journalctl -u jupyterhub
+```
+
+Sometimes other logs might also be available, e.g.:
+
+```
+journalctl -u jupyter-admin-singleuser
+```
+
### PAWS/Jupyter Notebooks for Python Beginners
While PAWS is a little bit off topic, if you have an interest in Wikipedia, please do see this 23m 42s video ["Intro to PAWS/Jupyter notebooks for Python beginners"](https://www.youtube.com/watch?v=AUZkioRI-aA&list=PLeoTcBlDanyNQXBqI1rVXUqUTSSiuSIXN&index=8) by Chico Venancio, from 2021-06-01.
-He explains PAWS as a "powerful Python execution environment http://paws.wmcloud.org [allowing] ordinary folks to write interactive scripts to work with Wikimedia content."
+He explains PAWS as a "powerful Python execution environment https://paws.wmcloud.org = https://wikitech.wikimedia.org/wiki/PAWS [allowing] ordinary folks to write interactive scripts to work with Wikimedia content."
diff --git a/roles/jupyterhub/tasks/install.yml b/roles/jupyterhub/tasks/install.yml
index 38f98b370..57a503014 100644
--- a/roles/jupyterhub/tasks/install.yml
+++ b/roles/jupyterhub/tasks/install.yml
@@ -13,10 +13,20 @@
when: nodejs_installed is undefined
-- name: "Install package: python3-venv"
- package:
- name: python3-venv
- state: present
+- name: Record (initial) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df1
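+# (df1 holds bytes used on / before the install; df2 is captured at the end of this
+# file, and the difference -- roughly the ~316 MB noted in the pip tasks below,
+# expressed in bytes -- is recorded as jupyterhub_disk_usage in /etc/iiab/iiab.ini.)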
+
+# 2025-02-16
+#- name: "Install package: python3-psutil"
+# package:
+# name: python3-psutil
+# state: present
+
+- name: Remove previous virtual environment {{ jupyterhub_venv }}
+ file:
+ path: "{{ jupyterhub_venv }}"
+ state: absent
- name: Make 3 directories to hold JupyterHub config
file:
@@ -33,21 +43,35 @@
global: yes
state: latest
-- name: "pip install 7 packages into virtual environment: {{ jupyterhub_venv }} (~229 MB)"
+- name: "pip install 3 packages into virtual environment: {{ jupyterhub_venv }} (~316 MB total, after 2 Ansible calls)"
pip:
name:
- pip
- wheel
- - ipywidgets
- jupyterhub
+ virtualenv: "{{ jupyterhub_venv }}" # /opt/iiab/jupyterhub
+ #virtualenv_site_packages: no
+ virtualenv_command: python3 -m venv "{{ jupyterhub_venv }}" # 2025-02-16
+ #virtualenv_command: python3 -m venv --system-site-packages "{{ jupyterhub_venv }}" # 2021-07-29: This works on RasPiOS 10, Debian 11, Ubuntu 20.04 and Mint 20 -- however if you absolutely must use the older Debian 10 -- you can work around errors "can't find Rust compiler" and "This package requires Rust >=1.41.0" if you (1) revert this line to 'virtualenv_command: virtualenv' AND (2) uncomment the line just below
+ #virtualenv_python: python3 # 2021-07-29: Was needed when above line was 'virtualenv_command: virtualenv' (generally for Python 2)
+ extra_args: "--no-cache-dir --prefer-binary" # 2021-11-30, 2022-07-07: The "--pre" flag had earlier been needed, for beta-like pre-releases of JupyterHub 2.0.0
+
+# 2022-07-07: Attempting to "pip install" all 7 together (3 above + 4 below)
+# fails on OS's like 64-bit RasPiOS (but interestingly works on Ubuntu 22.04!)
+# https://github.com/iiab/iiab/issues/3283
+
+- name: Break up jupyterhub/jupyterlab pip installs into 2 parts (3 packages above + 4 packages here) due to mutual dependency deadlock on some OS's
+ pip:
+ name:
- jupyterlab
- jupyterhub_firstuseauthenticator
- jupyterhub-systemdspawner
- virtualenv: "{{ jupyterhub_venv }}" # /opt/iiab/jupyterhub
- virtualenv_site_packages: no
- virtualenv_command: python3 -m venv "{{ jupyterhub_venv }}" # 2021-07-29: This works on RaspiOS 10, Debian 11, Ubuntu 20.04 and Mint 20 -- however if you absolutely must use the older Debian 10 -- you can work around errors "can't find Rust compiler" and "This package requires Rust >=1.41.0" if you (1) revert this line to 'virtualenv_command: virtualenv' AND (2) uncomment the line just below
- #virtualenv_python: python3 # 2021-07-29: Was needed when above line was 'virtualenv_command: virtualenv' (generally for Python 2)
- extra_args: "--no-cache-dir --pre" # 2021-11-30: The "--pre" flag should likely be removed after JupyterHub 2.0.0 is released.
+ - ipywidgets
+ virtualenv: "{{ jupyterhub_venv }}"
+ #virtualenv_site_packages: no
+ virtualenv_command: python3 -m venv "{{ jupyterhub_venv }}" # 2025-02-16
+ #virtualenv_command: python3 -m venv --system-site-packages "{{ jupyterhub_venv }}"
+ extra_args: "--no-cache-dir --prefer-binary" # 2023-10-01: Lifesaver when recent wheels (e.g. piwheels.org) are inevitably not yet built! SEE #3560
- name: "Install from template: {{ jupyterhub_venv }}/etc/jupyterhub/jupyterhub_config.py"
template:
@@ -59,20 +83,21 @@
src: jupyterhub.service.j2
dest: /etc/systemd/system/jupyterhub.service
-- name: Install {{ jupyterhub_venv }}/bin/getsite.py from template, to fetch site_packages path, e.g. {{ jupyterhub_venv }}/lib/python{{ python_ver }}/site-packages
- template:
- src: getsite.py.j2
- dest: "{{ jupyterhub_venv }}/bin/getsite.py"
- mode: 0755
-
-- name: Install patch_FUA.sh from template -- to (1) fix async password-changing page, and (2) force usernames to lowercase -- patching $SITE_PACKAGES/firstuseauthenticator/firstuseauthenticator.py
- template:
- src: patch_FUA.sh.j2
- dest: "{{ jupyterhub_venv }}/bin/patch_FUA.sh"
- mode: 0755
-
-- name: "Run the above two, via: {{ jupyterhub_venv }}/bin/patch_FUA.sh"
- command: "{{ jupyterhub_venv }}/bin/patch_FUA.sh"
+# 2022-07-07: No longer needed, thx to upstream fixes
+# - name: Install {{ jupyterhub_venv }}/bin/getsite.py from template, to fetch site_packages path, e.g. {{ jupyterhub_venv }}/lib/python{{ python_version }}/site-packages
+# template:
+# src: getsite.py.j2
+# dest: "{{ jupyterhub_venv }}/bin/getsite.py"
+# mode: 0755
+#
+# - name: Install patch_FUA.sh from template -- to (1) fix async password-changing page, and (2) force usernames to lowercase -- patching $SITE_PACKAGES/firstuseauthenticator/firstuseauthenticator.py
+# template:
+# src: patch_FUA.sh.j2
+# dest: "{{ jupyterhub_venv }}/bin/patch_FUA.sh"
+# mode: 0755
+#
+# - name: "Run the above two, via: {{ jupyterhub_venv }}/bin/patch_FUA.sh"
+# command: "{{ jupyterhub_venv }}/bin/patch_FUA.sh"
- name: Install patch_http-warning.sh from template, to turn off the warning about http insecurity, in {{ jupyterhub_venv }}/share/jupyterhub/templates/login.html
template:
@@ -86,6 +111,17 @@
# RECORD JupyterHub AS INSTALLED
+- name: Record (final) disk space used
+ shell: df -B1 --output=used / | tail -1
+ register: df2
+
+- name: Add 'jupyterhub_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
+ ini_file:
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
+ section: jupyterhub
+ option: jupyterhub_disk_usage
+ value: "{{ df2.stdout|int - df1.stdout|int }}"
+
- name: "Set 'jupyterhub_installed: True'"
set_fact:
jupyterhub_installed: True
diff --git a/roles/jupyterhub/tasks/main.yml b/roles/jupyterhub/tasks/main.yml
index 01acf8154..9f2d31d13 100644
--- a/roles/jupyterhub/tasks/main.yml
+++ b/roles/jupyterhub/tasks/main.yml
@@ -19,26 +19,33 @@
quiet: yes
-- name: Install Jupyter if jupyterhub_installed not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
- include_tasks: install.yml
- when: jupyterhub_installed is undefined
+- block:
+ - name: Install Jupyter if jupyterhub_installed not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
+ include_tasks: install.yml
+ when: jupyterhub_installed is undefined
-- include_tasks: enable-or-disable.yml
+ - include_tasks: enable-or-disable.yml
+ - name: Add 'jupyterhub' variable values to {{ iiab_ini_file }}
+ ini_file:
+ path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
+ section: jupyterhub
+ option: "{{ item.option }}"
+ value: "{{ item.value | string }}"
+ with_items:
+ - option: name
+ value: JupyterHub
+ - option: description
+ value: '"High Schools may want to consider JupyterHub to integrate coding with dynamic interactive graphing -- A New Way to Think About Programming -- allowing students to integrate science experiment results and program output within their notebook/document/blog."'
+ - option: jupyterhub_install
+ value: "{{ jupyterhub_install }}"
+ - option: jupyterhub_enabled
+ value: "{{ jupyterhub_enabled }}"
-- name: Add 'jupyterhub' variable values to {{ iiab_ini_file }}
- ini_file:
- path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
- section: jupyterhub
- option: "{{ item.option }}"
- value: "{{ item.value | string }}"
- with_items:
- - option: name
- value: JupyterHub
- - option: description
- value: '"High Schools may want to consider JupyterHub to integrate coding with dynamic interactive graphing -- A New Way to Think About Programming -- allowing students to integrate science experiment results and program output within their notebook/document/blog."'
- - option: jupyterhub_install
- value: "{{ jupyterhub_install }}"
- - option: jupyterhub_enabled
- value: "{{ jupyterhub_enabled }}"
+ rescue:
+
+ - name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
+ fail:
+ msg: ""
+ when: not skip_role_on_error
diff --git a/roles/jupyterhub/templates/getsite.py.j2 b/roles/jupyterhub/templates/getsite.py.j2.unused
similarity index 100%
rename from roles/jupyterhub/templates/getsite.py.j2
rename to roles/jupyterhub/templates/getsite.py.j2.unused
diff --git a/roles/jupyterhub/templates/jupyterhub_config.py.j2 b/roles/jupyterhub/templates/jupyterhub_config.py.j2
index a3c5b077f..5abc7deb5 100644
--- a/roles/jupyterhub/templates/jupyterhub_config.py.j2
+++ b/roles/jupyterhub/templates/jupyterhub_config.py.j2
@@ -1,5 +1,18 @@
+# 2023-02-10 /opt/iiab/jupyterhub/etc/jupyterhub/jupyterhub_config.py update:
+# https://jupyterhub.readthedocs.io/en/stable/getting-started/config-basics.html
+#
+# 1) To generate this 1500+ line stub, I first ran JupyterHub 3.1.1's:
+# /opt/iiab/jupyterhub/bin/jupyterhub --generate-config
+# 2) Then I manually inserted 8 of IIAB's 10 legacy custom lines below, from:
+# grep ^c /opt/iiab/iiab/roles/jupyterhub/templates/jupyterhub_config.py.j2
+# 3) Finally I added the following line on @jvonau's suggestion: (#3475)
+# c.ConfigurableHTTPProxy.pid_file = "/run/jupyterhub-proxy.pid"
+
+
# Configuration file for jupyterhub.
+c = get_config() #noqa
+
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
@@ -18,6 +31,53 @@
# Default: 30
# c.Application.log_level = 30
+## Configure additional log handlers.
+#
+# The default stderr logs handler is configured by the log_level, log_datefmt
+# and log_format settings.
+#
+# This configuration can be used to configure additional handlers (e.g. to
+# output the log to a file) or for finer control over the default handlers.
+#
+# If provided this should be a logging configuration dictionary, for more
+# information see:
+# https://docs.python.org/3/library/logging.config.html#logging-config-
+# dictschema
+#
+# This dictionary is merged with the base logging configuration which defines
+# the following:
+#
+# * A logging formatter intended for interactive use called
+# ``console``.
+# * A logging handler that writes to stderr called
+# ``console`` which uses the formatter ``console``.
+# * A logger with the name of this application set to ``DEBUG``
+# level.
+#
+# This example adds a new handler that writes to a file:
+#
+# .. code-block:: python
+#
+# c.Application.logging_config = {
+# 'handlers': {
+# 'file': {
+# 'class': 'logging.FileHandler',
+# 'level': 'DEBUG',
+# 'filename': '',
+# }
+# },
+# 'loggers': {
+# '': {
+# 'level': 'DEBUG',
+# # NOTE: if you don't list the default "console"
+# # handler here then it will be disabled
+# 'handlers': ['console', 'file'],
+# },
+# }
+# }
+# Default: {}
+# c.Application.logging_config = {}
+
## Instead of starting the Application, dump configuration to stdout
# Default: False
# c.Application.show_config = False
@@ -60,11 +120,13 @@
# Default: 30
# c.JupyterHub.activity_resolution = 30
-## Grant admin users permission to access single-user servers.
+## DEPRECATED since version 2.0.0.
#
-# Users should be properly informed if this is enabled.
+# The default admin role has full permissions, use custom RBAC scopes instead to
+# create restricted administrator roles.
+# https://jupyterhub.readthedocs.io/en/stable/rbac/index.html
# Default: False
-c.JupyterHub.admin_access = True
+# c.JupyterHub.admin_access = False
## DEPRECATED since version 0.7.2, use Authenticator.admin_users instead.
# Default: set()
@@ -78,14 +140,23 @@ c.JupyterHub.admin_access = True
# Default: False
# c.JupyterHub.answer_yes = False
+## The default amount of records returned by a paginated endpoint
+# Default: 50
+# c.JupyterHub.api_page_default_limit = 50
+
+## The maximum amount of records that can be returned at once
+# Default: 200
+# c.JupyterHub.api_page_max_limit = 200
+
## PENDING DEPRECATION: consider using services
#
-# Dict of token:username to be loaded into the database.
+# Dict of token:username to be loaded into the database.
#
-# Allows ahead-of-time generation of API tokens for use by externally managed
-# services, which authenticate as JupyterHub users.
+# Allows ahead-of-time generation of API tokens for use by externally managed services,
+# which authenticate as JupyterHub users.
#
-# Consider using services for general services that talk to the JupyterHub API.
+# Consider using services for general services that talk to the
+# JupyterHub API.
# Default: {}
# c.JupyterHub.api_tokens = {}
@@ -112,49 +183,53 @@ c.JupyterHub.admin_access = True
# Currently installed:
# - default: jupyterhub.auth.PAMAuthenticator
# - dummy: jupyterhub.auth.DummyAuthenticator
+# - null: jupyterhub.auth.NullAuthenticator
# - pam: jupyterhub.auth.PAMAuthenticator
# Default: 'jupyterhub.auth.PAMAuthenticator'
+# c.JupyterHub.authenticator_class = 'jupyterhub.auth.PAMAuthenticator'
c.JupyterHub.authenticator_class = 'firstuseauthenticator.FirstUseAuthenticator'
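+# (IIAB note: with FirstUseAuthenticator, whatever password a user types at their
+# very first login becomes their password; it can be changed later at
+# http://box.lan/jupyterhub/auth/change-password -- see IIAB's JupyterHub README.)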
## The base URL of the entire application.
#
-# Add this to the beginning of all JupyterHub URLs. Use base_url to run
-# JupyterHub within an existing website.
+# Add this to the beginning of all JupyterHub URLs.
+# Use base_url to run JupyterHub within an existing website.
#
-# .. deprecated: 0.9
-# Use JupyterHub.bind_url
+# .. deprecated: 0.9
+# Use JupyterHub.bind_url
# Default: '/'
+# c.JupyterHub.base_url = '/'
c.JupyterHub.base_url = '/jupyterhub'
## The public facing URL of the whole JupyterHub application.
#
-# This is the address on which the proxy will bind. Sets protocol, ip, base_url
+# This is the address on which the proxy will bind.
+# Sets protocol, ip, base_url
# Default: 'http://:8000'
# c.JupyterHub.bind_url = 'http://:8000'
## Whether to shutdown the proxy when the Hub shuts down.
#
-# Disable if you want to be able to teardown the Hub while leaving the proxy
-# running.
+# Disable if you want to be able to teardown the Hub while leaving the
+# proxy running.
#
-# Only valid if the proxy was starting by the Hub process.
+# Only valid if the proxy was starting by the Hub process.
#
-# If both this and cleanup_servers are False, sending SIGINT to the Hub will
-# only shutdown the Hub, leaving everything else running.
+# If both this and cleanup_servers are False, sending SIGINT to the Hub will
+# only shutdown the Hub, leaving everything else running.
#
-# The Hub should be able to resume from database state.
+# The Hub should be able to resume from database state.
# Default: True
# c.JupyterHub.cleanup_proxy = True
## Whether to shutdown single-user servers when the Hub shuts down.
#
-# Disable if you want to be able to teardown the Hub while leaving the single-
-# user servers running.
+# Disable if you want to be able to teardown the Hub while leaving the
+# single-user servers running.
#
-# If both this and cleanup_proxy are False, sending SIGINT to the Hub will only
-# shutdown the Hub, leaving everything else running.
+# If both this and cleanup_proxy are False, sending SIGINT to the Hub will
+# only shutdown the Hub, leaving everything else running.
#
-# The Hub should be able to resume from database state.
+# The Hub should be able to resume from database state.
# Default: True
# c.JupyterHub.cleanup_servers = True
@@ -184,33 +259,54 @@ c.JupyterHub.base_url = '/jupyterhub'
# Default: False
# c.JupyterHub.confirm_no_ssl = False
-## Number of days for a login cookie to be valid. Default is two weeks.
+## Number of days for a login cookie to be valid.
+# Default is two weeks.
# Default: 14
# c.JupyterHub.cookie_max_age_days = 14
## The cookie secret to use to encrypt cookies.
#
-# Loaded from the JPY_COOKIE_SECRET env variable by default.
+# Loaded from the JPY_COOKIE_SECRET env variable by default.
#
-# Should be exactly 256 bits (32 bytes).
-# Default: b''
+# Should be exactly 256 bits (32 bytes).
+# Default: traitlets.Undefined
+# c.JupyterHub.cookie_secret = traitlets.Undefined
c.JupyterHub.cookie_secret = b'helloiiabitsrainingb123456789012'
## File in which to store the cookie secret.
# Default: 'jupyterhub_cookie_secret'
# c.JupyterHub.cookie_secret_file = 'jupyterhub_cookie_secret'
-## The location of jupyterhub data files (e.g. /usr/local/share/jupyterhub)
-# Default: '/opt/iiab/jupyter/share/jupyterhub'
-# c.JupyterHub.data_files_path = '/opt/iiab/jupyter/share/jupyterhub'
+## Custom scopes to define.
+#
+# For use when defining custom roles,
+# to grant users granular permissions
+#
+# All custom scopes must have a description,
+# and must start with the prefix `custom:`.
+#
+# For example::
+#
+# custom_scopes = {
+# "custom:jupyter_server:read": {
+# "description": "read-only access to a single-user server",
+# },
+# }
+# Default: {}
+# c.JupyterHub.custom_scopes = {}
-## Include any kwargs to pass to the database connection. See
-# sqlalchemy.create_engine for details.
+## The location of jupyterhub data files (e.g. /usr/local/share/jupyterhub)
+# Default: '/opt/iiab/jupyterhub/share/jupyterhub'
+# c.JupyterHub.data_files_path = '/opt/iiab/jupyterhub/share/jupyterhub'
+
+## Include any kwargs to pass to the database connection.
+# See sqlalchemy.create_engine for details.
# Default: {}
# c.JupyterHub.db_kwargs = {}
## url for the database. e.g. `sqlite:///jupyterhub.sqlite`
# Default: 'sqlite:///jupyterhub.sqlite'
+# c.JupyterHub.db_url = 'sqlite:///jupyterhub.sqlite'
c.JupyterHub.db_url = 'sqlite:///{{ jupyterhub_venv }}/jupyterhub.sqlite'
## log all database transactions. This has A LOT of output
@@ -221,8 +317,13 @@ c.JupyterHub.db_url = 'sqlite:///{{ jupyterhub_venv }}/jupyterhub.sqlite'
# Default: False
# c.JupyterHub.debug_proxy = False
-## If named servers are enabled, default name of server to spawn or open, e.g. by
-# user-redirect.
+## If named servers are enabled, default name of server to spawn or open when no
+# server is specified, e.g. by user-redirect.
+#
+# Note: This has no effect if named servers are not enabled, and does _not_
+# change the existence or behavior of the default server named `''` (the empty
+# string). This only affects which named server is launched when no server is
+# specified, e.g. by links to `/hub/user-redirect/lab/tree/mynotebook.ipynb`.
# Default: ''
# c.JupyterHub.default_server_name = ''
@@ -245,30 +346,28 @@ c.JupyterHub.db_url = 'sqlite:///{{ jupyterhub_venv }}/jupyterhub.sqlite'
# Default: traitlets.Undefined
# c.JupyterHub.default_url = traitlets.Undefined
-## Dict authority:dict(files). Specify the key, cert, and/or ca file for an
-# authority. This is useful for externally managed proxies that wish to use
-# internal_ssl.
+## Dict authority:dict(files). Specify the key, cert, and/or
+# ca file for an authority. This is useful for externally managed
+# proxies that wish to use internal_ssl.
#
-# The files dict has this format (you must specify at least a cert)::
+# The files dict has this format (you must specify at least a cert)::
#
-# {
-# 'key': '/path/to/key.key',
-# 'cert': '/path/to/cert.crt',
-# 'ca': '/path/to/ca.crt'
-# }
+# {
+# 'key': '/path/to/key.key',
+# 'cert': '/path/to/cert.crt',
+# 'ca': '/path/to/ca.crt'
+# }
#
-# The authorities you can override: 'hub-ca', 'notebooks-ca', 'proxy-api-ca',
-# 'proxy-client-ca', and 'services-ca'.
+# The authorities you can override: 'hub-ca', 'notebooks-ca',
+# 'proxy-api-ca', 'proxy-client-ca', and 'services-ca'.
#
-# Use with internal_ssl
+# Use with internal_ssl
# Default: {}
# c.JupyterHub.external_ssl_authorities = {}
-## Register extra tornado Handlers for jupyterhub.
+## DEPRECATED.
#
-# Should be of the form ``("", Handler)``
-#
-# The Hub prefix will be added, so `/my-page` will be served at `/hub/my-page`.
+# If you need to register additional HTTP endpoints please use services instead.
# Default: []
# c.JupyterHub.extra_handlers = []
@@ -282,6 +381,14 @@ c.JupyterHub.db_url = 'sqlite:///{{ jupyterhub_venv }}/jupyterhub.sqlite'
# Default: []
# c.JupyterHub.extra_log_handlers = []
+## Alternate header to use as the Host (e.g., X-Forwarded-Host)
+# when determining whether a request is cross-origin
+#
+# This may be useful when JupyterHub is running behind a proxy that rewrites
+# the Host header.
+# Default: ''
+# c.JupyterHub.forwarded_host_header = ''
+
## Generate certs used for internal ssl
# Default: False
# c.JupyterHub.generate_certs = False
@@ -303,19 +410,19 @@ c.JupyterHub.db_url = 'sqlite:///{{ jupyterhub_venv }}/jupyterhub.sqlite'
# Default: ''
# c.JupyterHub.hub_bind_url = ''
-## The ip or hostname for proxies and spawners to use for connecting to the Hub.
+## The ip or hostname for proxies and spawners to use
+# for connecting to the Hub.
#
-# Use when the bind address (`hub_ip`) is 0.0.0.0, :: or otherwise different
-# from the connect address.
+# Use when the bind address (`hub_ip`) is 0.0.0.0, :: or otherwise different
+# from the connect address.
#
-# Default: when `hub_ip` is 0.0.0.0 or ::, use `socket.gethostname()`, otherwise
-# use `hub_ip`.
+# Default: when `hub_ip` is 0.0.0.0 or ::, use `socket.gethostname()`,
+# otherwise use `hub_ip`.
#
-# Note: Some spawners or proxy implementations might not support hostnames.
-# Check your spawner or proxy documentation to see if they have extra
-# requirements.
+# Note: Some spawners or proxy implementations might not support hostnames. Check your
+# spawner or proxy documentation to see if they have extra requirements.
#
-# .. versionadded:: 0.8
+# .. versionadded:: 0.8
# Default: ''
# c.JupyterHub.hub_connect_ip = ''
@@ -346,39 +453,59 @@ c.JupyterHub.db_url = 'sqlite:///{{ jupyterhub_venv }}/jupyterhub.sqlite'
## The ip address for the Hub process to *bind* to.
#
-# By default, the hub listens on localhost only. This address must be accessible
-# from the proxy and user servers. You may need to set this to a public ip or ''
-# for all interfaces if the proxy or user servers are in containers or on a
-# different host.
+# By default, the hub listens on localhost only. This address must be accessible from
+# the proxy and user servers. You may need to set this to a public ip or '' for all
+# interfaces if the proxy or user servers are in containers or on a different host.
#
-# See `hub_connect_ip` for cases where the bind and connect address should
-# differ, or `hub_bind_url` for setting the full bind URL.
+# See `hub_connect_ip` for cases where the bind and connect address should differ,
+# or `hub_bind_url` for setting the full bind URL.
# Default: '127.0.0.1'
# c.JupyterHub.hub_ip = '127.0.0.1'
## The internal port for the Hub process.
#
-# This is the internal port of the hub itself. It should never be accessed
-# directly. See JupyterHub.port for the public port to use when accessing
-# jupyterhub. It is rare that this port should be set except in cases of port
-# conflict.
+# This is the internal port of the hub itself. It should never be accessed directly.
+# See JupyterHub.port for the public port to use when accessing jupyterhub.
+# It is rare that this port should be set except in cases of port conflict.
#
-# See also `hub_ip` for the ip and `hub_bind_url` for setting the full bind URL.
+# See also `hub_ip` for the ip and `hub_bind_url` for setting the full
+# bind URL.
# Default: 8081
# c.JupyterHub.hub_port = 8081
+## The routing prefix for the Hub itself.
+#
+# Override to send only a subset of traffic to the Hub. Default is to use the
+# Hub as the default route for all requests.
+#
+# This is necessary for normal jupyterhub operation, as the Hub must receive
+# requests for e.g. `/user/:name` when the user's server is not running.
+#
+# However, some deployments using only the JupyterHub API may want to handle
+# these events themselves, in which case they can register their own default
+# target with the proxy and set e.g. `hub_routespec = /hub/` to serve only the
+# hub's own pages, or even `/hub/api/` for api-only operation.
+#
+# Note: hub_routespec must include the base_url, if any.
+#
+# .. versionadded:: 1.4
+# Default: '/'
+# c.JupyterHub.hub_routespec = '/'
+
## Trigger implicit spawns after this many seconds.
#
-# When a user visits a URL for a server that's not running, they are shown a
-# page indicating that the requested server is not running with a button to
-# spawn the server.
+# When a user visits a URL for a server that's not running,
+# they are shown a page indicating that the requested server
+# is not running with a button to spawn the server.
#
-# Setting this to a positive value will redirect the user after this many
-# seconds, effectively clicking this button automatically for the users,
-# automatically beginning the spawn process.
+# Setting this to a positive value will redirect the user
+# after this many seconds, effectively clicking this button
+# automatically for the users,
+# automatically beginning the spawn process.
#
-# Warning: this can result in errors and surprising behavior when sharing access
-# URLs to actual servers, since the wrong server is likely to be started.
+# Warning: this can result in errors and surprising behavior
+# when sharing access URLs to actual servers,
+# since the wrong server is likely to be started.
# Default: 0
# c.JupyterHub.implicit_spawn_seconds = 0
@@ -398,29 +525,30 @@ c.JupyterHub.db_url = 'sqlite:///{{ jupyterhub_venv }}/jupyterhub.sqlite'
# Default: 10
# c.JupyterHub.init_spawners_timeout = 10
-## The location to store certificates automatically created by JupyterHub.
+## The location to store certificates automatically created by
+# JupyterHub.
#
-# Use with internal_ssl
+# Use with internal_ssl
# Default: 'internal-ssl'
# c.JupyterHub.internal_certs_location = 'internal-ssl'
## Enable SSL for all internal communication
#
-# This enables end-to-end encryption between all JupyterHub components.
-# JupyterHub will automatically create the necessary certificate authority and
-# sign notebook certificates as they're created.
+# This enables end-to-end encryption between all JupyterHub components.
+# JupyterHub will automatically create the necessary certificate
+# authority and sign notebook certificates as they're created.
# Default: False
# c.JupyterHub.internal_ssl = False
-## The public facing ip of the whole JupyterHub application (specifically
-# referred to as the proxy).
+## The public facing ip of the whole JupyterHub application
+# (specifically referred to as the proxy).
#
-# This is the address on which the proxy will listen. The default is to listen
-# on all interfaces. This is the only address through which JupyterHub should be
-# accessed by users.
+# This is the address on which the proxy will listen. The default is to
+# listen on all interfaces. This is the only address through which JupyterHub
+# should be accessed by users.
#
-# .. deprecated: 0.9
-# Use JupyterHub.bind_url
+# .. deprecated: 0.9
+# Use JupyterHub.bind_url
# Default: ''
# c.JupyterHub.ip = ''
@@ -434,14 +562,36 @@ c.JupyterHub.db_url = 'sqlite:///{{ jupyterhub_venv }}/jupyterhub.sqlite'
## Dict of 'group': ['usernames'] to load at startup.
#
-# This strictly *adds* groups and users to groups.
+# This strictly *adds* groups and users to groups.
#
-# Loading one set of groups, then starting JupyterHub again with a different set
-# will not remove users or groups from previous launches. That must be done
-# through the API.
+# Loading one set of groups, then starting JupyterHub again with a different
+# set will not remove users or groups from previous launches.
+# That must be done through the API.
# Default: {}
# c.JupyterHub.load_groups = {}
+## List of predefined role dictionaries to load at startup.
+#
+# For instance::
+#
+# load_roles = [
+# {
+# 'name': 'teacher',
+# 'description': 'Access to users' information and group membership',
+# 'scopes': ['users', 'groups'],
+# 'users': ['cyclops', 'gandalf'],
+# 'services': [],
+# 'groups': []
+# }
+# ]
+#
+# All keys apart from 'name' are optional.
+# See all the available scopes in the JupyterHub REST API documentation.
+#
+# Default roles are defined in roles.py.
+# Default: []
+# c.JupyterHub.load_roles = []
+
## The date format used by logging formatters for %(asctime)s
# See also: Application.log_datefmt
# c.JupyterHub.log_datefmt = '%Y-%m-%d %H:%M:%S'
@@ -454,6 +604,10 @@ c.JupyterHub.db_url = 'sqlite:///{{ jupyterhub_venv }}/jupyterhub.sqlite'
# See also: Application.log_level
# c.JupyterHub.log_level = 30
+##
+# See also: Application.logging_config
+# c.JupyterHub.logging_config = {}
+
## Specify path to a logo image to override the Jupyter logo in the banner.
# Default: ''
# c.JupyterHub.logo_file = ''
@@ -464,20 +618,62 @@ c.JupyterHub.db_url = 'sqlite:///{{ jupyterhub_venv }}/jupyterhub.sqlite'
# Setting this can limit the total resources a user can consume.
#
# If set to 0, no limit is enforced.
+#
+# Can be an integer or a callable/awaitable based on the handler object:
+#
+# ::
+#
+# def named_server_limit_per_user_fn(handler):
+# user = handler.current_user
+# if user and user.admin:
+# return 0
+# return 5
+#
+# c.JupyterHub.named_server_limit_per_user = named_server_limit_per_user_fn
# Default: 0
# c.JupyterHub.named_server_limit_per_user = 0
-## File to write PID Useful for daemonizing JupyterHub.
+## Expiry (in seconds) of OAuth access tokens.
+#
+# The default is to expire when the cookie storing them expires,
+# according to `cookie_max_age_days` config.
+#
+# These are the tokens stored in cookies when you visit
+# a single-user server or service.
+# When they expire, you must re-authenticate with the Hub,
+# even if your Hub authentication is still valid.
+# If your Hub authentication is valid,
+# logging in may be a transparent redirect as you refresh the page.
+#
+# This does not affect JupyterHub API tokens in general,
+# which do not expire by default.
+# Only tokens issued during the oauth flow
+# accessing services and single-user servers are affected.
+#
+# .. versionadded:: 1.4
+# OAuth token expires_in was not previously configurable.
+# .. versionchanged:: 1.4
+# Default now uses cookie_max_age_days so that oauth tokens
+# which are generally stored in cookies,
+# expire when the cookies storing them expire.
+# Previously, it was one hour.
+# Default: 0
+# c.JupyterHub.oauth_token_expires_in = 0
+
+## File to write PID
+# Useful for daemonizing JupyterHub.
# Default: ''
# c.JupyterHub.pid_file = ''
+c.ConfigurableHTTPProxy.pid_file = "/run/jupyterhub-proxy.pid"
## The public facing port of the proxy.
#
-# This is the port on which the proxy will listen. This is the only port through
-# which JupyterHub should be accessed by users.
+# This is the port on which the proxy will listen.
+# This is the only port through which JupyterHub
+# should be accessed by users.
#
-# .. deprecated: 0.9
-# Use JupyterHub.bind_url
+# .. deprecated: 0.9
+# Use JupyterHub.bind_url
# Default: 8000
# c.JupyterHub.port = 8000
@@ -493,9 +689,9 @@ c.JupyterHub.db_url = 'sqlite:///{{ jupyterhub_venv }}/jupyterhub.sqlite'
# Default: ''
# c.JupyterHub.proxy_auth_token = ''
-## Interval (in seconds) at which to check if the proxy is running.
-# Default: 30
-# c.JupyterHub.proxy_check_interval = 30
+## DEPRECATED since version 0.8: Use ConfigurableHTTPProxy.check_running_interval
+# Default: 5
+# c.JupyterHub.proxy_check_interval = 5
## The class to use for configuring the JupyterHub proxy.
#
@@ -517,9 +713,9 @@ c.JupyterHub.db_url = 'sqlite:///{{ jupyterhub_venv }}/jupyterhub.sqlite'
## Recreate all certificates used within JupyterHub on restart.
#
-# Note: enabling this feature requires restarting all notebook servers.
+# Note: enabling this feature requires restarting all notebook servers.
#
-# Use with internal_ssl
+# Use with internal_ssl
# Default: False
# c.JupyterHub.recreate_internal_certs = False
@@ -538,29 +734,29 @@ c.JupyterHub.db_url = 'sqlite:///{{ jupyterhub_venv }}/jupyterhub.sqlite'
## Dict of token:servicename to be loaded into the database.
#
-# Allows ahead-of-time generation of API tokens for use by externally managed
-# services.
+# Allows ahead-of-time generation of API tokens for use by externally
+# managed services.
# Default: {}
# c.JupyterHub.service_tokens = {}
## List of service specification dictionaries.
#
-# A service
+# A service
#
-# For instance::
+# For instance::
#
-# services = [
-# {
-# 'name': 'cull_idle',
-# 'command': ['/path/to/cull_idle_servers.py'],
-# },
-# {
-# 'name': 'formgrader',
-# 'url': 'http://127.0.0.1:1234',
-# 'api_token': 'super-secret',
-# 'environment':
-# }
-# ]
+# services = [
+# {
+# 'name': 'cull_idle',
+# 'command': ['/path/to/cull_idle_servers.py'],
+# },
+# {
+# 'name': 'formgrader',
+# 'url': 'http://127.0.0.1:1234',
+# 'api_token': 'super-secret',
+# 'environment':
+# }
+# ]
# Default: []
# c.JupyterHub.services = []
@@ -585,21 +781,25 @@ c.JupyterHub.db_url = 'sqlite:///{{ jupyterhub_venv }}/jupyterhub.sqlite'
# e.g. `c.JupyterHub.spawner_class = 'localprocess'`
#
# Currently installed:
+# - systemd: systemdspawner.SystemdSpawner
+# - systemdspawner: systemdspawner.SystemdSpawner
# - default: jupyterhub.spawner.LocalProcessSpawner
# - localprocess: jupyterhub.spawner.LocalProcessSpawner
# - simple: jupyterhub.spawner.SimpleLocalProcessSpawner
# Default: 'jupyterhub.spawner.LocalProcessSpawner'
+# c.JupyterHub.spawner_class = 'jupyterhub.spawner.LocalProcessSpawner'
c.JupyterHub.spawner_class = 'systemdspawner.SystemdSpawner'
+c.SystemdSpawner.dynamic_users = True
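+# (With dynamic_users, systemd allocates a transient DynamicUser per login, and each
+# student's persistent files land under /var/lib/private/ on the server -- see
+# https://github.com/jupyterhub/systemdspawner#dynamic_users)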
## Path to SSL certificate file for the public facing interface of the proxy
#
-# When setting this, you should also set ssl_key
+# When setting this, you should also set ssl_key
# Default: ''
# c.JupyterHub.ssl_cert = ''
## Path to SSL key file for the public facing interface of the proxy
#
-# When setting this, you should also set ssl_cert
+# When setting this, you should also set ssl_cert
# Default: ''
# c.JupyterHub.ssl_key = ''
@@ -618,17 +818,18 @@ c.JupyterHub.spawner_class = 'systemdspawner.SystemdSpawner'
## Run single-user servers on subdomains of this host.
#
-# This should be the full `https://hub.domain.tld[:port]`.
+# This should be the full `https://hub.domain.tld[:port]`.
#
-# Provides additional cross-site protections for javascript served by single-
-# user servers.
+# Provides additional cross-site protections for javascript served by
+# single-user servers.
#
-# Requires `.hub.domain.tld` to resolve to the same host as
+# Requires `.hub.domain.tld` to resolve to the same host as
# `hub.domain.tld`.
#
-# In general, this is most easily achieved with wildcard DNS.
+# In general, this is most easily achieved with wildcard DNS.
#
-# When using SSL (i.e. always) this also requires a wildcard SSL certificate.
+# When using SSL (i.e. always) this also requires a wildcard SSL
+# certificate.
# Default: ''
# c.JupyterHub.subdomain_host = ''
@@ -644,54 +845,69 @@ c.JupyterHub.spawner_class = 'systemdspawner.SystemdSpawner'
# Default: {}
# c.JupyterHub.tornado_settings = {}
-## Trust user-provided tokens (via JupyterHub.service_tokens) to have good
-# entropy.
+## Trust user-provided tokens (via JupyterHub.service_tokens)
+# to have good entropy.
#
-# If you are not inserting additional tokens via configuration file, this flag
-# has no effect.
+# If you are not inserting additional tokens via configuration file,
+# this flag has no effect.
#
-# In JupyterHub 0.8, internally generated tokens do not pass through additional
-# hashing because the hashing is costly and does not increase the entropy of
-# already-good UUIDs.
+# In JupyterHub 0.8, internally generated tokens do not
+# pass through additional hashing because the hashing is costly
+# and does not increase the entropy of already-good UUIDs.
#
-# User-provided tokens, on the other hand, are not trusted to have good entropy
-# by default, and are passed through many rounds of hashing to stretch the
-# entropy of the key (i.e. user-provided tokens are treated as passwords instead
-# of random keys). These keys are more costly to check.
+# User-provided tokens, on the other hand, are not trusted to have good entropy by default,
+# and are passed through many rounds of hashing to stretch the entropy of the key
+# (i.e. user-provided tokens are treated as passwords instead of random keys).
+# These keys are more costly to check.
#
-# If your inserted tokens are generated by a good-quality mechanism, e.g.
-# `openssl rand -hex 32`, then you can set this flag to True to reduce the cost
-# of checking authentication tokens.
+# If your inserted tokens are generated by a good-quality mechanism,
+# e.g. `openssl rand -hex 32`, then you can set this flag to True
+# to reduce the cost of checking authentication tokens.
# Default: False
# c.JupyterHub.trust_user_provided_tokens = False
## Names to include in the subject alternative name.
#
-# These names will be used for server name verification. This is useful if
-# JupyterHub is being run behind a reverse proxy or services using ssl are on
-# different hosts.
+# These names will be used for server name verification. This is useful
+# if JupyterHub is being run behind a reverse proxy or services using ssl
+# are on different hosts.
#
-# Use with internal_ssl
+# Use with internal_ssl
# Default: []
# c.JupyterHub.trusted_alt_names = []
## Downstream proxy IP addresses to trust.
#
-# This sets the list of IP addresses that are trusted and skipped when
-# processing the `X-Forwarded-For` header. For example, if an external proxy is
-# used for TLS termination, its IP address should be added to this list to
-# ensure the correct client IP addresses are recorded in the logs instead of the
-# proxy server's IP address.
+# This sets the list of IP addresses that are trusted and skipped when processing
+# the `X-Forwarded-For` header. For example, if an external proxy is used for TLS
+# termination, its IP address should be added to this list to ensure the correct
+# client IP addresses are recorded in the logs instead of the proxy server's IP
+# address.
# Default: []
# c.JupyterHub.trusted_downstream_ips = []
## Upgrade the database automatically on start.
#
-# Only safe if database is regularly backed up. Only SQLite databases will be
-# backed up to a local file automatically.
+# Only safe if database is regularly backed up.
+# Only SQLite databases will be backed up to a local file automatically.
# Default: False
# c.JupyterHub.upgrade_db = False
+## Return 503 rather than 424 when request comes in for a non-running server.
+#
+# Prior to JupyterHub 2.0, we returned a 503 when any request came in for a user
+# server that was currently not running. By default, JupyterHub 2.0 will return
+# a 424 - this makes operational metric dashboards more useful.
+#
+# JupyterLab < 3.2 expected the 503 to know if the user server is no longer
+# running, and prompted the user to start their server. Set this config to true
+# to retain the old behavior, so JupyterLab < 3.2 can continue to show the
+# appropriate UI when the user server is stopped.
+#
+# This option will be removed in a future release.
+# Default: False
+# c.JupyterHub.use_legacy_stopped_server_status_code = False
+
## Callable to affect behavior of /user-redirect/
#
# Receives 4 parameters: 1. path - URL path that was provided after /user-
@@ -709,13 +925,17 @@ c.JupyterHub.spawner_class = 'systemdspawner.SystemdSpawner'
#------------------------------------------------------------------------------
## Base class for spawning single-user notebook servers.
#
-# Subclass this, and override the following methods:
+# Subclass this, and override the following methods:
#
-# - load_state - get_state - start - stop - poll
+# - load_state
+# - get_state
+# - start
+# - stop
+# - poll
#
-# As JupyterHub supports multiple users, an instance of the Spawner subclass is
-# created for each user. If there are 20 JupyterHub users, there will be 20
-# instances of the subclass.
+# As JupyterHub supports multiple users, an instance of the Spawner subclass
+# is created for each user. If there are 20 JupyterHub users, there will be 20
+# instances of the subclass.
## Extra arguments to be passed to the single-user server.
#
@@ -862,12 +1082,32 @@ c.JupyterHub.spawner_class = 'systemdspawner.SystemdSpawner'
# Default: 30
# c.Spawner.http_timeout = 30
+## The URL the single-user server should connect to the Hub.
+#
+# If the Hub URL set in your JupyterHub config is not reachable from spawned
+# notebooks, you can set a different URL by this config.
+#
+# Is None if you don't need to change the URL.
+# Default: None
+# c.Spawner.hub_connect_url = None
+
## The IP address (or hostname) the single-user server should listen on.
#
+# Usually either '127.0.0.1' (default) or '0.0.0.0'.
+#
# The JupyterHub proxy implementation should be able to send packets to this
# interface.
-# Default: ''
-# c.Spawner.ip = ''
+#
+# Subclasses which launch remotely or in containers should override the default
+# to '0.0.0.0'.
+#
+# .. versionchanged:: 2.0
+# Default changed to '127.0.0.1', from ''.
+# In most cases, this does not result in a change in behavior,
+# as '' was interpreted as 'unspecified',
+# which used the subprocesses' own default, itself usually '127.0.0.1'.
+# Default: '127.0.0.1'
+# c.Spawner.ip = '127.0.0.1'
## Minimum number of bytes a single-user notebook server is guaranteed to have
# available.
@@ -918,6 +1158,35 @@ c.JupyterHub.spawner_class = 'systemdspawner.SystemdSpawner'
# Default: ''
# c.Spawner.notebook_dir = ''
+## Allowed scopes for oauth tokens issued by this server's oauth client.
+#
+# This sets the maximum and default scopes
+# assigned to oauth tokens issued by a single-user server's
+# oauth client (i.e. tokens stored in browsers after authenticating with the server),
+# defining what actions the server can take on behalf of logged-in users.
+#
+# Default is an empty list, meaning minimal permissions to identify users,
+# no actions can be taken on their behalf.
+#
+# If callable, will be called with the Spawner as a single argument.
+# Callables may be async.
+# Default: traitlets.Undefined
+# c.Spawner.oauth_client_allowed_scopes = traitlets.Undefined
+
+## Allowed roles for oauth tokens.
+#
+# Deprecated in 3.0: use oauth_client_allowed_scopes
+#
+# This sets the maximum and default roles
+# assigned to oauth tokens issued by a single-user server's
+# oauth client (i.e. tokens stored in browsers after authenticating with the server),
+# defining what actions the server can take on behalf of logged-in users.
+#
+# Default is an empty list, meaning minimal permissions to identify users,
+# no actions can be taken on their behalf.
+# Default: traitlets.Undefined
+# c.Spawner.oauth_roles = traitlets.Undefined
+
## An HTML form for options a user can specify on launching their server.
#
# The surrounding `