mirror of https://github.com/iiab/iiab.git synced 2025-03-09 15:40:17 +00:00

Compare commits


No commits in common. "master" and "7.2-preview-3" have entirely different histories.

453 changed files with 4868 additions and 13428 deletions


@ -1,58 +0,0 @@
name: '"10 min" IIAB on Ubuntu 24.04 on x86-64'
# run-name: ${{ github.actor }} is testing out GitHub Actions 🚀
# https://michaelcurrin.github.io/dev-cheatsheets/cheatsheets/ci-cd/github-actions/triggers.html
on: [push, pull_request, workflow_dispatch]
# on:
# push:
#
# pull_request:
#
# # Allows you to run this workflow manually from the Actions tab
# workflow_dispatch:
#
# # Set your workflow to run every day of the week from Monday to Friday at 6:00 UTC
# schedule:
# - cron: "0 6 * * 1-5"
jobs:
test-install:
runs-on: ubuntu-24.04
steps:
- run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
- run: echo "🔎 The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}."
#- name: Dump GitHub context (typically almost 500 lines)
# env:
# GITHUB_CONTEXT: ${{ toJSON(github) }}
# run: echo "$GITHUB_CONTEXT"
- name: Check out repository code
uses: actions/checkout@v4
- run: echo "🍏 This job's status is ${{ job.status }}."
- name: GitHub Actions "runner" environment
run: |
uname -a # uname -srm
whoami # Typically 'runner' instead of 'root'
pwd # /home/runner/work/iiab/iiab == $GITHUB_WORKSPACE == ${{ github.workspace }}
# ls
# ls $GITHUB_WORKSPACE
# ls ${{ github.workspace }}
# ls -la /opt # az, containerd, google, hostedtoolcache, microsoft, mssql-tools, pipx, pipx_bin, post-generation, runner, vsts
# apt update
# apt dist-upgrade -y
# apt autoremove -y
- name: Set up /opt/iiab/iiab
run: |
mkdir /opt/iiab
mv $GITHUB_WORKSPACE /opt/iiab
mkdir $GITHUB_WORKSPACE # OR SUBSEQUENT STEPS WILL FAIL ('working-directory: /opt/iiab/iiab' hacks NOT worth it!)
- name: Set up /etc/iiab/local_vars.yml
run: |
sudo mkdir /etc/iiab
# touch /etc/iiab/local_vars.yml
sudo cp /opt/iiab/iiab/vars/local_vars_none.yml /etc/iiab/local_vars.yml
- run: sudo /opt/iiab/iiab/scripts/ansible
- run: sudo ./iiab-install
working-directory: /opt/iiab/iiab
- run: iiab-summary
- run: cat /etc/iiab/iiab_state.yml


@ -1,65 +0,0 @@
name: '"30 min" IIAB on Debian 12 on RPi 3'
# run-name: ${{ github.actor }} is testing out GitHub Actions 🚀
# https://michaelcurrin.github.io/dev-cheatsheets/cheatsheets/ci-cd/github-actions/triggers.html
on: [push, pull_request, workflow_dispatch]
# on:
# push:
#
# pull_request:
#
# # Allows you to run this workflow manually from the Actions tab
# workflow_dispatch:
#
# # Set your workflow to run every day of the week from Monday to Friday at 6:00 UTC
# schedule:
# - cron: "0 6 * * 1-5"
jobs:
test-install:
runs-on: ubuntu-22.04
strategy:
matrix:
arch: [debian12]
include:
- arch: debian12
cpu: cortex-a7
cpu_info: cpuinfo/raspberrypi_3b
base_image: https://raspi.debian.net/daily/raspi_3_bookworm.img.xz
# source https://raspi.debian.net/daily-images/
steps:
#- run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
#- run: echo "🔎 The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}."
#- name: Dump GitHub context (typically almost 500 lines)
# env:
# GITHUB_CONTEXT: ${{ toJSON(github) }}
# run: echo "$GITHUB_CONTEXT"
- name: Dump matrix context
env:
MATRIX_CONTEXT: ${{ toJSON(matrix) }}
run: echo "$MATRIX_CONTEXT"
- uses: actions/checkout@v3.1.0
- uses: pguyot/arm-runner-action@v2
with:
image_additional_mb: 1024
base_image: ${{ matrix.base_image }}
cpu: ${{ matrix.cpu }}
cpu_info: ${{ matrix.cpu_info }}
copy_repository_path: /opt/iiab/iiab
commands: |
echo "🍏 This job's status is ${{ job.status }}."
grep Model /proc/cpuinfo
uname -a # uname -srm
whoami # Typically 'root' instead of 'runner'
pwd # /home/runner/work/iiab/iiab == $GITHUB_WORKSPACE == ${{ github.workspace }}
apt-get update -y --allow-releaseinfo-change
apt-get install --no-install-recommends -y git
ls /opt/iiab/iiab
mkdir /etc/iiab
cp /opt/iiab/iiab/vars/local_vars_none.yml /etc/iiab/local_vars.yml
/opt/iiab/iiab/scripts/ansible
./iiab-install
cd /opt/iiab/iiab
iiab-summary
cat /etc/iiab/iiab_state.yml


@ -1,77 +0,0 @@
name: '"30 min" IIAB on RasPiOS on Zero 2 W'
# run-name: ${{ github.actor }} is testing out GitHub Actions 🚀
# https://michaelcurrin.github.io/dev-cheatsheets/cheatsheets/ci-cd/github-actions/triggers.html
on: [push, pull_request, workflow_dispatch]
# on:
# push:
#
# pull_request:
#
# # Allows you to run this workflow manually from the Actions tab
# workflow_dispatch:
#
# # Set your workflow to run every day of the week from Monday to Friday at 6:00 UTC
# schedule:
# - cron: "0 6 * * 1-5"
jobs:
test-install:
runs-on: ubuntu-22.04
strategy:
matrix:
arch: [aarch64] #[zero_raspbian, zero_raspios, zero2_raspios, aarch64]
include:
#- arch: zero_raspbian
# cpu: arm1176
# cpu_info: cpuinfo/raspberrypi_zero_w
# base_image: raspbian_lite:latest
#- arch: zero_raspios
# cpu: arm1176
# cpu_info: cpuinfo/raspberrypi_zero_w
# base_image: raspios_lite:latest
#- arch: zero2_raspios
# cpu: cortex-a7
# cpu_info: cpuinfo/raspberrypi_zero2_w
# base_image: raspios_lite:latest
- arch: aarch64
cpu: cortex-a53
cpu_info: cpuinfo/raspberrypi_zero2_w_arm64
base_image: raspios_lite_arm64:latest
steps:
#- run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
#- run: echo "🔎 The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}."
#- name: Dump GitHub context (typically almost 500 lines)
# env:
# GITHUB_CONTEXT: ${{ toJSON(github) }}
# run: echo "$GITHUB_CONTEXT"
- name: Dump matrix context
env:
MATRIX_CONTEXT: ${{ toJSON(matrix) }}
run: echo "$MATRIX_CONTEXT"
- uses: actions/checkout@v3.1.0
- uses: pguyot/arm-runner-action@v2
with:
image_additional_mb: 1024
base_image: ${{ matrix.base_image }}
cpu: ${{ matrix.cpu }}
cpu_info: ${{ matrix.cpu_info }}
copy_repository_path: /opt/iiab/iiab
commands: |
echo "🍏 This job's status is ${{ job.status }}."
#test `uname -m` = ${{ matrix.arch }}
grep Model /proc/cpuinfo
uname -a # uname -srm
whoami # Typically 'root' instead of 'runner'
pwd # /home/runner/work/iiab/iiab == $GITHUB_WORKSPACE == ${{ github.workspace }}
apt-get update -y --allow-releaseinfo-change
apt-get install --no-install-recommends -y git
ls /opt/iiab/iiab
mkdir /etc/iiab
cp /opt/iiab/iiab/vars/local_vars_none.yml /etc/iiab/local_vars.yml
/opt/iiab/iiab/scripts/ansible
./iiab-install
cd /opt/iiab/iiab
iiab-summary
cat /etc/iiab/iiab_state.yml

.gitignore

@ -1,13 +1,8 @@
# https://git-scm.com/docs/gitignore
xs-config.spec
build
deprecated
.ansible
*.patches
*.log
*.log
*.retry
# Lines below for emacs, which generates even more tmp files since 2022
*~
.#*
\#*#


@ -1,3 +1,3 @@
# SEE THE NEW<br>[github.com/iiab/iiab/wiki/Contributors-Guide-(EN)](https://github.com/iiab/iiab/wiki/Contributors-Guide-(EN))
# SEE THE NEW<br>[github.com/iiab/iiab/wiki/IIAB-Contributors-Guide](https://github.com/iiab/iiab/wiki/IIAB-Contributors-Guide)
# THANKS!


@ -15,6 +15,6 @@ this is to include the following two lines at the top of the file:
Licensed under the terms of the GNU GPL v2 or later; see LICENSE for details.
All files not containing an explicit copyright notice or terms of license in
the file are Copyright © 2015-2025, Unleash Kids, and are licensed under the
the file are Copyright © 2015-2021, Unleash Kids, and are licensed under the
terms of the GPLv2 license in the file named LICENSE in the root of the
repository.


@ -2,26 +2,26 @@
# Internet-in-a-Box (IIAB)
[Internet-in-a-Box (IIAB)](https://internet-in-a-box.org) is a “learning hotspot” that brings the Internet's crown jewels
(Wikipedia in any language, thousands of Khan Academy videos, zoomable OpenStreetMap, electronic books, WordPress journaling, Toys from Trash electronics projects, ETC) to those without Internet.
[Internet-in-a-Box (IIAB)](https://internet-in-a-box.org) is a "learning hotspot" that brings the Internet's crown jewels
(Wikipedia in any language, thousands of Khan Academy videos, zoomable OpenStreetMap, electronic books, WordPress journaling, Toys from Trash electronics projects, ETC) to those without Internet.
You can build your own tiny, affordable server (an offline digital library) for your school, your medical clinic, your prison, your region and/or your very own family — accessible with any nearby smartphone, tablet or laptop.
Internet-in-a-Box gives you the DIY tools to:
1. Download then drag-and-drop to arrange the [very best of the World's Free Knowledge](https://internet-in-a-box.org/#quality-content).
2. Choose among [30+ powerful educational apps](https://wiki.iiab.io/go/FAQ#What_services_%28IIAB_apps%29_are_suggested_during_installation%3F) for your school or learning/teaching community, optionally with a complete LMS (learning management system).
2. Choose among [30+ powerful educational apps](http://FAQ.IIAB.IO#What_services_.28IIAB_apps.29_are_suggested_during_installation.3F) for your school or learning/teaching community, optionally with a complete LMS (learning management system).
3. Exchange local/indigenous knowledge with nearby communities, using our [Manage Content](https://github.com/iiab/iiab-admin-console/blob/master/roles/console/files/help/InstContent.rst#manage-content) interface and possible mesh networking.
FYI this [community product](https://en.wikipedia.org/wiki/Internet-in-a-Box) is enabled by professional volunteers working [side-by-side](https://wiki.iiab.io/go/FAQ#What_are_the_best_places_for_community_support%3F) with schools, clinics and libraries around the world. *Thank you for being a part of our http://OFF.NETWORK grassroots technology [movement](https://meta.wikimedia.org/wiki/Internet-in-a-Box)!*
FYI this [community product](https://en.wikipedia.org/wiki/Internet-in-a-Box) is enabled by professional volunteers working [side-by-side](http://FAQ.IIAB.IO#What_are_the_best_places_for_community_support.3F) with schools, clinics and libraries around the world. *Thank you for being a part of our http://OFF.NETWORK grassroots technology [movement](https://meta.wikimedia.org/wiki/Internet-in-a-Box)!*
## Installation
Install Internet-in-a-Box (IIAB) from: [**download.iiab.io**](https://download.iiab.io/)
Install Internet-in-a-Box (IIAB) from [download.iiab.io](https://download.iiab.io/)
Please see [FAQ.IIAB.IO](https://wiki.iiab.io/go/FAQ) which has 50+ questions and answers to help you along the way (e.g. [“Is a quick installation possible?”](https://wiki.iiab.io/go/FAQ#Is_a_quick_installation_possible%3F)) as you put together the <!--digital--> “local learning hotspot” most suitable for your own teaching/learning community. Here are 2 ways to install IIAB:
Please see [FAQ.IIAB.IO](http://FAQ.IIAB.IO) which has 40+ questions and answers to help you along the way, as you put together the <!--digital--> "local learning hotspot" most suitable for your own teaching/learning community. Here are 2 ways to install IIAB:
- Our [1-line installer](https://download.iiab.io/) gets you the very latest, typically within about an hour, on [different Linux distributions](https://github.com/iiab/iiab/wiki/IIAB-Platforms#operating-systems).
- [Prefab disk images](https://github.com/iiab/iiab/wiki/Raspberry-Pi-Images-~-Summary#iiab-images-for-raspberry-pi) ([.img files](https://archive.org/search.php?query=iiab%20.img&sort=-publicdate)) are sometimes a few months out of date, but can be flashed directly onto a microSD card, for insertion into Raspberry Pi.
- [Prefab disk images](https://github.com/iiab/iiab/wiki/Raspberry-Pi-Images:-Summary) ([.img files](https://archive.org/search.php?query=iiab%20.img&sort=-publicdate)) are sometimes a few months out of date, but can be flashed directly onto a microSD card, for insertion into Raspberry Pi.
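
The "1-line installer" above is the install.txt script that iiab-install can also re-run (https://download.iiab.io/install.txt, referenced later in this comparison). A minimal sketch of invoking it, assuming the usual curl-piped-to-a-root-shell pattern rather than quoting the site's exact command:

# Sketch only -- see https://download.iiab.io/ for the authoritative 1-line command
curl https://download.iiab.io/install.txt | sudo bash
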
Our [HOW-TO videos](https://www.youtube.com/channel/UC0cBGCxr_WPBPa3IqPVEe3g) can be very helpful and the [Installation](https://github.com/iiab/iiab/wiki/IIAB-Installation) wiki page has more intricate details e.g. if you're trying to install Internet-in-a-Box (IIAB) onto a [another Linux](https://github.com/iiab/iiab/wiki/IIAB-Platforms) that has not yet been tried.
@ -29,22 +29,20 @@ See our [Tech Docs Wiki](https://github.com/iiab/iiab/wiki) for more about the u
After you've installed the software, you should [add content](https://github.com/iiab/iiab/wiki/IIAB-Installation#add-content), which can of course take time when downloading multi-gigabyte Content Packs!
Finally, you can [customize your Internet-in-a-Box home page](https://wiki.iiab.io/go/FAQ#How_do_I_customize_my_Internet-in-a-Box_home_page%3F) (typically http://box or http://box.lan) using our **drag-and-drop** Admin Console (http://box.lan/admin) &mdash; to arrange Content Packs and IIAB Apps (services) for your local community's needs.
Finally, you can [customize your Internet-in-a-Box home page](http://FAQ.IIAB.IO#How_do_I_customize_my_Internet-in-a-Box_home_page.3F) (typically http://box or http://box.lan) using our **drag-and-drop** Admin Console (http://box.lan/admin) &mdash; to arrange Content Packs and IIAB Apps (services) for your local community's needs.
## Community
Global community updates and videos are regularly posted to: **[@internet_in_box](https://twitter.com/internet_in_box)**
Internet-in-a-Box (IIAB) greatly welcomes contributions from educators, librarians and [IT/UX/QA people](https://github.com/iiab/iiab/wiki/Technical-Contributors-Guide) of all kinds!
_Internet-in-a-Box (IIAB) greatly welcomes contributions from educators, librarians and [IT/UX/QA people](https://github.com/iiab/iiab/wiki/Contributors-Guide-(EN)) ([versión en español](https://github.com/iiab/iiab/wiki/Gu%C3%ADa-para-Contribuidores-(ES))) of all kinds!_
If you would like to volunteer, please [make contact](https://internet-in-a-box.org/contributing.html) after looking over [“How can I help?”](https://wiki.iiab.io/go/FAQ#How_can_I_help%3F) at: [FAQ.IIAB.IO](https://wiki.iiab.io/go/FAQ)
If you would like to volunteer, please [make contact](https://internet-in-a-box.org/pages/contributing.html) after looking over "[How can I help?](http://FAQ.IIAB.IO#How_can_I_help.3F)" at: [FAQ.IIAB.IO](http://FAQ.IIAB.IO)
<!-- To learn about our software architecture, check out our [Contributors Guide](https://github.com/iiab/iiab/wiki/IIAB-Contributors-Guide).-->
To learn more about our open community architecture for “offline” learning, check out [“What technical documentation exists?”](https://wiki.iiab.io/go/FAQ#What_technical_documentation_exists%3F)
FYI we use [Ansible](https://wiki.iiab.io/go/FAQ#What_is_Ansible_and_what_version_should_I_use%3F) <!--as the underlying technology--> to install, deploy, configure and manage the various software components.
To learn more about our open community architecture for "offline" learning, check out "[What technical documentation exists?](http://FAQ.IIAB.IO#What_technical_documentation_exists.3F)"
FYI we use [Ansible](http://FAQ.IIAB.IO#What_is_Ansible_and_what_version_should_I_use.3F) <!--as the underlying technology--> to install, deploy, configure and manage the various software components.
*Thank you for helping us enable offline access to the Internet's free/open knowledge jewels, as well as “Sneakernet-of-Alexandria” distribution of local/indigenous content, when mass media channels do not serve grassroots voices.*
*Thank you for helping us enable offline access to the Internet's free/open knowledge jewels, as well as "Sneakernet-of-Alexandria" distribution of local/indigenous content, when mass media channels do not serve grassroots voices.*
## Versions
@ -54,4 +52,4 @@ Install our latest pre-release using the 1-line installer at: [**download.iiab.i
You can also consider <!--latest Internet-in-a-Box (IIAB)--> earlier official releases at: [github.com/iiab/iiab/releases](https://github.com/iiab/iiab/releases)
For much older versions, see: [github.com/xsce](https://github.com/xsce), [schoolserver.org](http://schoolserver.org)
For much older versions, see: [github.com/xsce](http://github.com/xsce), [schoolserver.org](http://schoolserver.org)


@ -5,4 +5,4 @@
# Disallowed by Ansible 2.11+ -- see https://docs.ansible.com/ansible/devel/porting_guides/porting_guide_2.7.html#using-a-loop-on-a-package-module-via-squash-actions
#squash_actions = apk, apt, dnf, homebrew, openbsd_pkg, pacman, pkgng, yum, zypper, package
[defaults]
interpreter_python=/usr/local/ansible/bin/python3
interpreter_python=/usr/bin/python3


@ -3,10 +3,10 @@
become: yes
vars_files:
- vars/default_vars.yml
- vars/{{ ansible_local.local_facts.os_ver }}.yml
- /etc/iiab/local_vars.yml
- /etc/iiab/iiab_state.yml
- vars/default_vars.yml
- vars/{{ ansible_local.local_facts.os_ver }}.yml
- /etc/iiab/local_vars.yml
- /etc/iiab/iiab_state.yml
roles:
- { role: 0-init }


@ -3,10 +3,10 @@
become: yes
vars_files:
- vars/default_vars.yml
- vars/{{ ansible_local.local_facts.os_ver }}.yml
- /etc/iiab/local_vars.yml
- /etc/iiab/iiab_state.yml
- vars/default_vars.yml
- vars/{{ ansible_local.local_facts.os_ver }}.yml
- /etc/iiab/local_vars.yml
- /etc/iiab/iiab_state.yml
roles:
- { role: 0-init }


@ -1,57 +1,17 @@
#!/bin/bash -e
# Running from a git repo
# Add cmdline options for passing to ansible
# Todo add proper shift to gobble up --debug --reinstall
PLAYBOOK=iiab-stages.yml
INVENTORY=ansible_hosts
IIAB_STATE_FILE=/etc/iiab/iiab_state.yml
ARGS="--extra-vars {" # Needs boolean not string so use JSON list. bash forces {...} to '{...}' for Ansible
ARGS=""
CWD=`pwd`
OS=`grep ^ID= /etc/os-release | cut -d= -f2`
OS=${OS//\"/} # Remove all '"'
MIN_RPI_KERN=5.4.0 # Do not use 'rpi-update' unless absolutely necessary: https://github.com/iiab/iiab/issues/1993
MIN_ANSIBLE_VER=2.16.14 # 2024-11-08: ansible-core 2.15 EOL is November 2024 per https://docs.ansible.com/ansible/latest/reference_appendices/release_and_maintenance.html#ansible-core-support-matrix 2022-11-09: Raspberry Pi 3 (and 3 B+ etc?) apparently install (and require?) ansible-core 2.11 for now -- @deldesir can explain more on PR #3419. Historical: Ansible 2.8.3 and 2.8.6 had serious bugs, preventing their use with IIAB.
REINSTALL=false
DEBUG=false
SKIP_ROLE_ON_ERROR=false
usage() {
echo -e "\n\e[1mUse './iiab-install' for regular installs, or to continue an install."
echo -e "Use './iiab-install --risky' to force 'skip_role_on_error: True'"
echo -e "Use './iiab-install --reinstall' to force running all Stages 0-9, followed by the Network Role."
echo -e "Use './iiab-install --debug' to run Stage 0, followed by Stages 3-9, followed by the Network Role."
echo -e "Use './iiab-configure' to run Stage 0, followed by Stages 4-9."
echo -e "Use './runrole' to run Stage 0, followed by a single Stage or Role."
echo -e "Use './iiab-network' to run Stage 0, followed by the Network Role.\e[0m\n"
}
# https://stackoverflow.com/questions/192249/how-do-i-parse-command-line-arguments-in-bash/14203146#14203146
while [[ $# -gt 0 ]]; do
case $1 in
--reinstall)
REINSTALL=true
shift
;;
--debug)
DEBUG=true
shift
;;
-r|--risky)
SKIP_ROLE_ON_ERROR=true
shift
;;
*)
usage
exit 1
;;
esac
done
ARGS="$ARGS\"skip_role_on_error\":$SKIP_ROLE_ON_ERROR" # Needs boolean not
# string so use JSON list. Ansible permits these boolean values: (refresher)
# https://github.com/iiab/iiab/blob/master/roles/0-init/tasks/validate_vars.yml#L19-L43
OS=${OS//\"/}
MIN_RPI_KERN=5.4.0 # Do not use 'rpi-update' unless absolutely necessary: https://github.com/iiab/iiab/issues/1993
MIN_ANSIBLE_VER=2.11.6 # Ansible 2.8.3 and 2.8.6 had serious bugs, preventing their use with IIAB.
if [ ! -f /etc/iiab/local_vars.yml ]; then
@ -65,13 +25,13 @@ if [ ! -f /etc/iiab/local_vars.yml ]; then
echo -e "████████████████████████████████████████████████████████████████████████████████\n" >&2
fi
echo -e "\n\e[1mEXITING: /opt/iiab/iiab/iiab-install REQUIRES /etc/iiab/local_vars.yml\e[0m\n" >&2
echo -e "\nEXITING: /opt/iiab/iiab/iiab-install REQUIRES /etc/iiab/local_vars.yml\n" >&2
echo -e "(1) See http://FAQ.IIAB.IO -> What is local_vars.yml and how do I customize it?" >&2
echo -e "(2) SMALL/MEDIUM/LARGE samples are included in /opt/iiab/iiab/vars" >&2
echo -e "(1) Please read http://wiki.laptop.org/go/IIAB/local_vars.yml to learn more" >&2
echo -e "(2) MIN/MEDIUM/BIG samples are included in /opt/iiab/iiab/vars" >&2
echo -e "(3) NO TIME FOR DETAILS? RUN INTERNET-IN-A-BOX'S FRIENDLY 1-LINE INSTALLER:\n" >&2
echo -e ' https://download.iiab.io\n' >&2
echo -e ' http://download.iiab.io\n' >&2
exit 1
fi
@ -82,15 +42,14 @@ fi
echo -e "\n\n./iiab-install $* BEGUN IN $CWD\n"
echo -e "local_facts.fact DIAGNOSTICS... (A FEW LINES OF ERRORS/WARNINGS BELOW ARE OK!)\n"
scripts/local_facts.fact # Exit & advise, if OS not supported.
mkdir -p /etc/ansible/facts.d
if [ ! -f /etc/ansible/facts.d/local_facts.fact ]; then
mkdir -p /etc/ansible/facts.d
fi
cp scripts/local_facts.fact /etc/ansible/facts.d/local_facts.fact
echo -e "\nPlaced /etc/ansible/facts.d/local_facts.fact into position.\n"
mkdir -p /etc/iiab/install-flags # MANDATORY since 2022-07-22
echo -e "/etc/iiab/install-flags directory created/verified."
echo -e "(e.g. for PR #3318 netwarn pop-ups, asking you to run iiab-network)\n"
echo -e "\nPlaced /etc/ansible/facts.d/local_facts.fact into position."
if [ ! -f $PLAYBOOK ]; then
echo "EXITING: IIAB Playbook ""$PLAYBOOK"" not found."
@ -98,6 +57,16 @@ if [ ! -f $PLAYBOOK ]; then
exit 1
fi
if [ "$1" != "--debug" ] && [ "$1" != "--reinstall" ] && [ "$1" != "" ]; then
echo "Use './iiab-install' for regular installs, or to continue an install."
echo "Use './iiab-install --reinstall' to force running all Stages 0-9, followed by the Network Role."
echo "Use './iiab-install --debug' to run Stage 0, followed by Stages 3-9, followed by the Network Role."
echo "Use './iiab-configure' to run Stage 0, followed by Stages 4-9."
echo "Use './runrole' to run Stage 0, followed by a single Stage or Role."
echo "Use './iiab-network' to run Stage 0, followed by the Network Role."
exit 1
fi
# Subroutine compares software version numbers. Generates rare false positives
# like "1.0 > 1" and "2.4.0 > 2.4". Avoid risks by structuring conditionals w/
# a consistent # of decimal points e.g. "if version_gt w.x.y.z a.b.c.d; then"
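
The subroutine body itself sits outside the hunks shown here; a minimal sketch of such a comparison, assuming the common 'sort -V' idiom (which yields exactly the "1.0 > 1" class of false positives described above):

# Sketch (assumed implementation, not quoted from the repo):
# succeeds when $1 sorts strictly later than $2 under version ordering
version_gt() {
    test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"
}
version_gt 2.16.14 2.11.6 && echo "newer"   # e.g. gating on MIN_ANSIBLE_VER
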
@ -124,7 +93,7 @@ CURR_ANSIBLE_VER=0
#if [[ $(command -v ansible) ]]; then # Also Works! $(...) nests more easily than backticks
#if [[ `which ansible` ]]; then # "which" misses built-in commands like cd, and is RISKY per https://stackoverflow.com/questions/592620/check-if-a-program-exists-from-a-bash-script
#if [[ `type -P ansible` ]]; then # "type -P" isn't POSIX compliant; it misses built-in commands like "cd"
if [[ $(command -v ansible) ]]; then # "command -v" is POSIX compliant; it catches built-in commands like "cd"
if [[ `command -v ansible` ]]; then # "command -v" is POSIX compliant; it catches built-in commands like "cd"
CURR_ANSIBLE_VER=$(ansible --version | head -1 | cut -f 2- -d " " | sed 's/.* \([^ ]*\)\].*/\1/')
# Above works with 'ansible [core 2.11.0rc2]' -- these old ways do not:
#CURR_ANSIBLE_VER=$(ansible --version | head -1 | awk '{print $2}')
@ -156,38 +125,41 @@ if [ -f /etc/iiab/iiab.env ]; then
fi
fi
if $($REINSTALL); then
if [ "$1" == "--reinstall" ]; then
STAGE=0
#ARGS="$ARGS"" --extra-vars reinstall=True"
ARGS="$ARGS,\"reinstall\":True" # Needs boolean not string so use JSON list
ARGS="$ARGS"" --extra-vars reinstall=True"
sed -i 's/^STAGE=.*/STAGE=0/' /etc/iiab/iiab.env
echo "Wrote STAGE=0 (counter) to /etc/iiab/iiab.env"
elif [ "$STAGE" -ge 2 ] && $($DEBUG); then
elif [ "$STAGE" -ge 2 ] && [ "$1" == "--debug" ]; then
STAGE=2
sed -i 's/^STAGE=.*/STAGE=2/' /etc/iiab/iiab.env
echo "Wrote STAGE=2 (counter) to /etc/iiab/iiab.env"
elif [ "$STAGE" -eq 9 ]; then
echo -e "\n\e[1mEXITING: STAGE (counter) in /etc/iiab/iiab.env shows Stage 9 Is Already Done.\e[0m"
usage
exit 0 # Allows rerunning https://download.iiab.io/install.txt
echo -e "\nEXITING: STAGE (counter) in /etc/iiab/iiab.env shows Stage 9 Is Already Done."
echo -e "Use './iiab-install --reinstall' to force running all Stages 0-9, followed by the Network Role."
echo -e "Use './iiab-install --debug' to run Stage 0, followed by Stages 3-9, followed by the Network Role."
echo -e "Use './iiab-configure' to run Stage 0, followed by Stages 4-9."
echo -e "Use './runrole' to run Stage 0, followed by a single Stage or Role."
echo -e "Use './iiab-network' to run Stage 0, followed by the Network Role.\n\n"
exit 0 # Allows rerunning http://download.iiab.io/install.txt
fi
fi
if [ "$STAGE" -lt 2 ] && $($DEBUG); then
if [ "$STAGE" -lt 2 ] && [ "$1" == "--debug" ]; then
echo -e "\n'--debug' *ignored* as STAGE (counter) < 2."
fi
# /etc/iiab/iiab_state.yml is mandatory and must be created here. Background:
# Allow iiab-install to read IIAB_STATE_FILE to not repeat installs of previous
# roles that already completed within the stage.
if [ ! -f $IIAB_STATE_FILE ]; then # touch $IIAB_STATE_FILE
echo -e "\nCreating... $IIAB_STATE_FILE"
if [ ! -f $IIAB_STATE_FILE ]; then
#touch $IIAB_STATE_FILE
cat > $IIAB_STATE_FILE << EOF
# DO *NOT* MANUALLY EDIT THIS, THANKS!
# IIAB does NOT currently support uninstalling apps/services.
EOF
fi
echo -e "\nTRY TO RERUN './iiab-install' IF IT FAILS DUE TO CONNECTIVITY ISSUES ETC!\n"
echo -e "\e[1mRunning local Ansible playbooks...\n...Stage 0 will now run\n...followed by Stages $(($STAGE + 1))-9\n...and then the Network Role.\e[0m\n"
@ -196,8 +168,6 @@ export ANSIBLE_LOG_PATH="$CWD""/iiab-install.log"
ansible -m setup -i $INVENTORY localhost --connection=local | grep python
ansible -m setup -i $INVENTORY localhost --connection=local >> /dev/null # So vars are recorded in /opt/iiab/iiab/iiab-install.log
ARGS="$ARGS}"
echo -e "\nNOW RUN: ansible-playbook -i $INVENTORY $PLAYBOOK $ARGS --connection=local\n"
ansible-playbook -i $INVENTORY $PLAYBOOK $ARGS --connection=local
ansible-playbook -i $INVENTORY $PLAYBOOK ${ARGS} --connection=local
echo -e "./iiab-install $* COMPLETED IN $CWD\n\n"
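
Tracing the ARGS handling above: one branch assembles a single JSON object for --extra-vars (opened with "--extra-vars {" and closed with "$ARGS}") so values like skip_role_on_error stay booleans, while the other passes plain key=value extra-vars. A sketch of what the "NOW RUN:" echo typically expands to, with both flag values assumed for illustration:

# Sketch only -- flag values are illustrative, not defaults pulled from the repo
ansible-playbook -i ansible_hosts iiab-stages.yml \
    --extra-vars '{"skip_role_on_error":false,"reinstall":True}' \
    --connection=local
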


@ -4,14 +4,11 @@
CWD=`pwd`
export ANSIBLE_LOG_PATH="$CWD/iiab-network.log"
exit_error() {
echo -e "\nEXITING: "$@ | tee -a /opt/iiab/iiab/iiab-network.log
exit 1
}
if [ ! -f iiab-network.yml ]; then
exit_error "iiab-network.yml not found in current directory." \
"Please rerun this command from the top level of the git repo."
echo "iiab-network.yml not found in current directory."
echo "Please rerun this command from the top level of the git repo."
echo "Exiting."
exit 1
fi
OS="unknown" # will be overridden below, if /etc/iiab/iiab.env is legit
@ -22,27 +19,32 @@ if [ -f /etc/iiab/iiab.env ]; then
if grep -q STAGE= /etc/iiab/iiab.env ; then
echo -e "\nExtracted STAGE=$STAGE (counter) from /etc/iiab/iiab.env"
if ! [ "$STAGE" -eq "$STAGE" ] 2> /dev/null; then
exit_error "STAGE (counter) value == ""$STAGE"" is non-integer"
echo -e "\nEXITING: STAGE (counter) value == ""$STAGE"" is non-integer"
exit 1
elif [ "$STAGE" -lt 0 ] || [ "$STAGE" -gt 9 ]; then
exit_error "STAGE (counter) value == ""$STAGE"" is out-of-range"
echo -e "\nEXITING: STAGE (counter) value == ""$STAGE"" is out-of-range"
exit 1
elif [ "$STAGE" -lt 3 ]; then
exit_error "STAGE (counter) value == ""$STAGE" \
"\nIIAB Stage 3 not complete." \
"\nPlease run: ./iiab-install"
echo -e "\nEXITING: STAGE (counter) value == ""$STAGE"
echo -e "\nIIAB Stage 3 not complete."
echo -e "\nPlease run: ./iiab-install"
exit 1
fi
else
exit_error "STAGE (counter) not found" \
"\nIIAB not installed." \
"\nPlease run: ./iiab-install"
echo -e "\nEXITING: STAGE (counter) not found"
echo -e "\nIIAB not installed."
echo -e "\nPlease run: ./iiab-install"
exit 1
fi
else
exit_error "/etc/iiab/iiab.env not found"
echo -e "\nEXITING: /etc/iiab/iiab.env not found"
exit 1
fi
echo "Ansible will now run iiab-network.yml -- log file is iiab-network.log"
Start=`date`
ansible -m setup -i ansible_hosts localhost --connection=local | grep python
ansible-playbook -i ansible_hosts iiab-network.yml --extra-vars "{\"skip_role_on_error\":false}" --connection=local
ansible-playbook -i ansible_hosts iiab-network.yml --connection=local
End=`date`
@ -92,4 +94,3 @@ echo "iiab-network run start: $Start"
echo "iiab-network run end: $End"
echo
echo "Please REBOOT to fully verify your network -- graphical desktops MUST reboot!"
exit 0
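
The STAGE validation above relies on a shell idiom worth spelling out: comparing a variable to itself with -eq only succeeds when the value parses as an integer, so silencing stderr turns it into a quiet integer test. A minimal standalone sketch of the same check:

# Sketch of the integer-test idiom used by iiab-network above
STAGE="abc"
if ! [ "$STAGE" -eq "$STAGE" ] 2> /dev/null; then
    echo "STAGE (counter) value == $STAGE is non-integer"
fi
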


@ -3,10 +3,10 @@
become: yes
vars_files:
- vars/default_vars.yml
- vars/{{ ansible_local.local_facts.os_ver }}.yml
- /etc/iiab/local_vars.yml
- /etc/iiab/iiab_state.yml
- vars/default_vars.yml
- vars/{{ ansible_local.local_facts.os_ver }}.yml
- /etc/iiab/local_vars.yml
- /etc/iiab/iiab_state.yml
roles:
- { role: 0-init }


@ -1,20 +0,0 @@
#!/bin/bash -e
# Running from a git repo
# Assumes iiab repos are downloaded
apt -y update
apt -y full-upgrade
apt -y install git curl nano gawk wget pastebinit
cd /opt/iiab/iiab
scripts/ansible
# 2022-09-27: iiab-install now handles this
#mkdir -p /etc/iiab/install-flags
if [ ! -f /etc/iiab/local_vars.yml ]; then
cp /opt/iiab/iiab/vars/local_vars_none.yml /etc/iiab/local_vars.yml
fi
reboot


@ -3,11 +3,11 @@
become: yes
vars_files:
- roles/0-init/defaults/main.yml
- vars/default_vars.yml
- vars/{{ ansible_local.local_facts.os_ver }}.yml
- /etc/iiab/local_vars.yml
- /etc/iiab/iiab_state.yml
- roles/0-init/defaults/main.yml
- vars/default_vars.yml
- vars/{{ ansible_local.local_facts.os_ver }}.yml
- /etc/iiab/local_vars.yml
- /etc/iiab/iiab_state.yml
tasks:


@ -2,9 +2,9 @@
become: yes
vars_files:
- vars/default_vars.yml
- vars/{{ ansible_local.local_facts.os_ver }}.yml
- /etc/iiab/local_vars.yml
- vars/default_vars.yml
- vars/{{ ansible_local.local_facts.os_ver }}.yml
- /etc/iiab/local_vars.yml
roles:
- { role: 0-init }


@ -8,7 +8,7 @@
# apache_interface: 127.0.0.1
# Make this False to disable http://box/common/services/power_off.php button:
# allow_www_data_poweroff: False
# apache_allow_sudo: True
# All above are set in: github.com/iiab/iiab/blob/master/vars/default_vars.yml
# If nec, change them by editing /etc/iiab/local_vars.yml prior to installing!


@ -1,39 +0,0 @@
#!/bin/bash
# /usr/bin/iiab-remote-off should fully turn off multiple remote support
# services like OpenVPN and others, to reduce risk of remote attacks.
# echo -e '\nWARNING: To disable OpenVPN long-term, it'"'"'s recommended you:\n'
#
# echo -e '1) Set this variable in /etc/iiab/local_vars.yml'
# echo -e ' openvpn_enabled: False\n'
#
# echo -e '2) Run:'
# echo -e ' cd /opt/iiab/iiab'
# echo -e ' sudo ./runrole openvpn\n'
# Do nothing if OpenVPN not installed
which openvpn
if [ $? -ne 0 ]; then
echo 'Cannot find the OpenVPN program (openvpn).'
exit 1
fi
if grep -q '^openvpn_enabled:' /etc/iiab/local_vars.yml; then
sed -i "s/^openvpn_enabled:.*/openvpn_enabled: False/" /etc/iiab/local_vars.yml
else
echo "openvpn_enabled: False" >> /etc/iiab/local_vars.yml
fi
systemctl disable openvpn
systemctl stop openvpn
sleep 5
ps -e | grep openvpn # 2018-09-05: "ps -e | grep vpn" no longer works (nor would "pgrep vpn") when invoked from iiab-vpn-off (as filename itself causes [multiple] "vpn" instances to appear in process list!)
if [ $? -eq 0 ]; then
echo "OpenVPN failed to stop."
else
echo "OpenVPN's systemd service was successfully stopped and disabled."
echo
echo "Also, 'openvpn_enabled: False' was set in /etc/iiab/local_vars.yml"
fi


@ -23,6 +23,14 @@
# ...after it is set in 0-init/tasks/main.yml
first_run: False
rpi_model: none # 2021-07-30: Broadly used!
#xo_model: none # 2021-07-30: No longer used
# 2021-07-30: Recorded to /etc/iiab/iiab.ini but not used programmatically:
gw_active: False
# 2021-07-30: Broadly used, but not in an organized way -- most all IIAB
# outfitting/provisioning happens online -- in situations where connectivity
# failures should be reported to the operator, rather than papered over:
internet_available: False
discovered_wan_iface: none # 2021-07-30: Very broadly used!
# 2021-07-30: Barely used -- for {named, dhcpd, squid} in
# roles/network/tasks/main.yml -- after being set in 0-init/tasks/network.yml


@ -1,26 +1,13 @@
- name: Record disk_used_a_priori (permanently, into {{ iiab_ini_file }} below) to later estimate iiab_software_disk_usage
shell: df -B1 --output=used / | tail -1
register: df1
# workaround for fact that auto create does not work on iiab_ini_file
# workaround for fact that auto create does not work on iiab_ini_file (/etc/iiab/iiab.ini)
- name: Create {{ iiab_ini_file }}
file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
path: "{{ iiab_ini_file }}"
state: touch
- name: Run command 'dpkg --print-architecture' to identify OS architecture (CPU arch as revealed by ansible_architecture ~= ansible_machine is NOT enough!)
command: dpkg --print-architecture
register: dpkg_arch
- name: Run command 'dpkg --print-foreign-architectures' (secondary OS arch, if available)
command: dpkg --print-foreign-architectures
register: dpkg_foreign_arch
- name: Add 'summary' variable values to {{ iiab_ini_file }}
- name: Add 'location' variable values to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}"
section: summary
section: location
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
@ -28,39 +15,29 @@
value: "{{ iiab_base }}"
- option: iiab_dir
value: "{{ iiab_dir }}"
- option: disk_used_a_priori
value: "{{ df1.stdout }}"
- name: Add 'initial' variable values to {{ iiab_ini_file }}
- name: Add 'version' variable values to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}"
section: initial
section: version
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
- option: os_ver
value: "{{ os_ver }}"
- option: distribution
value: "{{ ansible_facts['distribution'] }}"
value: "{{ ansible_distribution }}"
- option: arch
value: "{{ ansible_architecture }}"
- option: dpkg_arch
value: "{{ dpkg_arch.stdout }}"
- option: dpkg_foreign_arch
value: "{{ dpkg_foreign_arch.stdout }}"
- option: rpi_model
value: "{{ rpi_model }}"
- option: devicetree_model
value: "{{ devicetree_model }}"
- option: iiab_base_ver
value: "{{ iiab_base_ver }}"
- option: iiab_remote_url
value: "{{ ansible_local.local_facts.iiab_remote_url }}"
- option: iiab_branch
value: "{{ ansible_local.local_facts.iiab_branch }}"
- option: iiab_commit
value: "{{ ansible_local.local_facts.iiab_commit }}"
- option: iiab_recent_tag
value: "{{ ansible_local.local_facts.iiab_recent_tag }}"
- option: install_date
value: "{{ ansible_date_time.iso8601 }}"
#- option: xo_model
# value: "{{ xo_model }}"
- option: rpi_model
value: "{{ rpi_model }}"
- option: devicetree_model
value: "{{ devicetree_model }}"


@ -1,8 +1,3 @@
- name: "Set 'iiab_fqdn: {{ iiab_hostname }}.{{ iiab_domain }}'"
set_fact:
iiab_fqdn: "{{ iiab_hostname }}.{{ iiab_domain }}"
FQDN_changed: False
- name: Does /etc/cloud/cloud.cfg exist e.g. is this Ubuntu Server 18+ ?
stat:
path: /etc/cloud/cloud.cfg
@ -22,23 +17,24 @@
# 2021-08-31: Periods in /etc/hostname fail with some WiFi routers (#2904)
# command: hostnamectl set-hostname "{{ iiab_hostname }}.{{ iiab_domain }}"
# 2022-07-11: Should the first entry match just hostname and domain move to
# after localhost? See PR's #1 & #8 -- with discussion on #3302 -- and also:
# 1. /etc/hosts -- #1815 solved by PR #1847
# 2. /etc/hostname -- #2904 solved by PR #2973
#- name: Install /etc/sysconfig/network from template (redhat)
# template:
# src: roles/network/templates/network/sysconfig.network.j2
# dest: /etc/sysconfig/network
# owner: root
# group: root
# mode: 0644
# when: is_redhat
# roles/network/tasks/hosts.yml [no longer in use] ALSO did this:
- name: 'Put FQDN & hostnames in /etc/hosts: "127.0.0.1 {{ iiab_hostname }}.{{ iiab_domain }} localhost.localdomain localhost {{ iiab_hostname }} box box.lan"'
lineinfile:
path: /etc/hosts
regexp: '^127\.0\.0\.1'
line: '127.0.0.1 {{ iiab_hostname }}.{{ iiab_domain }} localhost.localdomain localhost {{ iiab_hostname }} box box.lan'
# 2021-07-30: FQDN_changed isn't used as in the past -- its remaining use is
# for {named, dhcpd, squid} in roles/network/tasks/main.yml -- possibly it
# should be reconsidered? See PR #2876: roles/network might become optional?
- name: "Also set 'FQDN_changed: True' -- if iiab_fqdn != ansible_fqdn ({{ ansible_fqdn }})"
set_fact:
FQDN_changed: True
when: iiab_fqdn != ansible_fqdn
#owner: root
#group: root
#mode: 0644
#- name: Re-configuring httpd - not initial install
# include_tasks: roles/httpd/tasks/main.yml


@ -7,14 +7,17 @@
# Higher-level purpose explained at the bottom of:
# https://github.com/iiab/iiab/blob/master/vars/default_vars.yml
- name: "Ansible just ran /etc/ansible/facts.d/local_facts.fact to set 15 vars -- here we extract 6 of those -- iiab_stage: {{ ansible_local.local_facts.stage }}, rpi_model: {{ ansible_local.local_facts.rpi_model }}, devicetree_model: {{ ansible_local.local_facts.devicetree_model }}, os_ver: {{ ansible_local.local_facts.os_ver }}, python_version: {{ ansible_local.local_facts.python_version }}, php_version: {{ ansible_local.local_facts.php_version }}"
- name: "Ansible just ran /etc/ansible/facts.d/local_facts.fact to set 11 vars -- here we extract 3 of those -- rpi_model: {{ ansible_local.local_facts.rpi_model }}, devicetree_model: {{ ansible_local.local_facts.devicetree_model }}, iiab_stage: {{ ansible_local.local_facts.stage }}"
set_fact:
iiab_stage: "{{ ansible_local.local_facts.stage }}"
rpi_model: "{{ ansible_local.local_facts.rpi_model }}"
devicetree_model: "{{ ansible_local.local_facts.devicetree_model }}"
os_ver: "{{ ansible_local.local_facts.os_ver }}"
python_version: "{{ ansible_local.local_facts.python_version }}"
php_version: "{{ ansible_local.local_facts.php_version }}"
#xo_model: "{{ ansible_local.local_facts.xo_model }}"
iiab_stage: "{{ ansible_local.local_facts.stage }}"
# 2020-10-29: Appears no longer nec (see 3 above ansible_local.local_facts.*)
#- name: Re-read local_facts.facts from /etc/ansible/facts.d
# setup:
# filter: ansible_local
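
The extraction above relies on Ansible's "local facts" mechanism: any executable placed in /etc/ansible/facts.d that prints JSON (file suffix .fact) is re-exposed by the setup module under ansible_local.<name>. A minimal sketch of that mechanism using a made-up demo fact, not IIAB's real local_facts.fact:

# Sketch only -- 'demo.fact' and its output are hypothetical
sudo mkdir -p /etc/ansible/facts.d
sudo tee /etc/ansible/facts.d/demo.fact > /dev/null << 'EOF'
#!/bin/sh
echo '{"stage": "0"}'
EOF
sudo chmod 0755 /etc/ansible/facts.d/demo.fact
ansible localhost --connection=local -m setup -a 'filter=ansible_local'   # shows ansible_local.demo.stage
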
# Initialize /etc/iiab/iiab.ini writing the 'location' and 'version' sections
# once and only once, to preserve the install date and git hash.
@ -23,9 +26,10 @@
when: not iiab_ini_test.stat.exists
# 2021-07-30: The 'first_run' flag isn't much used anymore. In theory it's
# still used in 1-prep/tasks/hardware.yml for raspberry_pi.yml
#
# This needs to be reworked for 0-init speed, and overall understandability.
# still used in these 2 places:
# (1) roles/1-prep/tasks/main.yml for raspberry_pi.yml
# (2) roles/network/tasks/named.yml for "Stop named before copying files"
# In practice however, it's no longer important, and might be reconsidered?
- name: Set first_run flag
set_fact:
first_run: True
@ -34,23 +38,11 @@
# Copies the latest/known version of iiab-diagnostics into /usr/bin (so it can
# be run even if local source tree /opt/iiab/iiab is deleted to conserve disk).
- name: Copy iiab-update & iiab-summary & iiab-diagnostics & iiab-root-login from /opt/iiab/iiab/scripts/ to /usr/bin/
- name: Copy /opt/iiab/iiab/scripts/iiab-diagnostics to /usr/bin/iiab-diagnostics
copy:
src: "{{ iiab_dir }}/scripts/{{ item }}"
src: "{{ iiab_dir }}/scripts/iiab-diagnostics"
dest: /usr/bin/
mode: '0755'
with_items:
- iiab-update
- iiab-summary
- iiab-diagnostics
- iiab-root-login
- name: Symlink /usr/bin/iiab-upgrade -> /usr/bin/iiab-update
file:
src: /usr/bin/iiab-update
path: /usr/bin/iiab-upgrade
state: link
#force: yes
- name: Create globally-writable directory /etc/iiab/diag (0777) so non-root users can run 'iiab-diagnostics'
file:
@ -61,14 +53,13 @@
- name: Pre-check that IIAB's "XYZ_install" + "XYZ_enabled" vars (1) are defined, (2) are boolean-not-string variables, and (3) contain plausible values. Also checks that "XYZ_install" is True when "XYZ_installed" is defined.
include_tasks: validate_vars.yml
when: not (rpi_model | regex_search('\\bW\\b')) # Ansible requires double backslashes, e.g. with \b "word boundary" anchors: https://www.regular-expressions.info/wordboundaries.html https://stackoverflow.com/questions/56869119/ansible-regular-expression-to-match-a-string-and-extract-the-line/56869801#56869801
# 2022-12-30: Functionality moved to www_options/tasks/php-settings.yml
# - name: "Time Zone / TZ: Set symlink /etc/localtime to UTC if it doesn't exist?"
# include_tasks: tz.yml
- name: "Time Zone / TZ: Set symlink /etc/localtime to UTC if it doesn't exist?"
include_tasks: tz.yml
- name: Test Gateway + Test Internet + Set new hostname/domain (hostname.yml) if nec + Set 'gui_port' to 80 or 443 for Admin Console
include_tasks: network.yml
- name: Set hostname / domain (etc) in various places
include_tasks: hostname.yml
- name: Add 'runtime' variable values to {{ iiab_ini_file }}
ini_file:
@ -83,14 +74,10 @@
value: "{{ iiab_base_ver }}"
- option: iiab_revision
value: "{{ iiab_revision }}"
- option: iiab_remote_url
value: "{{ ansible_local.local_facts.iiab_remote_url }}"
- option: runtime_branch
value: "{{ ansible_local.local_facts.iiab_branch }}"
- option: runtime_commit
value: "{{ ansible_local.local_facts.iiab_commit }}"
- option: iiab_recent_tag
value: "{{ ansible_local.local_facts.iiab_recent_tag }}"
- option: runtime_date
value: "{{ ansible_date_time.iso8601 }}"
- option: ansible_version
@ -101,22 +88,24 @@
value: "{{ ansible_memtotal_mb }}"
- option: swap_mb
value: "{{ ansible_swaptotal_mb }}"
- option: gw_active
value: "{{ gw_active }}"
- option: internet_available
value: "{{ internet_available }}"
- option: rpi_model
value: "{{ rpi_model }}"
- option: devicetree_model
value: "{{ devicetree_model }}"
- option: os_ver
value: "{{ os_ver }}"
- option: python_version
value: "{{ python_version }}"
- option: php_version
value: "{{ php_version }}"
- option: first_run
value: "{{ first_run }}"
# - option: local_tz # e.g. 'EDT' (summer) or 'EST' (winter) after Ansible interprets symlink /etc/localtime -- or 'UTC' if /etc/localtime doesn't exist
# value: "{{ local_tz }}"
# - option: etc_localtime.stdout # e.g. 'America/New_York' direct from symlink /etc/localtime -- or '' if /etc/localtime doesn't exist
# value: "{{ etc_localtime.stdout }}"
- option: local_tz # e.g. 'EDT' (summer) or 'EST' (winter) after Ansible interprets symlink /etc/localtime -- or 'UTC' if /etc/localtime doesn't exist
value: "{{ local_tz }}"
- option: etc_localtime.stdout # e.g. 'America/New_York' direct from symlink /etc/localtime -- or '' if /etc/localtime doesn't exist
value: "{{ etc_localtime.stdout }}"
#- option: no_NM_reload
# value: "{{ no_NM_reload }}"
#- option: is_F18
# value: "{{ is_F18 }}"
- option: FQDN_changed
value: "{{ FQDN_changed }}"


@ -0,0 +1,74 @@
- name: Do we have a gateway? If 'ip route' specifies a default route, Ansible parses details here...
debug:
var: ansible_default_ipv4
- name: "If above ansible_default_ipv4.gateway is defined, set WAN candidate 'discovered_wan_iface: {{ ansible_default_ipv4.alias }}' -- using ansible_default_ipv4.alias"
set_fact:
discovered_wan_iface: "{{ ansible_default_ipv4.alias }}"
when: ansible_default_ipv4.gateway is defined
- name: "Verify gateway active: ping -c4 {{ ansible_default_ipv4.gateway }} -- using ansible_default_ipv4.gateway"
shell: ping -c4 "{{ ansible_default_ipv4.gateway }}" | grep icmp_seq=4 | wc -l
register: gw_active_test
when: discovered_wan_iface != "none"
- name: "If gateway responded, set 'gw_active: True' and 'iiab_wan_iface: {{ discovered_wan_iface }}' -- using discovered_wan_iface"
set_fact:
iiab_wan_iface: "{{ discovered_wan_iface }}"
gw_active: True
when: discovered_wan_iface != "none" and gw_active_test.stdout == "1"
- name: 'Test for Internet access, using: {{ iiab_download_url }}/heart-beat.txt'
get_url:
url: "{{ iiab_download_url }}/heart-beat.txt"
dest: /tmp/heart-beat.txt
#timeout: "{{ download_timeout }}"
# @jvonau recommends: 100sec is too much (keep 10sec default)
ignore_errors: True
#async: 10
#poll: 2
register: internet_access_test
- name: "Set 'internet_available: True' if above download succeeded AND not disregard_network"
set_fact:
internet_available: True # Initialized to 'False' in 0-init/defaults/main.yml
when: not internet_access_test.failed and not disregard_network
- name: Remove downloaded Internet test file /tmp/heart-beat.txt
file:
path: /tmp/heart-beat.txt
state: absent
- name: "Set 'iiab_fqdn: {{ iiab_hostname }}.{{ iiab_domain }}'"
set_fact:
iiab_fqdn: "{{ iiab_hostname }}.{{ iiab_domain }}"
FQDN_changed: False
- name: Set hostname / domain (etc) in various places -- if iiab_fqdn != ansible_fqdn ({{ ansible_fqdn }})
include_tasks: hostname.yml
when: iiab_fqdn != ansible_fqdn
# 2021-07-30: FQDN_changed isn't used as in the past -- its remaining use is
# for {named, dhcpd, squid} in roles/network/tasks/main.yml -- possibly it
# should be reconsidered? See PR #2876: roles/network might become optional?
- name: "Also set 'FQDN_changed: True' -- if iiab_fqdn != ansible_fqdn ({{ ansible_fqdn }})"
set_fact:
FQDN_changed: True
when: iiab_fqdn != ansible_fqdn
# 2021-08-17: (1) iiab-gen-iptables works better if gui_port is set directly in
# default_vars.yml and/or local_vars.yml (2) Admin Console's iiab-admin.yml
# and js-menu.yml set 'adm_cons_force_ssl: False'
# - name: "Set 'gui_port: 80' for Admin Console if not adm_cons_force_ssl"
# set_fact:
# gui_port: 80
# when: not adm_cons_force_ssl
# - name: "Set 'gui_port: 443' for Admin Console if adm_cons_force_ssl"
# set_fact:
# gui_port: 443
# when: adm_cons_force_ssl
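
A manual sketch of the two probes these tasks automate (gateway reachability, then the Internet heart-beat download), assuming a root shell on the box and the download URL noted elsewhere in this comparison:

# Sketch only -- replicates the gw_active / internet_available checks by hand
GW=$(ip route | awk '/^default/ {print $3; exit}')
IFACE=$(ip route | awk '/^default/ {print $5; exit}')
echo "discovered_wan_iface candidate: $IFACE (gateway: $GW)"
[ -n "$GW" ] && ping -c4 "$GW" | grep -q icmp_seq=4 && echo "gw_active: True"
curl -sf https://download.iiab.io/packages/heart-beat.txt -o /tmp/heart-beat.txt && echo "internet_available: True"
rm -f /tmp/heart-beat.txt
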


@ -1,5 +1,3 @@
# 2022-12-30: Functionality moved to www_options/tasks/php-settings.yml
- name: "'local_tz: {{ local_tz }}' was set by ansible_date_time.tz in /opt/iiab/iiab/vars/default_vars.yml -- e.g. if Ansible finds symlink /etc/localtime -> ../usr/share/zoneinfo/America/New_York -- it will simplify that to 'EDT' (in the summer) or 'EST' (in the winter)"
command: echo


@ -63,29 +63,38 @@
#
# 2020-11-04: Fix validation of 5 [now 4] core dependencies, for ./runrole etc
- name: Set vars_checklist for 45 + 45 + 40 vars ("XYZ_install" + "XYZ_enabled" + "XYZ_installed") to be checked
- name: Set vars_checklist for 44 + 44 + 40 vars ("XYZ_install" + "XYZ_enabled" + "XYZ_installed") to be checked
set_fact:
vars_checklist:
- hostapd
- dhcpd
- named
- dnsmasq
- bluetooth
#- wondershaper # Unmaintained
- sshd
#- openvpn # Deprecated
- tailscale
- openvpn
- remoteit
- admin_console
#- nginx # MANDATORY
#- apache # Unmaintained - former dependency
#- mysql # MANDATORY
- squid
#- dansguardian # Unmaintained
- cups
- samba
- usb_lib
#- xo_services # Unmaintained
#- activity_server # Unmaintained
#- ejabberd_xs # Unmaintained
#- idmgr # Unmaintained
- azuracast
#- dokuwiki # Unmaintained
#- ejabberd # Unmaintained
#- elgg # Unmaintained
- gitea
- jupyterhub
- lokole
- mysql # Dependency - excluded from _installed check below
- mediawiki
- mosquitto
- nodejs # Dependency - excluded from _installed check below
@ -102,7 +111,6 @@
- osm_vector_maps
- transmission
- awstats
- matomo
- monit
- munin
- phpmyadmin
@ -114,7 +122,6 @@
- calibreweb
- calibre
- pbx
- network
- name: Assert that {{ vars_checklist | length }} "XYZ_install" vars are all... defined
assert:
@ -156,41 +163,5 @@
that: "{{ item }}_install or {{ item }}_installed is undefined"
fail_msg: "DISALLOWED: '{{ item }}_install: False' (e.g. in /etc/iiab/local_vars.yml) WHEN '{{ item }}_installed' is defined (e.g. in /etc/iiab/iiab_state.yml) -- IIAB DOES NOT SUPPORT UNINSTALLS -- please verify those 2 files especially, and other places variables are defined?"
quiet: yes
when: item != 'mysql' and item != 'postgresql' and item != 'mongodb' and item != 'nodejs' and item != 'yarn' # Exclude auto-installed dependencies
when: item != 'nodejs' and item != 'postgresql' and item != 'mongodb' and item != 'yarn' # Exclude auto-installed dependencies
loop: "{{ vars_checklist }}"
- name: Set vars_deprecated_list for 4+ vars ("XYZ_install") to be checked
set_fact:
vars_deprecated_list:
- dhcpd # Deprecated
- named # Deprecated
- wondershaper # Deprecated
- dansguardian # Deprecated
#- xo_services # Unmaintained
#- activity_server # Unmaintained
#- ejabberd_xs # Unmaintained
#- idmgr # Unmaintained
#- dokuwiki # Unmaintained
#- ejabberd # Unmaintained
#- elgg # Unmaintained
- name: 'DISALLOW "XYZ_install: True" if deprecated'
assert:
that: "{{ item }}_install is undefined or not {{ item }}_install"
fail_msg: "DISALLOWED: '{{ item }}_install: True' (e.g. in /etc/iiab/local_vars.yml)"
quiet: yes
loop: "{{ vars_deprecated_list }}"
# 2023-12-04: ansible-core 2.16.1 suddenly no longer allows 'assert' with
# 'with_items' below (whereas 'loop' construct above works!) BACKGROUND:
#
# 'due to mitigation of security issue CVE-2023-5764 in ansible-core 2.16.1,
# conditional expressions with embedded template blocks can fail with the
# message “Conditional is marked as unsafe, and cannot be evaluated.”'
# https://docs.ansible.com/ansible-core/2.16/porting_guides/porting_guide_core_2.16.html#playbook
#
# with_items:
# - dhcpd # Deprecated
# - named # Deprecated
# - wondershaper # Deprecated
# - dansguardian # Deprecated
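
Because the assert above now refuses any deprecated role requested with "<role>_install: True", a box can be pre-checked before running iiab-install by grepping local_vars.yml for those names. A small sketch, assuming the same four names as vars_deprecated_list:

# Sketch only -- matches the four deprecated names listed in vars_deprecated_list
grep -E '^(dhcpd|named|wondershaper|dansguardian)_install:[[:space:]]*[Tt]rue' /etc/iiab/local_vars.yml \
    && echo "Deprecated role(s) requested -- iiab-install will refuse to proceed"
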


@ -6,7 +6,7 @@ https://github.com/iiab/iiab/wiki/IIAB-Contributors-Guide#ansible[stage]
hardware, low-level OS quirks, and basic security:
* SSHD
* Tailscale if/as needed later for remote support
* OpenVPN if/as needed later for remote support
* https://github.com/iiab/iiab/tree/master/roles/iiab-admin#iiab-admin-readme[iiab-admin]
username and group, to log into Admin Console
* dnsmasq (install now, configure later!)
@ -14,16 +14,17 @@ username and group, to log into Admin Console
* Ubermix (distro) needs /etc/tmpfiles.d/iiab.conf to create essential
/var/log subdirs on each boot
* *_Hardware actions:_*
** link:tasks/install-expand-rootfs.yml[*_install-expand-rootfs.yml_*]:
*** Install https://en.wikipedia.org/wiki/APT_(software)[apt] packages parted (reveals last partition) and cloud-guest-utils (for growpart)
*** Install link:templates/iiab-expand-rootfs[/usr/sbin/iiab-expand-rootfs] that acts on the flag `/.expand-rootfs`
*** Enable iiab-expand-rootfs.service so this can happen during any future boot-up
** link:tasks/raspberry_pi.yml[*_raspberry_pi.yml_*]:
*** RTC (real-time clock): install udev rule, configure, enable
*** Install apt packages fake-hwclock (as above RTC is often missing or dead!) and dphys-swapfile (for swap file below)
*** Increase swap file size (to `pi_swap_file_size`)
*** *_Install packages related to:_*
**** growpart
**** swapfile
**** fake-hwclock (as RTC is often missing or dead!)
**** Wi-Fi
*** Increase swap file size
*** https://github.com/iiab/iiab/blob/master/roles/1-prep/templates/iiab-rpi-max-rootfs.sh[rootfs
auto-resizing]
** NUC 6 Wi-Fi firmware
** Check for WiFi devices (if so, set `has_wifi_device`)
Recap: Similar to 0-init, 2-common, 3-base-server, 4-server-options and
5-xo-services — this 1st stage installs core server infra (that is not

View file

@ -1,37 +1,17 @@
- include_tasks: install-expand-rootfs.yml
# Conditional hardware actions below:
## DISCOVER PLATFORMS ######
# Put conditional actions for hardware platforms here
- include_tasks: raspberry_pi.yml
when: first_run and rpi_model != "none"
# 2024-02-09: Code below appears stale for Shanti's #3707 hardware
#- name: Check if the identifier for Intel's NUC6 built-in WiFi is present
# shell: "lsusb | grep 8087:0a2b | wc | awk '{print $1}'"
# register: usb_NUC6
# ignore_errors: True
#
#- name: Download {{ iiab_download_url }}/iwlwifi-8000C-13.ucode to /lib/firmware for built-in WiFi on NUC6
# get_url:
# url: "{{ iiab_download_url }}/Old/iwlwifi-8000C-13.ucode" # https://download.iiab.io/packages
# dest: /lib/firmware
# timeout: "{{ download_timeout }}"
# when: usb_NUC6.stdout|int > 0
- name: "Look for any WiFi devices present: ls -la /sys/class/net/*/phy80211 | cut -d/ -f5"
shell: ls -la /sys/class/net/*/phy80211 | cut -d/ -f5
register: wifi_devices
- name: Check if the identifier for Intel's NUC6 built-in WiFi is present
shell: "lsusb | grep 8087:0a2b | wc | awk '{print $1}'"
register: usb_NUC6
ignore_errors: True
changed_when: False
- name: "Set has_wifi_device: True, if output (from above) shows device(s) here: {{ wifi_devices.stdout_lines }}"
set_fact:
has_wifi_device: True
when: wifi_devices is defined and wifi_devices.stdout_lines | length > 0
# when: wifi_devices is defined and wifi_devices.stdout | trim != ""
- debug:
var: has_wifi_device
- name: Download {{ iiab_download_url }}/iwlwifi-8000C-13.ucode to /lib/firmware for built-in WiFi on NUC6 # iiab_download_url is http://download.iiab.io/packages
get_url:
url: "{{ iiab_download_url }}/iwlwifi-8000C-13.ucode"
dest: /lib/firmware
timeout: "{{ download_timeout }}"
when: usb_NUC6.stdout|int > 0


@ -1,20 +0,0 @@
- name: Install packages 'parted' and 'cloud-guest-utils' (for /usr/bin/growpart, though raspi-config uses fdisk)
package:
name:
- parted # 2022-03-15: RasPiOS and Ubuntu install this regardless -- so rarely nec, but just in case.
- cloud-guest-utils # 2022-04-02: For growpart command -- whereas RasPiOS's 'raspi-config --expand-rootfs' instead uses fdisk (requiring a reboot, see do_expand_rootfs() in https://github.com/RPi-Distro/raspi-config/blob/master/raspi-config). FYI Ubuntu pre-installs cloud-guest-utils, for use with cloud-init.
state: present
- name: "Install from templates: /usr/sbin/iiab-expand-rootfs, /etc/systemd/system/iiab-expand-rootfs.service"
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: "{{ item.mode }}"
with_items:
- { src: 'iiab-expand-rootfs', dest: '/usr/sbin/', mode: '0755' }
- { src: 'iiab-expand-rootfs.service', dest: '/etc/systemd/system/', mode: '0644' }
- name: Enable iiab-expand-rootfs.service
systemd:
name: iiab-expand-rootfs
enabled: yes


@ -3,47 +3,33 @@
- name: ...IS BEGINNING ============================================
meta: noop
- name: SSHD
- name: SSHD -- required by OpenVPN below -- also run by roles/4-server-options/tasks/main.yml
include_role:
name: sshd
when: sshd_install
- name: TAILSCALE (VPN)
- name: OPENVPN
include_role:
name: tailscale
when: tailscale_install
name: openvpn
when: openvpn_install
- name: REMOTE.IT
include_role:
name: remoteit
when: remoteit_install
- name: IIAB-ADMIN -- includes {lynx, screen, sudo-prereqs.yml, admin-user.yml, pwd-warnings.yml}
- name: IIAB-ADMIN -- includes roles/iiab-admin/tasks/access.yml
include_role:
name: iiab-admin
#when: iiab_admin_install # Flag might be created in future?
- name: Copy iiab-apps-to-be-installed from {{ iiab_dir }}/scripts to /usr/bin/
copy:
src: "{{ iiab_dir }}/scripts/iiab-apps-to-be-installed" # /opt/iiab/iiab
dest: /usr/bin/
mode: '0755'
- name: Copy iiab-network from {{ iiab_dir }}/scripts to /usr/local/bin/
copy:
src: "{{ iiab_dir }}/scripts/iiab-network"
dest: /usr/local/bin/
mode: '0755'
- name: Install ~12 network/wifi/related packages + Squid if necessary + configure /etc/sysctl.conf -- full configuration LATER in 'network', after Stage 9
include_tasks: roles/network/tasks/install.yml
when: network_install and network_installed is undefined
- name: Install dnsmasq -- configure LATER in 'network', after Stage 9
include_tasks: roles/network/tasks/dnsmasq.yml
#when: dnsmasq_install # Flag might be used in future?
- include_tasks: uuid.yml
- include_tasks: ubermix.yml
- name: install-expand-rootfs.yml, raspberry_pi.yml, NUC6 WiFi firmware, check for WiFi devices
include_tasks: hardware.yml
- include_tasks: hardware.yml # Can run raspberry_pi.yml
# Debian 10 "Buster" is apparently enabling AppArmor in 2019:
@ -74,10 +60,7 @@
# when: not is_debuntu and selinux_disabled is defined and selinux_disabled.changed
- name: Install {{ iiab_env_file }} from template -- FYI this file can be run as a script if absolutely nec -- e.g. 'source /etc/iiab/iiab.env && echo $WWWROOT'
- name: Recording STAGE 1 HAS COMPLETED ============================
template:
src: roles/1-prep/templates/iiab.env.j2
dest: "{{ iiab_env_file }}"
- name: Recording STAGE 1 HAS COMPLETED ============================
meta: noop
dest: "{{ iiab_env_file }}" # Can also be run as a script if absolutely nec, e.g. 'source /etc/iiab/iiab.env && echo $WWWROOT'

View file

@ -4,6 +4,9 @@
template:
src: 92-rtc-i2c.rules
dest: /etc/udev/rules.d/92-rtc-i2c.rules
#owner: root
#group: root
#mode: 0644
when: rtc_id is defined and rtc_id != "none"
# RTC requires a change to the device tree (and reboot)
@ -21,14 +24,39 @@
state: present
when: rtc_id is defined and rtc_id != "none" and is_ubuntu # CLARIF: Ubuntu runs increasingly well on RPi hardware, starting in 2020 especially
#- name: Enable bluetooth in /boot/firmware/syscfg.txt on Ubuntu (needs reboot)
# lineinfile:
# path: /boot/firmware/syscfg.txt
# regexp: '^include*'
# line: 'include btcfg.txt'
# when: is_ubuntu
- name: 'Install packages: fake-hwclock, dphys-swapfile'
- name: '2021-07-27: SEE ALSO ~4 networking packages LATER installed by https://github.com/iiab/iiab/blob/master/roles/2-common/tasks/packages.yml'
meta: noop
- name: '2021-07-27: SEE ALSO 4-5 networking packages LATER installed by https://github.com/iiab/iiab/blob/master/roles/2-common/tasks/network.yml'
meta: noop
# 2021-07-27 explanation from @jvonau: The 3 BELOW (iw, rfkill, wireless-tools)
# are provided by RaspiOS. Ubuntu|Debian on the other hand are hit or miss:
# desktops might have some/all 3 preinstalled, while servers tend not to have
# these present at all, yet they need to be installed if you want to take full
# advantage of WiFi on Ubuntu and friends -- installation is only enforced on RPi
# hardware, where we know in advance that WiFi is likely to be present.
- name: 'Install packages: cloud-guest-utils, dphys-swapfile, fake-hwclock, iw, rfkill, wireless-tools'
package:
name:
- fake-hwclock # 2021-03-15: Missing on Ubuntu etc. RasPiOS installs this regardless -- to save/restore system clock on machines w/o working RTC (above).
- dphys-swapfile # 2021-03-15: Missing on Ubuntu etc. RasPiOS installs this regardless -- to autogenerate and use a swap file (below).
- cloud-guest-utils # Contains 'growpart' for resizing a partition during boot, which is normally done with the aid of cloud-init
- dphys-swapfile # 2021-07-27: RaspiOS installs this regardless -- autogenerate and use a swap file
- fake-hwclock # 2021-07-27: RaspiOS installs this regardless -- save/restore system clock on machines without working RTC hardware
- iw # 2021-07-27: RaspiOS installs this regardless -- configure Linux wireless devices -- hard dependence for ap0 creation, SEE https://github.com/iiab/iiab/blob/master/roles/network/templates/hostapd/iiab-clone-wifi.service.j2
- rfkill # 2021-07-27: RaspiOS installs this regardless -- enable & disable wireless devices
- wireless-tools # 2021-07-27: RaspiOS installs this regardless -- manipulate Linux Wireless Extensions
state: present
- name: Increase swap file size (to CONF_SWAPSIZE={{ pi_swap_file_size }} in /etc/dphys-swapfile) as the kalite pip download otherwise fails
lineinfile:
path: /etc/dphys-swapfile
@ -42,9 +70,18 @@
state: restarted
#- name: Enable bluetooth in /boot/firmware/syscfg.txt on Ubuntu (needs reboot)
# lineinfile:
# path: /boot/firmware/syscfg.txt
# regexp: '^include*'
# line: 'include btcfg.txt'
# when: is_ubuntu
- name: Install RPi rootfs resizing (/usr/sbin/iiab-rpi-max-rootfs.sh) and its systemd service (/etc/systemd/system/iiab-rpi-root-resize.service), from templates (root:root by default)
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
#owner: root
#group: root
mode: "{{ item.mode }}"
with_items:
- { src: 'iiab-rpi-max-rootfs.sh', dest: '/usr/sbin/', mode: '0755' }
- { src: 'iiab-rpi-root-resize.service', dest: '/etc/systemd/system/', mode: '0644' }
- name: Enable RPi rootfs resizing (systemd service iiab-rpi-root-resize.service)
systemd:
name: iiab-rpi-root-resize
enabled: yes


@ -1,72 +0,0 @@
#!/bin/bash -xe
# Expand rootfs partition to its maximum size, if /.expand-rootfs exists.
# Used by /etc/systemd/system/iiab-expand-rootfs.service on IIAB boot.
# Should work with all Linux OS's boot disks -- regardless whether Raspberry Pi
# microSD cards, external USB drives, internal spinning disks or SSD's, etc.
# Verifies that rootfs is the last partition.
# RELATED:
# 1. https://github.com/iiab/iiab-factory/blob/master/box/rpi/min-sd
# 2. https://github.com/iiab/iiab-factory/blob/master/box/rpi/cp-sd
# 3. https://github.com/iiab/iiab-factory/blob/master/box/rpi/xz-json-sd
# OR https://github.com/iiab/iiab-factory/blob/master/box/rpi/exp-sd
if [ -f /.expand-rootfs ] || [ -f /.resize-rootfs ]; then
echo "$0: Expanding rootfs partition"
if [ -x /usr/bin/raspi-config ]; then # Raspberry Pi OS -- WARNING: their fdisk-centric approach of course FAILS with "Hybrid MBR" or GPT partition tables, as required by any drive > 2TB :/
# 2022-02-17: Uses do_expand_rootfs() from:
# https://github.com/RPi-Distro/raspi-config/blob/master/raspi-config
# 2023-10-05: Official new RPi instructions:
# sudo raspi-config nonint do_expand_rootfs
# https://www.raspberrypi.com/documentation/computers/configuration.html#expand-filesystem-nonint
raspi-config --expand-rootfs # REQUIRES A REBOOT
rm -f /.expand-rootfs /.resize-rootfs
reboot # In future, we might warn interactive users that a reboot is coming?
else # REQUIRES NO REBOOT; BEWARE iiab-expand-rootfs.service RACE CONDITION WITH fsck (PR #2522 & #3325)
# 2022-03-15: Borrows from above raspi-config URL's do_expand_rootfs()
ROOT_PART="$(findmnt / -o SOURCE -n)" # e.g. /dev/sda2 or /dev/mmcblk0p2
ROOT_DEV="/dev/$(lsblk -no pkname "$ROOT_PART")" # e.g. /dev/sda or /dev/mmcblk0
ROOT_PART_NUM="$(echo "$ROOT_PART" | grep -o "[[:digit:]]*$")" # e.g. 2
# SLOW (~10 seconds) but it works!
LAST_PART_NUM=$(parted "$ROOT_DEV" -ms unit s p | tail -n 1 | cut -f 1 -d:)
if [ $ROOT_PART_NUM -ne $LAST_PART_NUM ]; then
echo "ERROR: $ROOT_PART partition ($ROOT_PART_NUM) is not the last partition ($LAST_PART_NUM). Don't know how to expand."
exit 1
fi
# Expand partition
growpart $ROOT_DEV $ROOT_PART_NUM || true # raspi-config instead uses fdisk (assuming MBR). They really should transition to gdisk, as required by any drive > 2TB. WARNING: growpart RC 2 is more severe than RC 1, and should possibly be handled separately in future?
rc=$? # Make Return Code visible, for 'bash -x'
resize2fs $ROOT_PART
rc=$? # Make RC visible (as above)
# 2022-03-15: Legacy code below worked with Raspberry Pi microSD cards
# but *not* with USB boot drives, internal spinning disks/SSD's, etc.
# # ASSUMES SD CARD STYLE PARTITION NAME LIKE <device>p<partition number>
# # e.g. /dev/mmcblk0p2 mounts at / (typical RasPiOS microSD)
# # BUT /dev/sda2 mounts at /media/usb1 (RasPiOS USB boot disk...
# # ...WON'T WORK BELOW; recap @ PR #3121)
# # Calculate root partition
# root_part=`lsblk -aP -o NAME,MOUNTPOINT | grep 'MOUNTPOINT="/"' | awk -F\" '{ print $2 }'` # e.g. mmcblk0p2
# root_dev=${root_part:0:-2} # e.g. mmcblk0
# # bash substring expansion: "negative offset [below, but not above]
# # must be separated from the colon by at least one space to avoid
# # being confused with the :- expansion"
# # https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html
# root_part_no=${root_part: -1} # e.g. 2
# # Resize partition
# growpart /dev/$root_dev $root_part_no
# resize2fs /dev/$root_part
rm -f /.expand-rootfs /.resize-rootfs
fi
fi


@ -1,24 +0,0 @@
[Unit]
Description=Root Filesystem Auto-Expander
DefaultDependencies=no
# 2022-08-08: IIAB's 4 core OS's have 'After=systemd-fsck-root.service' WITHIN
# systemd-remount-fs.service, allowing us to avoid #3325 race condition w/ fsck
After=systemd-remount-fs.service
# 2022-08-08: While dphys-swapfile.service doesn't exist on Ubuntu, Mint
# and pure Debian, the following line may still serve a purpose on RasPiOS:
Before=dphys-swapfile.service
[Service]
Environment=TERM=linux
Type=oneshot
ExecStart=/usr/sbin/iiab-expand-rootfs
# 2022-08-08: By default, systemd dangerously kills rootfs expansion after just
# 90s (1TB microSD cards take ~8 min to expand). Let's remove the time limit:
TimeoutSec=infinity
# "Standard output type syslog is obsolete"
# StandardError=syslog
# WHEREAS StandardError=journal is the default, per https://www.freedesktop.org/software/systemd/man/systemd.exec.html#StandardOutput=
RemainAfterExit=yes
[Install]
WantedBy=local-fs.target


@ -0,0 +1,20 @@
#!/bin/bash -x
# Resize rootfs and its partition on the rpi SD card to maximum size
# To be used by systemd service on boot
# Only resizes if /.resize-rootfs exists
# Assumes root is last partition
# Only works on F22 + where resizepart command exists
# Assumes sd card style partition name like <device>p<partition number>
if [ -f /.resize-rootfs ];then
echo "$0: maximizing rootfs partion"
# Calculate root partition
root_part=`lsblk -aP -o NAME,MOUNTPOINT|grep 'MOUNTPOINT="/"' |awk -F\" '{ print $2 }'`
root_dev=${root_part:0:-2}
root_part_no=${root_part: (-1)}
# Resize partition
growpart /dev/$root_dev $root_part_no
resize2fs /dev/$root_part
rm /.resize-rootfs
fi


@ -0,0 +1,12 @@
[Unit]
Description=Root Filesystem Auto-Resizer
[Service]
Environment=TERM=linux
Type=oneshot
ExecStart=/usr/sbin/iiab-rpi-max-rootfs.sh
StandardError=syslog
RemainAfterExit=no
[Install]
WantedBy=multi-user.target


@ -9,8 +9,10 @@ https://internet-in-a-box.org/[Internet-in-a-Box (IIAB)] server.
These are (partially) put in place:
* IIAB directory structure (link:tasks/fl.yml[file layout])
* Common https://en.wikipedia.org/wiki/APT_(software)[apt] software packages
* Networking apt packages (including many WiFi tools, and also iptables-persistent for the https://en.wikipedia.org/wiki/Iptables[iptables] firewall)
* Common https://en.wikipedia.org/wiki/APT_(software)[apt] software
packages
* Networking (including the
https://en.wikipedia.org/wiki/Iptables[iptables] firewall)
* link:tasks/iiab-startup.yml[/usr/libexec/iiab-startup.sh] similar to
AUTOEXEC.BAT and /etc/rc.local, in order to run jobs on boot


@ -1,6 +1,6 @@
# fl.yml signifies "file layout"
- name: "File Layout - Create directories: 1 in {{ py3_dist_path }}, 2 in {{ iiab_base }}, 17 in {{ content_base }}" # iiab_base: /opt/iiab
- name: "File Layout - Create directories: 1 in /etc, 1 in {{ py3_dist_path }}, 3 in {{ iiab_base }}, 17 in {{ content_base }}" # iiab_base: /opt/iiab
file:
path: "{{ item }}"
# owner: root
@ -8,11 +8,11 @@
# mode: '0755'
state: directory
with_items:
#- /etc/sysconfig/olpc-scripts/setup.d/installed/
- /etc/sysconfig/olpc-scripts/setup.d/installed/
- "{{ py3_dist_path }}/iiab" # /usr/lib/python3/dist-packages
#- "{{ yum_packages_dir }}" # /opt/iiab/yum-packages
- "{{ yum_packages_dir }}" # /opt/iiab/yum-packages
- "{{ pip_packages_dir }}" # /opt/iiab/pip-packages
- "{{ downloads_dir }}" # /opt/iiab/downloads
- "{{ downloads_dir }}" # /opt/iiab/downloads -- generally already done by Stage 1's roles/remoteit/tasks/install.yml
#- "{{ content_base }}/downloads" # /library/downloads auto-created just below
- "{{ content_base }}/downloads/zims"
- "{{ content_base }}/downloads/maps"


@ -8,14 +8,8 @@
- include_tasks: packages.yml
- name: "Use 'sysctl' to set 'kernel.core_uses_pid: 1' in /etc/sysctl.conf"
sysctl: # Places these settings in /etc/sysctl.conf, to survive reboot
name: "{{ item.name }}"
value: "{{ item.value }}"
with_items:
#- { name: 'kernel.sysrq', value: '1' } # OS values differ, Ok?
- { name: 'kernel.core_uses_pid', value: '1' }
#- { name: 'kernel.shmmax', value: '268435456' } # OS values differ, Ok?
- name: "Network prep, including partial setup of iptables (firewall) -- SEE ALSO: 1-prep/tasks/raspberry_pi.yml"
include_tasks: network.yml
- include_tasks: iiab-startup.yml


@ -0,0 +1,45 @@
- name: '2021-07-27: SEE ALSO ~3 networking packages EARLIER installed by https://github.com/iiab/iiab/blob/master/roles/1-prep/tasks/raspberry_pi.yml'
meta: noop
- name: '2021-07-27: SEE ALSO ~4 networking packages EARLIER installed by https://github.com/iiab/iiab/blob/master/roles/2-common/tasks/packages.yml'
meta: noop
- name: Install package networkd-dispatcher (OS's other than RaspiOS)
package:
name: networkd-dispatcher # Dispatcher service for systemd-networkd connection status changes
state: present
when: not is_raspbian
- name: 'Install network packages: hostapd, iproute2, iptables-persistent, netmask -- later used by https://github.com/iiab/iiab/tree/master/roles/network'
package:
name:
- hostapd # IEEE 802.11 AP and IEEE 802.1X/WPA/WPA2/EAP Authenticator -- has its service masked out of the box, and is only used when IIAB's network role detects the presence of WiFi and an AP is desired
- iproute2 # 2021-07-27: RaspiOS installs this regardless -- the new networking and traffic control tools, meant to replace net-tools
- iptables-persistent # Boot-time loader for netfilter rules, iptables (firewall) plugin -- however Netfilter / nftables is ever moving forward so keep an eye on it!
- netmask # Handy utility -- helps determine network masks
state: present
# 2021-08-17: Debian ignores this, according to 2013 post:
# https://serverfault.com/questions/511099/debian-ignores-etc-network-if-pre-up-d-iptables
# - name: Install /etc/network/if-pre-up.d/iptables from template (0755)
# template:
# src: iptables
# dest: /etc/network/if-pre-up.d/iptables
# mode: '0755'
# Ongoing rework (e.g. PR #2652) arising from ansible.posix collection changes:
- name: "Use 'sysctl' to set 'kernel.core_uses_pid: 1' + 4 network settings in /etc/sysctl.conf -- e.g. disabling IPv6 (this might be overkill, as IPv6 should really only be disabled on the LAN side, i.e. br0)"
sysctl: # Places these settings in /etc/sysctl.conf, to survive reboot
name: "{{ item.name }}"
value: "{{ item.value }}"
with_items:
- { name: 'net.ipv4.ip_forward', value: '1' } # Masquerading LAN->Internet
- { name: 'net.ipv4.conf.default.rp_filter', value: '1' }
- { name: 'net.ipv4.conf.default.accept_source_route', value: '0' }
#- { name: 'kernel.sysrq', value: '1' } # OS values differ, Ok?
- { name: 'kernel.core_uses_pid', value: '1' }
#- { name: 'net.ipv4.tcp_syncookies', value: '1' } # Very standard in 2020
#- { name: 'kernel.shmmax', value: '268435456' } # OS values differ, Ok?
- { name: 'net.ipv6.conf.all.disable_ipv6', value: '1' } # IPv6 disabled
#- { name: 'net.ipv6.conf.default.disable_ipv6', value: '1' } # AUTO-SET
#- { name: 'net.ipv6.conf.lo.disable_ipv6', value: '1' } # BY ABOVE


@ -1,39 +1,47 @@
# 2022-03-16: 'apt show <pkg> | grep Size' revealed download sizes, on 64-bit RasPiOS with desktop.
- name: '2021-07-27: SEE ALSO ~3 networking packages EARLIER installed by https://github.com/iiab/iiab/blob/master/roles/1-prep/tasks/raspberry_pi.yml'
meta: noop
- name: "Install 19 common packages: acpid, bzip2, cron, curl, gawk, gpg, htop, i2c-tools, logrotate, lshw, pandoc, pastebinit, plocate, rsync, sqlite3, tar, unzip, usbutils, wget"
- name: '2021-07-27: SEE ALSO 4-5 networking packages LATER installed by https://github.com/iiab/iiab/blob/master/roles/2-common/tasks/network.yml'
meta: noop
- name: "Install 20 common packages: acpid, avahi-daemon, bzip2, curl, gawk, htop, i2c-tools, libnss-mdns, logrotate, mlocate, net-tools, pandoc, pastebinit, rsync, sqlite3, tar, unzip, usbutils, wget, wpasupplicant"
package:
name:
- acpid # 55kB download: Daemon for ACPI (power mgmt) events
- bzip2 # 47kB download: RasPiOS installs this regardless -- 2021-04-26: Prob not used, but can't hurt?
- cron # 98kB download: RasPiOS installs this regardless -- 2022-10-13: Debian 12 needs this added (for now?)
- curl # 254kB download: RasPiOS installs this regardless -- Used to install roles/nodejs and roles/nodered
#- etckeeper # 54kB download: "nobody is really using etckeeper and it's bloating the filesystem every time apt runs" per @jvonau at https://github.com/iiab/iiab/issues/1146
#- exfat-fuse # 28kB download: 2021-07-27: Should no longer be nec with 5.4+ kernels, so let's try commenting it out
#- exfat-utils # 41kB download: Ditto! See also 'ntfs-3g' below
- gawk # 533kB download
- gpg # 884kB download: Debian 12+ (especially!) require this for apt installs of gitea, kolibri, mongodb, yarn
- htop # 109kB download: RasPiOS installs this regardless
- i2c-tools # 78kB download: Low-level bus/chip/register/EEPROM tools e.g. for RTC
- logrotate # 67kB download: RasPiOS installs this regardless
- lshw # 257kB download: For 'lshw -C network' in iiab-diagnostics
#- lynx # 505kB download: Installed by 1-prep's roles/iiab-admin/tasks/main.yml
#- make # 376kB download: 2021-07-27: Currently used by roles/pbx and no other roles
#- ntfs-3g # 379kB download: RasPiOS installs this regardless -- 2021-07-31: But this should no longer be nec with 5.4+ kernels, similar to exfat packages above -- however, see also this symlink warning: https://superuser.com/questions/1050544/mount-with-kernel-ntfs-and-not-ntfs-3g -- and upcoming kernel 5.15 improvements: https://www.phoronix.com/scan.php?page=news_item&px=New-NTFS-Likely-For-Linux-5.15
#- openssh-server # 318kB download: RasPiOS installs this regardless -- this is also installed by 1-prep's roles/sshd/tasks/main.yml to cover all OS's
- pandoc # 19kB download: For /usr/bin/iiab-refresh-wiki-docs
- pastebinit # 47kB download: For /usr/bin/iiab-diagnostics
#- mlocate # 92kB download
- plocate # 97kB download: Faster & smaller than locate & mlocate
#- python3-pip # 337kB download: 2023-03-22: Used to be installed by /opt/iiab/iiab/scripts/ansible -- which would auto-install 'python3-setuptools' and 'python3' etc
#- python3-venv # 1188kB download: 2023-03-22: Already installed by /opt/iiab/iiab/scripts/ansible -- used by roles like {calibre-web, jupyterhub, lokole} -- whereas roles/kalite uses (virtual) package 'virtualenv' for Python 2 -- all these 3+1 IIAB roles install 'python3-venv' for themselves. FYI: Debian 11 no longer auto-installs 'python3-venv' when you install 'python3'
- rsync # 351kB download: RasPiOS installs this regardless
#- screen # 551kB download: Installed by 1-prep's roles/iiab-admin/tasks/main.yml
- sqlite3 # 1054kB download
- tar # 799kB download: RasPiOS installs this regardless
- unzip # 151kB download: RasPiOS installs this regardless
#- usbmount # 18kB download: Moved to roles/usb_lib/tasks/install.yml
- usbutils # 67kB download: RasPiOS installs this regardless -- 2021-07-27: move to roles/usb_lib/tasks/install.yml ?
- wget # 922kB download: RasPiOS installs this regardless
- acpid # Daemon for ACPI (power mgmt) events
- avahi-daemon # 2021-07-27: RaspiOS (and package libnss-mdns, below) install this regardless -- holdover from the XO days and used to advertise ssh/admin-console being available via avahi-daemon -- used with https://github.com/iiab/iiab/blob/master/roles/network/tasks/avahi.yml
#- avahi-discover # 2021-07-27: Commented out long ago
- bzip2 # 2021-04-26: Prob not used, but can't hurt?
- curl # Used to install roles/nodejs and roles/nodered
#- etckeeper # "nobody is really using etckeeper and it's bloating the filesystem every time apt runs" per @jvonau at https://github.com/iiab/iiab/issues/1146
#- exfat-fuse # 2021-07-27: Should no longer be nec with 5.4+ kernels, so let's try commenting it out
#- exfat-utils # Ditto! See also 'ntfs-3g' below
- gawk
- htop
- i2c-tools # Low-level bus/chip/register/EEPROM tools e.g. for RTC
#- inetutils-syslogd # 2021-07-27: Error logging facility -- holdover from the XO days, journalctl has replaced this in newer distros
#- iproute2 # Installed by roles/2-common/tasks/network.yml
- logrotate
- libnss-mdns # 2021-07-27: RaspiOS (and package avahi-daemon, above) install this regardless -- client-side library -- provides name resolution via mDNS (Multicast DNS) using Zeroconf/Bonjour e.g. Avahi
#- lynx # Installed by 1-prep's roles/iiab-admin/tasks/access.yml
#- make # 2021-07-27: Currently used by roles/pbx and no other roles
- mlocate
- net-tools # 2021-04-26: @jvonau suggests possibly deleting this...unless oldtimers really want these older commands in iiab-diagnostics output?
#- ntfs-3g # 2021-07-31: RaspiOS installs this regardless -- but this should no longer be nec with 5.4+ kernels, similar to exfat packages above -- however, see also this symlink warning: https://superuser.com/questions/1050544/mount-with-kernel-ntfs-and-not-ntfs-3g -- and upcoming kernel 5.15 improvements: https://www.phoronix.com/scan.php?page=news_item&px=New-NTFS-Likely-For-Linux-5.15
#- openssh-server # ssh (Raspbian) or openssh-server (other OS's) already installed by 1-prep's roles/sshd/tasks/main.yml
- pandoc # For /usr/bin/iiab-refresh-wiki-docs
- pastebinit # For /usr/bin/iiab-diagnostics
#- python3-pip # 2021-07-29: Already installed by /opt/iiab/iiab/scripts/ansible -- this auto-installs 'python3-setuptools' and 'python3' etc
#- python3-venv # 2021-07-30: For Ansible module 'pip' used in roles like {calibre-web, jupyterhub, lokole} -- whereas roles/kalite uses (virtual) package 'virtualenv' for Python 2 -- all these 3+1 IIAB roles install 'python3-venv' for themselves. FYI: Debian 11 auto-installs 'python3-venv' when you install 'python3' -- whereas Ubuntu (e.g. 20.04 & 21.10) and RaspiOS 10 do not.
- rsync
#- screen # Installed by 1-prep's roles/iiab-admin/tasks/access.yml
- sqlite3
#- sudo # (1) Should be installed prior to installing IIAB, (2) Can also be installed by roles/1-prep's roles/openvpn/tasks/install.yml, (3) Is definitely installed by 1-prep's roles/iiab-admin/tasks/sudo-prereqs.yml
- tar
- unzip
#- usbmount # Moved to roles/usb_lib/tasks/install.yml
- usbutils # 2021-07-27: RaspiOS installs this regardless -- move to roles/usb_lib/tasks/install.yml ?
- wget
- wpasupplicant # 2021-07-27: RaspiOS installs this regardless -- client library for connections to a WiFi AP
state: present
#- name: "Install 10 yum/dnf packages: avahi, avahi-tools, createrepo, linux-firmware, nss-mdns, openssl, syslog, wpa_supplicant, xml-common, yum-utils (redhat)"


@ -1,21 +1,10 @@
.. |ss| raw:: html
<strike>
.. |se| raw:: html
</strike>
.. |nbsp| unicode:: 0xA0
:trim:
====================
3-base-server README
====================
This 3rd `stage <https://github.com/iiab/iiab/wiki/IIAB-Contributors-Guide#ansible>`_ installs base server infra that `Internet-in-a-Box (IIAB) <https://internet-in-a-box.org/>`_ requires, including:
- |ss| `MySQL <https://github.com/iiab/iiab/blob/master/roles/mysql>`_ (database underlying many/most user-facing apps). |se| |nbsp| *As of 2023-11-05, MySQL / MariaDB is NO LONGER INSTALLED by 3-base-server — instead it's installed on-demand — as a dependency of Matomo, MediaWiki, Nextcloud, PBX (for FreePBX), WordPress &/or Admin Console.* This IIAB role (roles/mysql) also installs apt package:
- `MySQL <https://github.com/iiab/iiab/blob/master/roles/mysql>`_ (database underlying many/most user-facing apps). This IIAB role also installs apt package:
- **php{{ php_version }}-mysql** — which forcibly installs **php{{ php_version }}-common**
- `NGINX <https://github.com/iiab/iiab/blob/master/roles/nginx>`_ web server (with Apache in some lingering cases). This IIAB role also installs apt package:
- **php{{ php_version }}-fpm** — which forcibly installs **php{{ php_version }}-cli**, **php{{ php_version }}-common** and **libsodium23**
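For illustration only, the "also installs apt package" notes above correspond roughly to an Ansible task of the shape below, with ``php_version`` resolved from IIAB's default variables (a hypothetical sketch -- the real tasks live in roles/mysql and roles/nginx)::

    - name: Sketch - install the PHP packages pulled in by this stage
      package:
        name:
          - "php{{ php_version }}-mysql"   # forcibly installs php{{ php_version }}-common
          - "php{{ php_version }}-fpm"     # forcibly installs php{{ php_version }}-cli, php{{ php_version }}-common and libsodium23
        state: present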


@ -3,13 +3,10 @@
- name: ...IS BEGINNING =====================================
meta: noop
# 2023-11-05: MySQL (actually MariaDB) had been mandatory, installed on every
# IIAB by 3-base-server. Now installed on demand -- as a dependency of Matomo,
# MediaWiki, Nextcloud, PBX (for FreePBX), WordPress &/or Admin Console.
# - name: MYSQL + CORE PHP
# include_role:
# name: mysql
# #when: mysql_install
- name: MYSQL + CORE PHP
include_role:
name: mysql
#when: mysql_install
# 2021-05-21: Apache role 'httpd' is installed as nec by any of these 6 roles:
#


@ -2,7 +2,7 @@
4-server-options README
=======================
Whereas 3-base-server installs critical packages needed by all, this 4th `stage <https://github.com/iiab/iiab/wiki/IIAB-Contributors-Guide#ansible>`_ installs a broad array of *options* ⁠— depending on which server apps will be installed in later stages ⁠— as specified in `/etc/iiab/local_vars.yml <http://FAQ.IIAB.IO#What_is_local_vars.yml_and_how_do_I_customize_it%3F>`_
Whereas 3-base-server installs critical packages needed by all, this 4th `stage <https://github.com/iiab/iiab/wiki/IIAB-Contributors-Guide#ansible>`_ installs a broad array of *options* ⁠— depending on which server apps will be installed in later stages ⁠— as specified in `/etc/iiab/local_vars.yml <http://FAQ.IIAB.IO#What_is_local_vars.yml_and_how_do_I_customize_it.3F>`_
This includes more networking fundamentals, that may further be configured later on.
@ -11,7 +11,7 @@ Specifically, these might be installed:
- Python libraries
- SSH daemon
- Bluetooth for Raspberry Pi
- Instant-sharing of `USB stick content <https://wiki.iiab.io/go/FAQ#Can_teachers_display_their_own_content%3F>`_
- Instant-sharing of `USB stick content <https://wiki.iiab.io/go/FAQ#Can_teachers_display_their_own_content.3F>`_
- CUPS Printing
- Samba for Windows filesystems
- `www_options <https://github.com/iiab/iiab/blob/master/roles/www_options/tasks/main.yml>`_
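As a rough illustration, the flags controlling these options live in ``/etc/iiab/local_vars.yml`` and look like the hedged excerpt below (only variable names that appear in the Stage 4 tasks are shown; treat the values as indicative, not authoritative)::

    # Hypothetical excerpt -- consult your own local_vars.yml for the real list and defaults
    sshd_install: True       # SSH daemon, also run by Stage 1
    squid_install: False     # LESS MAINTAINED caching proxy
    named_install: False     # UNMAINTAINED
    dhcpd_install: False     # UNMAINTAINED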


@ -19,6 +19,28 @@
#when: pylibs_installed is undefined
#when: pylibs_install # Flag might be created in future?
- name: SSHD -- also run by roles/1-prep/tasks/main.yml as required by OpenVPN
include_role:
name: sshd
when: sshd_install
# UNMAINTAINED
- name: Install named / BIND
include_tasks: roles/network/tasks/named.yml
when: named_install is defined and named_install
# UNMAINTAINED
- name: Install dhcpd
include_tasks: roles/network/tasks/dhcpd.yml
when: dhcpd_install is defined and dhcpd_install
# LESS MAINTAINED
- name: Install Squid
include_tasks: roles/network/tasks/squid.yml
when: squid_install and squid_installed is undefined
- name: Install Bluetooth - only on Raspberry Pi
include_role:
name: bluetooth


@ -3,6 +3,11 @@
- name: ...IS BEGINNING ====================================
meta: noop
- name: AZURACAST
include_role:
name: azuracast
when: azuracast_install is defined and azuracast_install
# UNMAINTAINED
- name: DOKUWIKI
include_role:
@ -31,11 +36,10 @@
name: jupyterhub
when: jupyterhub_install
# UNMAINTAINED
- name: LOKOLE
include_role:
name: lokole
when: lokole_install is defined and lokole_install
when: lokole_install
- name: MEDIAWIKI
include_role:


@ -6,13 +6,12 @@
- name: KALITE
include_role:
name: kalite
when: kalite_install and (is_ubuntu_2204 or is_ubuntu_2310 or is_debian_12) # Also covers is_linuxmint_21 and is_raspbian_12
when: kalite_install
- name: KOLIBRI
include_role:
name: kolibri
when: kolibri_install
#when: kolibri_install and python_version is version('3.12', '<') # Debian 13 still uses Python 3.11 (for now!) so really this just avoids Ubuntu 24.04 and 24.10 pre-releases during initial iiab-install. CLARIF: This is all TEMPORARY until learningequality/kolibri#11316 brings Python 3.12 support to Kolibri 0.17 pre-releases (expected very soon).
- name: KIWIX
include_role:
@ -41,23 +40,10 @@
name: pathagar
when: pathagar_install is defined and pathagar_install
# WARNING: Since March 2023, 32-bit RasPiOS can act as 64-bit on RPi 4 and
# RPi 400 (unlike RPi 3!) SEE: https://github.com/iiab/iiab/pull/3422 and #3516
- name: Run command 'dpkg --print-architecture' to identify OS architecture (CPU arch as revealed by ansible_architecture ~= ansible_machine is NO LONGER enough!)
command: dpkg --print-architecture
register: dpkg_arch
when: sugarizer_install
- name: Explain bypassing of Sugarizer install if 32-bit OS
fail: # FORCE IT RED THIS ONCE!
msg: "BYPASSING SUGARIZER INSTALL ATTEMPT, as Sugarizer Server 1.5.0 requires MongoDB 3.2+ which is NO LONGER SUPPORTED on 32-bit Raspberry Pi OS. 'dpkg --print-architecture' output for your OS: {{ dpkg_arch.stdout }}"
when: sugarizer_install and not dpkg_arch.stdout is search("64")
ignore_errors: True
- name: SUGARIZER
include_role:
name: sugarizer
when: sugarizer_install and dpkg_arch.stdout is search("64")
when: sugarizer_install
- name: Recording STAGE 7 HAS COMPLETED ========================
lineinfile:


@ -6,23 +6,23 @@
- name: TRANSMISSION
include_role:
name: transmission
when: transmission_install and not (is_ubuntu_2404 or is_ubuntu_2410 or is_ubuntu_2504) # Also excludes is_linuxmint_22, for #3756 (whereas Debian 13 works great!)
when: transmission_install
- name: AWSTATS
include_role:
name: awstats
when: awstats_install
- name: MATOMO
include_role:
name: matomo
when: matomo_install
- name: MONIT
include_role:
name: monit
when: monit_install
- name: MUNIN
include_role:
name: munin
when: munin_install
- name: PHPMYADMIN
include_role:
name: phpmyadmin


@ -3,34 +3,16 @@
- name: ...IS BEGINNING ====================================
meta: noop
- name: AZURACAST
include_role:
name: azuracast
when: azuracast_install
# Porting to Python 3 is complete: does this belong elsewhere?
# Is porting to Python 3 complete, and if so does this belong elsewhere?
- name: CAPTIVE PORTAL
include_role:
name: captiveportal
when: captiveportal_install
# WARNING: Since March 2023, 32-bit RasPiOS can act as 64-bit on RPi 4 and
# RPi 400 (unlike RPi 3!) SEE: https://github.com/iiab/iiab/pull/3516
- name: Run command 'dpkg --print-architecture' to identify OS architecture (CPU arch as revealed by ansible_architecture ~= ansible_machine is NO LONGER enough!)
command: dpkg --print-architecture
register: dpkg_arch
when: internetarchive_install
- name: Explain bypassing of Internet Archive install if 32-bit OS
fail: # FORCE IT RED THIS ONCE!
msg: "BYPASSING INTERNET ARCHIVE PER https://github.com/iiab/iiab/issues/3641 -- 'dpkg --print-architecture' output for your OS: {{ dpkg_arch.stdout }}"
when: internetarchive_install and not dpkg_arch.stdout is search("64")
ignore_errors: True
- name: INTERNETARCHIVE
include_role:
name: internetarchive
when: internetarchive_install and dpkg_arch.stdout is search("64")
when: internetarchive_install
- name: MINETEST
include_role:
@ -55,46 +37,12 @@
name: pbx
when: pbx_install
- name: '2023-11-05 / TEMPORARY UNTIL ADMIN CONSOLE DECLARES ITS DEPENDENCY: Install MySQL (MariaDB) if admin_console_install (for setup-feedback and record_feedback.php)'
set_fact:
mysql_install: True
mysql_enabled: True
- name: "2021-06-27 TEMPORARY CODE TO INSTALL 'php-pear' UNTIL ADMIN CONSOLE DECLARES ITS OWN DEPENDENCY FOR: https://github.com/iiab/iiab-admin-console/blob/master/roles/cmdsrv/tasks/main.yml#L19"
package:
name: php-pear # WARNING: this also drags in 'php{{ php_version }}-xml' (also installed by MediaWiki, Nextcloud, roles/pbx's FreePBX, WordPress) AND 'php{{ php_version }}-cgi' (also installed by roles/pbx's FreePBX)
state: present
when: admin_console_install
- name: '2023-11-05 / TEMPORARY UNTIL ADMIN CONSOLE DECLARES ITS DEPENDENCY: Install MySQL (MariaDB) if admin_console_install (for setup-feedback and record_feedback.php)'
include_role:
name: mysql
when: admin_console_install
- name: '2023-11-05 / TEMPORARY UNTIL ADMIN CONSOLE DECLARES ITS DEPENDENCY: Install MySQL (MariaDB) if admin_console_install (for setup-feedback and record_feedback.php)'
fail:
msg: "Admin Console install cannot proceed, as MySQL / MariaDB is not installed."
when: admin_console_install and mysql_installed is undefined
# 2023-11-05: Moved from Stage 8, as it acts on mysql_installed (that might be set just above!)
- name: MUNIN
include_role:
name: munin
when: munin_install
- name: Read 'disk_used_a_priori' from /etc/iiab/iiab.ini
set_fact:
df1: "{{ lookup('ansible.builtin.ini', 'disk_used_a_priori', section='summary', file=iiab_ini_file) }}"
- name: Record currently used disk space, to compare with original 'disk_used_a_priori'
shell: df -B1 --output=used / | tail -1
register: df2
- name: Add ESTIMATED 'iiab_software_disk_usage = {{ df2.stdout|int - df1|int }}' to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: summary
option: iiab_software_disk_usage
value: "{{ df2.stdout|int - df1|int }}"
- name: Recording STAGE 9 HAS COMPLETED ====================
lineinfile:
path: "{{ iiab_env_file }}"


@ -1,8 +1,3 @@
- name: Record (initial) disk space used
shell: df -B1 --output=used / | tail -1
register: df1
- name: 'Install package: awstats'
package:
name: awstats
@ -88,7 +83,7 @@
# when: awstats_enabled and not is_debuntu
- name: "Summarize logs up to now: /usr/bin/perl /usr/lib/cgi-bin/awstats.pl -config=schoolserver -update"
command: /usr/bin/perl /usr/lib/cgi-bin/awstats.pl -config=schoolserver -update
shell: /usr/bin/perl /usr/lib/cgi-bin/awstats.pl -config=schoolserver -update
- name: Install /etc/nginx/cgi-bin.php from template
template:
@ -98,17 +93,6 @@
# RECORD AWStats AS INSTALLED
- name: Record (final) disk space used
shell: df -B1 --output=used / | tail -1
register: df2
- name: Add 'awstats_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: awstats
option: awstats_disk_usage
value: "{{ df2.stdout|int - df1.stdout|int }}"
- name: "Set 'awstats_installed: True'"
set_fact:
awstats_installed: True


@ -19,34 +19,27 @@
quiet: yes
- block:
- name: Install AWStats if 'awstats_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
include_tasks: install.yml
when: awstats_installed is undefined
- name: Install AWStats if 'awstats_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
include_tasks: install.yml
when: awstats_installed is undefined
- name: Enable/Disable/Restart NGINX
include_tasks: nginx.yml
- name: Enable/Disable/Restart NGINX
include_tasks: nginx.yml
- name: Add 'awstats' variable values to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: awstats
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
- option: name
value: AWStats
- option: description
value: '"AWStats (originally known as Advanced Web Statistics) is a package written in Perl which generates static or dynamic html summaries based upon web server logs."'
- option: awstats_install
value: "{{ awstats_install }}"
- option: awstats_enabled
value: "{{ awstats_enabled }}"
rescue:
- name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
fail:
msg: ""
when: not skip_role_on_error
- name: Add 'awstats' variable values to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: awstats
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
- option: name
value: AWStats
- option: description
value: '"AWStats (originally known as Advanced Web Statistics) is a package written in Perl which generates static or dynamic html summaries based upon web server logs."'
- option: awstats_install
value: "{{ awstats_install }}"
- option: awstats_enabled
value: "{{ awstats_enabled }}"


@ -261,7 +261,7 @@ AllowToUpdateStatsFromBrowser=1
# 3 - Possible on CLI and CGI
# Default: 2
#
AllowFullYearView=3
AllowFullYearView=2


@ -1,47 +1,19 @@
================
==========
AzuraCast README
================
==========
Install `AzuraCast <https://azuracast.com/>`_ with your `Internet-in-a-Box (IIAB) <https://internet-in-a-box.org/>`_ if you want a simple, self-hosted "web radio station" with a modern web UI/UX. You and your community can then schedule newscasts, podcasts, music, and even do live streaming of audio content (video streaming might also be possible in future!)
This playbook adds `AzuraCast <https://azuracast.com/>`_ to Internet-in-a-Box (IIAB) for network radio station functionality. With 'AzuraCast' you and your community can schedule podcasts, music, and even do live streaming of audio content. A variety of streaming formats are supported.
As soon as you install AzuraCast with IIAB, it can stream MP3 files (and similar files) using `LiquidSoap <https://docs.azuracast.com/en/developers/liquidsoap>`_ to help you schedule or randomize playback of MP3 songs (and similar).
Please see AzuraCast's `screenshots <https://www.azuracast.com/about/screenshots.html>`_.
Please see AzuraCast's `screenshots <https://www.google.com/search?q=azuracast+screenshot&tbm=isch>`_ and `docs <./README.rst#azuracast-docs>`_. Community implementation examples:
* https://twitter.com/internet_in_box/status/1564986581664014342
* https://youtu.be/XfiFiOi46mk
Optionally, live-streaming can also be made to work, e.g. if you install `Mixxx or BUTT <https://docs.azuracast.com/en/user-guide/streaming-software>`_ on your own. (If so, you have many options to configure streaming with `Icecast <https://icecast.org/>`_, `Shoutcast <https://www.shoutcast.com/>`_, etc.)
Requirements
------------
AzuraCast recommends `2-to-4 GB RAM minimum <https://docs.azuracast.com/en/getting-started/requirements#system-requirements>`_.
As of 2022-08-31, AzuraCast should run on Ubuntu 22.04 and **64-bit** Raspberry Pi OS: `#1772 <https://github.com/iiab/iiab/issues/1772>`_, `AzuraCast/AzuraCast#332 <https://github.com/AzuraCast/AzuraCast/issues/332>`_, `PR #2946 <https://github.com/iiab/iiab/pull/2946>`_
Other Linux distributions may also work, at your own risk, especially if Docker runs smoothly.
NOTE: AzuraCast was designed to be installed *just once* on a fresh OS. So ``./runrole --reinstall azuracast`` is not supported in general. However, if you accidentally damage your AzuraCast software, IIAB has posted `technical tips <./tasks/install.yml>`_ *(use at your own risk!)* in case of emergency.
As of 2019-08-04, this will only run on Ubuntu 18.04, and tentatively on Debian 10 "Buster" (`#1766 <https://github.com/iiab/iiab/issues/1766>`_). Support for Raspberry Pi remains a goal for now — please if you can, consider helping us solve this critical challenge (`#1772 <https://github.com/iiab/iiab/issues/1772>`_, `AzuraCast/AzuraCast#332 <https://github.com/AzuraCast/AzuraCast/issues/332>`_).
Using It
--------
* Do a normal IIAB install (https://download.iiab.io), making sure to set both variables ``azuracast_install`` and ``azuracast_enabled`` to ``True`` when IIAB's installer prompts you to edit `/etc/iiab/local_vars.yml <http://FAQ.IIAB.IO#What_is_local_vars.yml_and_how_do_I_customize_it%3F>`_
* When the IIAB software install completes, it will ask you to reboot, and AzuraCast's console will then be available at http://box.lan:12080
* That console site will prompt you to complete AzuraCast's initial setup: user accounts, managing stations, radio streams, etc.
* Do a normal IIAB install (http://download.iiab.io), making sure to set both variables ``azuracast_install`` and ``azuracast_enabled`` to ``True`` when it prompts you to edit `/etc/iiab/local_vars.yml <http://FAQ.IIAB.IO#What_is_local_vars.yml_and_how_do_I_customize_it.3F>`_, as you begin the installation.
* When the IIAB software install completes, it will ask you to reboot, and AzuraCast's console will then be available at http://box.lan:10080
* This console site will prompt you to complete AzuraCast's initial setup: user accounts, managing stations, radio streams, etc.
* Finally, check out some `how-to videos <https://www.youtube.com/watch?v=b1Rxlu5P804>`_ to learn to manage your own radio station!
NOTE: When creating a station using AzuraCast's console, its default streaming ports for ``station`` and ``autodj`` need to be in the `port range 10000-10499 <https://github.com/iiab/iiab/wiki/IIAB-Networking#list-of-ports--services>`_ (ports 12080 and 12443 may also be required!)
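For reference, a minimal sketch of the two settings mentioned above, as they would appear in `/etc/iiab/local_vars.yml <http://FAQ.IIAB.IO#What_is_local_vars.yml_and_how_do_I_customize_it%3F>`_ (the port default comes from this role's defaults file)::

    # Minimal sketch -- enable AzuraCast before running the IIAB installer
    azuracast_install: True
    azuracast_enabled: True
    # azuracast_http_port: 12080    # default console port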
AzuraCast Docs
--------------
- https://docs.azuracast.com
- https://docs.azuracast.com/en/getting-started/installation/post-installation-steps
- https://docs.azuracast.com/en/getting-started/settings
- https://docs.azuracast.com/en/getting-started/updates (can *DAMAGE* AzuraCast as of 2022-09-28)
- https://docs.azuracast.com/en/user-guide/streaming-software
- https://docs.azuracast.com/en/user-guide/troubleshooting
- https://docs.azuracast.com/en/user-guide/logs
- https://docs.azuracast.com/en/administration/docker
Note: When creating a station using AzuraCast's console, its default streaming ports for ``station`` and ``autodj`` need to be in the `port range 10000-10100 <https://github.com/iiab/iiab/wiki/IIAB-Networking#list-of-ports--services>`_.


@ -1,15 +1,15 @@
# A full-featured online radio station suite. Uses Docker.
# README: https://github.com/iiab/iiab/tree/master/roles/azuracast#readme
# A full-featured online radio station suite.
# Works on Ubuntu 18.04, Debian 9, 10. Uses docker
# azuracast_install: False
# azuracast_enabled: False # This var is currently IGNORED
# azuracast_enabled: False
# azuracast_http_port: 12080
# azuracast_https_port: 12443
# azuracast_http_port: 10080
# azuracast_https_port: 10443
## AzuraCast needs many ports in the 8000:8496 range by default, but IIAB
## services conflict, so this variable below sets a sane prefix.
## e.g. setting the below variable to 10 will result in port range 10000-10499
## AzuraCast needs many ports in the 8000:8100 range by default, but IIAB services
## conflict with those ports so this variable below sets a sane prefix.
## e.g. setting the below variable to 10 will result in port ranges 10000-10100
## being reserved for AzuraCast:
# azuracast_port_range_prefix: 10


@ -1,49 +1,23 @@
# 2022-09-29: './runrole --reinstall azuracast' is NOT supported!
#
# 1. But if you must, first completely uninstall Docker + WIPE AzuraCast data:
#
# apt purge docker-ce docker-ce-cli containerd.io docker-compose-plugin docker-scan-plugin
# rm -rf /library/docker /var/lib/docker /var/lib/containerd
#
# Per https://docs.docker.com/engine/install/ubuntu/#uninstall-docker-engine
#
# 2. REBOOT to avoid later problems with 'systemctl status docker' -- if you
# don't reboot, Ansible will fail below when 'docker.sh install' fails to
# start docker.service -- likewise if you run './docker.sh install-docker'
# manually in /opt/azuracast. Either way, 'systemctl restart docker' won't
# work for ~2 minutes. (Rebooting avoids all these hassles!)
#
# 3. Just FYI the Docker install process will rebuild its 11 core directories
# in /var/lib/docker -> /library/docker: (as 'docker.sh install' begins)
#
# buildkit containers image network overlay2 plugins runtimes swarm tmp trust volumes
#
# 4. Just FYI both MySQL passwords (MYSQL_PASSWORD & MYSQL_ROOT_PASSWORD) will
# be WIPED from /opt/azuracast/azuracast.env (and new passwords
# auto-generated below, for use inside AzuraCast's Docker container).
#
# 5. Run './runrole --reinstall azuracast' in /opt/iiab/iiab
- name: Record (initial) disk space used
shell: df -B1 --output=used / | tail -1
register: df1
- name: AzuraCast - Make config directory {{ azuracast_host_dir }}
file:
file:
path: "{{ azuracast_host_dir }}"
state: directory
- name: AzuraCast - Install {{ azuracast_host_dir }}/.env from template
template:
src: prod.env.j2
src: env.j2
dest: "{{ azuracast_host_dir }}/.env"
#owner: root
#group: root
mode: 0644
- name: AzuraCast - Install {{ azuracast_host_dir }}/azuracast.env for altered ports
- name: AzuraCast - Install {{ azuracast_host_dir }}/docker-compose.override.yml from template
template:
src: azuracast.env.j2
dest: "{{ azuracast_host_dir }}/azuracast.env"
src: docker-compose.override.yml.j2
dest: "{{ azuracast_host_dir }}/docker-compose.override.yml"
#owner: root
#group: root
mode: 0644
- name: AzuraCast - Download {{ docker_sh_url }} to {{ azuracast_host_dir }}
get_url:
@ -52,6 +26,13 @@
mode: 0755
timeout: "{{ download_timeout }}"
- name: AzuraCast - Download AzuraCast's docker-compose.yml sample from GitHub to {{ azuracast_host_dir }}
get_url:
url: "{{ docker_compose_url }}"
dest: "{{ azuracast_host_dir }}/docker-compose.yml"
mode: 0755
timeout: "{{ download_timeout }}"
#- name: AzuraCast - Make changes to docker.sh script so it runs headless
# lineinfile:
# path: "{{ azuracast_host_dir }}/docker.sh"
@ -59,45 +40,27 @@
# line: "\\1reply='Y'"
# backrefs: yes
# 2022-09-28: https://docs.azuracast.com/en/getting-started/installation/docker
# (& testing) confirm this is done automatically by 'docker.sh install' below.
#
# - name: AzuraCast - Download AzuraCast's docker-compose.yml sample from GitHub to {{ azuracast_host_dir }}
# get_url:
# url: "{{ docker_compose_url }}"
# dest: "{{ azuracast_host_dir }}/docker-compose.yml"
# timeout: "{{ download_timeout }}"
#- name: AzuraCast - Install {{ azuracast_host_dir }}/docker-compose.override.yml from template
# template:
# src: docker-compose.override.yml.j2
# dest: "{{ azuracast_host_dir }}/docker-compose.override.yml"
#- name: Change default port number range 8xxx:8xxx to {{ azuracast_port_range_prefix }}xxx:{{ azuracast_port_range_prefix }}xxx icecast-stations in docker-compose.yml
# replace:
# path: "{{ azuracast_host_dir }}/docker-compose.yml"
# regexp: "^( *- \\')8([0-9]{3})\\:8([0-9]{3}\\'.*)$"
# replace: "\\g<1>{{ azuracast_port_range_prefix }}\\g<2>:{{ azuracast_port_range_prefix }}\\g<3>"
- name: AzuraCast - Make directory {{ docker_container_dir }}
file:
file:
path: "{{ docker_container_dir }}"
state: directory
- name: AzuraCast - Symlink /var/lib/docker -> {{ docker_container_dir }}
file:
src: "{{ docker_container_dir }}"
path: /var/lib/docker
state: link
state: link
# 2022-09-28: "yes 'Y'" toggled whatever it found in /opt/azuracast/.env (e.g.
# AZURACAST_VERSION=stable from templates/prod.env.j2) to the opposite (e.g.
# AZURACAST_VERSION=latest). Let's not modify /opt/azuracast/.env unless nec!
#
# - name: AzuraCast - Setup for stable channel install
# shell: "yes 'Y' | /bin/bash docker.sh setup-release"
# args:
# chdir: "{{ azuracast_host_dir }}"
- name: Change default port number range 8xxx:8xxx to {{ azuracast_port_range_prefix }}xxx:{{ azuracast_port_range_prefix }}xxx icecast-stations in docker-compose.yml
replace:
path: "{{ azuracast_host_dir }}/docker-compose.yml"
regexp: "^( *- \\')8([0-9]{3})\\:8([0-9]{3}\\'.*)$"
replace: "\\g<1>{{ azuracast_port_range_prefix }}\\g<2>:{{ azuracast_port_range_prefix }}\\g<3>"
- name: AzuraCast - Setup for stable channel install
shell: "yes 'Y' | /bin/bash docker.sh setup-release"
args:
chdir: "{{ azuracast_host_dir }}"
- name: AzuraCast - Run the installer
shell: "yes '' | /bin/bash docker.sh install"
@ -107,17 +70,6 @@
# RECORD AzuraCast AS INSTALLED
- name: Record (final) disk space used
shell: df -B1 --output=used / | tail -1
register: df2
- name: Add 'azuracast_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: azuracast
option: azuracast_disk_usage
value: "{{ df2.stdout|int - df1.stdout|int }}"
- name: "Set 'azuracast_installed: True'"
set_fact:
azuracast_installed: True


@ -19,32 +19,25 @@
quiet: yes
- block:
- name: Install AzuraCast if 'azuracast_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
include_tasks: install.yml
when: azuracast_installed is undefined
- name: Install AzuraCast if 'azuracast_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
include_tasks: install.yml
when: azuracast_installed is undefined
# TODO figure out what to turn off/on for AzuraCast
# - include_tasks: enable-or-disable.yml
# TODO figure out what to turn off/on for AzuraCast
# - include_tasks: enable-or-disable.yml
- name: Add 'azuracast' variable values to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: azuracast
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
- option: name
value: azuracast
- option: description
value: '"AzuraCast is simple, self-hosted web radio. Use it to schedule student newscasts, podcasts, music (e.g. MP3''s and similar) and even do live-streaming."'
- option: enabled
value: "{{ azuracast_enabled }}"
rescue:
- name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
fail:
msg: ""
when: not skip_role_on_error
- name: Add 'azuracast' variable values to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: azuracast
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
- option: name
value: azuracast
- option: description
value: '"AzuraCast is a self-hosted, all-in-one radio station platform. Use AzuraCast to schedule podcasts, music, and even do live streaming of audio content. A variety of streaming formats are supported."'
- option: enabled
value: "{{ azuracast_enabled }}"


@ -1,16 +0,0 @@
# work in progress -- might never be ready, as the web interface has settings that would need to match
location /azuracast/ {
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /azuracast;
proxy_pass http://127.0.0.1:{{ azuracast_http_port }};
}
location /radio/ {
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /radio;
proxy_pass http://127.0.0.1:{{ azuracast_http_port }};
}


@ -1,155 +0,0 @@
# IIAB version for altered ports
#
# AzuraCast Customization
#
# The application environment.
# Valid options: production, development, testing
APPLICATION_ENV=production
# Manually modify the logging level.
# This allows you to log debug-level errors temporarily (for problem-solving) or reduce
# the volume of logs that are produced by your installation, without needing to modify
# whether your installation is a production or development instance.
# Valid options: debug, info, notice, warning, error, critical, alert, emergency
# LOG_LEVEL=notice
# Enable the composer "merge" functionality to combine the main application's
# composer.json file with any plugins' composer files.
# This can have performance implications, so you should only use it if
# you use one or more plugins with their own Composer dependencies.
# Valid options: true, false
COMPOSER_PLUGIN_MODE=false
# The minimum port number to use when automatically assigning ports to a station.
# By default, this matches the first forwarded port on the "stations" container.
# You can modify this variable if your station port range is different.
# Be sure to also forward the necessary ports via `docker-compose.yml`
# (and nginx, if you want to use the built-in port-80/443 proxy)!
AUTO_ASSIGN_PORT_MIN="{{ azuracast_port_range_prefix }}000"
# The maximum port number to use when automatically assigning ports to a station.
# See AUTO_ASSIGN_PORT_MIN.
AUTO_ASSIGN_PORT_MAX="{{ azuracast_port_range_prefix }}499"
#
# Database Configuration
# --
# Once the database has been installed, DO NOT CHANGE these values!
#
# The host to connect to. Leave this as the default value unless you're connecting
# to an external database server.
# Default: mariadb
MYSQL_HOST=mariadb
# The port to connect to. Leave this as the default value unless you're connecting
# to an external database server.
# Default: 3306
MYSQL_PORT=3306
# The username AzuraCast will use to connect to the database.
# Default: azuracast
MYSQL_USER=azuracast
# The password AzuraCast will use to connect to the database.
# By default, the database is not exposed to the Internet at all and this is only
# an internal password used by the service itself.
# Default: azur4c457
MYSQL_PASSWORD=azur4c457
# The name of the AzuraCast database.
# Default: azuracast
MYSQL_DATABASE=azuracast
# Automatically generate a random root password upon the first database spin-up.
# This password will be visible in the mariadb container's logs.
# Default: yes
MYSQL_RANDOM_ROOT_PASSWORD=yes
# Log slower queries for the purpose of diagnosing issues. Only turn this on when
# you need to, by uncommenting this and switching it to 1.
# To read the slow query log once enabled, run:
# docker-compose exec mariadb slow_queries
# Default: 0
MYSQL_SLOW_QUERY_LOG=0
# Set the amount of allowed connections to the database. This value should be increased
# if you are seeing the `Too many connections` error in the logs.
# Default: 100
MYSQL_MAX_CONNECTIONS=100
#
# Redis Configuration
#
# Uncomment these fields if you are using a third-party Redis host instead of the one provided with AzuraCast.
# Do not modify these fields if you are using the standard AzuraCast Redis host.
#
# Whether to use the Redis cache; if set to false, will disable Redis and use flatfile cache instead.
# Default: true
# ENABLE_REDIS=true
# Name of the Redis host.
# Default: redis
# REDIS_HOST=redis
# Port to connect to on the Redis host.
# Default: 6379
# REDIS_PORT=6379
# Database index to use on the Redis host.
# Default: 1
# REDIS_DB=1
#
# Advanced Configuration
#
# PHP's maximum POST body size and max upload filesize.
# PHP_MAX_FILE_SIZE=25M
# PHP's maximum memory limit.
# PHP_MEMORY_LIMIT=128M
# PHP's maximum script execution time (in seconds).
# PHP_MAX_EXECUTION_TIME=30
# The maximum execution time (and lock timeout) for the 15-second, 1-minute and 5-minute synchronization tasks.
# SYNC_SHORT_EXECUTION_TIME=600
# The maximum execution time (and lock timeout) for the 1-hour synchronization task.
# SYNC_LONG_EXECUTION_TIME=1800
# Maximum number of PHP-FPM worker processes to spawn.
# PHP_FPM_MAX_CHILDREN=5
#
# PHP-SPX profiling extension Configuration
#
# These environment variables allow you to enable and configure the PHP-SPX profiling extension
# which can be helpful when debugging resource issues in AzuraCast.
#
# The profiling dashboard can be accessed by visiting https://yourdomain.com/?SPX_KEY=dev&SPX_UI_URI=/
# If you change the PROFILING_EXTENSION_HTTP_KEY variable change the value for SPX_KEY accordingly.
#
# Enable the profiling extension.
# Profiling data can be viewed by visiting http://your-azuracast-site/?SPX_KEY=dev&SPX_UI_URI=/
# Default: 0
# PROFILING_EXTENSION_ENABLED=0
# Profile ALL requests made to this account.
# This will have significant performance impact on your installation and should only be used in test circumstances.
# Default: 0
# PROFILING_EXTENSION_ALWAYS_ON=0
# Configure the value for the SPX_KEY parameter needed to access the profiling dashboard
# Default: dev
# PROFILING_EXTENSION_HTTP_KEY=dev
# Configure the IP whitelist for the profiling dashboard
# By default only localhost is allowed to access this page.
# Uncomment this line to enable access for you.
# Default: 127.0.0.1
# PROFILING_EXTENSION_HTTP_IP_WHITELIST=*


@ -1,155 +0,0 @@
# https://github.com/AzuraCast/AzuraCast/blob/main/azuracast.sample.env
#
# AzuraCast Customization
#
# The application environment.
# Valid options: production, development, testing
APPLICATION_ENV=production
# Manually modify the logging level.
# This allows you to log debug-level errors temporarily (for problem-solving) or reduce
# the volume of logs that are produced by your installation, without needing to modify
# whether your installation is a production or development instance.
# Valid options: debug, info, notice, warning, error, critical, alert, emergency
# LOG_LEVEL=notice
# Enable the composer "merge" functionality to combine the main application's
# composer.json file with any plugins' composer files.
# This can have performance implications, so you should only use it if
# you use one or more plugins with their own Composer dependencies.
# Valid options: true, false
COMPOSER_PLUGIN_MODE=false
# The minimum port number to use when automatically assigning ports to a station.
# By default, this matches the first forwarded port on the "stations" container.
# You can modify this variable if your station port range is different.
# Be sure to also forward the necessary ports via `docker-compose.yml`
# (and nginx, if you want to use the built-in port-80/443 proxy)!
AUTO_ASSIGN_PORT_MIN=8000
# The maximum port number to use when automatically assigning ports to a station.
# See AUTO_ASSIGN_PORT_MIN.
AUTO_ASSIGN_PORT_MAX=8499
#
# Database Configuration
# --
# Once the database has been installed, DO NOT CHANGE these values!
#
# The host to connect to. Leave this as the default value unless you're connecting
# to an external database server.
# Default: mariadb
MYSQL_HOST=mariadb
# The port to connect to. Leave this as the default value unless you're connecting
# to an external database server.
# Default: 3306
MYSQL_PORT=3306
# The username AzuraCast will use to connect to the database.
# Default: azuracast
MYSQL_USER=azuracast
# The password AzuraCast will use to connect to the database.
# By default, the database is not exposed to the Internet at all and this is only
# an internal password used by the service itself.
# Default: azur4c457
MYSQL_PASSWORD=azur4c457
# The name of the AzuraCast database.
# Default: azuracast
MYSQL_DATABASE=azuracast
# Automatically generate a random root password upon the first database spin-up.
# This password will be visible in the mariadb container's logs.
# Default: yes
MYSQL_RANDOM_ROOT_PASSWORD=yes
# Log slower queries for the purpose of diagnosing issues. Only turn this on when
# you need to, by uncommenting this and switching it to 1.
# To read the slow query log once enabled, run:
# docker-compose exec mariadb slow_queries
# Default: 0
MYSQL_SLOW_QUERY_LOG=0
# Set the amount of allowed connections to the database. This value should be increased
# if you are seeing the `Too many connections` error in the logs.
# Default: 100
MYSQL_MAX_CONNECTIONS=100
#
# Redis Configuration
#
# Uncomment these fields if you are using a third-party Redis host instead of the one provided with AzuraCast.
# Do not modify these fields if you are using the standard AzuraCast Redis host.
#
# Whether to use the Redis cache; if set to false, will disable Redis and use flatfile cache instead.
# Default: true
# ENABLE_REDIS=true
# Name of the Redis host.
# Default: redis
# REDIS_HOST=redis
# Port to connect to on the Redis host.
# Default: 6379
# REDIS_PORT=6379
# Database index to use on the Redis host.
# Default: 1
# REDIS_DB=1
#
# Advanced Configuration
#
# PHP's maximum POST body size and max upload filesize.
# PHP_MAX_FILE_SIZE=25M
# PHP's maximum memory limit.
# PHP_MEMORY_LIMIT=128M
# PHP's maximum script execution time (in seconds).
# PHP_MAX_EXECUTION_TIME=30
# The maximum execution time (and lock timeout) for the 15-second, 1-minute and 5-minute synchronization tasks.
# SYNC_SHORT_EXECUTION_TIME=600
# The maximum execution time (and lock timeout) for the 1-hour synchronization task.
# SYNC_LONG_EXECUTION_TIME=1800
# Maximum number of PHP-FPM worker processes to spawn.
# PHP_FPM_MAX_CHILDREN=5
#
# PHP-SPX profiling extension Configuration
#
# These environment variables allow you to enable and configure the PHP-SPX profiling extension
# which can be helpful when debugging resource issues in AzuraCast.
#
# The profiling dashboard can be accessed by visiting https://yourdomain.com/?SPX_KEY=dev&SPX_UI_URI=/
# If you change the PROFILING_EXTENSION_HTTP_KEY variable, change the value for SPX_KEY accordingly.
#
# Enable the profiling extension.
# Profiling data can be viewed by visiting http://your-azuracast-site/?SPX_KEY=dev&SPX_UI_URI=/
# Default: 0
# PROFILING_EXTENSION_ENABLED=0
# Profile ALL requests made to this account.
# This will have significant performance impact on your installation and should only be used in test circumstances.
# Default: 0
# PROFILING_EXTENSION_ALWAYS_ON=0
# Configure the value for the SPX_KEY parameter needed to access the profiling dashboard
# Default: dev
# PROFILING_EXTENSION_HTTP_KEY=dev
# Configure the IP whitelist for the profiling dashboard
# By default only localhost is allowed to access this page.
# Uncomment this line to enable access for you.
# Default: 127.0.0.1
# PROFILING_EXTENSION_HTTP_IP_WHITELIST=*
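# Illustration (not part of the upstream sample): everything in this file reaches the
# "web" container via docker-compose's env_file directive, so a value can be
# spot-checked from the host with something like:
#   docker exec azuracast printenv MYSQL_HOST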

View file

@ -1,54 +0,0 @@
# This file was automatically generated by AzuraCast and modified for IIAB
# You can modify it as necessary. To apply changes, restart the Docker containers.
# Remove the leading "#" symbol from lines to uncomment them.
# (Docker Compose) All Docker containers are prefixed by this name. Do not change this after installation.
# Default: azuracast
COMPOSE_PROJECT_NAME=azuracast
# (Docker Compose) The amount of time to wait before a Docker Compose operation fails. Increase this on lower performance computers.
# Default: 300
COMPOSE_HTTP_TIMEOUT=300
# Release Channel
# Valid options: latest, stable
# Default: latest
AZURACAST_VERSION=stable
NGINX_TIMEOUT=1800
# HTTP Port
# The main port AzuraCast listens to for insecure HTTP connections.
# Default: 80
AZURACAST_HTTP_PORT={{ azuracast_http_port }}
# HTTPS Port
# The main port AzuraCast listens to for secure HTTPS connections.
# Default: 443
AZURACAST_HTTPS_PORT={{ azuracast_https_port }}
# SFTP Port
# The port AzuraCast listens to for SFTP file management connections.
# Default: 2022
AZURACAST_SFTP_PORT=2022
# Station Ports
# The ports AzuraCast should listen to for station broadcasts and incoming DJ
# connections.
# Default: 8000,8005,8006,8010,8015,8016,8020,8025,8026,8030,8035,8036,8040,8045,8046,8050,8055,8056,8060,8065,8066,8070,8075,8076,8090,8095,8096,8100,8105,8106,8110,8115,8116,8120,8125,8126,8130,8135,8136,8140,8145,8146,8150,8155,8156,8160,8165,8166,8170,8175,8176,8180,8185,8186,8190,8195,8196,8200,8205,8206,8210,8215,8216,8220,8225,8226,8230,8235,8236,8240,8245,8246,8250,8255,8256,8260,8265,8266,8270,8275,8276,8280,8285,8286,8290,8295,8296,8300,8305,8306,8310,8315,8316,8320,8325,8326,8330,8335,8336,8340,8345,8346,8350,8355,8356,8360,8365,8366,8370,8375,8376,8380,8385,8386,8390,8395,8396,8400,8405,8406,8410,8415,8416,8420,8425,8426,8430,8435,8436,8440,8445,8446,8450,8455,8456,8460,8465,8466,8470,8475,8476,8480,8485,8486,8490,8495,8496
AZURACAST_STATION_PORTS=10000,10005,10006,10010,10015,10016,10020,10025,10026,10030,10035,10036,10040,10045,10046,10050,10055,10056,10060,10065,10066,10070,10075,10076,10080,10085,10086,10090,10095,10096,10100,10105,10106,10110,10115,10116,10120,10125,10126,10130,10135,10136,10140,10145,10146,10150,10155,10156,10160,10165,10166,10170,10175,10176,10180,10185,10186,10190,10195,10196,10200,10205,10206,10210,10215,10216,10220,10225,10226,10230,10235,10236,10240,10245,10246,10250,10255,10256,10260,10265,10266,10270,10275,10276,10280,10285,10286,10290,10295,10296,10300,10305,10306,10310,10315,10316,10320,10325,10326,10330,10335,10336,10340,10345,10346,10350,10355,10356,10360,10365,10366,10370,10375,10376,10380,10385,10386,10390,10395,10396,10400,10405,10406,10410,10415,10416,10420,10425,10426,10430,10435,10436,10440,10445,10446,10450,10455,10456,10460,10465,10466,10470,10475,10476,10480,10485,10486,10490,10495,10496
# Docker User UID
# Set the UID of the user running inside the Docker containers. Matching this
# with your host UID can fix permission issues.
# Default: 1000
AZURACAST_PUID=1000
# Docker User GID
# Set the GID of the user running inside the Docker containers. Matching this
# with your host GID can fix permission issues.
# Default: 1000
AZURACAST_PGID=1000
# Advanced: Use Privileged Docker Settings
# Default: true
AZURACAST_COMPOSE_PRIVILEGED=true
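# Illustration (not part of the generated file): Docker Compose reads this .env
# automatically when run from the directory that holds it, and substitutes values such
# as ${AZURACAST_HTTP_PORT} into docker-compose.yml. To preview the result, e.g.:
#   docker-compose config | grep -i port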

View file

@ -1,161 +0,0 @@
#
# AzuraCast Customization
#
# The application environment.
# Valid options: production, development, testing
APPLICATION_ENV=production
# Manually modify the logging level.
# This allows you to log debug-level errors temporarily (for problem-solving) or reduce
# the volume of logs that are produced by your installation, without needing to modify
# whether your installation is a production or development instance.
# Valid options: debug, info, notice, warning, error, critical, alert, emergency
# LOG_LEVEL=notice
# Enable the composer "merge" functionality to combine the main application's
# composer.json file with any plugins' composer files.
# This can have performance implications, so you should only use it if
# you use one or more plugins with their own Composer dependencies.
# Valid options: true, false
COMPOSER_PLUGIN_MODE=false
# The minimum port number to use when automatically assigning ports to a station.
# By default, this matches the first forwarded port on the "stations" container.
# You can modify this variable if your station port range is different.
# Be sure to also forward the necessary ports via `docker-compose.yml`
# (and nginx, if you want to use the built-in port-80/443 proxy)!
AUTO_ASSIGN_PORT_MIN=8000
# The maximum port number to use when automatically assigning ports to a station.
# See AUTO_ASSIGN_PORT_MIN.
AUTO_ASSIGN_PORT_MAX=8499
# This allows you to debug Slim Application Errors you may encounter
# By default, this is disabled to prevent users from seeing privileged information
# Please report any Slim Application Error logs to the development team on GitHub
# Valid options: true, false
SHOW_DETAILED_ERRORS=false
#
# Database Configuration
# --
# Once the database has been installed, DO NOT CHANGE these values!
#
# The host to connect to. Leave this as the default value unless you're connecting
# to an external database server.
# Default: localhost
# MYSQL_HOST=localhost
# The port to connect to. Leave this as the default value unless you're connecting
# to an external database server.
# Default: 3306
# MYSQL_PORT=3306
# The username AzuraCast will use to connect to the database.
# Default: azuracast
# MYSQL_USER=azuracast
# The password AzuraCast will use to connect to the database.
# By default, the database is not exposed to the Internet at all and this is only
# an internal password used by the service itself.
# Default: azur4c457
MYSQL_PASSWORD=azur4c457
# The name of the AzuraCast database.
# Default: azuracast
# MYSQL_DATABASE=azuracast
# Automatically generate a random root password upon the first database spin-up.
# This password will be visible in the mariadb container's logs.
# Default: yes
MYSQL_RANDOM_ROOT_PASSWORD=yes
# Log slower queries for the purpose of diagnosing issues. Only turn this on when
# you need to, by uncommenting this and switching it to 1.
# To read the slow query log once enabled, run:
# docker-compose exec mariadb slow_queries
# Default: 0
# MYSQL_SLOW_QUERY_LOG=0
# Set the number of allowed connections to the database. This value should be increased
# if you are seeing the `Too many connections` error in the logs.
# Default: 100
# MYSQL_MAX_CONNECTIONS=100
#
# Redis Configuration
#
# Uncomment these fields if you are using a third-party Redis host instead of the one provided with AzuraCast.
# Do not modify these fields if you are using the standard AzuraCast Redis host.
#
# Whether to use the Redis cache; if set to false, will disable Redis and use flatfile cache instead.
# Default: true
# ENABLE_REDIS=true
# Name of the Redis host.
# Default: localhost
# REDIS_HOST=localhost
# Port to connect to on the Redis host.
# Default: 6379
# REDIS_PORT=6379
# Database index to use on the Redis host.
# Default: 1
# REDIS_DB=1
#
# Advanced Configuration
#
# PHP's maximum POST body size and max upload filesize.
# PHP_MAX_FILE_SIZE=25M
# PHP's maximum memory limit.
# PHP_MEMORY_LIMIT=128M
# PHP's maximum script execution time (in seconds).
# PHP_MAX_EXECUTION_TIME=30
# The maximum execution time (and lock timeout) for the 15-second, 1-minute and 5-minute synchronization tasks.
# SYNC_SHORT_EXECUTION_TIME=600
# The maximum execution time (and lock timeout) for the 1-hour synchronization task.
# SYNC_LONG_EXECUTION_TIME=1800
# Maximum number of PHP-FPM worker processes to spawn.
# PHP_FPM_MAX_CHILDREN=5
#
# PHP-SPX profiling extension Configuration
#
# These environment variables allow you to enable and configure the PHP-SPX profiling extension
# which can be helpful when debugging resource issues in AzuraCast.
#
# The profiling dashboard can be accessed by visiting https://yourdomain.com/?SPX_KEY=dev&SPX_UI_URI=/
# If you change the PROFILING_EXTENSION_HTTP_KEY variable, change the value for SPX_KEY accordingly.
#
# Enable the profiling extension.
# Profiling data can be viewed by visiting http://your-azuracast-site/?SPX_KEY=dev&SPX_UI_URI=/
# Default: 0
# PROFILING_EXTENSION_ENABLED=0
# Profile ALL requests made to this account.
# This will have significant performance impact on your installation and should only be used in test circumstances.
# Default: 0
# PROFILING_EXTENSION_ALWAYS_ON=0
# Configure the value for the SPX_KEY parameter needed to access the profiling dashboard
# Default: dev
# PROFILING_EXTENSION_HTTP_KEY=dev
# Configure the IP whitelist for the profiling dashboard
# By default only localhost is allowed to access this page.
# Uncomment this line to enable access for you.
# Default: 127.0.0.1
# PROFILING_EXTENSION_HTTP_IP_WHITELIST=*

View file

@ -1,214 +0,0 @@
#
# AzuraCast Docker Compose Configuration File
#
# When updating, you will be prompted to replace this file with a new
# version; you should do this whenever possible to take advantage of
# new updates.
#
# If you need to customize this file, you can create a new file named:
# docker-compose.override.yml
# with any changes you need to make.
#
services:
web:
container_name: azuracast
image: "ghcr.io/azuracast/azuracast:${AZURACAST_VERSION:-latest}"
# Want to customize the HTTP/S ports? Follow the instructions here:
# https://docs.azuracast.com/en/administration/docker#using-non-standard-ports
ports:
- '${AZURACAST_HTTP_PORT:-80}:80'
- '${AZURACAST_HTTPS_PORT:-443}:443'
- '${AZURACAST_SFTP_PORT:-2022}:2022'
# This default mapping is the outgoing and incoming ports for the first 50 stations.
# You can override this port mapping in your own docker-compose.override.yml file.
# For instructions, see:
# https://docs.azuracast.com/en/administration/docker#expanding-the-station-port-range
- '8000:8000'
- '8005:8005'
- '8006:8006'
- '8010:8010'
- '8015:8015'
- '8016:8016'
- '8020:8020'
- '8025:8025'
- '8026:8026'
- '8030:8030'
- '8035:8035'
- '8036:8036'
- '8040:8040'
- '8045:8045'
- '8046:8046'
- '8050:8050'
- '8055:8055'
- '8056:8056'
- '8060:8060'
- '8065:8065'
- '8066:8066'
- '8070:8070'
- '8075:8075'
- '8076:8076'
- '8090:8090'
- '8095:8095'
- '8096:8096'
- '8100:8100'
- '8105:8105'
- '8106:8106'
- '8110:8110'
- '8115:8115'
- '8116:8116'
- '8120:8120'
- '8125:8125'
- '8126:8126'
- '8130:8130'
- '8135:8135'
- '8136:8136'
- '8140:8140'
- '8145:8145'
- '8146:8146'
- '8150:8150'
- '8155:8155'
- '8156:8156'
- '8160:8160'
- '8165:8165'
- '8166:8166'
- '8170:8170'
- '8175:8175'
- '8176:8176'
- '8180:8180'
- '8185:8185'
- '8186:8186'
- '8190:8190'
- '8195:8195'
- '8196:8196'
- '8200:8200'
- '8205:8205'
- '8206:8206'
- '8210:8210'
- '8215:8215'
- '8216:8216'
- '8220:8220'
- '8225:8225'
- '8226:8226'
- '8230:8230'
- '8235:8235'
- '8236:8236'
- '8240:8240'
- '8245:8245'
- '8246:8246'
- '8250:8250'
- '8255:8255'
- '8256:8256'
- '8260:8260'
- '8265:8265'
- '8266:8266'
- '8270:8270'
- '8275:8275'
- '8276:8276'
- '8280:8280'
- '8285:8285'
- '8286:8286'
- '8290:8290'
- '8295:8295'
- '8296:8296'
- '8300:8300'
- '8305:8305'
- '8306:8306'
- '8310:8310'
- '8315:8315'
- '8316:8316'
- '8320:8320'
- '8325:8325'
- '8326:8326'
- '8330:8330'
- '8335:8335'
- '8336:8336'
- '8340:8340'
- '8345:8345'
- '8346:8346'
- '8350:8350'
- '8355:8355'
- '8356:8356'
- '8360:8360'
- '8365:8365'
- '8366:8366'
- '8370:8370'
- '8375:8375'
- '8376:8376'
- '8380:8380'
- '8385:8385'
- '8386:8386'
- '8390:8390'
- '8395:8395'
- '8396:8396'
- '8400:8400'
- '8405:8405'
- '8406:8406'
- '8410:8410'
- '8415:8415'
- '8416:8416'
- '8420:8420'
- '8425:8425'
- '8426:8426'
- '8430:8430'
- '8435:8435'
- '8436:8436'
- '8440:8440'
- '8445:8445'
- '8446:8446'
- '8450:8450'
- '8455:8455'
- '8456:8456'
- '8460:8460'
- '8465:8465'
- '8466:8466'
- '8470:8470'
- '8475:8475'
- '8476:8476'
- '8480:8480'
- '8485:8485'
- '8486:8486'
- '8490:8490'
- '8495:8495'
- '8496:8496'
env_file: azuracast.env
environment:
LANG: ${LANG:-en_US.UTF-8}
AZURACAST_DC_REVISION: 14
AZURACAST_VERSION: ${AZURACAST_VERSION:-latest}
AZURACAST_SFTP_PORT: ${AZURACAST_SFTP_PORT:-2022}
NGINX_TIMEOUT: ${NGINX_TIMEOUT:-1800}
LETSENCRYPT_HOST: ${LETSENCRYPT_HOST:-}
LETSENCRYPT_EMAIL: ${LETSENCRYPT_EMAIL:-}
PUID: ${AZURACAST_PUID:-1000}
PGID: ${AZURACAST_PGID:-1000}
volumes:
- www_uploads:/var/azuracast/uploads
- station_data:/var/azuracast/stations
- shoutcast2_install:/var/azuracast/servers/shoutcast2
- stereo_tool_install:/var/azuracast/servers/stereo_tool
- geolite_install:/var/azuracast/geoip
- sftpgo_data:/var/azuracast/sftpgo/persist
- backups:/var/azuracast/backups
- acme:/var/azuracast/acme
- db_data:/var/lib/mysql
restart: unless-stopped
ulimits: &default-ulimits
nofile:
soft: 65536
hard: 65536
logging: &default-logging
options:
max-size: "1m"
max-file: "5"
volumes:
db_data: { }
acme: { }
shoutcast2_install: { }
stereo_tool_install: { }
geolite_install: { }
sftpgo_data: { }
station_data: { }
www_uploads: { }
backups: { }
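# Illustration (not part of the upstream sample): as the header above notes, site-local
# changes belong in a docker-compose.override.yml placed next to this file, which
# Docker Compose merges automatically. A minimal sketch, e.g. to publish one extra
# (hypothetical) station port:
#   services:
#     web:
#       ports:
#         - '8500:8500'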

View file

@ -1,840 +0,0 @@
#!/usr/bin/env bash
# shellcheck disable=SC2145,SC2178,SC2120,SC2162
# Functions to manage .env files
__dotenv=
__dotenv_file=
__dotenv_cmd=.env
.env() {
REPLY=()
[[ $__dotenv_file || ${1-} == -* ]] || .env.--file .env || return
if declare -F -- ".env.${1-}" >/dev/null; then
.env."$@"
return
fi
return 64
}
.env.-f() { .env.--file "$@"; }
.env.get() {
.env::arg "get requires a key" "$@" &&
[[ "$__dotenv" =~ ^(.*(^|$'\n'))([ ]*)"$1="(.*)$ ]] &&
REPLY=${BASH_REMATCH[4]%%$'\n'*} && REPLY=${REPLY%"${REPLY##*[![:space:]]}"}
}
.env.parse() {
local line key
while IFS= read -r line; do
line=${line#"${line%%[![:space:]]*}"} # trim leading whitespace
line=${line%"${line##*[![:space:]]}"} # trim trailing whitespace
if [[ ! "$line" || "$line" == '#'* ]]; then continue; fi
if (($#)); then
for key; do
if [[ $key == "${line%%=*}" ]]; then
REPLY+=("$line")
break
fi
done
else
REPLY+=("$line")
fi
done <<<"$__dotenv"
((${#REPLY[@]}))
}
.env.export() { ! .env.parse "$@" || export "${REPLY[@]}"; }
.env.set() {
.env::file load || return
local key saved=$__dotenv
while (($#)); do
key=${1#+}
key=${key%%=*}
if .env.get "$key"; then
REPLY=()
if [[ $1 == +* ]]; then
shift
continue # skip if already found
elif [[ $1 == *=* ]]; then
__dotenv=${BASH_REMATCH[1]}${BASH_REMATCH[3]}$1$'\n'${BASH_REMATCH[4]#*$'\n'}
else
__dotenv=${BASH_REMATCH[1]}${BASH_REMATCH[4]#*$'\n'}
continue # delete all occurrences
fi
elif [[ $1 == *=* ]]; then
__dotenv+="${1#+}"$'\n'
fi
shift
done
[[ $__dotenv == "$saved" ]] || .env::file save
}
.env.puts() { echo "${1-}" >>"$__dotenv_file" && __dotenv+="$1"$'\n'; }
.env.generate() {
.env::arg "key required for generate" "$@" || return
.env.get "$1" && return || REPLY=$("${@:2}") || return
.env::one "generate: ouptut of '${*:2}' has more than one line" "$REPLY" || return
.env.puts "$1=$REPLY"
}
.env.--file() {
.env::arg "filename required for --file" "$@" || return
__dotenv_file=$1
.env::file load || return
(($# < 2)) || .env "${@:2}"
}
.env::arg() { [[ "${2-}" ]] || {
echo "$__dotenv_cmd: $1" >&2
return 64
}; }
.env::one() { [[ "$2" != *$'\n'* ]] || .env::arg "$1"; }
.env::file() {
local REPLY=$__dotenv_file
case "$1" in
load)
__dotenv=
! [[ -f "$REPLY" ]] || __dotenv="$(<"$REPLY")"$'\n' || return
;;
save)
if [[ -L "$REPLY" ]] && declare -F -- realpath.resolved >/dev/null; then
realpath.resolved "$REPLY"
fi
{ [[ ! -f "$REPLY" ]] || cp -p "$REPLY" "$REPLY.bak"; } &&
printf %s "$__dotenv" >"$REPLY.bak" && mv "$REPLY.bak" "$REPLY"
;;
esac
}
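# Illustrative usage of the .env helpers above (not part of the upstream script;
# mirrors how envfile-set and get-release-channel call them further below):
#   .env --file .env set AZURACAST_HTTP_PORT=80   # add or update a key
#   .env --file .env get AZURACAST_HTTP_PORT      # result is left in $REPLY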
# Shortcut to convert semver version (x.yyy.zzz) into a comparable number.
version-number() {
echo "$@" | awk -F. '{ printf("%03d%03d%03d\n", $1,$2,$3); }'
}
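# Example (illustration only): version-number 0.17.3 prints 000017003, so two versions
# can be compared as fixed-width strings, e.g.:
#   [[ "$(version-number 2.0.0)" > "$(version-number 1.9.9)" ]] && echo "newer"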
# Get the current release channel for AzuraCast
get-release-channel() {
local AZURACAST_VERSION="latest"
if [[ -f .env ]]; then
.env --file .env get AZURACAST_VERSION
AZURACAST_VERSION="${REPLY:-latest}"
fi
echo "$AZURACAST_VERSION"
}
get-release-branch-name() {
if [[ $(get-release-channel) == "stable" ]]; then
echo "stable"
else
echo "main"
fi
}
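# Example (illustration only): with AZURACAST_VERSION=stable in .env, get-release-channel
# prints "stable" and get-release-branch-name prints "stable"; with "latest" (or no .env
# at all) the branch name falls back to "main".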
# This is a general-purpose function to ask Yes/No questions in Bash, either
# with or without a default answer. It keeps repeating the question until it
# gets a valid answer.
ask() {
# https://djm.me/ask
local prompt default reply
while true; do
if [[ "${2:-}" == "Y" ]]; then
prompt="Y/n"
default=Y
elif [[ "${2:-}" == "N" ]]; then
prompt="y/N"
default=N
else
prompt="y/n"
default=
fi
# Ask the question (not using "read -p" as it uses stderr not stdout)
echo -n "$1 [$prompt] "
read reply
# Default?
if [[ -z "$reply" ]]; then
reply=${default}
fi
# Check if the reply is valid
case "$reply" in
Y* | y*) return 0 ;;
N* | n*) return 1 ;;
esac
done
}
# Generate a prompt to set an environment file value.
envfile-set() {
local VALUE INPUT
.env --file .env
.env get "$1"
VALUE=${REPLY:-$2}
echo -n "$3 [$VALUE]: "
read INPUT
VALUE=${INPUT:-$VALUE}
.env set "${1}=${VALUE}"
}
#
# Configure the ports used by AzuraCast.
#
setup-ports() {
envfile-set "AZURACAST_HTTP_PORT" "80" "Port to use for HTTP connections"
envfile-set "AZURACAST_HTTPS_PORT" "443" "Port to use for HTTPS connections"
envfile-set "AZURACAST_SFTP_PORT" "2022" "Port to use for SFTP connections"
}
#
# Configure release mode settings.
#
setup-release() {
if [[ ! -f .env ]]; then
curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/main/sample.env -o .env
fi
local OLD_RELEASE_CHANNEL
.env --file .env get AZURACAST_VERSION
OLD_RELEASE_CHANNEL="${REPLY:-latest}"
local AZURACAST_VERSION="${OLD_RELEASE_CHANNEL}"
if [[ $AZURACAST_VERSION == "latest" ]]; then
if ask "Your current release channel is 'Rolling Release'. Switch to 'Stable' release channel?" N; then
AZURACAST_VERSION="stable"
fi
elif [[ $AZURACAST_VERSION == "stable" ]]; then
if ask "Your current release channel is 'Stable'. Switch to 'Rolling Release' release channel?" N; then
AZURACAST_VERSION="latest"
fi
fi
.env --file .env set AZURACAST_VERSION=${AZURACAST_VERSION}
if [[ $AZURACAST_VERSION != $OLD_RELEASE_CHANNEL ]]; then
if ask "You should update the Docker Utility Script after changing release channels. Automatically update it now?" Y; then
update-self
fi
fi
}
check-install-requirements() {
local CURRENT_OS CURRENT_ARCH REQUIRED_COMMANDS SCRIPT_DIR
set -e
echo "Checking installation requirements for AzuraCast..."
CURRENT_OS=$(uname -s)
if [[ $CURRENT_OS == "Linux" ]]; then
echo -en "\e[32m[PASS]\e[0m Operating System: ${CURRENT_OS}\n"
else
echo -en "\e[41m[FAIL]\e[0m Operating System: ${CURRENT_OS}\n"
echo " You are running an unsupported operating system."
echo " Automated AzuraCast installation is not currently supported on this"
echo " operating system."
exit 1
fi
CURRENT_ARCH=$(uname -m)
if [[ $CURRENT_ARCH == "x86_64" ]]; then
echo -en "\e[32m[PASS]\e[0m Architecture: ${CURRENT_ARCH}\n"
elif [[ $CURRENT_ARCH == "aarch64" ]]; then
echo -en "\e[32m[PASS]\e[0m Architecture: ${CURRENT_ARCH}\n"
else
echo -en "\e[41m[FAIL]\e[0m Architecture: ${CURRENT_ARCH}\n"
echo " You are running an unsupported processor architecture."
echo " Automated AzuraCast installation is not currently supported on this "
echo " operating system."
exit 1
fi
REQUIRED_COMMANDS=(curl awk)
for COMMAND in "${REQUIRED_COMMANDS[@]}" ; do
if [[ $(command -v "$COMMAND") ]]; then
echo -en "\e[32m[PASS]\e[0m Command Present: ${COMMAND}\n"
else
echo -en "\e[41m[FAIL]\e[0m Command Present: ${COMMAND}\n"
echo " ${COMMAND} does not appear to be installed."
echo " Install ${COMMAND} using your host's package manager,"
echo " then continue installing using this script."
exit 1
fi
done
if [[ $EUID -ne 0 ]]; then
if [[ $(command -v sudo) ]]; then
echo -en "\e[32m[PASS]\e[0m User Permissions\n"
else
echo -en "\e[41m[FAIL]\e[0m User Permissions\n"
echo " You are not currently the root user, and "
echo " 'sudo' does not appear to be installed."
echo " Install sudo using your host's package manager,"
echo " then continue installing using this script."
exit 1
fi
else
echo -en "\e[32m[PASS]\e[0m User Permissions\n"
fi
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
if [[ $SCRIPT_DIR == "/var/azuracast" ]]; then
echo -en "\e[32m[PASS]\e[0m Installation Directory\n"
else
echo -en "\e[93m[WARN]\e[0m Installation Directory\n"
echo " AzuraCast is not installed in /var/azuracast, as is recommended"
echo " for most installations. This will not prevent AzuraCast from"
echo " working, but you will need to update any instructions in our"
echo " documentation to reflect your current directory:"
echo " $SCRIPT_DIR"
fi
echo -en "\e[32m[PASS]\e[0m All requirements met!\n"
set +e
}
install-docker() {
set -e
curl -fsSL get.docker.com -o get-docker.sh
sh get-docker.sh
rm get-docker.sh
if [[ $EUID -ne 0 ]]; then
sudo usermod -aG docker "$(whoami)"
echo "You must log out or restart to apply necessary Docker permissions changes."
echo "Restart, then continue installing using this script."
exit 1
fi
set +e
}
install-docker-compose() {
set -e
echo "Installing Docker Compose..."
curl -fsSL -o docker-compose https://github.com/docker/compose/releases/download/v2.4.1/docker-compose-linux-$(uname -m)
ARCHITECTURE=amd64
if [ "$(uname -m)" = "aarch64" ]; then
ARCHITECTURE=arm64
fi
curl -fsSL -o docker-compose-switch https://github.com/docker/compose-switch/releases/download/v1.0.4/docker-compose-linux-${ARCHITECTURE}
if [[ $EUID -ne 0 ]]; then
sudo chmod a+x ./docker-compose
sudo chmod a+x ./docker-compose-switch
sudo mv ./docker-compose /usr/libexec/docker/cli-plugins/docker-compose
sudo mv ./docker-compose-switch /usr/local/bin/docker-compose
else
chmod a+x ./docker-compose
chmod a+x ./docker-compose-switch
mv ./docker-compose /usr/libexec/docker/cli-plugins/docker-compose
mv ./docker-compose-switch /usr/local/bin/docker-compose
fi
echo "Docker Compose updated!"
set +e
}
run-installer() {
local AZURACAST_RELEASE_BRANCH
AZURACAST_RELEASE_BRANCH=$(get-release-branch-name)
if [[ ! -f .env ]]; then
curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/$AZURACAST_RELEASE_BRANCH/sample.env -o .env
fi
if [[ ! -f azuracast.env ]]; then
curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/$AZURACAST_RELEASE_BRANCH/azuracast.sample.env -o azuracast.env
fi
if [[ ! -f docker-compose.yml ]]; then
curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/$AZURACAST_RELEASE_BRANCH/docker-compose.sample.yml -o docker-compose.yml
fi
touch docker-compose.new.yml
# Assign separately from 'local' so $? reflects the docker-compose exit status
local dc_config_test
dc_config_test=$(docker-compose -f docker-compose.new.yml config 2>/dev/null)
if [ $? -ne 0 ]; then
if ask "Docker Compose needs to be updated to continue. Update to latest version?" Y; then
install-docker-compose
fi
fi
curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/$AZURACAST_RELEASE_BRANCH/docker-compose.installer.yml -o docker-compose.installer.yml
docker-compose -p azuracast_installer -f docker-compose.installer.yml pull
docker-compose -p azuracast_installer -f docker-compose.installer.yml run --rm installer install "$@"
rm docker-compose.installer.yml
}
#
# Run the initial installer of Docker and AzuraCast.
# Usage: ./docker.sh install
#
install() {
check-install-requirements
if [[ $(command -v docker) && $(docker --version) ]]; then
echo "Docker is already installed! Continuing..."
else
if ask "Docker does not appear to be installed. Install Docker now?" Y; then
install-docker
fi
fi
if [[ $(command -v docker-compose) ]]; then
echo "Docker Compose is already installed. Continuing..."
else
if ask "Docker Compose does not appear to be installed. Install Docker Compose now?" Y; then
install-docker-compose
fi
fi
setup-release
run-installer "$@"
# Installer creates a file at docker-compose.new.yml; copy it to the main spot.
if [[ -s docker-compose.new.yml ]]; then
if [[ -f docker-compose.yml ]]; then
rm docker-compose.yml
fi
mv docker-compose.new.yml docker-compose.yml
fi
# If this script is running as a non-root user, set the PUID/PGID in the environment vars appropriately.
if [[ $EUID -ne 0 ]]; then
.env --file .env set AZURACAST_PUID="$(id -u)"
.env --file .env set AZURACAST_PGID="$(id -g)"
fi
docker-compose pull
docker-compose run --rm web -- azuracast_install "$@"
docker-compose up -d
exit
}
install-dev() {
if [[ $(command -v docker) && $(docker --version) ]]; then
echo "Docker is already installed! Continuing..."
else
if ask "Docker does not appear to be installed. Install Docker now?" Y; then
install-docker
fi
fi
if [[ $(command -v docker-compose) ]]; then
echo "Docker Compose is already installed. Continuing..."
else
if ask "Docker Compose does not appear to be installed. Install Docker Compose now?" Y; then
install-docker-compose
fi
fi
if [[ ! -f docker-compose.yml ]]; then
cp docker-compose.sample.yml docker-compose.yml
fi
if [[ ! -f docker-compose.override.yml ]]; then
cp docker-compose.dev.yml docker-compose.override.yml
fi
if [[ ! -f .env ]]; then
cp dev.env .env
fi
if [[ ! -f azuracast.env ]]; then
cp azuracast.dev.env azuracast.env
echo "Customize azuracast.env file now before continuing. Re-run this command to continue installation."
exit
fi
# If this script is running as a non-root user, set the PUID/PGID in the environment vars appropriately.
if [[ $EUID -ne 0 ]]; then
.env --file .env set AZURACAST_PUID="$(id -u)"
.env --file .env set AZURACAST_PGID="$(id -g)"
fi
chmod 777 ./frontend/ ./web/ ./vendor/ \
./web/static/ ./web/static/api/ \
./web/static/dist/ ./web/static/img/
docker-compose build
docker-compose run --rm web -- azuracast_install "$@"
docker-compose -p azuracast_frontend -f docker-compose.frontend.yml build
docker-compose -p azuracast_frontend -f docker-compose.frontend.yml run --rm frontend npm run build
docker-compose up -d
exit
}
#
# Update the Docker images and codebase.
# Usage: ./docker.sh update
#
update() {
echo "[NOTICE] Before you continue, please make sure you have a recent snapshot of your system and or backed it up."
if ask "Are you ready to continue with the update?" Y; then
# Check for a new Docker Utility Script.
local AZURACAST_RELEASE_BRANCH
AZURACAST_RELEASE_BRANCH=$(get-release-branch-name)
curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/$AZURACAST_RELEASE_BRANCH/docker.sh -o docker.new.sh
local UTILITY_FILES_MATCH
UTILITY_FILES_MATCH="$(
cmp --silent docker.sh docker.new.sh
echo $?
)"
local UPDATE_UTILITY=0
if [[ ${UTILITY_FILES_MATCH} -ne 0 ]]; then
if ask "The Docker Utility Script has changed since your version. Update to latest version?" Y; then
UPDATE_UTILITY=1
fi
fi
if [[ ${UPDATE_UTILITY} -ne 0 ]]; then
mv docker.new.sh docker.sh
chmod a+x docker.sh
echo "A new Docker Utility Script has been downloaded."
echo "Please re-run the update process to continue."
exit
else
rm docker.new.sh
fi
run-installer --update "$@"
# Check for updated Docker Compose config.
local COMPOSE_FILES_MATCH
if [[ ! -s docker-compose.new.yml ]]; then
curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/$AZURACAST_RELEASE_BRANCH/docker-compose.sample.yml -o docker-compose.new.yml
fi
COMPOSE_FILES_MATCH="$(
cmp --silent docker-compose.yml docker-compose.new.yml
echo $?
)"
if [[ ${COMPOSE_FILES_MATCH} -ne 0 ]]; then
docker-compose -f docker-compose.new.yml pull
docker-compose down
cp docker-compose.yml docker-compose.backup.yml
mv docker-compose.new.yml docker-compose.yml
else
rm docker-compose.new.yml
docker-compose pull
docker-compose down
fi
docker-compose run --rm web -- azuracast_update "$@"
docker-compose up -d
if ask "Clean up all stopped Docker containers and images to save space?" Y; then
docker system prune -f
fi
echo "Update complete!"
fi
exit
}
#
# Update this Docker utility script.
# Usage: ./docker.sh update-self
#
update-self() {
local AZURACAST_RELEASE_BRANCH
AZURACAST_RELEASE_BRANCH=$(get-release-branch-name)
curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/$AZURACAST_RELEASE_BRANCH/docker.sh -o docker.sh
chmod a+x docker.sh
echo "New Docker utility script downloaded."
exit
}
#
# Run a CLI command inside the Docker container.
# Usage: ./docker.sh cli [command]
#
cli() {
docker-compose exec --user="azuracast" web azuracast_cli "$@"
exit
}
#
# Enter the bash terminal of the running web container.
# Usage: ./docker.sh bash
#
bash() {
docker-compose exec --user="azuracast" web bash
exit
}
#
# Enter the MariaDB database management terminal with the correct credentials.
#
db() {
local MYSQL_HOST MYSQL_PORT MYSQL_USER MYSQL_PASSWORD MYSQL_DATABASE
.env --file azuracast.env get MYSQL_HOST
MYSQL_HOST="${REPLY:-localhost}"
.env --file azuracast.env get MYSQL_PORT
MYSQL_PORT="${REPLY:-3306}"
.env --file azuracast.env get MYSQL_USER
MYSQL_USER="${REPLY:-azuracast}"
.env --file azuracast.env get MYSQL_PASSWORD
MYSQL_PASSWORD="${REPLY:-azur4c457}"
.env --file azuracast.env get MYSQL_DATABASE
MYSQL_DATABASE="${REPLY:-azuracast}"
docker-compose exec --user="mysql" web mysql --user=${MYSQL_USER} --password=${MYSQL_PASSWORD} \
--host=${MYSQL_HOST} --port=${MYSQL_PORT} --database=${MYSQL_DATABASE}
exit
}
#
# Back up the Docker volumes to a .tar.gz file.
# Usage:
# ./docker.sh backup [/custom/backup/dir/custombackupname.zip]
#
backup() {
local BACKUP_PATH BACKUP_DIR BACKUP_FILENAME BACKUP_EXT
BACKUP_PATH=$(readlink -f ${1:-"./backup.tar.gz"})
BACKUP_DIR=$(dirname -- "$BACKUP_PATH")
BACKUP_FILENAME=$(basename -- "$BACKUP_PATH")
BACKUP_EXT="${BACKUP_FILENAME##*.}"
shift
# Prepare permissions
if [[ $EUID -ne 0 ]]; then
.env --file .env set AZURACAST_PUID="$(id -u)"
.env --file .env set AZURACAST_PGID="$(id -g)"
fi
docker-compose exec --user="azuracast" web azuracast_cli azuracast:backup "/var/azuracast/backups/${BACKUP_FILENAME}" "$@"
# Move from Docker volume to local filesystem
docker run --rm -v "azuracast_backups:/backup_src" \
-v "$BACKUP_DIR:/backup_dest" \
busybox mv "/backup_src/${BACKUP_FILENAME}" "/backup_dest/${BACKUP_FILENAME}"
}
#
# Restore an AzuraCast backup into Docker.
# Usage:
# ./docker.sh restore [/custom/backup/dir/custombackupname.zip]
#
restore() {
if [[ ! -f .env ]] || [[ ! -f azuracast.env ]]; then
echo "AzuraCast hasn't been installed yet on this server."
echo "You should run './docker.sh install' first before restoring."
exit 1
fi
if ask "Restoring will remove any existing AzuraCast installation data, replacing it with your backup. Continue?" Y; then
if [[ $1 != "" ]]; then
local BACKUP_PATH BACKUP_DIR BACKUP_FILENAME BACKUP_EXT
BACKUP_PATH=$(readlink -f ${1:-"./backup.tar.gz"})
BACKUP_DIR=$(dirname -- "$BACKUP_PATH")
BACKUP_FILENAME=$(basename -- "$BACKUP_PATH")
BACKUP_EXT="${BACKUP_FILENAME##*.}"
shift
if [[ ! -f ${BACKUP_PATH} ]]; then
echo "File '${BACKUP_PATH}' does not exist. Nothing to restore."
exit 1
fi
docker-compose down -v
docker volume create azuracast_backups
# Move from local filesystem to Docker volume
docker run --rm -v "$BACKUP_DIR:/backup_src" \
-v "azuracast_backups:/backup_dest" \
busybox mv "/backup_src/${BACKUP_FILENAME}" "/backup_dest/${BACKUP_FILENAME}"
# Prepare permissions
if [[ $EUID -ne 0 ]]; then
.env --file .env set AZURACAST_PUID="$(id -u)"
.env --file .env set AZURACAST_PGID="$(id -g)"
fi
docker-compose run --rm web -- azuracast_restore "/var/azuracast/backups/${BACKUP_FILENAME}" "$@"
# Move file back from volume to local filesystem
docker run --rm -v "azuracast_backups:/backup_src" \
-v "$BACKUP_DIR:/backup_dest" \
busybox mv "/backup_src/${BACKUP_FILENAME}" "/backup_dest/${BACKUP_FILENAME}"
docker-compose down
docker-compose up -d
else
docker-compose down
# Remove all volumes except the backup volume.
docker volume rm -f $(docker volume ls | grep -v "azuracast_backups" | awk 'NR>1 {print $2}')
docker-compose run --rm web -- azuracast_restore "$@"
docker-compose down
docker-compose up -d
fi
fi
exit
}
#
# Restore the Docker volumes from a legacy backup format .tar.gz file.
# Usage:
# ./docker.sh restore [/custom/backup/dir/custombackupname.tar.gz]
#
restore-legacy() {
local APP_BASE_DIR BACKUP_PATH BACKUP_DIR BACKUP_FILENAME
APP_BASE_DIR=$(pwd)
BACKUP_PATH=${1:-"./backup.tar.gz"}
BACKUP_DIR=$(cd "$(dirname "$BACKUP_PATH")" && pwd)
BACKUP_FILENAME=$(basename "$BACKUP_PATH")
cd "$APP_BASE_DIR" || exit
if [ -f "$BACKUP_PATH" ]; then
docker-compose down
docker volume rm azuracast_db_data azuracast_station_data
docker volume create azuracast_db_data
docker volume create azuracast_station_data
docker run --rm -v "$BACKUP_DIR:/backup" \
-v azuracast_db_data:/azuracast/db \
-v azuracast_station_data:/azuracast/stations \
busybox tar zxvf "/backup/$BACKUP_FILENAME"
docker-compose up -d
else
echo "File $BACKUP_PATH does not exist in this directory. Nothing to restore."
exit 1
fi
exit
}
#
# DEVELOPER TOOL:
# Access the static console as a developer.
# Usage: ./docker.sh static [static_container_command]
#
static() {
docker-compose -f docker-compose.frontend.yml down -v
docker-compose -f docker-compose.frontend.yml build
docker-compose --env-file=.env -f docker-compose.frontend.yml run --rm frontend "$@"
exit
}
#
# Stop all Docker containers and remove related volumes.
# Usage: ./docker.sh uninstall
#
uninstall() {
if ask "This operation is destructive and will wipe your existing Docker containers. Continue?" N; then
docker-compose down -v
docker-compose rm -f
docker volume prune -f
echo "All AzuraCast Docker containers and volumes were removed."
echo "To remove *all* Docker containers and volumes, run:"
echo " docker stop \$(docker ps -a -q)"
echo " docker rm \$(docker ps -a -q)"
echo " docker volume prune -f"
echo ""
fi
exit
}
#
# LetsEncrypt: Now managed via the Web UI.
#
setup-letsencrypt() {
echo "LetsEncrypt is now managed from within the web interface."
}
letsencrypt-create() {
setup-letsencrypt
exit
}
#
# Utility script to facilitate switching ports.
# Usage: ./docker.sh change-ports
#
change-ports() {
setup-ports
docker-compose down
docker-compose up -d
}
#
# Helper scripts for basic Docker Compose functions
#
up() {
echo "Starting up AzuraCast services..."
docker-compose up -d
}
down() {
echo "Shutting down AzuraCast services..."
docker-compose down
}
restart() {
down
up
}
# Ensure we're in the same directory as this script.
cd "$( dirname "${BASH_SOURCE[0]}" )" || exit
"$@"

View file

@ -1 +0,0 @@
The three files found here are mentioned in docker.sh's run-installer(), with 'sample.' added to each filename.

View file

@ -1,11 +0,0 @@
COMPOSE_PROJECT_NAME=azuracast
AZURACAST_HTTP_PORT=80
AZURACAST_HTTPS_PORT=443
AZURACAST_SFTP_PORT=2022
AZURACAST_PUID=1000
AZURACAST_PGID=1000
NGINX_TIMEOUT=1800

View file

@ -13,179 +13,101 @@
Calibre-Web README
==================
This Ansible role installs
`Calibre-Web <https://github.com/janeczku/calibre-web#readme>`_ as a modern
client-server alternative to Calibre, for your
`Internet-in-a-Box (IIAB) <https://internet-in-a-box.org>`_.
Calibre-Web provides a clean interface for browsing, reading and downloading
e-books using an existing Calibre database. Teachers can upload e-books,
adjust e-book metadata, and create custom e-book collections ("bookshelves"):
https://github.com/janeczku/calibre-web#about
Calibre-Web provides a clean web interface for students to browse, read and
download e-books using a
`Calibre-compatible database <https://manual.calibre-ebook.com/db_api.html>`_.
This Ansible role installs Calibre-Web as part of your Internet-in-a-Box (IIAB)
as a possible alternative to Calibre.
Teachers upload e-books, adjust e-book metadata, and create custom "bookshelf"
collections — to help students build the best local community library!
*WARNING: Calibre-Web depends on Calibre's own /usr/bin/ebook-convert program,
so we strongly recommend you also install Calibre during your IIAB
installation!*
**NEW AS OF JANUARY 2024:** `IIAB's experimental new version of Calibre-Web <https://github.com/iiab/calibre-web/wiki>`_
**also lets you add YouTube and Vimeo videos (and local videos, e.g. from
teachers' phones) to expand your indigenous/local/family learning library!**
.. image:: https://www.yankodesign.com/images/design_news/2019/05/221758/luo_beetle_library_8.jpg
🍒 GURU TIPS 🍒
* Calibre-Web takes advantage of Calibre's own `/usr/bin/ebook-convert
<https://manual.calibre-ebook.com/generated/en/ebook-convert.html>`_ program
if that's installed — so consider also installing
`Calibre <https://calibre-ebook.com/whats-new>`_ during your IIAB
installation — *if you tolerate the weighty ~1 GB (of graphical OS libraries)
that Calibre mandates!*
* If you choose to also install Calibre (e.g. by running
``sudo apt install calibre``) then you'll get useful e-book
importing/organizing tools like
`/usr/bin/calibredb <https://manual.calibre-ebook.com/generated/en/calibredb.html>`_.
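For example, a hypothetical calibredb one-liner to list the e-books already in
your IIAB library (the path assumes IIAB's default /library/calibre-web) would be::
sudo calibredb list --with-library /library/calibre-web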
Install It
----------
Install Calibre-Web by setting these 2 variables in
`/etc/iiab/local_vars.yml <https://wiki.iiab.io/go/FAQ#What_is_local_vars.yml_and_how_do_I_customize_it%3F>`_::
calibreweb_install: True
calibreweb_enabled: True
Then install IIAB (`download.iiab.io <https://download.iiab.io>`_). Or if
IIAB's already installed, run::
cd /opt/iiab/iiab
sudo ./runrole calibre-web
NOTE: Calibre-Web's Ansible role (playbook) in
`/opt/iiab/iiab/roles <https://github.com/iiab/iiab/tree/master/roles>`_ is
``calibre-web`` which contains a hyphen — *whereas its Ansible variables*
``calibreweb_*`` *do NOT contain a hyphen!*
Please note Calibre-Web's Ansible playbook is ``/opt/iiab/iiab/roles/calibre-web``
whereas its Ansible variables ``calibreweb_*`` do **not** include the dash,
per Ansible recommendations.
Using It
--------
Try Calibre-Web on your own IIAB by browsing to http://box/books (or
http://box.lan/books).
After installation, try out Calibre-Web at http://box/books (or box.lan/books).
*Students* access it without a password (to read and download books).
*Teachers* add and arrange books using an administrative account, by clicking
**Guest** then logging in with::
Typically students access it without a password (to read and download books)
whereas teachers add books using an administrative account, as follows::
Username: Admin
Password: changeme
🍒 GURU TIPS 🍒
If the default configuration is not found, the Calibre-Web server creates a
new settings file with calibre-web's own default administrative account::
* If Calibre-Web's configuration file (app.db) goes missing, the administrative
account will revert to::
Username: admin
Password: admin123
Username: admin
Password: admin123
Backend
-------
* If you lose your password, you can change it with the
``-s [username]:[newpassword]`` command-line option:
https://github.com/janeczku/calibre-web/wiki/FAQ#what-do-i-do-if-i-lose-my-admin-password
You can manage the backend Calibre-Web server with these systemd commands::
systemctl enable calibre-web
systemctl restart calibre-web
systemctl status calibre-web
systemctl stop calibre-web
Configuration
-------------
To configure Calibre-Web browse to http://box/books then click **Guest** to log
in as user **Admin** (default passwords above!)
To configure Calibre-Web, log in as user 'Admin' then click 'Admin' on top.
Check 'Configuration' options near the bottom of the page.
Then click the leftmost **Admin** button to administer — considering all 3
**Configuration** buttons further below.
These critical settings are stored in::
Critical settings are stored in::
/library/calibre-web/config/app.db
Whereas your e-book metadata is stored in a Calibre-style database::
Your e-book metadata is stored in a Calibre-style database::
/library/calibre-web/metadata.db
Videos' metadata is stored in database::
/library/calibre-web/xklb-metadata.db
See also::
/library/calibre-web/metadata_db_prefs_backup.json
Finally, take note of Calibre-Web's
`FAQ <https://github.com/janeczku/calibre-web/wiki/FAQ>`_ and official docs on
its
`Runtime Configuration Options <https://github.com/janeczku/calibre-web/wiki/Configuration>`_
and
`Command Line Interface <https://github.com/janeczku/calibre-web/wiki/Command-Line-Interface>`_.
Backend
-------
You can manage the backend Calibre-Web server with systemd commands like::
systemctl status calibre-web
systemctl stop calibre-web
systemctl restart calibre-web
Run all commands
`as root <https://unix.stackexchange.com/questions/3063/how-do-i-run-a-command-as-the-system-administrator-root>`_.
Errors and warnings can be seen if you run::
journalctl -u calibre-web
Log verbosity level can be
`adjusted <https://github.com/janeczku/calibre-web/wiki/Configuration#logfile-configuration>`_
within Calibre-Web's **Configuration > Basic Configuration > Logfile
Configuration**.
Finally, http://box/live/stats (Calibre-Web's **About** page) can be a very
useful list of ~42 `Calibre-Web dependencies <https://github.com/janeczku/calibre-web/wiki/Dependencies-in-Calibre-Web-Linux-and-Windows>`_
(mostly Python packages, and the version number of each that's installed).
See the official docs on Calibre-Web's `Runtime Configuration Options <https://github.com/janeczku/calibre-web/wiki/Configuration>`_.
Back Up Everything
------------------
Please back up the entire folder ``/library/calibre-web`` before upgrading —
as it contains your Calibre-Web content **and** configuration settings!
as it contains your Calibre-Web content **and** settings!
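One simple way to do so, for example::
sudo systemctl stop calibre-web
sudo tar czf /root/calibre-web-backup.tar.gz /library/calibre-web
sudo systemctl start calibre-web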
Upgrading
---------
Please see our `new/automated upgrade technique (iiab-update) <https://github.com/iiab/calibre-web/wiki#upgrading>`_
introduced in July 2024.
Reinstalling Calibre-Web automatically upgrades to the latest version if your
Internet-in-a-Box (IIAB) is online.
But first: back up your content **and** configuration settings, as outlined
above!
But first: back up your content **and** settings, as explained above.
**Conversely if you're sure you want to fully reset your Calibre-Web settings,
and remove all existing e-book/video/media metadata — then move your
/library/calibre-web/config/app.db, /library/calibre-web/metadata.db and
/library/calibre-web/xklb-metadata.db out of the way.**
RECAP: Either way, "reinstalling" Calibre-Web automatically installs the latest
version — so long as your Internet-in-a-Box (IIAB) is online. Most people
should stick with the new ``iiab-update`` technique above. However if you must
use the older/manual approach, you would need to run, as root::
**Then move your /library/calibre-web/metadata.db out of the way, if you're
sure you want to (re)install bare/minimal metadata, and force all Calibre-Web
settings to the default. Then run**::
cd /opt/iiab/iiab
./runrole --reinstall calibre-web
./runrole calibre-web
Or, to reinstall all of IIAB::
Or, if there's a need to try updating Calibre-Web's code alone::
cd /opt/iiab/iiab
./iiab-install --reinstall
cd /usr/local/calibre-web-py3
Or, if you just want to upgrade Calibre-Web code alone, prior to proceeding
manually::
cd /opt/iiab/calibre-web
git pull
Finally, this much older way is *no longer recommended*::
cd /opt/iiab/iiab
./iiab-install --reinstall # OR: ./iiab-configure
Known Issues
------------
@ -231,7 +153,7 @@ Known Issues
* |ss| Imagemagick policy prevents generating thumbnails for PDF's during upload: `#1530 <https://github.com/iiab/iiab/issues/1530>`_ `janeczku/calibre-web#827 <https://github.com/janeczku/calibre-web/issues/827>`_ |se|
* |ss| Upload of not supported file formats gives no feedback to the user: `janeczku/calibre-web#828 <https://github.com/janeczku/calibre-web/issues/828>`_ |se| |nbsp| Fixed by `361a124 <https://github.com/janeczku/calibre-web/commit/361a1243d732116e6f520fabbaae017068b86037>`_ on 2019-02-27.
* Upload of not supported file formats gives no feedback to the user: `janeczku/calibre-web#828 <https://github.com/janeczku/calibre-web/issues/828>`_
* *Please report serious issues here:*
https://github.com/iiab/calibre-web/issues
* *Please assist us in reporting serious issues here:*
https://github.com/janeczku/calibre-web/issues

View file

@ -14,26 +14,23 @@
# All above are set in: github.com/iiab/iiab/blob/master/vars/default_vars.yml
# If nec, change them by editing /etc/iiab/local_vars.yml prior to installing!
calibreweb_repo_url: https://github.com/iiab/calibre-web # Or use upstream: https://github.com/janeczku/calibre-web
calibreweb_version: master # WAS: master, 0.6.4, 0.6.5, 0.6.6, 0.6.7, 0.6.8, 0.6.9
calibreweb_venv_wipe: False # 2023-12-04: NEW default TDD (Test-Driven Dev!)
calibreweb_venv_path: /usr/local/calibre-web-py3
calibreweb_exec_path: "{{ calibreweb_venv_path }}/cps.py"
# Config files (in reality just app.db) put in:
# Config files put in:
calibreweb_config: "{{ calibreweb_home }}/config"
# 2022-03-07: Calibre-Web will be reset to default settings if (re)installed
# when /library/calibre-web/config/app.db doesn't exist:
calibreweb_settings_database: app.db # /library/calibre-web/config/app.db
# UNUSED var as of 2022-03-07:
# calibreweb_database: metadata.db # /library/calibre-web/metadata.db
# Calibre-Web will be provisioned with default administrative account,
# metadata.db and language if /library/calibre-web/metadata.db does not exist.
# NOT CURRENTLY IN USE: calibreweb_provision: True
calibreweb_settings_database: app.db
calibreweb_database: metadata.db
# Files owned by:
calibreweb_user: root
# UNUSED vars, as of March 2019:
# UNUSED variables, as of March 2019:
# calibreweb_admin_user: Admin
# calibreweb_admin_password: changeme

Binary file not shown.

View file

@ -1,52 +0,0 @@
- name: Enable & Restart 'calibre-web' systemd service, if calibreweb_enabled
systemd:
name: calibre-web
daemon_reload: yes
enabled: yes
state: restarted
when: calibreweb_enabled
- name: Disable & Stop 'calibre-web' systemd service, if not calibreweb_enabled
systemd:
name: calibre-web
enabled: no
state: stopped
when: not calibreweb_enabled
# TO DO: restore http://box/libros & http://box/livres etc, alongside English (#2195)
# RELATED: https://github.com/janeczku/calibre-web/wiki/Setup-Reverse-Proxy
- name: Enable http://box{{ calibreweb_url1 }} via NGINX, by installing {{ nginx_conf_dir }}/calibre-web-nginx.conf from template # http://box/books
template:
src: calibre-web-nginx.conf.j2
dest: "{{ nginx_conf_dir }}/calibre-web-nginx.conf" # /etc/nginx/conf.d
when: calibreweb_enabled
- name: If enabling with Calibre-Web enhanced for large audio/video "books" too, also append onto calibre-web-nginx.conf AND symlink /library/www/html/calibre-web -> /library/calibre-web (WIP)
shell: |
if [ -f {{ calibreweb_venv_path }}/scripts/calibre-web-nginx.conf ]; then
cat {{ calibreweb_venv_path }}/scripts/calibre-web-nginx.conf >> {{ nginx_conf_dir }}/calibre-web-nginx.conf
# 2023-12-05: Not needed as a result of PR iiab/calibre-web#57
# ln -sf {{ calibreweb_home }} {{ doc_root }}/calibre-web
fi
when: calibreweb_enabled
- name: Disable http://box{{ calibreweb_url1 }} via NGINX, by removing {{ nginx_conf_dir }}/calibre-web-nginx.conf
file:
path: "{{ nginx_conf_dir }}/calibre-web-nginx.conf"
state: absent
when: not calibreweb_enabled
- name: If disabling, also remove symlink /library/www/html/calibre-web (WIP)
file:
path: "{{ doc_root }}/calibre-web" # /library/www/html
state: absent
when: not calibreweb_enabled
- name: Restart 'nginx' systemd service
systemd:
name: nginx
state: restarted
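# Illustration (not part of the playbook): these tasks are normally re-applied via
#   cd /opt/iiab/iiab && sudo ./runrole calibre-web
# after toggling calibreweb_enabled in /etc/iiab/local_vars.yml.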

View file

@ -1,50 +1,10 @@
# Or try 'iiab-update -f' for a more rapid upgrade of IIAB Calibre-Web:
#
# https://wiki.iiab.io/go/FAQ#Can_I_upgrade_IIAB_software%3F
# https://github.com/iiab/calibre-web/wiki#upgrading
# https://github.com/iiab/iiab/blob/master/scripts/iiab-update
# https://github.com/iiab/iiab/tree/master/roles/calibre-web#upgrading
- name: Record (initial) disk space used
shell: df -B1 --output=used / | tail -1
register: df1
- name: Stop 'calibre-web' systemd service for safety (RED ERROR CAN BE IGNORED!)
systemd:
name: calibre-web
state: stopped
ignore_errors: True # Shows red errors, and continue...
#failed_when: False # Hides red errors, and continue...
# Official upstream instructions:
# apt install python3-pip python3-venv
# https://github.com/janeczku/calibre-web/wiki/Manual-installation
- name: "Install package: imagemagick"
- name: "Install packages: imagemagick, python3-venv"
package:
name:
- imagemagick
#- python3-cryptography # Was needed on Raspberry Pi OS (SEE iiab/calibre-web#260, janeczku/calibre-web#3183)
#- python3-netifaces
- python3-venv
state: present
# https://github.com/iiab/iiab/pull/3496#issuecomment-1475094542
#- name: "Install packages: python3-dev, gcc to compile 'netifaces'"
# package:
# name:
# - python3-dev # header files
# - gcc # compiler
# state: present
# when: python_version is version('3.10', '>=')
- name: Does /etc/ImageMagick-6/policy.xml exist?
stat:
path: /etc/ImageMagick-6/policy.xml
register: imagemagick6_policy_xml
# 2024-12-16: Debian 13 uses /etc/ImageMagick-7/policy.xml instead, which doesn't need this lineinfile surgery:
# https://stackoverflow.com/questions/52998331/imagemagick-security-policy-pdf-blocking-conversion
- name: Allow ImageMagick to read PDFs, per /etc/ImageMagick-6/policy.xml, to create book cover thumbnails
lineinfile:
path: /etc/ImageMagick-6/policy.xml
@ -52,111 +12,43 @@
backrefs: yes
line: ' <policy domain="coder" rights="read" pattern="PDF" />'
state: present
when: imagemagick6_policy_xml.stat.exists
- name: "Create 2 Calibre-Web folders to store data and config files: {{ calibreweb_home }}, {{ calibreweb_config }} (each set to {{ calibreweb_user }}:{{ apache_user }}, default to 0755)"
- name: "Create 3 Calibre-Web folders to store data and config files: {{ calibreweb_home }}, {{ calibreweb_venv_path }}, {{ calibreweb_config }} (all set to {{ calibreweb_user }}:{{ apache_user }}) (default to 0755)"
file:
state: directory
path: "{{ item }}"
owner: "{{ calibreweb_user }}" # root
group: "{{ apache_user }}" # www-data on debuntu
#mode: '0755'
with_items:
- "{{ calibreweb_home }}" # /library/calibre-web
- "{{ calibreweb_config }}" # /library/calibre-web/config
- "{{ calibreweb_venv_path }}" # /usr/local/calibre-web-py3
# FYI since May 2021, Calibre-Web (major releases) can be installed with pip:
# https://pypi.org/project/calibreweb/
# https://github.com/janeczku/calibre-web/issues/456
# https://github.com/janeczku/calibre-web/issues/677
# https://github.com/janeczku/calibre-web/pull/927
# https://github.com/janeczku/calibre-web/pull/1459
- name: "Remove previous virtual environment {{ calibreweb_venv_path }} -- if 'calibreweb_venv_wipe: True'"
file:
path: "{{ calibreweb_venv_path }}" # /usr/local/calibre-web-py3
state: absent
when: calibreweb_venv_wipe
- name: Does {{ calibreweb_venv_path }} exist?
stat:
path: "{{ calibreweb_venv_path }}"
register: calibreweb_venv
- name: git clone Calibre-Web ({{ calibreweb_version }}) from {{ calibreweb_repo_url }} to {{ calibreweb_venv_path }} (~122 MB initially, ~191+ or ~203+ MB later) -- if {{ calibreweb_venv_path }} doesn't exist
## TODO: Calibre-web future release might get into pypi https://github.com/janeczku/calibre-web/issues/456
- name: Clone i.e. download Calibre-Web ({{ calibreweb_version }}) from https://github.com/janeczku/calibre-web.git to {{ calibreweb_venv_path }} (~94 MB initially, ~115+ MB later)
git:
repo: "{{ calibreweb_repo_url }}" # e.g. https://github.com/iiab/calibre-web or https://github.com/janeczku/calibre-web
dest: "{{ calibreweb_venv_path }}"
#force: True # CLAIM: "If true, any modified files in the working repository will be discarded" -- REALITY: even if `force: no`, Ansible destructively reclones (also removing all test branch commits etc!) -- unless a git credential is provided to Ansible?
#depth: 1 # 2023-11-04: Full clone for now, to help @deldesir & wider community testing
version: "{{ calibreweb_version }}" # e.g. master, 0.6.22
when: not calibreweb_venv.stat.exists
repo: https://github.com/janeczku/calibre-web.git
dest: "{{ calibreweb_venv_path }}" # /usr/local/calibre-web
force: yes
depth: 1
version: "{{ calibreweb_version }}" # e.g. master, 0.6.5
- name: cd {{ calibreweb_venv_path }} ; git pull {{ calibreweb_repo_url }} {{ calibreweb_version }} --no-rebase --no-edit -- if {{ calibreweb_venv_path }} exists
command: git pull "{{ calibreweb_repo_url }}" "{{ calibreweb_version }}" --no-rebase --no-edit
args:
chdir: "{{ calibreweb_venv_path }}"
when: calibreweb_venv.stat.exists
- debug:
msg:
- "NEED BETTER/EXPERIMENTAL YouTube SCRAPING? RUN THE NEXT LINE -- for the latest yt-dlp 'nightly' release:"
- sudo pipx inject --pip-args='--upgrade --pre' -f library yt-dlp[default]
- name: If Calibre-Web is being enhanced with audio/video "books" too, install/upgrade additional prereqs -- SEE https://github.com/iiab/calibre-web/wiki
shell: |
if [ -f {{ calibreweb_venv_path }}/scripts/lb-wrapper ]; then
apt install ffmpeg pipx -y
if lb --version; then
if pipx list | grep -q 'xklb'; then
pipx uninstall xklb
pipx install library
else
pipx reinstall library
fi
else
pipx install library
fi
ln -sf /root/.local/bin/lb /usr/local/bin/lb
if [ -f /root/.local/share/pipx/venvs/library/bin/yt-dlp ]; then
ln -sf /root/.local/share/pipx/venvs/library/bin/yt-dlp /usr/local/bin/yt-dlp
elif [ -f /root/.local/pipx/venvs/library/bin/yt-dlp ]; then
ln -sf /root/.local/pipx/venvs/library/bin/yt-dlp /usr/local/bin/yt-dlp
else
echo "ERROR: yt-dlp NOT FOUND"
fi
# NEED BETTER/EXPERIMENTAL YouTube SCRAPING? UNCOMMENT THE NEXT LINE -- for the latest yt-dlp "nightly" release:
# pipx inject --pip-args="--upgrade --pre" -f library yt-dlp[default]
#
# https://github.com/yt-dlp/yt-dlp-nightly-builds/releases
# https://pypi.org/project/yt-dlp/#history
cp {{ calibreweb_venv_path }}/scripts/lb-wrapper /usr/local/bin/
chmod a+x /usr/local/bin/lb-wrapper
fi
- name: Download Calibre-Web dependencies from 'requirements.txt' into python3 virtual environment {{ calibreweb_venv_path }}
## Ansible Pip Bug: Cannot use 'chdir' with 'env' https://github.com/ansible/ansible/issues/37912 (Patch landed)
#- name: Download calibre-web dependencies into vendor subdirectory.
# pip:
# requirements: "{{ calibreweb_path }}/requirements.txt"
# chdir: "{{ calibreweb_path }}"
# extra_args: '--target vendor'
# ignore_errors: True
##
# Implementing this with Ansible command module for now.
- name: Download Calibre-Web dependencies (using pip) into python3 virtual environment {{ calibreweb_venv_path }}
pip:
requirements: "{{ calibreweb_venv_path }}/requirements.txt"
virtualenv: "{{ calibreweb_venv_path }}" # /usr/local/calibre-web-py3
#virtualenv_site_packages: no
#virtualenv_command: python3 -m venv --system-site-packages {{ calibreweb_venv_path }}
virtualenv_site_packages: no
virtualenv_command: python3 -m venv {{ calibreweb_venv_path }}
extra_args: --prefer-binary # 2023-10-01: Lifesaver when recent wheels (e.g. piwheels.org) are inevitably not yet built! SEE #3560
# 2023-10-11: RasPiOS Bookworm doc for Python with venv (PEP 668 now enforced!)
# https://www.raspberrypi.com/documentation/computers/os.html#use-python-on-a-raspberry-pi
# https://www.raspberrypi.com/documentation/computers/os.html#install-python-packages-using-apt
# https://www.raspberrypi.com/documentation/computers/os.html#install-python-libraries-using-pip
# VIRTUALENV EXAMPLE COMMANDS:
# python3 -m venv /usr/local/calibre-web-py3 (create venv)
# cd /usr/local/calibre-web-py3
# . bin/activate (or 'source bin/activate' -- this prepends '/usr/local/calibre-web-py3/bin' to yr PATH)
# python3 -m pip list ('pip list' sufficient *IF* path set above!)
# python3 -m pip freeze > /tmp/requirements.txt
# python3 -m pip install -r requirements.txt
# deactivate
# https://pip.pypa.io/en/stable/user_guide/#requirements-files
# https://pip.pypa.io/en/latest/reference/requirements-file-format/
- name: Install /etc/systemd/system/calibre-web.service from template
template:
@ -174,40 +66,28 @@
dest: "{{ calibreweb_home }}" # /library/calibre-web
owner: "{{ calibreweb_user }}" # root
group: "{{ apache_user }}" # www-data on debuntu
#mode: '0644'
backup: yes
with_items:
- roles/calibre-web/files/metadata.db
- roles/calibre-web/files/metadata_db_prefs_backup.json
when: not metadatadb.stat.exists
#when: calibreweb_provision
- name: Does /library/calibre-web/config/app.db exist?
stat:
path: /library/calibre-web/config/app.db
register: appdb
- name: Provision/Copy default admin settings to {{ calibreweb_config }}/app.db IF it did not exist
- name: Provision/Copy default admin settings to {{ calibreweb_config }}/app.db IF metadata.db did not exist
copy:
src: roles/calibre-web/files/app.db
dest: "{{ calibreweb_config }}" # /library/calibre-web/config
owner: "{{ calibreweb_user }}" # root
group: "{{ apache_user }}" # www-data on debuntu
#mode: '0644'
backup: yes
when: not appdb.stat.exists
when: not metadatadb.stat.exists
#when: calibreweb_provision
# RECORD Calibre-Web AS INSTALLED
- name: Record (final) disk space used
shell: df -B1 --output=used / | tail -1
register: df2
- name: Add 'calibreweb_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: calibre-web
option: calibreweb_disk_usage
value: "{{ df2.stdout|int - df1.stdout|int }}"
- name: "Set 'calibreweb_installed: True'"
set_fact:
calibreweb_installed: True

View file

@ -19,47 +19,56 @@
quiet: yes
- block:
- name: Install Calibre-Web if 'calibreweb_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
include_tasks: install.yml
when: calibreweb_installed is undefined
- name: Install Calibre-Web if 'calibreweb_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
include_tasks: install.yml
when: calibreweb_installed is undefined
- include_tasks: enable-or-disable.yml
- name: Enable & Restart 'calibre-web' systemd service, if calibreweb_enabled
systemd:
name: calibre-web
daemon_reload: yes
enabled: yes
state: restarted
when: calibreweb_enabled
- name: Add 'calibre-web' variable values to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: calibre-web
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
- option: name
value: Calibre-Web
- option: description
value: '"Calibre-Web is a web app providing a clean interface for browsing, reading and downloading e-books."'
- option: calibreweb_install
value: "{{ calibreweb_install }}"
- option: calibreweb_enabled
value: "{{ calibreweb_enabled }}"
- option: calibreweb_url1
value: "{{ calibreweb_url1 }}"
- option: calibreweb_url2
value: "{{ calibreweb_url2 }}"
- option: calibreweb_url3
value: "{{ calibreweb_url3 }}"
- option: calibreweb_path
value: "{{ calibreweb_venv_path }}"
- option: calibreweb_home
value: "{{ calibreweb_home }}"
- option: calibreweb_port
value: "{{ calibreweb_port }}"
- option: calibreweb_settings_database
value: "{{ calibreweb_settings_database }}"
- name: Disable & Stop 'calibre-web' systemd service, if not calibreweb_enabled
systemd:
name: calibre-web
enabled: no
state: stopped
when: not calibreweb_enabled
rescue:
- name: Enable/Disable/Restart NGINX
include_tasks: nginx.yml
- name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
fail:
msg: ""
when: not skip_role_on_error
- name: Add 'calibre-web' variable values to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: calibre-web
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
- option: name
value: Calibre-Web
- option: description
value: '"Calibre-Web is a web app providing a clean interface for browsing, reading and downloading e-books."'
- option: calibreweb_install
value: "{{ calibreweb_install }}"
- option: calibreweb_enabled
value: "{{ calibreweb_enabled }}"
- option: calibreweb_url1
value: "{{ calibreweb_url1 }}"
- option: calibreweb_url2
value: "{{ calibreweb_url2 }}"
- option: calibreweb_url3
value: "{{ calibreweb_url3 }}"
- option: calibreweb_path
value: "{{ calibreweb_venv_path }}"
- option: calibreweb_home
value: "{{ calibreweb_home }}"
- option: calibreweb_port
value: "{{ calibreweb_port }}"
- option: calibreweb_database
value: "{{ calibreweb_database }}"

View file

@ -0,0 +1,19 @@
# TO DO: restore http://box/libros & http://box/livres etc, alongside English (#2195)
# RELATED: https://github.com/janeczku/calibre-web/wiki/Setup-Reverse-Proxy
- name: Enable http://box{{ calibreweb_url1 }} via NGINX, by installing {{ nginx_conf_dir }}/calibre-web-nginx.conf from template # http://box/books
template:
src: calibre-web-nginx.conf.j2
dest: "{{ nginx_conf_dir }}/calibre-web-nginx.conf" # /etc/nginx/conf.d
when: calibreweb_enabled
- name: Disable http://box{{ calibreweb_url1 }} via NGINX, by removing {{ nginx_conf_dir }}/calibre-web-nginx.conf
file:
path: "{{ nginx_conf_dir }}/calibre-web-nginx.conf" # /etc/nginx/conf.d
state: absent
when: not calibreweb_enabled
- name: Restart 'nginx' systemd service
systemd:
name: nginx
state: restarted
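After that restart, a hedged spot-check from the IIAB box itself (assuming calibreweb_url1 is /books and Calibre-Web listens on port 8083, per the template that follows):

```bash
nginx -t                                       # syntax-check /etc/nginx/conf.d/*.conf incl. calibre-web-nginx.conf
curl -sI http://127.0.0.1/books/ | head -n 1   # via NGINX -- expect 200 or a Calibre-Web redirect
curl -sI http://127.0.0.1:8083/  | head -n 1   # direct to Calibre-Web, bypassing NGINX
```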

View file

@ -5,7 +5,7 @@ location {{ calibreweb_url1 }}/ {
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name "{{ calibreweb_url1 }}";
proxy_set_header X-Script-Name {{ calibreweb_url1 }};
proxy_pass http://127.0.0.1:8083;
}
@ -14,7 +14,7 @@ location {{ calibreweb_url2 }}/ {
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name "{{ calibreweb_url2 }}";
proxy_set_header X-Script-Name {{ calibreweb_url2 }};
proxy_pass http://127.0.0.1:8083;
}
@ -23,6 +23,6 @@ location {{ calibreweb_url3 }}/ {
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name "{{ calibreweb_url3 }}";
proxy_set_header X-Script-Name {{ calibreweb_url3 }};
proxy_pass http://127.0.0.1:8083;
}

View file

@ -34,11 +34,11 @@ calibre_userdb: "{{ calibre_dbpath }}/users.sqlite"
# calibre-server --manage-users --userdb /library/calibre/users.sqlite
calibre_sample_book: "Metamorphosis-jackson.epub"
# Must be downloadable from https://download.iiab.io/packages
# Must be downloadable from http://download.iiab.io/packages
calibre_src_url: "https://raw.githubusercontent.com/kovidgoyal/calibre/master/setup/linux-installer.py"
calibre_deb_url: "{{ iiab_download_url }}" # https://download.iiab.io/packages
calibre_deb_url: "{{ iiab_download_url }}" # http://download.iiab.io/packages
# Above URL must offer both .deb files below: (for scripts/calibre-install-pinned-rpi.sh to run)
calibre_deb_pin_version: 3.33.1+dfsg-1 # for calibre_3.33.1+dfsg-1_all.deb (24M, 2018-10-21)
calibre_bin_deb_pin_version: "{{ calibre_deb_pin_version }}" # for calibre-bin_3.33.1+dfsg-1_armhf.deb (706K, 2018-10-23)

View file

@ -1,29 +0,0 @@
# http://box:8080 & http://box:8080/mobile WORK BUT OTHER URL'S LIKE http://box/calibre ARE A MESS (BOOKS RARELY DISPLAY)
#
# 2018-08-27 POSSIBLE FIX...CONSIDER THIS ProxyPass / ProxyPassReverse TECHNIQUE:
# https://github.com/iiab/iiab/tree/master/roles/calibre-web/templates/calibre-web.conf.j2
# (anyway this works great for calibre-web, allowing http://box/books
# to work even better than http://box:8083 when box == 192.168.0.x !)
#
#- name: Attempt to enable http://box/calibre via Apache (UNTESTED)
# command: a2ensite calibre.conf
# when: apache_installed and calibre_enabled
#
#- name: Attempt to disable http://box/calibre via Apache (UNTESTED)
# command: a2dissite calibre.conf
# when: apache_installed and not calibre_enabled
- name: Enable & (Re)Start 'calibre-serve' service, if calibre_enabled
systemd:
daemon_reload: yes
name: calibre-serve
enabled: yes
state: restarted
when: calibre_enabled
- name: Disable & Stop 'calibre-serve' service, if not calibre_enabled
systemd:
name: calibre-serve
enabled: no
state: stopped
when: not calibre_enabled

View file

@ -1,9 +1,4 @@
- name: Record (initial) disk space used
shell: df -B1 --output=used / | tail -1
register: df1
# 1. APT INSTALL CALIBRE 4.12+ or 5.12+ (calibre, calibredb, calibre-server etc) ON ALL OS'S
# 1. INSTALL CALIBRE 3.39.1+ or 4.12+ (calibre, calibredb, calibre-server etc) ON ALL OS'S
- name: "Install OS's latest packages: calibre, calibre-bin"
package:
@ -84,17 +79,6 @@
# 5. RECORD Calibre AS INSTALLED
- name: Record (final) disk space used
shell: df -B1 --output=used / | tail -1
register: df2
- name: Add 'calibre_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: calibre
option: calibre_disk_usage
value: "{{ df2.stdout|int - df1.stdout|int }}"
- name: "Set 'calibre_installed: True'"
set_fact:
calibre_installed: True

View file

@ -19,37 +19,65 @@
quiet: yes
- block:
- name: Install Calibre if 'calibre_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
include_tasks: install.yml
when: calibre_installed is undefined
- name: Install Calibre if 'calibre_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
include_tasks: install.yml
when: calibre_installed is undefined
# http://box:8080 & http://box:8080/mobile WORK BUT OTHER URL'S LIKE http://box/calibre ARE A MESS (BOOKS RARELY DISPLAY)
#
# 2018-08-27 POSSIBLE FIX...CONSIDER THIS ProxyPass / ProxyPassReverse TECHNIQUE:
# https://github.com/iiab/iiab/tree/master/roles/calibre-web/templates/calibre-web.conf.j2
# (anyway this works great for calibre-web, allowing http://box/books
# to work even better than http://box:8083 when box == 192.168.0.x !)
#
#- name: Attempt to enable http://box/calibre via Apache (UNTESTED)
# command: a2ensite calibre.conf
# when: apache_installed and calibre_enabled
#
#- name: Attempt to disable http://box/calibre via Apache (UNTESTED)
# command: a2dissite calibre.conf
# when: apache_installed and not calibre_enabled
- include_tasks: enable-or-disable.yml
- name: Enable & (Re)Start 'calibre-serve' service, if calibre_enabled
systemd:
daemon_reload: yes
name: calibre-serve
enabled: yes
state: restarted
when: calibre_enabled
- name: Add 'calibre' variable values to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: calibre
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
- option: name
value: Calibre
- option: description
value: '"Calibre is an extremely popular personal library system for e-books."'
- option: calibre_src_url
value: "{{ calibre_src_url }}"
- option: calibre_dbpath
value: "{{ calibre_dbpath }}"
- option: calibre_port
value: "{{ calibre_port }}"
- option: calibre_enabled
value: "{{ calibre_enabled }}"
- name: Disable & Stop 'calibre-serve' service, if not calibre_enabled
systemd:
name: calibre-serve
enabled: no
state: stopped
when: not calibre_enabled
rescue:
#- name: Enable/Disable/Restart Apache if primary
# include_tasks: apache.yml
# when: not nginx_enabled
#
#- name: Enable/Disable/Restart NGINX if primary
# include_tasks: nginx.yml
# when: nginx_enabled
- name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
fail:
msg: ""
when: not skip_role_on_error
- name: Add 'calibre' variable values to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: calibre
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
- option: name
value: Calibre
- option: description
value: '"Calibre is an extremely popular personal library system for e-books."'
- option: calibre_src_url
value: "{{ calibre_src_url }}"
- option: calibre_dbpath
value: "{{ calibre_dbpath }}"
- option: calibre_port
value: "{{ calibre_port }}"
- option: calibre_enabled
value: "{{ calibre_enabled }}"

View file

@ -1,4 +1,4 @@
_Please Also See: http://FAQ.IIAB.IO > ["Captive Portal Administration: What tips & tricks exist?"](https://wiki.iiab.io/go/FAQ#Captive_Portal_Administration:_What_tips_&_tricks_exist%3F)_
_Please Also See: http://FAQ.IIAB.IO > ["Captive Portal Administration: What tips & tricks exist?"](http://wiki.laptop.org/go/IIAB/FAQ#Captive_Portal_Administration:_What_tips_.26_tricks_exist.3F)_
## Theory of Operation

View file

@ -1,8 +1,3 @@
- name: Record (initial) disk space used
shell: df -B1 --output=used / | tail -1
register: df1
- name: "Install packages: python3-dateutil, python3-jinja2"
package:
name:
@ -31,7 +26,7 @@
mode: "{{ item.mode }}"
with_items:
- { src: roles/captiveportal/templates/checkurls, dest: /opt/iiab/captiveportal/, mode: '0644' }
- { src: roles/captiveportal/templates/iiab-divert-to-nginx.j2, dest: /usr/sbin/iiab-divert-to-nginx, mode: '0755' }
- { src: roles/captiveportal/templates/iiab-divert-to-nginx, dest: /usr/sbin/, mode: '0755' }
- { src: roles/captiveportal/templates/iiab-make-cp-servers.py, dest: /usr/sbin/, mode: '0755' }
- name: Install /opt/iiab/captiveportal/capture-wsgi.py from template, mode '0755' (creates the server)
@ -56,17 +51,6 @@
# RECORD Captive Portal AS INSTALLED
- name: Record (final) disk space used
shell: df -B1 --output=used / | tail -1
register: df2
- name: Add 'captiveportal_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: captiveportal
option: captiveportal_disk_usage
value: "{{ df2.stdout|int - df1.stdout|int }}"
- name: "Set 'captiveportal_installed: True'"
set_fact:
captiveportal_installed: True

View file

@ -19,33 +19,27 @@
quiet: yes
- block:
- name: Install Captive Portal if 'captiveportal_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
include_tasks: install.yml
when: captiveportal_installed is undefined
- name: Install Captive Portal if 'captiveportal_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
include_tasks: install.yml
when: captiveportal_installed is undefined
- include_tasks: enable-or-disable.yml
- name: Enable or Disable Captive Portal
include_tasks: enable-or-disable.yml
- name: Add 'captiveportal' variable values to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: captiveportal
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
- option: name
value: Captive Portal
- option: description
value: '"Captive Portal tries to open the browser automatically, so users don''t have to type in URL''s like http://box.lan in support of kiosk-like situations, in multilingual and less literate communities."'
- option: captiveportal_install
value: "{{ captiveportal_install }}"
- option: captiveportal_enabled
value: "{{ captiveportal_enabled }}"
rescue:
- name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
fail:
msg: ""
when: not skip_role_on_error
- name: Add 'captiveportal' variable values to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: captiveportal
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
- option: name
value: Captive Portal
- option: description
value: '"Captive Portal tries to open the browser automatically, so users don''t have to type in URL''s like http://box.lan in support of kiosk-like situations, in multilingual and less literate communities."'
- option: captiveportal_install
value: "{{ captiveportal_install }}"
- option: captiveportal_enabled
value: "{{ captiveportal_enabled }}"

View file

@ -1,4 +1,4 @@
#!/bin/bash -x
awk '{print("address=/" $1 "/{{ lan_ip }}")}' /opt/iiab/captiveportal/checkurls > /etc/dnsmasq.d/capture
awk '{print("address=/" $1 "/172.18.96.1")}' /opt/iiab/captiveportal/checkurls > /etc/dnsmasq.d/capture
echo "#following tells windows 7 that captive portal is active" >> /etc/dnsmasq.d/capture
echo "address=/dns.msftncsi.com/131.107.255.255" >> /etc/dnsmasq.d/capture

View file

@ -2,13 +2,13 @@
[CUPS](https://en.wikipedia.org/wiki/CUPS) (also known as the "Common UNIX Printing System") is the standards-based, open source printing system for Linux and macOS.
It allows your [Internet-in-a-Box (IIAB)](https://internet-in-a-box.org) to act as a print server.
It allows your [Internet-in-a-Box (IIAB)](http://internet-in-a-box.org) to act as a print server.
This can be useful if a printer is attached to your IIAB &mdash; so student/teacher print jobs from client computers and phones can be processed &mdash; and then sent to the appropriate printer.
## Using it
Make sure your IIAB was installed with these 2 lines in [/etc/iiab/local_vars.yml](http://faq.iiab.io/#What_is_local_vars.yml_and_how_do_I_customize_it%3F) :
Make sure your IIAB was installed with these 2 lines in [/etc/iiab/local_vars.yml](http://faq.iiab.io/#What_is_local_vars.yml_and_how_do_I_customize_it.3F) :
```
cups_install: True

View file

@ -2,11 +2,6 @@
# (OR ANY MEMBER OF LINUX GROUP 'lpadmin') AS SET UP BELOW...
- name: Record (initial) disk space used
shell: df -B1 --output=used / | tail -1
register: df1
- name: Install 'cups' package
package:
name: cups
@ -54,34 +49,19 @@
blockinfile:
path: /etc/cups/cupsd.conf
insertafter: '^<Location /admin>$'
block: |2 # |n MEANS: Set the block's left edge n CHARACTERS TO THE RIGHT of *this line's* indentation -- where n is {1..9} -- instead of setting its left edge to the 1st non-blank line's indentation below. Also surround block with comment lines: "# BEGIN ANSIBLE MANAGED BLOCK", "# END ANSIBLE MANAGED BLOCK"
block: |2 # Indent with 2 spaces, and surround block with 2 comment lines: "# BEGIN ANSIBLE MANAGED BLOCK", "# END ANSIBLE MANAGED BLOCK"
AuthType Default
Require user @SYSTEM
- name: "CUPS web administration: Create Linux username 'Admin' in Linux group 'lpadmin' (shell: /usr/sbin/nologin, create_home: no)"
- name: "CUPS web administration: Create Linux username 'Admin' with password 'changeme' in Linux group 'lpadmin' (shell: /usr/sbin/nologin, create_home: no)"
user:
name: Admin
append: yes # Don't clobber other groups, that other IIAB Apps might need.
groups: lpadmin
#password: "{{ 'changeme' | password_hash('sha512') }}" # Random salt. Presumably runs 5000 rounds of SHA-512 per /etc/login.defs & /etc/pam.d/common-password -- https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_filters.html#hashing-and-encrypting-strings-and-passwords
password: "{{ 'changeme' | password_hash('sha512') }}" # Random salt. Presumably runs 5000 rounds of SHA-512 per /etc/login.defs & /etc/pam.d/common-password -- https://docs.ansible.com/ansible/latest/user_guide/playbooks_filters.html#encrypting-and-checksumming-strings-and-passwords
create_home: no
shell: /usr/sbin/nologin # Debian/Ubuntu norm -- instead of /sbin/nologin, /bin/false
# 2024-05-01: Above password-setting approach no longer works w/ Ansible 2.17 RC1 (#3727).
# Ansible STOPS with this error...
#
# "[DEPRECATION WARNING]: Encryption using the Python crypt module is deprecated. The Python crypt module is
# deprecated and will be removed from Python 3.13. Install the passlib library for continued encryption
# functionality. This feature will be removed in version 2.17. Deprecation warnings can be disabled by
# setting deprecation_warnings=False in ansible.cfg."
#
# ...so we instead use Linux's "chpasswd" command (below!)
- name: Use chpasswd to set Linux username 'Admin' password to 'changeme'
command: chpasswd
args:
stdin: Admin:changeme
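The workaround above is equivalent to the following shell usage, shown only as a hedged illustration of what chpasswd expects on stdin (one 'user:password' pair per line):

```bash
echo 'Admin:changeme' | sudo chpasswd               # same effect as the task above
# Confirm a SHA-512 crypt hash landed in /etc/shadow without printing it in full:
sudo getent shadow Admin | cut -d: -f2 | cut -c1-4  # expect something like '$6$'
```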
# - name: Add user '{{ iiab_admin_user }}' to Linux group 'lpadmin' -- for CUPS web administration (or modify default 'SystemGroup lpadmin' in /etc/cups/cups-files.conf -- in coordination with ~14 -> ~15 '@SYSTEM' lines in /etc/cups/cupsd.conf)
# #command: "gpasswd -a {{ iiab_admin_user | quote }} lpadmin"
# #command: "gpasswd -d {{ iiab_admin_user | quote }} lpadmin"
@ -96,14 +76,14 @@
name: cups
state: started
# - name: "Authorize Nearby IP Addresses: Run 'cupsctl --remote-admin --share-printers --user-cancel-any' to enable http://192.168.0.x:631 AND http://{{ lan_ip }}:631 (if cups_enabled) -- REPEATED USE OF 'cupsctl' COMMANDS CAN *DAMAGE* /etc/cups/cupsd.conf BY ADDING DUPLICATE LINES (AND WORSE!) -- SO PLEASE ALSO MANUALLY RUN 'sudo cupsctl' AND 'sudo cupsd -t' TO VERIFY /etc/cups/cupsd.conf"
# - name: "Authorize Nearby IP Addresses: Run 'cupsctl --remote-admin --share-printers --user-cancel-any' to enable http://192.168.0.x:631 AND http://172.18.96.1:631 (if cups_enabled) -- REPEATED USE OF 'cupsctl' COMMANDS CAN *DAMAGE* /etc/cups/cupsd.conf BY ADDING DUPLICATE LINES (AND WORSE!) -- SO PLEASE ALSO MANUALLY RUN 'sudo cupsctl' AND 'sudo cupsd -t' TO VERIFY /etc/cups/cupsd.conf"
# command: cupsctl --remote-admin --share-printers --user-cancel-any
# 2021-07-11: BOTH FLAGS *CANNOT* BE USED TOGETHER -- CHOOSE ONE OR THE OTHER:
# (1) '--remote-admin' AS ABOVE, OR (2) '--remote-any' AS BELOW.
# (RUN 'cupsctl' WITHOUT PARAMETERS TO CONFIRM THIS!)
- name: "Authorize All IP Addresses: Run 'cupsctl --remote-any --share-printers --user-cancel-any' to enable http://192.168.0.x:631 AND http://{{ lan_ip }}:631 AND http://10.8.0.y:631 (if cups_enabled) -- REPEATED USE OF 'cupsctl' COMMANDS CAN *DAMAGE* /etc/cups/cupsd.conf BY ADDING DUPLICATE LINES (AND WORSE!) -- SO PLEASE ALSO MANUALLY RUN 'sudo cupsctl' AND 'sudo cupsd -t' TO VERIFY /etc/cups/cupsd.conf"
- name: "Authorize All IP Addresses: Run 'cupsctl --remote-any --share-printers --user-cancel-any' to enable http://192.168.0.x:631 AND http://172.18.96.1:631 AND http://10.8.0.y:631 (if cups_enabled) -- REPEATED USE OF 'cupsctl' COMMANDS CAN *DAMAGE* /etc/cups/cupsd.conf BY ADDING DUPLICATE LINES (AND WORSE!) -- SO PLEASE ALSO MANUALLY RUN 'sudo cupsctl' AND 'sudo cupsd -t' TO VERIFY /etc/cups/cupsd.conf"
command: cupsctl --remote-any --share-printers --user-cancel-any
# 2021-07-11: In theory 'cupsctl' stanzas could be put in enable-or-disable.yml
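Since the task name warns that repeated cupsctl runs can mangle cupsd.conf, here is the manual verification it recommends, as a runnable sketch:

```bash
sudo cupsctl      # dumps current server settings; scan for duplicate/conflicting directives
sudo cupsd -t     # parses /etc/cups/cupsd.conf and reports any syntax problems
```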
@ -116,7 +96,7 @@
# command: cupsctl --no-remote-admin --no-remote-any --no-share-printers --no-user-cancel-any --no-debug-logging
# when: not cups_enabled
# - name: "2021-07-14: EXPERIMENTALLY ADD DIRECTIVES TO /etc/cups/cupsd.conf followed by 'systemctl restart cups'. As should no longer be nec thanks to NEW cups/templates/cups.conf for /etc/nginx/conf.d/cups.conf (followed by 'systemctl restart nginx'). Which FIXED URL'S LIKE: http://box/print, http://box.lan/print, http://192.168.0.x/print, http://{{ lan_ip }}/print and http://10.8.0.x/print (WITH OR WITHOUT THE TRAILING SLASH!) RECAP: (1) So be it that these 2 URL'S STILL DON'T WORK: http://box:631, http://box.lan:631 (due to CUPS' internal web server's overly stringent hostname checks, i.e. '400 Bad Request' and 'Request from \"localhost\" using invalid Host: field \"box[.lan]:631\".' in /var/log/cups/error_log) -- (2) While these 2 URL'S STILL DO WORK: http://localhost:631, http://127.0.0.1:631 -- (3) Whereas these 3 URL'S MAY WORK, DEPENDING ON 'cupsctl' COMMAND(S) ABOVE: http://192.168.0.x:631, http://{{ lan_ip }}:631, http://10.8.0.x:631"
# - name: "2021-07-14: EXPERIMENTALLY ADD DIRECTIVES TO /etc/cups/cupsd.conf followed by 'systemctl restart cups'. As should no longer be nec thanks to NEW cups/templates/cups.conf for /etc/nginx/conf.d/cups.conf (followed by 'systemctl restart nginx'). Which FIXED URL'S LIKE: http://box/print, http://box.lan/print, http://192.168.0.x/print, http://172.18.96.1/print and http://10.8.0.x/print (WITH OR WITHOUT THE TRAILING SLASH!) RECAP: (1) So be it that these 2 URL'S STILL DON'T WORK: http://box:631, http://box.lan:631 (due to CUPS' internal web server's overly stringent hostname checks, i.e. '400 Bad Request' and 'Request from \"localhost\" using invalid Host: field \"box[.lan]:631\".' in /var/log/cups/error_log) -- (2) While these 2 URL'S STILL DO WORK: http://localhost:631, http://127.0.0.1:631 -- (3) Whereas these 3 URL'S MAY WORK, DEPENDING ON 'cupsctl' COMMAND(S) ABOVE: http://192.168.0.x:631, http://172.18.96.1:631, http://10.8.0.x:631"
# lineinfile:
# path: /etc/cups/cupsd.conf
# line: "{{ item }}"
@ -125,7 +105,7 @@
# - "HostNameLookups On" # More False Leads: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=530027
# - "ServerAlias *"
# - "#ServerName {{ iiab_hostname }}.{{ iiab_domain }}" # box.lan
# - "#Listen {{ lan_ip }}:631" # e.g. 10.10.10.10
# - "#Listen {{ lan_ip }}:631" # 172.18.96.1
# - "#Listen 127.0.0.1:631"
# - "#Listen 0.0.0.0:631"
# - "#Listen *:631"
@ -144,17 +124,6 @@
# RECORD CUPS AS INSTALLED
- name: Record (final) disk space used
shell: df -B1 --output=used / | tail -1
register: df2
- name: Add 'cups_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: cups
option: cups_disk_usage
value: "{{ df2.stdout|int - df1.stdout|int }}"
- name: "Set 'cups_installed: True'"
set_fact:
cups_installed: True

View file

@ -23,33 +23,26 @@
quiet: yes
- block:
- name: Install CUPS if 'cups_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
include_tasks: install.yml
when: cups_installed is undefined
- name: Install CUPS if 'cups_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
include_tasks: install.yml
when: cups_installed is undefined
- include_tasks: enable-or-disable.yml
- include_tasks: enable-or-disable.yml
- name: Add 'cups' variable values to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: cups
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
- option: name
value: CUPS
- option: description
value: '"CUPS (Common UNIX Printing System) is a modular printing system that allows a computer to act as a print server. A computer running CUPS is a host that can accept print jobs from client computers, process them, and send them to the appropriate printer."'
- option: cups_install
value: "{{ cups_install }}"
- option: cups_enabled
value: "{{ cups_enabled }}"
rescue:
- name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
fail:
msg: ""
when: not skip_role_on_error
- name: Add 'cups' variable values to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: cups
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
- option: name
value: CUPS
- option: description
value: '"CUPS (Common UNIX Printing System) is a modular printing system that allows a computer to act as a print server. A computer running CUPS is a host that can accept print jobs from client computers, process them, and send them to the appropriate printer."'
- option: cups_install
value: "{{ cups_install }}"
- option: cups_enabled
value: "{{ cups_enabled }}"

View file

@ -21,7 +21,7 @@ location ~ ^/print(|/.*)$ { # '~' -> '~*' for case-insensitive regex
return 301 http://localhost:631;
}
return 301 http://$host:631; # For 192.168.0.x, 10.10.10.10, 172.18.96.1, 10.8.0.y ETC
return 301 http://$host:631; # For 192.168.0.x, 172.18.96.1, 10.8.0.y ETC
}

View file

@ -1,48 +1,22 @@
# 2023-02-25: MONITOR FIRMWARE UPDATES in 3 places especially...
#
# 1. apt changelog firmware-brcm80211
# https://github.com/RPi-Distro/firmware-nonfree -> debian/config/brcm80211 (brcm, cypress)
# https://archive.raspberrypi.org/debian/dists/bullseye/main/binary-arm64/Packages (1.1MB text file, look inside for summary of latest firmware-brcm80211)
# https://archive.raspberrypi.org/debian/pool/main/f/firmware-nonfree/ -> firmware-brcm80211_* e.g.:
# https://archive.raspberrypi.org/debian/pool/main/f/firmware-nonfree/firmware-brcm80211_20190114-1+rpt11_all.deb from 2021-01-25
# https://archive.raspberrypi.org/debian/pool/main/f/firmware-nonfree/firmware-brcm80211_20210315-3+rpt4_all.deb from 2021-12-06
# https://archive.raspberrypi.org/debian/pool/main/f/firmware-nonfree/firmware-brcm80211_20221012-1~bpo11+1+rpt1_all.deb from 2022-11-17
# 2. apt changelog linux-firmware-raspi
# https://packages.ubuntu.com/search?keywords=linux-firmware-raspi
# 3. https://github.com/moodlebox/moodlebox/blob/main/roles/accesspoint/tasks/main.yml
#- name: Back up 4 OS-provided WiFi firmware files (incl symlink contents) to /lib/firmware/cypress/*.orig
- name: Back up 4 OS-provided WiFi firmware files (replicate any symlinks) to /lib/firmware/cypress/*.orig -- /usr/bin/iiab-check-firmware will later do similar (e.g. as firmware install completes) -- moving 2-or-4 of these to <ORIGINAL FILENAME>.YYYY-MM-DD-HH:MM:SS ("doubly timestamping" to preserve BOTH last-modif & moving date)
# copy:
# src: /lib/firmware/cypress/{{ item }}
# dest: /lib/firmware/cypress/{{ item }}.orig
# #local_follow: False # FAILS TO PRESERVE LINKS (ansible/ansible#74777) e.g. /lib/firmware/cypress/cyfmac43455-sdio.bin -> /etc/alternatives/cyfmac43455-sdio.bin -> ...
# 2023-05-01 CLARIF OF BELOW:
# 1) Even if 'mv' fails, no matter it'll continue to 'cp' below
# 2) 'cp -P' == 'cp --no-dereference' sufficient to replicate these symlinks and files ('cp -d' & 'cp -a' are incrementally stronger, and so probably can't hurt)
shell: |
mv /lib/firmware/cypress/{{ item }}.orig /lib/firmware/cypress/{{ item }}.orig.$(date +%F-%T)
cp -a /lib/firmware/cypress/{{ item }} /lib/firmware/cypress/{{ item }}.orig
- name: Back up original e.g. OS-provided firmware (for RPi internal WiFi)
copy:
src: "/lib/firmware/brcm/{{ item }}"
dest: "/lib/firmware/brcm/{{ item }}.orig"
with_items:
- cyfmac43430-sdio.bin
- cyfmac43430-sdio.clm_blob
- cyfmac43455-sdio.bin
- cyfmac43455-sdio.clm_blob
#ignore_errors: yes # 2023-02-25: Let's INTENTIONALLY surface any errors, e.g. if any future RasPiOS or Ubuntu-on-Rpi lack some of the above 4 files/links?
- brcmfmac43430-sdio.bin
- brcmfmac43455-sdio.bin
- brcmfmac43455-sdio.clm_blob
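Under the current cypress-based scheme above, each firmware name accumulates a '.orig' backup plus timestamped copies of any earlier '.orig'. A hedged example for one of the four files (timestamp invented):

```bash
ls -1 /lib/firmware/cypress/cyfmac43455-sdio.bin*
# cyfmac43455-sdio.bin                           <- live file or symlink
# cyfmac43455-sdio.bin.orig                      <- backup from the latest run
# cyfmac43455-sdio.bin.orig.2023-05-01-14:03:27  <- older backup, "doubly timestamped"
```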
- name: Download higher-capacity firmwares (for RPi internal WiFi, per https://github.com/iiab/iiab/issues/823#issuecomment-662285202 and https://github.com/iiab/iiab/issues/2853)
- name: Download high-capacity older firmware (for RPi internal WiFi, per https://github.com/iiab/iiab/issues/823#issuecomment-662285202)
get_url:
url: "{{ iiab_download_url }}/{{ item }}"
dest: /lib/firmware/cypress/
url: "{{ item.url }}"
dest: "{{ item.dest }}"
timeout: "{{ download_timeout }}"
with_items:
- brcmfmac43455-sdio.bin_2021-11-30_minimal # 19 -- SAME AS RASPIOS & UBUNTU'S https://github.com/RPi-Distro/firmware-nonfree/blob/feeeda21e930c2e182484e8e1269b61cca2a8451/debian/config/brcm80211/cypress/cyfmac43455-sdio-minimal.bin
- brcmfmac43455-sdio.bin_2021-10-05_3rd-trial-minimal # 24 -- from https://github.com/iiab/iiab/issues/2853#issuecomment-934293015
- brcmfmac43455-sdio.clm_blob_2021-11-17_rpi # Works w/ both above -- SAME AS RASPIOS & UBUNTU'S https://github.com/RPi-Distro/firmware-nonfree/blob/dc406650e840705957f8403efeacf71d2d7543b3/debian/config/brcm80211/cypress/cyfmac43455-sdio.clm_blob
- brcmfmac43455-sdio.bin_2015-03-01_7.45.18.0_ub19.10.1 # 32 -- from https://github.com/iiab/iiab/issues/823#issuecomment-662285202
- brcmfmac43455-sdio.clm_blob_2018-02-26_rpi
- brcmfmac43430-sdio.bin_2018-09-11_7.45.98.65 # 30 -- from https://github.com/iiab/iiab/issues/823#issuecomment-662285202
- brcmfmac43430-sdio.clm_blob_2018-09-11_7.45.98.65
- { url: 'http://d.iiab.io/packages/brcmfmac43430-sdio.bin_2018-09-11_7.45.98.65', dest: '/lib/firmware/brcm/brcmfmac43430-sdio.bin.iiab' }
- { url: 'http://d.iiab.io/packages/brcmfmac43430-sdio.clm_blob_2018-09-11_7.45.98.65', dest: '/lib/firmware/brcm/brcmfmac43430-sdio.clm_blob.iiab' }
- { url: 'http://d.iiab.io/packages/brcmfmac43455-sdio.bin_2015-03-01_7.45.18.0_ub19.10.1', dest: '/lib/firmware/brcm/brcmfmac43455-sdio.bin.iiab' }
- { url: 'http://d.iiab.io/packages/brcmfmac43455-sdio.clm_blob_2018-02-26_rpi', dest: '/lib/firmware/brcm/brcmfmac43455-sdio.clm_blob.iiab' }
# RECORD firmware AS DOWNLOADED

View file

@ -2,75 +2,6 @@
include_tasks: download.yml
when: firmware_downloaded is undefined # SEE ALSO firmware_installed below
# Set 2 symlinks for RPi 3 B+ and 4 (43455)
# COMPARE: update-alternatives --display cyfmac43455-sdio.bin
# https://github.com/moodlebox/moodlebox/blob/main/roles/accesspoint/tasks/main.yml#L3-L6
- name: Populate rpi3bplus_rpi4_wifi_firmwares dictionary (lookup table for operator-chosen .bin and .clm_blob files in /lib/firmware/cypress)
set_fact:
rpi3bplus_rpi4_wifi_firmwares: # Dictionary keys (left side) are always strings, e.g. "19"
os:
- cyfmac43455-sdio.bin.orig # 2023-02-25: 7.45.241 from 2021-11-01 on Ubuntu 22.04.2 too (cyfmac43455-sdio-standard.bin)
- cyfmac43455-sdio.clm_blob.orig # On Ubuntu 22.04.2 too (brcmfmac43455-sdio.clm_blob_2021-11-17_rpi)
ub:
- cyfmac43455-sdio.bin.distrib # 2023-02-25: STALE 7.45.234 from 2021-04-15; on Ubuntu 22.04.2 NOT RasPiOS
- cyfmac43455-sdio.clm_blob.distrib # 4.7K instead of 2.7K w/ above "os"
19:
- brcmfmac43455-sdio.bin_2021-11-30_minimal # On Ubuntu 22.04.2 too (cyfmac43455-sdio-minimal.bin)
- brcmfmac43455-sdio.clm_blob_2021-11-17_rpi # On Ubuntu 22.04.2 too (cyfmac43455-sdio.clm_blob)
24:
- brcmfmac43455-sdio.bin_2021-10-05_3rd-trial-minimal
- brcmfmac43455-sdio.clm_blob_2021-11-17_rpi # On Ubuntu 22.04.2 too (cyfmac43455-sdio.clm_blob)
32:
- brcmfmac43455-sdio.bin_2015-03-01_7.45.18.0_ub19.10.1
- brcmfmac43455-sdio.clm_blob_2018-02-26_rpi # 14K instead of 2.7K w/ above "os"
- name: Symlink /lib/firmware/cypress/cyfmac43455-sdio.bin.iiab -> {{ rpi3bplus_rpi4_wifi_firmwares[rpi3bplus_rpi4_wifi_firmware][0] }} (as rpi3bplus_rpi4_wifi_firmware is "{{ rpi3bplus_rpi4_wifi_firmware }}")
file:
src: "{{ rpi3bplus_rpi4_wifi_firmwares[rpi3bplus_rpi4_wifi_firmware][0] }}"
path: /lib/firmware/cypress/cyfmac43455-sdio.bin.iiab
state: link
force: yes
- name: Symlink /lib/firmware/cypress/cyfmac43455-sdio.clm_blob.iiab -> {{ rpi3bplus_rpi4_wifi_firmwares[rpi3bplus_rpi4_wifi_firmware][1] }} (as rpi3bplus_rpi4_wifi_firmware is "{{ rpi3bplus_rpi4_wifi_firmware }}")
file:
src: "{{ rpi3bplus_rpi4_wifi_firmwares[rpi3bplus_rpi4_wifi_firmware][1] }}"
path: /lib/firmware/cypress/cyfmac43455-sdio.clm_blob.iiab
state: link
force: yes
# Set 2 symlinks for RPi Zero W and 3 (43430)
- name: Populate rpizerow_rpi3_wifi_firmwares dictionary (lookup table for operator-chosen .bin and .clm_blob files in /lib/firmware/cypress)
set_fact:
rpizerow_rpi3_wifi_firmwares:
os:
- cyfmac43430-sdio.bin.orig # 2023-02-25: 7.45.98 from 2021-07-19 on Ubuntu 22.04.2 too
- cyfmac43430-sdio.clm_blob.orig # On Ubuntu 22.04.2 too
ub:
- cyfmac43430-sdio.bin.distrib # 2023-02-25: STALE 7.45.98.118 from 2021-03-30; on Ubuntu 22.04.2 NOT RasPiOS
- cyfmac43430-sdio.clm_blob.distrib # Identical to above 4.7K cyfmac43430-sdio.clm_blob
30:
- brcmfmac43430-sdio.bin_2018-09-11_7.45.98.65
- brcmfmac43430-sdio.clm_blob_2018-09-11_7.45.98.65 # 14K instead of 4.7K w/ above "os" & "ub"
- name: Symlink /lib/firmware/cypress/cyfmac43430-sdio.bin.iiab -> {{ rpizerow_rpi3_wifi_firmwares[rpizerow_rpi3_wifi_firmware][0] }} (as rpizerow_rpi3_wifi_firmware is "{{ rpizerow_rpi3_wifi_firmware }}")
file:
src: "{{ rpizerow_rpi3_wifi_firmwares[rpizerow_rpi3_wifi_firmware][0] }}"
path: /lib/firmware/cypress/cyfmac43430-sdio.bin.iiab
state: link
force: yes
- name: Symlink /lib/firmware/cypress/cyfmac43430-sdio.clm_blob.iiab -> {{ rpizerow_rpi3_wifi_firmwares[rpizerow_rpi3_wifi_firmware][1] }} (as rpizerow_rpi3_wifi_firmware is "{{ rpizerow_rpi3_wifi_firmware }}")
file:
src: "{{ rpizerow_rpi3_wifi_firmwares[rpizerow_rpi3_wifi_firmware][1] }}"
path: /lib/firmware/cypress/cyfmac43430-sdio.clm_blob.iiab
state: link
force: yes
- name: 'Install from template: /usr/bin/iiab-check-firmware, /etc/systemd/system/iiab-check-firmware.service & /etc/profile.d/iiab-firmware-warn.sh'
template:
src: "{{ item.src }}"
@ -81,7 +12,7 @@
- { src: 'iiab-check-firmware.service', dest: '/etc/systemd/system/', mode: '0644' }
- { src: 'iiab-firmware-warn.sh', dest: '/etc/profile.d/', mode: '0644' }
- name: Enable & (Re)Start iiab-check-firmware.service (also runs on each boot) -- finalizing 2-or-4 symlink chains e.g. /lib/firmware/cypress/X.{bin|blob} -> /lib/firmware/cypress/X.{bin|blob}.iiab -> CHOSEN-FIRMWARE-FILE-OR-LINK
- name: Enable & (Re)Start iiab-check-firmware.service (also runs on each boot)
systemd:
name: iiab-check-firmware.service
daemon_reload: yes

View file

@ -1,30 +1,14 @@
# Plz set 'rpi3bplus_rpi4_wifi_firmware' and 'rpizerow_rpi3_wifi_firmware' in
# /etc/iiab/local_vars.yml to increase (or modify) the number of student WiFi
# client devices that can access your Raspberry Pi's internal WiFi hotspot.
# If IIAB's already installed, you should then run 'cd /opt/iiab/iiab' and
# then 'sudo ./runrole firmware' (DO RUN iiab-check-firmware FOR MORE TIPS!)
# 2018-2023 Background & Progress:
#
# Raspberry Pi 3 used to support 32 WiFi connections but is now limited to [4-10]
# https://github.com/iiab/iiab/issues/823#issuecomment-662285202
# Opinions about Pi 4B/3B+ WiFi features [practical AP firmware for schools!]
# https://github.com/iiab/iiab/issues/2853#issuecomment-957836892
# RPi WiFi hotspot firmware reliability fix, incl new/better choices for 3B+ & 4
# https://github.com/iiab/iiab/pull/3103
# Set WiFi firmware in /lib/firmware/cypress due to RasPiOS & Ubuntu changes
# https://github.com/iiab/iiab/pull/3482
# RISK: What USB 3.0 stick/drive patterns degrade a Raspberry Pi's 2.4GHz WiFi?
# https://github.com/iiab/iiab/issues/2638
# ► SEE "MONITOR FIRMWARE UPDATES in 3 places especially" in tasks/download.yml ◄
# Please set 'wifi_hotspot_capacity_rpi_fix: True' in /etc/iiab/local_vars.yml
# to restore support for 30-32 WiFi client devices on most Raspberry Pis that
# have internal WiFi. This installs firmware 7.45.98.65 for Zero W and RPi 3
# and firmware 7.45.18.0 for RPi 3 B+ and RPi 4. Capacity testing writeup:
# https://github.com/iiab/iiab/issues/823#issuecomment-662285202
- name: Install firmware (for RPi internal WiFi)
include_tasks: install.yml
when: firmware_installed is undefined
#when: firmware_installed is undefined
# Two variables are placed in /etc/iiab/iiab_state.yml:
# Two variable are placed in /etc/iiab/iiab_state.yml:
#
# - firmware_downloaded (set in download.yml) is used in install.yml
#

View file

@ -1,71 +1,66 @@
#!/bin/bash
# The 1st time /usr/bin/iiab-check-firmware runs (at the end of
# firmware/tasks/install.yml) 2-4 lynchpin top links are put in place,
# finalizing symlink chains like:
#
# /lib/firmware/cypress/X.{bin|blob} ->
# /lib/firmware/cypress/X.{bin|blob}.iiab ->
# CHOSEN-FIRMWARE-FILE-OR-LINK
#
# Also backing up top-of-chain originals (file or link!) by moving these to:
#
# /lib/firmware/cypress/<ORIGINAL FILENAME>.YYYY-MM-DD-HH:MM:SS
#
# NOTE these are "doubly timestamped" to preserve BOTH last-modif & moving date.
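A hedged illustration of what a finished chain can look like for one of the 43455 files (the final target depends on rpi3bplus_rpi4_wifi_firmware; the "19" choice from install.yml is shown):

```bash
readlink /lib/firmware/cypress/cyfmac43455-sdio.bin
# -> cyfmac43455-sdio.bin.iiab
readlink /lib/firmware/cypress/cyfmac43455-sdio.bin.iiab
# -> brcmfmac43455-sdio.bin_2021-11-30_minimal    (the "19" entry set up by install.yml)
```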
WARN=0
DATE=$(date +%F-%T)
# 2023-02-25: bash scripts using default_vars.yml &/or local_vars.yml
# 2021-08-18: bash scripts using default_vars.yml &/or local_vars.yml
# https://github.com/iiab/iiab-factory/blob/master/iiab
# https://github.com/iiab/iiab/blob/master/roles/firmware/templates/iiab-check-firmware#L10-14
# https://github.com/iiab/iiab/blob/master/roles/firmware/templates/iiab-check-firmware#L13
# https://github.com/iiab/iiab/blob/master/roles/network/templates/gateway/iiab-gen-iptables#L48-L52
# https://github.com/iiab/maps/blob/master/osm-source/pages/viewer/scripts/iiab-install-map-region#L23-L39
# https://github.com/iiab/iiab/blob/master/roles/0-DEPRECATED-ROLES/openvpn/templates/iiab-support READS AND WRITES, INCL NON-BOOLEAN
# https://github.com/iiab/maps/blob/master/osm-source/pages/viewer/scripts/iiab-install-map-region#L25-L34
# https://github.com/iiab/iiab/blob/master/roles/openvpn/templates/iiab-support READS AND WRITES, INCL NON-BOOLEAN
iiab_var_value() {
v1=$(grep "^$1:\s" /opt/iiab/iiab/vars/default_vars.yml | tail -1 | sed "s/^$1:\s\+//; s/#.*//; s/\s*$//; s/^\(['\"]\)\(.*\)\1$/\2/")
v2=$(grep "^$1:\s" /etc/iiab/local_vars.yml | tail -1 | sed "s/^$1:\s\+//; s/#.*//; s/\s*$//; s/^\(['\"]\)\(.*\)\1$/\2/")
[ "$v2" != "" ] && echo $v2 || echo $v1 # [ "$v2" ] ALSO WORKS
}
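For example, with the function above sourced, a hedged call might look like this (values shown are only typical; local_vars.yml wins whenever both files define the variable):

```bash
iiab_var_value rpi3bplus_rpi4_wifi_firmware   # prints e.g. "19"
iiab_var_value iiab_hostname                  # prints e.g. "box"
```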
if grep -q '^wifi_hotspot_capacity_rpi_fix:\s\+[fF]alse\b' /etc/iiab/local_vars.yml ; then
echo "'wifi_hotspot_capacity_rpi_fix: False' found in /etc/iiab/local_vars.yml"
echo "...so WiFi firmware will NOT be checked or replaced."
link_fw() {
if [[ $(readlink /lib/firmware/cypress/$1) != $1.iiab ]] ; then
echo
mv /lib/firmware/cypress/$1 /lib/firmware/cypress/$1.$(date +%F-%T)
ln -s $1.iiab /lib/firmware/cypress/$1
echo -e "\e[1mSymlinked /lib/firmware/cypress/$1 -> $1.iiab\e[0m"
touch /tmp/.fw_modified
fi
}
if [[ $(iiab_var_value rpi3bplus_rpi4_wifi_firmware) != "os" ]] ; then
link_fw cyfmac43455-sdio.bin
link_fw cyfmac43455-sdio.clm_blob
exit 0
fi
if [[ $(iiab_var_value rpizerow_rpi3_wifi_firmware) != "os" ]] ; then
link_fw cyfmac43430-sdio.bin
link_fw cyfmac43430-sdio.clm_blob
echo -e "'wifi_hotspot_capacity_rpi_fix: True' presumed..."
echo -e "...in /etc/iiab/local_vars.yml (or /opt/iiab/iiab/vars/default_vars.yml ?)\n"
if ! $(diff -q /lib/firmware/brcm/brcmfmac43455-sdio.bin.iiab /lib/firmware/brcm/brcmfmac43455-sdio.bin); then
mv /lib/firmware/brcm/brcmfmac43455-sdio.bin /lib/firmware/brcm/brcmfmac43455-sdio.bin.$DATE
cp /lib/firmware/brcm/brcmfmac43455-sdio.bin.iiab /lib/firmware/brcm/brcmfmac43455-sdio.bin
echo "Replacing /lib/firmware/brcm/brcmfmac43455-sdio.bin"
WARN=1
fi
if [ -f /tmp/.fw_modified ]; then
bash /etc/profile.d/iiab-firmware-warn.sh
if ! $(diff -q /lib/firmware/brcm/brcmfmac43455-sdio.clm_blob.iiab /lib/firmware/brcm/brcmfmac43455-sdio.clm_blob); then
mv /lib/firmware/brcm/brcmfmac43455-sdio.clm_blob /lib/firmware/brcm/brcmfmac43455-sdio.clm_blob.$DATE
cp /lib/firmware/brcm/brcmfmac43455-sdio.clm_blob.iiab /lib/firmware/brcm/brcmfmac43455-sdio.clm_blob
echo "Replacing /lib/firmware/brcm/brcmfmac43455-sdio.clm_blob"
WARN=1
fi
if ! $(diff -q /lib/firmware/brcm/brcmfmac43430-sdio.bin.iiab /lib/firmware/brcm/brcmfmac43430-sdio.bin); then
mv /lib/firmware/brcm/brcmfmac43430-sdio.bin /lib/firmware/brcm/brcmfmac43430-sdio.bin.$DATE
cp /lib/firmware/brcm/brcmfmac43430-sdio.bin.iiab /lib/firmware/brcm/brcmfmac43430-sdio.bin
cp /lib/firmware/brcm/brcmfmac43430-sdio.clm_blob.iiab /lib/firmware/brcm/brcmfmac43430-sdio.clm_blob
echo "Replacing /lib/firmware/brcm/brcmfmac43430-sdio.bin"
WARN=1
fi
if ! $(diff -q /lib/firmware/brcm/brcmfmac43430-sdio.clm_blob.iiab /lib/firmware/brcm/brcmfmac43430-sdio.clm_blob); then
mv /lib/firmware/brcm/brcmfmac43430-sdio.clm_blob /lib/firmware/brcm/brcmfmac43430-sdio.clm_blob.$DATE
cp /lib/firmware/brcm/brcmfmac43430-sdio.clm_blob.iiab /lib/firmware/brcm/brcmfmac43430-sdio.clm_blob
echo "Replacing /lib/firmware/brcm/brcmfmac43430-sdio.clm_blob"
WARN=1
fi
if [ "$WARN" = "1" ]; then
echo -e "\n \e[41;1mWiFi Firmware has been replaced, per iiab/iiab#823.\e[0m"
echo -e " \e[41;1mReboot is required to activate.\e[0m\n"
touch /.fw_replaced
#echo "rebooting..."
#reboot
else
echo -e "\n\e[1mWiFi Firmware links in /lib/firmware/cypress appear \e[92mCORRECT\e[0m\e[1m, per iiab/iiab#3482\e[0m"
echo
echo -e "\e[100;1m(No reboot appears necessary!)\e[0m"
echo
echo -e "NOTE: If you change rpi3bplus_rpi4_wifi_firmware or rpizerow_rpi3_wifi_firmware"
echo -e "settings in /etc/iiab/local_vars.yml, please then run:"
echo
echo -e " cd /opt/iiab/iiab"
echo -e " sudo iiab-hotspot-off # NO LONGER NEC? eg to restore 'wifi_up_down: True'"
echo -e " sudo ./runrole --reinstall firmware"
echo -e " sudo iiab-network # SOMETIMES NECESSARY"
echo -e " sudo iiab-hotspot-on # NO LONGER NEC? eg to restore 'wifi_up_down: True'"
echo -e " sudo reboot\n"
#echo
#echo -e "Disconnect your power cord before rebooting, for better WiFi firmware results.\n"
echo -e " WiFi Firmware check \e[42;1mPASSED\e[0m, per iiab/iiab#823." # Or \e[92m for green on black
echo -e " (Assuming you've rebooted since it was replaced!)\n"
if [ -f /.fw_replaced ]; then
rm /.fw_replaced
fi
fi
# \e[1m = bright white \e[100;1m = bright white, on gray \n\e[41;1m = bright white, on red
# \e[42;1m = bright white, on bright green \e[92m = green on black
# exit 0

View file

@ -1,9 +1,12 @@
#!/bin/bash
if [ -f /tmp/.fw_modified ]; then
echo -e "\n\e[41;1mWiFi Firmware link(s) modified, per iiab/iiab#3482: PLEASE REBOOT!\e[0m"
echo
echo -e "If you want this warning to stop, reboot to remove /tmp/.fw_modified\n"
if [ -f /.fw_replaced ]; then
echo -e "\n \e[41;1mWiFi Firmware has been replaced, per iiab/iiab#823.\e[0m"
if grep -q '^wifi_hotspot_capacity_rpi_fix:\s\+[fF]alse\b' /etc/iiab/local_vars.yml ; then
echo -e " \e[100;1mIf you want these warnings to stop, run:\e[0m"
echo
echo -e " \e[100;1msudo rm /.fw_replaced\e[0m\n"
else
echo -e " \e[41;1mReboot is required to activate.\e[0m\n"
fi
fi
# \e[1m = bright white \e[100;1m = bright white, on gray \n\e[41;1m = bright white, on red

View file

@ -9,7 +9,7 @@
# Info needed to install Gitea:
gitea_version: "1.22" # 2022-01-30: Grabs latest from this MAJOR/MINOR release branch. Rather than exhaustively hard-coding point releases (e.g. 1.14.5) every few weeks. Quotes nec if trailing zero.
gitea_version: 1.15 # 2021-03-07: Grabs latest point release from this branch. Rather than hardcoding (e.g. 1.14.5) every few weeks.
iset_suffixes:
i386: 386
x86_64: amd64
@ -17,9 +17,9 @@ iset_suffixes:
armv6l: arm-6
armv7l: arm-6 # "arm-7" used to work, but no longer since 2019-04-20's Gitea 1.8.0: https://github.com/iiab/iiab/issues/1673 https://github.com/iiab/iiab/pull/1713 -- 2019-07-31: ARM7 support will return at some point, according to: https://github.com/go-gitea/gitea/pull/7037#issuecomment-516735216 (what about ARM8 support for RPi 4?)
gitea_iset_suffix: "{{ iset_suffixes[ansible_machine] | default('unknown') }}" # A bit safer than ansible_architecture (see kiwix/defaults/main.yml)
gitea_iset_suffix: "{{ iset_suffixes[ansible_architecture] | default('unknown') }}"
gitea_download_url: "https://dl.gitea.com/gitea/{{ gitea_version }}/gitea-{{ gitea_version }}-linux-{{ gitea_iset_suffix }}"
gitea_download_url: "https://dl.gitea.io/gitea/{{ gitea_version }}/gitea-{{ gitea_version }}-linux-{{ gitea_iset_suffix }}"
gitea_integrity_url: "{{ gitea_download_url }}.asc"
gitea_root_directory: "{{ content_base }}/gitea" # /library/gitea
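Putting those variables together, a hedged expansion for an x86-64 machine (ansible_machine == x86_64, so suffix 'amd64') with gitea_version "1.22":

```bash
#   gitea_download_url  = https://dl.gitea.com/gitea/1.22/gitea-1.22-linux-amd64
#   gitea_integrity_url = https://dl.gitea.com/gitea/1.22/gitea-1.22-linux-amd64.asc
# Illustrative reachability check only -- the role's get_url task does the real download:
curl -sSLI https://dl.gitea.com/gitea/1.22/gitea-1.22-linux-amd64 -o /dev/null -w '%{http_code}\n'   # expect 200
```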

View file

@ -1,8 +1,3 @@
- name: Record (initial) disk space used
shell: df -B1 --output=used / | tail -1
register: df1
# 1. Prepare to install Gitea: create user and directory structure
- name: Shut down existing Gitea instance (if we're reinstalling)
@ -48,10 +43,10 @@
msg: "Could not find a binary for the CPU architecture \"{{ ansible_architecture }}\""
when: gitea_iset_suffix == "unknown"
- name: Download Gitea binary {{ gitea_download_url }} to {{ gitea_install_path }} (0775, ~134 MB, SLOW DOWNLOAD CAN TAKE ~15 MIN)
- name: Download Gitea binary {{ gitea_download_url }} to {{ gitea_install_path }} (0775, ~104 MB)
get_url:
url: "{{ gitea_download_url }}"
dest: "{{ gitea_install_path }}" # e.g. /library/gitea/bin/gitea-1.21
dest: "{{ gitea_install_path }}" # e.g. /library/gitea/bin/gitea-1.14
mode: 0775
timeout: "{{ download_timeout }}"
@ -61,16 +56,16 @@
dest: "{{ gitea_checksum_path }}"
timeout: "{{ download_timeout }}"
- name: Verify Gitea binary with GPG signature ("BAD signature" FALSE ALARMS continue as of 2023-07-16, despite their claims at https://docs.gitea.com/installation/install-from-binary#verify-gpg-signature)
- name: Verify Gitea binary with GPG signature
shell: |
gpg --keyserver keys.openpgp.org --recv {{ gitea_gpg_key }}
gpg --keyserver pgp.mit.edu --recv {{ gitea_gpg_key }}
gpg --verify {{ gitea_checksum_path }} {{ gitea_install_path }}
ignore_errors: yes
- name: Symlink {{ gitea_link_path }} -> {{ gitea_install_path }}
file:
src: "{{ gitea_install_path }}"
path: "{{ gitea_link_path }}" # /library/gitea/gitea
path: "{{ gitea_link_path }}"
owner: gitea
group: gitea
state: link
@ -110,17 +105,6 @@
# 5. RECORD Gitea AS INSTALLED
- name: Record (final) disk space used
shell: df -B1 --output=used / | tail -1
register: df2
- name: Add 'gitea_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: gitea
option: gitea_disk_usage
value: "{{ df2.stdout|int - df1.stdout|int }}"
- name: "Set 'gitea_installed: True'"
set_fact:
gitea_installed: True

View file

@ -19,37 +19,46 @@
quiet: yes
- block:
- name: Install Gitea {{ gitea_version }} if 'gitea_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
include_tasks: install.yml
when: gitea_installed is undefined
- name: Install Gitea {{ gitea_version }} if 'gitea_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
include_tasks: install.yml
when: gitea_installed is undefined
- include_tasks: enable-or-disable.yml
- name: Enable & Restart 'gitea' systemd service, if gitea_enabled
systemd:
name: gitea
daemon_reload: yes
enabled: yes
state: restarted
when: gitea_enabled
- name: Add 'gitea' variable values to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: gitea
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
- option: name
value: Gitea
- option: description
value: '"Gitea is like GitHub for more offline communities: Git with a cup of tea"'
- option: gitea_install
value: "{{ gitea_install }}"
- option: gitea_enabled
value: "{{ gitea_enabled }}"
- option: gitea_run_directory
value: "{{ gitea_run_directory }}"
- option: gitea_url
value: "{{ gitea_url }}"
- name: Disable & Stop 'gitea' systemd service, if not gitea_enabled
systemd:
name: gitea
enabled: no
state: stopped
when: not gitea_enabled
rescue:
- name: Enable/Disable/Restart NGINX
include_tasks: nginx.yml
- name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
fail:
msg: ""
when: not skip_role_on_error
- name: Add 'gitea' to list of services at {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab_state.yml
section: gitea
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
- option: name
value: Gitea
- option: description
value: '"Gitea is like GitHub for more offline communities: Git with a cup of tea"'
- option: gitea_install
value: "{{ gitea_install }}"
- option: gitea_enabled
value: "{{ gitea_enabled }}"
- option: gitea_run_directory
value: "{{ gitea_run_directory }}"
- option: gitea_url
value: "{{ gitea_url }}"

View file

@ -1,19 +1,3 @@
- name: Enable & Restart 'gitea' systemd service, if gitea_enabled
systemd:
name: gitea
daemon_reload: yes
enabled: yes
state: restarted
when: gitea_enabled
- name: Disable & Stop 'gitea' systemd service, if not gitea_enabled
systemd:
name: gitea
enabled: no
state: stopped
when: not gitea_enabled
- name: Enable http://box{{ gitea_url }} via NGINX, by installing {{ nginx_conf_dir }}/gitea-nginx.conf from template
template:
src: gitea-nginx.conf.j2

View file

@ -2,8 +2,7 @@
; Copy required sections to your own app.ini (default is custom/conf/app.ini)
; and modify as needed.
; see https://docs.gitea.com/administration/config-cheat-sheet for additional documentation.
; https://docs.gitea.com/next/administration/config-cheat-sheet
; see https://docs.gitea.io/en-us/config-cheat-sheet/ for additional documentation.
; App name that shows in every page title
APP_NAME = {{ gitea_display_name }}
@ -24,11 +23,9 @@ DEFAULT_PRIVATE = last
; Global limit of repositories per user, applied at creation time. -1 means no limit
MAX_CREATION_LIMIT = -1
; Mirror sync queue length, increase if mirror syncing starts hanging
; 2023-07-16 ERROR: MIRROR_QUEUE_LENGTH = 1000
; `[repository].MIRROR_QUEUE_LENGTH`. Use new options in `[queue.mirror]`
MIRROR_QUEUE_LENGTH = 1000
; Patch test queue length, increase if pull request patch testing starts hanging
; 2023-07-16 ERROR: PULL_REQUEST_QUEUE_LENGTH = 1000
; `[repository].PULL_REQUEST_QUEUE_LENGTH`. Use new options in `[queue.pr_patch_checker]`
PULL_REQUEST_QUEUE_LENGTH = 1000
; Preferred Licenses to place at the top of the List
; The name here must match the filename in conf/license or custom/conf/license
PREFERRED_LICENSES = Apache License 2.0,MIT License
@ -204,22 +201,13 @@ PPROF_DATA_PATH = data/tmp/pprof
LANDING_PAGE = home
; Enables git-lfs support. true or false, default is false.
LFS_START_SERVER = false
; Where your lfs files reside, default is data/lfs.
LFS_CONTENT_PATH = {{ gitea_lfs_root }}
; LFS authentication secret, change this yourself
LFS_JWT_SECRET =
; LFS authentication validity period (in time.Duration), pushes taking longer than this may fail.
LFS_HTTP_AUTH_EXPIRY = 20m
; lfs [Large File Storage] storage will override storage
;
[lfs]
;STORAGE_TYPE = local
;
; Where your lfs files reside, default is data/lfs.
PATH = {{ gitea_lfs_root }}
;
; override the minio base path if storage type is minio
;MINIO_BASE_PATH = lfs/
; Define allowed algorithms and their minimum key length (use -1 to disable a type)
[ssh.minimum_key_sizes]
ED25519 = 256
@ -252,8 +240,7 @@ ISSUE_INDEXER_PATH = indexers/issues.bleve
; repo indexer by default disabled, since it uses a lot of disk space
REPO_INDEXER_ENABLED = false
REPO_INDEXER_PATH = indexers/repos.bleve
; 2023-07-16 ERROR: UPDATE_BUFFER_LEN = 20
; `[indexer].UPDATE_BUFFER_LEN`. Use new options in `[queue.issue_indexer]`
UPDATE_BUFFER_LEN = 20
MAX_FILE_SIZE = 1048576
[admin]
@ -373,8 +360,7 @@ PAGING_NUM = 10
[mailer]
ENABLED = false
; Buffer length of channel, keep it as it is if you don't know what it is.
; 2023-07-16 ERROR: SEND_BUFFER_LEN = 100
; `[mailer].SEND_BUFFER_LEN`. Use new options in `[queue.mailer]`
SEND_BUFFER_LEN = 100
; Name displayed in mail title
SUBJECT = %(APP_NAME)s
; Mail server

View file

@ -13,7 +13,7 @@
iiab-admin README
=================
`Internet-in-a-Box <https://internet-in-a-box.org>`_ (IIAB) encourages you to pay attention to the security of your learning community.
`Internet-in-a-Box <http://internet-in-a-box.org>`_ (IIAB) encourages you to pay attention to the security of your learning community.
This Ansible playbook is one of the very first that runs when you install IIAB, and we hope reading this helps you understand your choices:
@ -21,11 +21,11 @@ Configure user 'iiab-admin'
---------------------------
* `admin-user.yml <tasks/admin-user.yml>`_ configures a Linux user that will give you access to IIAB's Admin Console (http://box.lan/admin) after IIAB is installed — and can also help you at the command-line with IIAB community support commands like {iiab-diagnostics, iiab-hotspot-on, iiab-check-firmware, etc}.
* If initial creation of the user and password was somehow not already taken care of by IIAB's 1-line installer (https://download.iiab.io) or by your underlying OS, that too will be taken care of here.
* If initial creation of the user and password was somehow not already taken care of by IIAB's 1-line installer (http://download.iiab.io) or by your underlying OS, that too will be taken care of here.
* By default this user is ``iiab-admin`` with password ``g0adm1n``
* *Do change the default password if you haven't yet, by running:* **sudo passwd iiab-admin**
* After IIAB is installed, you can also change the password by logging into Admin Console (http://box.lan/admin) > Utilities > Change Password.
* If you prefer to use a pre-existing user like ``pi`` or ``ubuntu`` (or any other username) customize the variable ``iiab_admin_user`` in your `/etc/iiab/local_vars.yml <https://wiki.iiab.io/go/FAQ#What_is_local_vars.yml_and_how_do_I_customize_it%3F>`_ (preferably do this prior to installing IIAB!)
* If you prefer to use a pre-existing user like ``pi`` or ``ubuntu`` (or any other username) customize the variable ``iiab_admin_user`` in your `/etc/iiab/local_vars.yml <http://wiki.laptop.org/go/IIAB/FAQ#What_is_local_vars.yml_and_how_do_I_customize_it.3F>`_ (preferably do this prior to installing IIAB!)
* You can set ``iiab_admin_can_sudo: False`` if you want a strict security lockdown (if you're really sure you won't need IIAB community support commands like `/usr/bin/iiab-diagnostics <../../scripts/iiab-diagnostics.README.md>`_, `/usr/bin/iiab-hotspot-on <../network/templates/network/iiab-hotspot-on>`_, `iiab-check-firmware <../firmware/templates/iiab-check-firmware>`_, etc!)
* You can also set ``iiab_admin_user_install: False`` if you're sure you know how to do all this `account and sudo configuration <tasks/admin-user.yml>`_ manually.
@ -36,14 +36,14 @@ Security
#. ``iiab-admin`` (specified by ``admin_console_group`` in `/opt/iiab/iiab/vars/default_vars.yml <../../vars/default_vars.yml>`_ and `/opt/iiab/iiab-admin-console/vars/default_vars.yml <https://github.com/iiab/iiab-admin-console/blob/master/vars/default_vars.yml>`_)
#. ``sudo``
* Please read much more about what escalated (root) actions are authorized when you log into IIAB's Admin Console, and how this works: https://github.com/iiab/iiab-admin-console/blob/master/Authentication.md
* If your IIAB includes Tailscale (VPN), ``/root/.ssh/authorized_keys`` should be installed by `roles/tailscale/tasks/install.yml <../tailscale/tasks/install.yml>`_ to facilitate remote community support. Feel free to remove this as mentioned here: https://wiki.iiab.io/go/Security
* If your IIAB includes OpenVPN, ``/root/.ssh/authorized_keys`` should be installed by `roles/openvpn/tasks/install.yml <../openvpn/tasks/install.yml>`_ to facilitate remote community support. Feel free to remove this as mentioned here: http://wiki.laptop.org/go/IIAB/Security
* Auto-checking for the default/published password (as specified by ``iiab_admin_published_pwd`` in `/opt/iiab/iiab/vars/default_vars.yml <../../vars/default_vars.yml>`_) is implemented in `/etc/profile.d <templates/sshpwd-profile-iiab.sh>`_ (and `/etc/xdg/lxsession/LXDE-pi <templates/sshpwd-lxde-iiab.sh>`_ when it exists, i.e. on Raspberry Pi OS with desktop).
Example
=======
* If you later change your mind about ``sudo`` privileges for user 'iiab-admin' (as specified by ``iiab_admin_user``) then do this:
#. Go ahead and change the value of ``iiab_admin_can_sudo`` (to either True or False) in `/etc/iiab/local_vars.yml <https://wiki.iiab.io/go/FAQ#What_is_local_vars.yml_and_how_do_I_customize_it%3F>`_
#. Go ahead and change the value of ``iiab_admin_can_sudo`` (to either True or False) in `/etc/iiab/local_vars.yml <http://wiki.laptop.org/go/IIAB/FAQ#What_is_local_vars.yml_and_how_do_I_customize_it.3F>`_
#. Make sure that ``iiab_admin_user_install: True`` is also set.
#. Then re-run this Ansible playbook, by running ``cd /opt/iiab/iiab`` followed by ``sudo ./runrole --reinstall iiab-admin``
@@ -56,16 +56,16 @@ Historical Notes
Remote Support Tools
--------------------
The `iiab-diagnostics <../../scripts/iiab-diagnostics.README.md>`_ and `Tailscale (VPN) <https://en.wikipedia.org/wiki/Tailscale>`_ options mentioned above can greatly help you empower your community, typically during the implementation phase of your project, even if Linux is new to you.
The `iiab-diagnostics <../../scripts/iiab-diagnostics.README.md>`_ and `OpenVPN <https://en.wikipedia.org/wiki/OpenVPN>`_ options mentioned above can greatly help you empower your community, typically during the implementation phase of your project, even if Linux is new to you.
Similarly, `tasks/main.yml <tasks/main.yml>`_ adds a couple text mode tools — extremely helpful over expensive / low-bandwidth connections:
Similarly, `access.yml <tasks/access.yml>`_ adds a couple text mode tools — extremely helpful over expensive / low-bandwidth connections:
* `lynx <https://en.wikipedia.org/wiki/Lynx_(web_browser)>`_
* `screen <https://linuxize.com/post/how-to-use-linux-screen/>`_
*More great tools to help you jumpstart community action at a distance:*
* `FAQ.IIAB.IO <https://wiki.iiab.io/go/FAQ>`_ > "How can I remotely manage my Internet-in-a-Box?"
* http://FAQ.IIAB.IO > "How can I remotely manage my Internet-in-a-Box?"
Admin Console
-------------
View file
@@ -0,0 +1,6 @@
- name: "Install text mode packages, useful during remote access: screen, lynx"
package:
name:
- lynx
- screen
state: present
View file
@@ -1,6 +0,0 @@
- name: "Install text-mode packages, useful during remote access: lynx, screen"
package:
name:
- lynx
- screen
state: present
View file
@@ -2,17 +2,8 @@
# https://github.com/iiab/iiab/blob/master/roles/iiab-admin/README.rst
- name: Record (initial) disk space used
  shell: df -B1 --output=used / | tail -1
  register: df1
- name: "Install text-mode packages, useful during remote access: lynx, screen"
  package:
    name:
      - lynx
      - screen
    state: present
- name: Install lynx, screen
  include_tasks: access.yml
- name: Install sudo & /etc/sudoers with logging to /var/log/sudo.log
  include_tasks: sudo-prereqs.yml
@@ -28,7 +19,7 @@
# (1) by the OS installer
# (2) by the OS's graphical desktop tools
# (3) at the command-line: sudo passwd iiab-admin
# (4) by IIAB's 1-line installer: https://download.iiab.io
# (4) by IIAB's 1-line installer: http://download.iiab.io
# (5) by this role: roles/iiab-admin/tasks/admin-user.yml
# (6) by IIAB's Admin Console during installation
# ...and/or...
@@ -40,17 +31,6 @@
# RECORD iiab-admin AS INSTALLED
- name: Record (final) disk space used
  shell: df -B1 --output=used / | tail -1
  register: df2
- name: Add 'iiab_admin_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
  ini_file:
    path: "{{ iiab_ini_file }}"    # /etc/iiab/iiab.ini
    section: iiab-admin
    option: iiab_admin_disk_usage
    value: "{{ df2.stdout|int - df1.stdout|int }}"
- name: "Set 'iiab_admin_installed: True'"
  set_fact:
    iiab_admin_installed: True
Some files were not shown because too many files have changed in this diff.