
Merge branch 'master' into allow-large-uploads

A Holt committed 2023-12-21 11:19:02 -05:00 (committed by GitHub)
commit dcfd65fee4
388 changed files with 11174 additions and 4170 deletions


@ -0,0 +1,58 @@
name: '"10 min" IIAB test install'
# run-name: ${{ github.actor }} is testing out GitHub Actions 🚀
# https://michaelcurrin.github.io/dev-cheatsheets/cheatsheets/ci-cd/github-actions/triggers.html
on: [push, pull_request, workflow_dispatch]
# on:
# push:
#
# pull_request:
#
# # Allows you to run this workflow manually from the Actions tab
# workflow_dispatch:
#
# # Set your workflow to run every day of the week from Monday to Friday at 6:00 UTC
# schedule:
# - cron: "0 6 * * 1-5"
jobs:
test-install:
runs-on: ubuntu-latest
steps:
- run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
- run: echo "🔎 The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}."
#- name: Dump GitHub context (typically almost 500 lines)
# env:
# GITHUB_CONTEXT: ${{ toJSON(github) }}
# run: echo "$GITHUB_CONTEXT"
- name: Check out repository code
uses: actions/checkout@v3.1.0
- run: echo "🍏 This job's status is ${{ job.status }}."
- name: GitHub Actions "runner" environment
run: |
uname -a # uname -srm
whoami # Typically 'runner' instead of 'root'
pwd # /home/runner/work/iiab/iiab == $GITHUB_WORKSPACE == ${{ github.workspace }}
# ls
# ls $GITHUB_WORKSPACE
# ls ${{ github.workspace }}
# ls -la /opt # az, containerd, google, hostedtoolcache, microsoft, mssql-tools, pipx, pipx_bin, post-generation, runner, vsts
# apt update
# apt dist-upgrade -y
# apt autoremove -y
- name: Set up /opt/iiab/iiab
run: |
mkdir /opt/iiab
mv $GITHUB_WORKSPACE /opt/iiab
mkdir $GITHUB_WORKSPACE # OR SUBSEQUENT STEPS WILL FAIL ('working-directory: /opt/iiab/iiab' hacks NOT worth it!)
- name: Set up /etc/iiab/local_vars.yml
run: |
sudo mkdir /etc/iiab
# touch /etc/iiab/local_vars.yml
sudo cp /opt/iiab/iiab/vars/local_vars_none.yml /etc/iiab/local_vars.yml
- run: sudo /opt/iiab/iiab/scripts/ansible
- run: sudo ./iiab-install
working-directory: /opt/iiab/iiab
- run: iiab-summary
- run: cat /etc/iiab/iiab_state.yml
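Because the trigger list above includes workflow_dispatch, this job can also be started by hand from the Actions tab. Purely as an illustration (not part of this commit), the GitHub CLI equivalent would look something like:

    gh workflow run '"10 min" IIAB test install'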


@ -0,0 +1,65 @@
name: '"30 min" IIAB test install deb12 on rpi3'
# run-name: ${{ github.actor }} is testing out GitHub Actions 🚀
# https://michaelcurrin.github.io/dev-cheatsheets/cheatsheets/ci-cd/github-actions/triggers.html
on: [push, pull_request, workflow_dispatch]
# on:
# push:
#
# pull_request:
#
# # Allows you to run this workflow manually from the Actions tab
# workflow_dispatch:
#
# # Set your workflow to run every day of the week from Monday to Friday at 6:00 UTC
# schedule:
# - cron: "0 6 * * 1-5"
jobs:
test-install:
runs-on: ubuntu-latest
strategy:
matrix:
arch: [debian12]
include:
- arch: debian12
cpu: cortex-a7
cpu_info: cpuinfo/raspberrypi_3b
base_image: https://raspi.debian.net/daily/raspi_3_bookworm.img.xz
# source https://raspi.debian.net/daily-images/
steps:
#- run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
#- run: echo "🔎 The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}."
#- name: Dump GitHub context (typically almost 500 lines)
# env:
# GITHUB_CONTEXT: ${{ toJSON(github) }}
# run: echo "$GITHUB_CONTEXT"
- name: Dump matrix context
env:
MATRIX_CONTEXT: ${{ toJSON(matrix) }}
run: echo "$MATRIX_CONTEXT"
- uses: actions/checkout@v3.1.0
- uses: pguyot/arm-runner-action@v2
with:
image_additional_mb: 1024
base_image: ${{ matrix.base_image }}
cpu: ${{ matrix.cpu }}
cpu_info: ${{ matrix.cpu_info }}
copy_repository_path: /opt/iiab/iiab
commands: |
echo "🍏 This job's status is ${{ job.status }}."
grep Model /proc/cpuinfo
uname -a # uname -srm
whoami # Typically 'root' instead of 'runner'
pwd # /home/runner/work/iiab/iiab == $GITHUB_WORKSPACE == ${{ github.workspace }}
apt-get update -y --allow-releaseinfo-change
apt-get install --no-install-recommends -y git
ls /opt/iiab/iiab
mkdir /etc/iiab
cp /opt/iiab/iiab/vars/local_vars_none.yml /etc/iiab/local_vars.yml
/opt/iiab/iiab/scripts/ansible
./iiab-install
cd /opt/iiab/iiab
iiab-summary
cat /etc/iiab/iiab_state.yml


@ -0,0 +1,77 @@
name: '"30 min" IIAB test install raspios'
# run-name: ${{ github.actor }} is testing out GitHub Actions 🚀
# https://michaelcurrin.github.io/dev-cheatsheets/cheatsheets/ci-cd/github-actions/triggers.html
on: [push, pull_request, workflow_dispatch]
# on:
# push:
#
# pull_request:
#
# # Allows you to run this workflow manually from the Actions tab
# workflow_dispatch:
#
# # Set your workflow to run every day of the week from Monday to Friday at 6:00 UTC
# schedule:
# - cron: "0 6 * * 1-5"
jobs:
test-install:
runs-on: ubuntu-latest
strategy:
matrix:
arch: [aarch64] #[zero_raspbian, zero_raspios, zero2_raspios, aarch64]
include:
#- arch: zero_raspbian
# cpu: arm1176
# cpu_info: cpuinfo/raspberrypi_zero_w
# base_image: raspbian_lite:latest
#- arch: zero_raspios
# cpu: arm1176
# cpu_info: cpuinfo/raspberrypi_zero_w
# base_image: raspios_lite:latest
#- arch: zero2_raspios
# cpu: cortex-a7
# cpu_info: cpuinfo/raspberrypi_zero2_w
# base_image: raspios_lite:latest
- arch: aarch64
cpu: cortex-a53
cpu_info: cpuinfo/raspberrypi_zero2_w_arm64
base_image: raspios_lite_arm64:latest
steps:
#- run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
#- run: echo "🔎 The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}."
#- name: Dump GitHub context (typically almost 500 lines)
# env:
# GITHUB_CONTEXT: ${{ toJSON(github) }}
# run: echo "$GITHUB_CONTEXT"
- name: Dump matrix context
env:
MATRIX_CONTEXT: ${{ toJSON(matrix) }}
run: echo "$MATRIX_CONTEXT"
- uses: actions/checkout@v3.1.0
- uses: pguyot/arm-runner-action@v2
with:
image_additional_mb: 1024
base_image: ${{ matrix.base_image }}
cpu: ${{ matrix.cpu }}
cpu_info: ${{ matrix.cpu_info }}
copy_repository_path: /opt/iiab/iiab
commands: |
echo "🍏 This job's status is ${{ job.status }}."
#test `uname -m` = ${{ matrix.arch }}
grep Model /proc/cpuinfo
uname -a # uname -srm
whoami # Typically 'root' instead of 'runner'
pwd # /home/runner/work/iiab/iiab == $GITHUB_WORKSPACE == ${{ github.workspace }}
sudo apt-get update -y --allow-releaseinfo-change
sudo apt-get install --no-install-recommends -y git
ls /opt/iiab/iiab
sudo mkdir /etc/iiab
sudo cp /opt/iiab/iiab/vars/local_vars_none.yml /etc/iiab/local_vars.yml
sudo /opt/iiab/iiab/scripts/ansible
sudo ./iiab-install
cd /opt/iiab/iiab
iiab-summary
cat /etc/iiab/iiab_state.yml

.gitignore (vendored): 9 changed lines

@ -1,8 +1,13 @@
-xs-config.spec
+# https://git-scm.com/docs/gitignore
build
deprecated
.ansible
*.patches
*.log
*.retry
# Lines below for emacs, which generates even more tmp files since 2022
*~
.#*
\#*#


@ -1,3 +1,3 @@
-# SEE THE NEW<br>[github.com/iiab/iiab/wiki/IIAB-Contributors-Guide](https://github.com/iiab/iiab/wiki/IIAB-Contributors-Guide)
+# SEE THE NEW<br>[github.com/iiab/iiab/wiki/Technical-Contributors-Guide](https://github.com/iiab/iiab/wiki/Technical-Contributors-Guide)
# THANKS!


@ -15,6 +15,6 @@ this is to include the following two lines at the top of the file:
Licensed under the terms of the GNU GPL v2 or later; see LICENSE for details.
All files not containing an explicit copyright notice or terms of license in
-the file are Copyright © 2015-2021, Unleash Kids, and are licensed under the
+the file are Copyright © 2015-2022, Unleash Kids, and are licensed under the
terms of the GPLv2 license in the file named LICENSE in the root of the
repository.


@ -2,26 +2,26 @@
# Internet-in-a-Box (IIAB)
-[Internet-in-a-Box (IIAB)](https://internet-in-a-box.org) is a "learning hotspot" that brings the Internet's crown jewels
+[Internet-in-a-Box (IIAB)](https://internet-in-a-box.org) is a “learning hotspot” that brings the Internet's crown jewels
(Wikipedia in any language, thousands of Khan Academy videos, zoomable OpenStreetMap, electronic books, WordPress journaling, Toys from Trash electronics projects, ETC) to those without Internet.
You can build your own tiny, affordable server (an offline digital library) for your school, your medical clinic, your prison, your region and/or your very own family — accessible with any nearby smartphone, tablet or laptop.
Internet-in-a-Box gives you the DIY tools to:
1. Download then drag-and-drop to arrange the [very best of the Worlds Free Knowledge](https://internet-in-a-box.org/#quality-content).
-2. Choose among [30+ powerful educational apps](http://FAQ.IIAB.IO#What_services_.28IIAB_apps.29_are_suggested_during_installation.3F) for your school or learning/teaching community, optionally with a complete LMS (learning management system).
+2. Choose among [30+ powerful educational apps](https://wiki.iiab.io/go/FAQ#What_services_.28IIAB_apps.29_are_suggested_during_installation%3F) for your school or learning/teaching community, optionally with a complete LMS (learning management system).
3. Exchange local/indigenous knowledge with nearby communities, using our [Manage Content](https://github.com/iiab/iiab-admin-console/blob/master/roles/console/files/help/InstContent.rst#manage-content) interface and possible mesh networking.
-FYI this [community product](https://en.wikipedia.org/wiki/Internet-in-a-Box) is enabled by professional volunteers working [side-by-side](http://FAQ.IIAB.IO#What_are_the_best_places_for_community_support.3F) with schools, clinics and libraries around the world. *Thank you for being a part of our http://OFF.NETWORK grassroots technology [movement](https://meta.wikimedia.org/wiki/Internet-in-a-Box)!*
+FYI this [community product](https://en.wikipedia.org/wiki/Internet-in-a-Box) is enabled by professional volunteers working [side-by-side](https://wiki.iiab.io/go/FAQ#What_are_the_best_places_for_community_support%3F) with schools, clinics and libraries around the world. *Thank you for being a part of our http://OFF.NETWORK grassroots technology [movement](https://meta.wikimedia.org/wiki/Internet-in-a-Box)!*
## Installation
-Install Internet-in-a-Box (IIAB) from [download.iiab.io](https://download.iiab.io/)
+Install Internet-in-a-Box (IIAB) from: [**download.iiab.io**](https://download.iiab.io/)
-Please see [FAQ.IIAB.IO](http://FAQ.IIAB.IO) which has 40+ questions and answers to help you along the way, as you put together the <!--digital--> "local learning hotspot" most suitable for your own teaching/learning community. Here are 2 ways to install IIAB:
+Please see [FAQ.IIAB.IO](https://wiki.iiab.io/go/FAQ) which has 40+ questions and answers to help you along the way (e.g. [“Is a quick installation possible?”](https://wiki.iiab.io/go/FAQ#Is_a_quick_installation_possible%3F)) as you put together the <!--digital--> “local learning hotspot” most suitable for your own teaching/learning community. Here are 2 ways to install IIAB:
- Our [1-line installer](https://download.iiab.io/) gets you the very latest, typically within about an hour, on [different Linux distributions](https://github.com/iiab/iiab/wiki/IIAB-Platforms#operating-systems).
-- [Prefab disk images](https://github.com/iiab/iiab/wiki/Raspberry-Pi-Images:-Summary) ([.img files](https://archive.org/search.php?query=iiab%20.img&sort=-publicdate)) are sometimes a few months out of date, but can be flashed directly onto a microSD card, for insertion into Raspberry Pi.
+- [Prefab disk images](https://github.com/iiab/iiab/wiki/Raspberry-Pi-Images-~-Summary#iiab-images-for-raspberry-pi) ([.img files](https://archive.org/search.php?query=iiab%20.img&sort=-publicdate)) are sometimes a few months out of date, but can be flashed directly onto a microSD card, for insertion into Raspberry Pi.
Our [HOW-TO videos](https://www.youtube.com/channel/UC0cBGCxr_WPBPa3IqPVEe3g) can be very helpful and the [Installation](https://github.com/iiab/iiab/wiki/IIAB-Installation) wiki page has more intricate details e.g. if you're trying to install Internet-in-a-Box (IIAB) onto a [another Linux](https://github.com/iiab/iiab/wiki/IIAB-Platforms) that has not yet been tried.
@ -29,20 +29,22 @@ See our [Tech Docs Wiki](https://github.com/iiab/iiab/wiki) for more about the u
After you've installed the software, you should [add content](https://github.com/iiab/iiab/wiki/IIAB-Installation#add-content), which can of course take time when downloading multi-gigabyte Content Packs!
-Finally, you can [customize your Internet-in-a-Box home page](http://FAQ.IIAB.IO#How_do_I_customize_my_Internet-in-a-Box_home_page.3F) (typically http://box or http://box.lan) using our **drag-and-drop** Admin Console (http://box.lan/admin) &mdash; to arrange Content Packs and IIAB Apps (services) for your local community's needs.
+Finally, you can [customize your Internet-in-a-Box home page](https://wiki.iiab.io/go/FAQ#How_do_I_customize_my_Internet-in-a-Box_home_page%3F) (typically http://box or http://box.lan) using our **drag-and-drop** Admin Console (http://box.lan/admin) &mdash; to arrange Content Packs and IIAB Apps (services) for your local community's needs.
## Community
-Internet-in-a-Box (IIAB) greatly welcomes contributions from educators, librarians and [IT/UX/QA people](https://github.com/iiab/iiab/wiki/Technical-Contributors-Guide) of all kinds!
+Global community updates and videos are regularly posted to: **[@internet_in_box](https://twitter.com/internet_in_box)**
-If you would like to volunteer, please [make contact](https://internet-in-a-box.org/pages/contributing.html) after looking over "[How can I help?](http://FAQ.IIAB.IO#How_can_I_help.3F)" at: [FAQ.IIAB.IO](http://FAQ.IIAB.IO)
+_Internet-in-a-Box (IIAB) greatly welcomes contributions from educators, librarians and [IT/UX/QA people](https://github.com/iiab/iiab/wiki/Technical-Contributors-Guide) of all kinds!_
If you would like to volunteer, please [make contact](https://internet-in-a-box.org/contributing.html) after looking over [“How can I help?”](https://wiki.iiab.io/go/FAQ#How_can_I_help%3F) at: [FAQ.IIAB.IO](https://wiki.iiab.io/go/FAQ)
<!-- To learn about our software architecture, check out our [Contributors Guide](https://github.com/iiab/iiab/wiki/IIAB-Contributors-Guide).-->
-To learn more about our open community architecture for "offline" learning, check out "[What technical documentation exists?](http://FAQ.IIAB.IO#What_technical_documentation_exists.3F)"
+To learn more about our open community architecture for “offline” learning, check out [“What technical documentation exists?”](https://wiki.iiab.io/go/FAQ#What_technical_documentation_exists%3F)
-FYI we use [Ansible](http://FAQ.IIAB.IO#What_is_Ansible_and_what_version_should_I_use.3F) <!--as the underlying technology--> to install, deploy, configure and manage the various software components.
+FYI we use [Ansible](https://wiki.iiab.io/go/FAQ#What_is_Ansible_and_what_version_should_I_use%3F) <!--as the underlying technology--> to install, deploy, configure and manage the various software components.
-*Thank you for helping us enable offline access to the Internet's free/open knowledge jewels, as well as "Sneakernet-of-Alexandria" distribution of local/indigenous content, when mass media channels do not serve grassroots voices.*
+*Thank you for helping us enable offline access to the Internet's free/open knowledge jewels, as well as “Sneakernet-of-Alexandria” distribution of local/indigenous content, when mass media channels do not serve grassroots voices.*
## Versions
@ -52,4 +54,4 @@ Install our latest pre-release using the 1-line installer at: [**download.iiab.i
You can also consider <!--latest Internet-in-a-Box (IIAB)--> earlier official releases at: [github.com/iiab/iiab/releases](https://github.com/iiab/iiab/releases)
-For much older versions, see: [github.com/xsce](http://github.com/xsce), [schoolserver.org](http://schoolserver.org)
+For much older versions, see: [github.com/xsce](https://github.com/xsce), [schoolserver.org](http://schoolserver.org)


@ -3,10 +3,10 @@
become: yes
vars_files:
- vars/default_vars.yml
- vars/{{ ansible_local.local_facts.os_ver }}.yml
- /etc/iiab/local_vars.yml
- /etc/iiab/iiab_state.yml
roles:
- { role: 0-init }


@ -3,10 +3,10 @@
become: yes
vars_files:
- vars/default_vars.yml
- vars/{{ ansible_local.local_facts.os_ver }}.yml
- /etc/iiab/local_vars.yml
- /etc/iiab/iiab_state.yml
roles:
- { role: 0-init }


@ -1,17 +1,57 @@
#!/bin/bash -e
# Running from a git repo
# Add cmdline options for passing to ansible
-# Todo add proper shift to gobble up --debug --reinstall
PLAYBOOK=iiab-stages.yml
INVENTORY=ansible_hosts
IIAB_STATE_FILE=/etc/iiab/iiab_state.yml
-ARGS=""
+ARGS="--extra-vars {" # Needs boolean not string so use JSON list. bash forces {...} to '{...}' for Ansible
CWD=`pwd`
OS=`grep ^ID= /etc/os-release | cut -d= -f2`
-OS=${OS//\"/}
+OS=${OS//\"/} # Remove all '"'
MIN_RPI_KERN=5.4.0 # Do not use 'rpi-update' unless absolutely necessary: https://github.com/iiab/iiab/issues/1993
-MIN_ANSIBLE_VER=2.11.6 # Ansible 2.8.3 and 2.8.6 had serious bugs, preventing their use with IIAB.
+MIN_ANSIBLE_VER=2.14.13 # 2023-05-22: ansible-core 2.12 EOL per https://docs.ansible.com/ansible/latest/reference_appendices/release_and_maintenance.html#ansible-core-support-matrix 2022-11-09: Raspberry Pi 3 (and 3 B+ etc?) apparently install (and require?) ansible-core 2.11 for now -- @deldesir can explain more on PR #3419. Historical: Ansible 2.8.3 and 2.8.6 had serious bugs, preventing their use with IIAB.
REINSTALL=false
DEBUG=false
SKIP_ROLE_ON_ERROR=false
usage() {
echo -e "\n\e[1mUse './iiab-install' for regular installs, or to continue an install."
echo -e "Use './iiab-install --risky' to force 'skip_role_on_error: True'"
echo -e "Use './iiab-install --reinstall' to force running all Stages 0-9, followed by the Network Role."
echo -e "Use './iiab-install --debug' to run Stage 0, followed by Stages 3-9, followed by the Network Role."
echo -e "Use './iiab-configure' to run Stage 0, followed by Stages 4-9."
echo -e "Use './runrole' to run Stage 0, followed by a single Stage or Role."
echo -e "Use './iiab-network' to run Stage 0, followed by the Network Role.\e[0m\n"
}
# https://stackoverflow.com/questions/192249/how-do-i-parse-command-line-arguments-in-bash/14203146#14203146
while [[ $# -gt 0 ]]; do
case $1 in
--reinstall)
REINSTALL=true
shift
;;
--debug)
DEBUG=true
shift
;;
-r|--risky)
SKIP_ROLE_ON_ERROR=true
shift
;;
*)
usage
exit 1
;;
esac
done
ARGS="$ARGS\"skip_role_on_error\":$SKIP_ROLE_ON_ERROR" # Needs boolean not
# string so use JSON list. Ansible permits these boolean values: (refresher)
# https://github.com/iiab/iiab/blob/master/roles/0-init/tasks/validate_vars.yml#L19-L43
if [ ! -f /etc/iiab/local_vars.yml ]; then
@ -25,13 +65,13 @@ if [ ! -f /etc/iiab/local_vars.yml ]; then
echo -e "████████████████████████████████████████████████████████████████████████████████\n" >&2 echo -e "████████████████████████████████████████████████████████████████████████████████\n" >&2
fi fi
echo -e "\nEXITING: /opt/iiab/iiab/iiab-install REQUIRES /etc/iiab/local_vars.yml\n" >&2 echo -e "\n\e[1mEXITING: /opt/iiab/iiab/iiab-install REQUIRES /etc/iiab/local_vars.yml\e[0m\n" >&2
echo -e "(1) Please read http://wiki.laptop.org/go/IIAB/local_vars.yml to learn more" >&2 echo -e "(1) See http://FAQ.IIAB.IO -> What is local_vars.yml and how do I customize it?" >&2
echo -e "(2) MIN/MEDIUM/BIG samples are included in /opt/iiab/iiab/vars" >&2 echo -e "(2) SMALL/MEDIUM/LARGE samples are included in /opt/iiab/iiab/vars" >&2
echo -e "(3) NO TIME FOR DETAILS? RUN INTERNET-IN-A-BOX'S FRIENDLY 1-LINE INSTALLER:\n" >&2 echo -e "(3) NO TIME FOR DETAILS? RUN INTERNET-IN-A-BOX'S FRIENDLY 1-LINE INSTALLER:\n" >&2
echo -e ' http://download.iiab.io\n' >&2 echo -e ' https://download.iiab.io\n' >&2
exit 1 exit 1
fi fi
@ -42,14 +82,15 @@ fi
echo -e "\n\n./iiab-install $* BEGUN IN $CWD\n" echo -e "\n\n./iiab-install $* BEGUN IN $CWD\n"
echo -e "local_facts.fact DIAGNOSTICS... (A FEW LINES OF ERRORS/WARNINGS BELOW ARE OK!)\n" echo -e "local_facts.fact DIAGNOSTICS... (A FEW LINES OF ERRORS/WARNINGS BELOW ARE OK!)\n"
scripts/local_facts.fact # Exit & advise, if OS not supported. scripts/local_facts.fact # Exit & advise, if OS not supported.
if [ ! -f /etc/ansible/facts.d/local_facts.fact ]; then mkdir -p /etc/ansible/facts.d
mkdir -p /etc/ansible/facts.d
fi
cp scripts/local_facts.fact /etc/ansible/facts.d/local_facts.fact cp scripts/local_facts.fact /etc/ansible/facts.d/local_facts.fact
echo -e "\nPlaced /etc/ansible/facts.d/local_facts.fact into position." echo -e "\nPlaced /etc/ansible/facts.d/local_facts.fact into position.\n"
mkdir -p /etc/iiab/install-flags # MANDATORY since 2022-07-22
echo -e "/etc/iiab/install-flags directory created/verified."
echo -e "(e.g. for PR #3318 netwarn pop-ups, asking you to run iiab-network)\n"
if [ ! -f $PLAYBOOK ]; then if [ ! -f $PLAYBOOK ]; then
echo "EXITING: IIAB Playbook ""$PLAYBOOK"" not found." echo "EXITING: IIAB Playbook ""$PLAYBOOK"" not found."
@ -57,16 +98,6 @@ if [ ! -f $PLAYBOOK ]; then
exit 1
fi
if [ "$1" != "--debug" ] && [ "$1" != "--reinstall" ] && [ "$1" != "" ]; then
echo "Use './iiab-install' for regular installs, or to continue an install."
echo "Use './iiab-install --reinstall' to force running all Stages 0-9, followed by the Network Role."
echo "Use './iiab-install --debug' to run Stage 0, followed by Stages 3-9, followed by the Network Role."
echo "Use './iiab-configure' to run Stage 0, followed by Stages 4-9."
echo "Use './runrole' to run Stage 0, followed by a single Stage or Role."
echo "Use './iiab-network' to run Stage 0, followed by the Network Role."
exit 1
fi
# Subroutine compares software version numbers. Generates rare false positives
# like "1.0 > 1" and "2.4.0 > 2.4". Avoid risks by structuring conditionals w/
# a consistent # of decimal points e.g. "if version_gt w.x.y.z a.b.c.d; then"
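# [Editor's illustration -- not part of this commit.] The subroutine's body is
# not visible in this excerpt; a common 'sort -V'-based sketch that exhibits
# exactly the false positives described above would be:
#
#   version_gt() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; }
#
#   version_gt 2.14.13 2.11.6   # true, as expected
#   version_gt 1.0 1            # also "true" -- the rare false positive noted above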
@ -93,7 +124,7 @@ CURR_ANSIBLE_VER=0
#if [[ $(command -v ansible) ]]; then # Also Works! $(...) nests more easily than backticks
#if [[ `which ansible` ]]; then # "which" misses built-in commands like cd, and is RISKY per https://stackoverflow.com/questions/592620/check-if-a-program-exists-from-a-bash-script
#if [[ `type -P ansible` ]]; then # "type -P" isn't POSIX compliant; it misses built-in commands like "cd"
-if [[ `command -v ansible` ]]; then # "command -v" is POSIX compliant; it catches built-in commands like "cd"
+if [[ $(command -v ansible) ]]; then # "command -v" is POSIX compliant; it catches built-in commands like "cd"
CURR_ANSIBLE_VER=$(ansible --version | head -1 | cut -f 2- -d " " | sed 's/.* \([^ ]*\)\].*/\1/')
# Above works with 'ansible [core 2.11.0rc2]' -- these old ways do not:
#CURR_ANSIBLE_VER=$(ansible --version | head -1 | awk '{print $2}')
@ -125,41 +156,38 @@ if [ -f /etc/iiab/iiab.env ]; then
fi
fi
-if [ "$1" == "--reinstall" ]; then
+if $($REINSTALL); then
STAGE=0
-ARGS="$ARGS"" --extra-vars reinstall=True"
+#ARGS="$ARGS"" --extra-vars reinstall=True"
+ARGS="$ARGS,\"reinstall\":True" # Needs boolean not string so use JSON list
sed -i 's/^STAGE=.*/STAGE=0/' /etc/iiab/iiab.env
echo "Wrote STAGE=0 (counter) to /etc/iiab/iiab.env"
-elif [ "$STAGE" -ge 2 ] && [ "$1" == "--debug" ]; then
+elif [ "$STAGE" -ge 2 ] && $($DEBUG); then
STAGE=2
sed -i 's/^STAGE=.*/STAGE=2/' /etc/iiab/iiab.env
echo "Wrote STAGE=2 (counter) to /etc/iiab/iiab.env"
elif [ "$STAGE" -eq 9 ]; then
-echo -e "\nEXITING: STAGE (counter) in /etc/iiab/iiab.env shows Stage 9 Is Already Done."
+echo -e "\n\e[1mEXITING: STAGE (counter) in /etc/iiab/iiab.env shows Stage 9 Is Already Done.\e[0m"
-echo -e "Use './iiab-install --reinstall' to force running all Stages 0-9, followed by the Network Role."
+usage
-echo -e "Use './iiab-install --debug' to run Stage 0, followed by Stages 3-9, followed by the Network Role."
+exit 0 # Allows rerunning https://download.iiab.io/install.txt
-echo -e "Use './iiab-configure' to run Stage 0, followed by Stages 4-9."
-echo -e "Use './runrole' to run Stage 0, followed by a single Stage or Role."
-echo -e "Use './iiab-network' to run Stage 0, followed by the Network Role.\n\n"
-exit 0 # Allows rerunning http://download.iiab.io/install.txt
fi
fi
-if [ "$STAGE" -lt 2 ] && [ "$1" == "--debug" ]; then
+if [ "$STAGE" -lt 2 ] && $($DEBUG); then
echo -e "\n'--debug' *ignored* as STAGE (counter) < 2."
fi
# /etc/iiab/iiab_state.yml is mandatory and must be created here. Background:
# Allow iiab-install to read IIAB_STATE_FILE to not repeat installs of previous
# roles that already completed within the stage.
-if [ ! -f $IIAB_STATE_FILE ]; then
+if [ ! -f $IIAB_STATE_FILE ]; then # touch $IIAB_STATE_FILE
-#touch $IIAB_STATE_FILE
+echo -e "\nCreating... $IIAB_STATE_FILE"
cat > $IIAB_STATE_FILE << EOF
# DO *NOT* MANUALLY EDIT THIS, THANKS!
# IIAB does NOT currently support uninstalling apps/services.
EOF
fi
echo -e "\nTRY TO RERUN './iiab-install' IF IT FAILS DUE TO CONNECTIVITY ISSUES ETC!\n"
echo -e "\e[1mRunning local Ansible playbooks...\n...Stage 0 will now run\n...followed by Stages $(($STAGE + 1))-9\n...and then the Network Role.\e[0m\n"
@ -168,6 +196,8 @@ export ANSIBLE_LOG_PATH="$CWD""/iiab-install.log"
ansible -m setup -i $INVENTORY localhost --connection=local | grep python
ansible -m setup -i $INVENTORY localhost --connection=local >> /dev/null # So vars are recorded in /opt/iiab/iiab/iiab-install.log
-ansible-playbook -i $INVENTORY $PLAYBOOK ${ARGS} --connection=local
+ARGS="$ARGS}"
+echo -e "\nNOW RUN: ansible-playbook -i $INVENTORY $PLAYBOOK $ARGS --connection=local\n"
+ansible-playbook -i $INVENTORY $PLAYBOOK $ARGS --connection=local
echo -e "./iiab-install $* COMPLETED IN $CWD\n\n"


@ -4,11 +4,14 @@
CWD=`pwd`
export ANSIBLE_LOG_PATH="$CWD/iiab-network.log"
-if [ ! -f iiab-network.yml ]; then
+exit_error() {
-echo "iiab-network.yml not found in current directory."
+echo -e "\nEXITING: "$@ | tee -a /opt/iiab/iiab/iiab-network.log
-echo "Please rerun this command from the top level of the git repo."
-echo "Exiting."
exit 1
+}
+if [ ! -f iiab-network.yml ]; then
+exit_error "iiab-network.yml not found in current directory." \
+"Please rerun this command from the top level of the git repo."
fi
OS="unknown" # will be overridden below, if /etc/iiab/iiab.env is legit
@ -19,26 +22,21 @@ if [ -f /etc/iiab/iiab.env ]; then
if grep -q STAGE= /etc/iiab/iiab.env ; then
echo -e "\nExtracted STAGE=$STAGE (counter) from /etc/iiab/iiab.env"
if ! [ "$STAGE" -eq "$STAGE" ] 2> /dev/null; then
-echo -e "\nEXITING: STAGE (counter) value == ""$STAGE"" is non-integer"
+exit_error "STAGE (counter) value == ""$STAGE"" is non-integer"
-exit 1
elif [ "$STAGE" -lt 0 ] || [ "$STAGE" -gt 9 ]; then
-echo -e "\nEXITING: STAGE (counter) value == ""$STAGE"" is out-of-range"
+exit_error "STAGE (counter) value == ""$STAGE"" is out-of-range"
-exit 1
elif [ "$STAGE" -lt 3 ]; then
-echo -e "\nEXITING: STAGE (counter) value == ""$STAGE"
+exit_error "STAGE (counter) value == ""$STAGE" \
-echo -e "\nIIAB Stage 3 not complete."
+"\nIIAB Stage 3 not complete." \
-echo -e "\nPlease run: ./iiab-install"
+"\nPlease run: ./iiab-install"
-exit 1
fi
else
-echo -e "\nEXITING: STAGE (counter) not found"
+exit_error "STAGE (counter) not found" \
-echo -e "\nIIAB not installed."
+"\nIIAB not installed." \
-echo -e "\nPlease run: ./iiab-install"
+"\nPlease run: ./iiab-install"
-exit 1
fi
else
-echo -e "\nEXITING: /etc/iiab/iiab.env not found"
+exit_error "/etc/iiab/iiab.env not found"
-exit 1
fi
echo "Ansible will now run iiab-network.yml -- log file is iiab-network.log"
@ -94,3 +92,4 @@ echo "iiab-network run start: $Start"
echo "iiab-network run end: $End" echo "iiab-network run end: $End"
echo echo
echo "Please REBOOT to fully verify your network -- graphical desktops MUST reboot!" echo "Please REBOOT to fully verify your network -- graphical desktops MUST reboot!"
exit 0


@ -3,10 +3,10 @@
become: yes
vars_files:
- vars/default_vars.yml
- vars/{{ ansible_local.local_facts.os_ver }}.yml
- /etc/iiab/local_vars.yml
- /etc/iiab/iiab_state.yml
roles:
- { role: 0-init }

iiab-setup (executable file): 20 changed lines

@ -0,0 +1,20 @@
#!/bin/bash -e
# Running from a git repo
# Assumes iiab repos are downloaded
apt -y update
apt -y full-upgrade
apt -y install git curl nano gawk wget pastebinit
cd /opt/iiab/iiab
scripts/ansible
# 2022-09-27: iiab-install now handles this
#mkdir -p /etc/iiab/install-flags
if [ ! -f /etc/iiab/local_vars.yml ]; then
cp /opt/iiab/iiab/vars/local_vars_none.yml /etc/iiab/local_vars.yml
fi
reboot
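As a usage sketch only (not part of this commit), and assuming the IIAB repo has already been cloned to /opt/iiab/iiab as the header comment expects, the script would typically be run as root and ends by rebooting the machine:

    sudo /opt/iiab/iiab/iiab-setup    # path is an assumption: wherever the checkout's iiab-setup lives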


@ -3,11 +3,11 @@
become: yes
vars_files:
- roles/0-init/defaults/main.yml
- vars/default_vars.yml
- vars/{{ ansible_local.local_facts.os_ver }}.yml
- /etc/iiab/local_vars.yml
- /etc/iiab/iiab_state.yml
tasks:


@ -2,9 +2,9 @@
become: yes
vars_files:
- vars/default_vars.yml
- vars/{{ ansible_local.local_facts.os_ver }}.yml
- /etc/iiab/local_vars.yml
roles:
- { role: 0-init }


@ -23,14 +23,6 @@
# ...after it is set in 0-init/tasks/main.yml
first_run: False
rpi_model: none # 2021-07-30: Broadly used!
#xo_model: none # 2021-07-30: No longer used
# 2021-07-30: Recorded to /etc/iiab/iiab.ini but not used programmatically:
gw_active: False
# 2021-07-30: Broadly used, but not in an organized way -- most all IIAB
# outfitting/provisioning happens online -- in situations where connectivity
# failures should be reported to the operator, rather than papered over:
internet_available: False
discovered_wan_iface: none # 2021-07-30: Very broadly used!
# 2021-07-30: Barely used -- for {named, dhcpd, squid} in
# roles/network/tasks/main.yml -- after being set in 0-init/tasks/network.yml


@ -1,13 +1,26 @@
-# workaround for fact that auto create does not work on iiab_ini_file (/etc/iiab/iiab.ini)
+- name: Record disk_used_a_priori (permanently, into {{ iiab_ini_file }} below) to later estimate iiab_software_disk_usage
shell: df -B1 --output=used / | tail -1
register: df1
# workaround for fact that auto create does not work on iiab_ini_file
- name: Create {{ iiab_ini_file }}
file:
-path: "{{ iiab_ini_file }}"
+path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
state: touch
-- name: Add 'location' variable values to {{ iiab_ini_file }}
+- name: Run command 'dpkg --print-architecture' to identify OS architecture (CPU arch as revealed by ansible_architecture ~= ansible_machine is NOT enough!)
command: dpkg --print-architecture
register: dpkg_arch
- name: Run command 'dpkg --print-foreign-architectures' (secondary OS arch, if available)
command: dpkg --print-foreign-architectures
register: dpkg_foreign_arch
- name: Add 'summary' variable values to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}"
-section: location
+section: summary
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
@ -15,29 +28,39 @@
value: "{{ iiab_base }}" value: "{{ iiab_base }}"
- option: iiab_dir - option: iiab_dir
value: "{{ iiab_dir }}" value: "{{ iiab_dir }}"
- option: disk_used_a_priori
value: "{{ df1.stdout }}"
- name: Add 'version' variable values to {{ iiab_ini_file }} - name: Add 'initial' variable values to {{ iiab_ini_file }}
ini_file: ini_file:
path: "{{ iiab_ini_file }}" path: "{{ iiab_ini_file }}"
section: version section: initial
option: "{{ item.option }}" option: "{{ item.option }}"
value: "{{ item.value | string }}" value: "{{ item.value | string }}"
with_items: with_items:
- option: os_ver
value: "{{ os_ver }}"
- option: distribution - option: distribution
value: "{{ ansible_distribution }}" value: "{{ ansible_facts['distribution'] }}"
- option: arch - option: arch
value: "{{ ansible_architecture }}" value: "{{ ansible_architecture }}"
- option: iiab_base_ver - option: dpkg_arch
value: "{{ iiab_base_ver }}" value: "{{ dpkg_arch.stdout }}"
- option: iiab_branch - option: dpkg_foreign_arch
value: "{{ ansible_local.local_facts.iiab_branch }}" value: "{{ dpkg_foreign_arch.stdout }}"
- option: iiab_commit
value: "{{ ansible_local.local_facts.iiab_commit }}"
- option: install_date
value: "{{ ansible_date_time.iso8601 }}"
#- option: xo_model
# value: "{{ xo_model }}"
- option: rpi_model
value: "{{ rpi_model }}"
- option: devicetree_model
value: "{{ devicetree_model }}"
- option: iiab_base_ver
value: "{{ iiab_base_ver }}"
- option: iiab_remote_url
value: "{{ ansible_local.local_facts.iiab_remote_url }}"
- option: iiab_branch
value: "{{ ansible_local.local_facts.iiab_branch }}"
- option: iiab_commit
value: "{{ ansible_local.local_facts.iiab_commit }}"
- option: iiab_recent_tag
value: "{{ ansible_local.local_facts.iiab_recent_tag }}"
- option: install_date
value: "{{ ansible_date_time.iso8601 }}"


@ -1,3 +1,8 @@
- name: "Set 'iiab_fqdn: {{ iiab_hostname }}.{{ iiab_domain }}'"
set_fact:
iiab_fqdn: "{{ iiab_hostname }}.{{ iiab_domain }}"
FQDN_changed: False
- name: Does /etc/cloud/cloud.cfg exist e.g. is this Ubuntu Server 18+ ?
stat:
path: /etc/cloud/cloud.cfg
@ -17,24 +22,23 @@
# 2021-08-31: Periods in /etc/hostname fail with some WiFi routers (#2904)
# command: hostnamectl set-hostname "{{ iiab_hostname }}.{{ iiab_domain }}"
-#- name: Install /etc/sysconfig/network from template (redhat)
+# 2022-07-11: Should the first entry match just hostname and domain move to
-# template:
+# after localhost? See PR's #1 & #8 -- with discussion on #3302 -- and also:
-# src: roles/network/templates/network/sysconfig.network.j2
+# 1. /etc/hosts -- #1815 solved by PR #1847
-# dest: /etc/sysconfig/network
+# 2. /etc/hostname -- #2904 solved by PR #2973
# owner: root
# group: root
# mode: 0644
# when: is_redhat
# roles/network/tasks/hosts.yml [no longer in use] ALSO did this:
- name: 'Put FQDN & hostnames in /etc/hosts: "127.0.0.1 {{ iiab_hostname }}.{{ iiab_domain }} localhost.localdomain localhost {{ iiab_hostname }} box box.lan"'
lineinfile:
path: /etc/hosts
regexp: '^127\.0\.0\.1'
line: '127.0.0.1 {{ iiab_hostname }}.{{ iiab_domain }} localhost.localdomain localhost {{ iiab_hostname }} box box.lan'
#owner: root
-#group: root
+# 2021-07-30: FQDN_changed isn't used as in the past -- its remaining use is
-#mode: 0644
+# for {named, dhcpd, squid} in roles/network/tasks/main.yml -- possibly it
# should be reconsidered? See PR #2876: roles/network might become optional?
- name: "Also set 'FQDN_changed: True' -- if iiab_fqdn != ansible_fqdn ({{ ansible_fqdn }})"
set_fact:
FQDN_changed: True
when: iiab_fqdn != ansible_fqdn
#- name: Re-configuring httpd - not initial install
# include_tasks: roles/httpd/tasks/main.yml
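For illustration only (not part of this commit): assuming the usual defaults of iiab_hostname: box and iiab_domain: lan, the lineinfile task above rewrites the 127.0.0.1 entry of /etc/hosts as:

    127.0.0.1 box.lan localhost.localdomain localhost box box box.lan

(the repeated "box" is simply the template's {{ iiab_hostname }} alias followed by the hard-coded box alias).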


@ -7,17 +7,14 @@
# Higher-level purpose explained at the bottom of:
# https://github.com/iiab/iiab/blob/master/vars/default_vars.yml
-- name: "Ansible just ran /etc/ansible/facts.d/local_facts.fact to set 11 vars -- here we extract 3 of those -- rpi_model: {{ ansible_local.local_facts.rpi_model }}, devicetree_model: {{ ansible_local.local_facts.devicetree_model }}, iiab_stage: {{ ansible_local.local_facts.stage }}"
+- name: "Ansible just ran /etc/ansible/facts.d/local_facts.fact to set 15 vars -- here we extract 6 of those -- iiab_stage: {{ ansible_local.local_facts.stage }}, rpi_model: {{ ansible_local.local_facts.rpi_model }}, devicetree_model: {{ ansible_local.local_facts.devicetree_model }}, os_ver: {{ ansible_local.local_facts.os_ver }}, python_version: {{ ansible_local.local_facts.python_version }}, php_version: {{ ansible_local.local_facts.php_version }}"
set_fact:
iiab_stage: "{{ ansible_local.local_facts.stage }}"
rpi_model: "{{ ansible_local.local_facts.rpi_model }}"
devicetree_model: "{{ ansible_local.local_facts.devicetree_model }}"
-#xo_model: "{{ ansible_local.local_facts.xo_model }}"
+os_ver: "{{ ansible_local.local_facts.os_ver }}"
-iiab_stage: "{{ ansible_local.local_facts.stage }}"
+python_version: "{{ ansible_local.local_facts.python_version }}"
php_version: "{{ ansible_local.local_facts.php_version }}"
# 2020-10-29: Appears no longer nec (see 3 above ansible_local.local_facts.*)
#- name: Re-read local_facts.facts from /etc/ansible/facts.d
# setup:
# filter: ansible_local
# Initialize /etc/iiab/iiab.ini writing the 'location' and 'version' sections
# once and only once, to preserve the install date and git hash.
@ -26,10 +23,9 @@
when: not iiab_ini_test.stat.exists
# 2021-07-30: The 'first_run' flag isn't much used anymore. In theory it's # 2021-07-30: The 'first_run' flag isn't much used anymore. In theory it's
# still used in these 2 places: # still used in 1-prep/tasks/hardware.yml for raspberry_pi.yml
# (1) roles/1-prep/tasks/main.yml for raspberry_pi.yml #
# (2) roles/network/tasks/named.yml for "Stop named before copying files" # This needs to be reworked for 0-init speed, and overall understandability.
# In practice however, it's no longer important, and might be reconsidered?
- name: Set first_run flag
set_fact:
first_run: True
@ -38,11 +34,14 @@
# Copies the latest/known version of iiab-diagnostics into /usr/bin (so it can
# be run even if local source tree /opt/iiab/iiab is deleted to conserve disk).
-- name: Copy /opt/iiab/iiab/scripts/iiab-diagnostics to /usr/bin/iiab-diagnostics
+- name: Copy iiab-summary & iiab-diagnostics from /opt/iiab/iiab/scripts/ to /usr/bin/
copy:
-src: "{{ iiab_dir }}/scripts/iiab-diagnostics"
+src: "{{ iiab_dir }}/scripts/{{ item }}"
dest: /usr/bin/
mode: '0755'
with_items:
- iiab-summary
- iiab-diagnostics
- name: Create globally-writable directory /etc/iiab/diag (0777) so non-root users can run 'iiab-diagnostics'
file:
@ -53,13 +52,14 @@
- name: Pre-check that IIAB's "XYZ_install" + "XYZ_enabled" vars (1) are defined, (2) are boolean-not-string variables, and (3) contain plausible values. Also checks that "XYZ_install" is True when "XYZ_installed" is defined.
include_tasks: validate_vars.yml
when: not (rpi_model | regex_search('\\bW\\b')) # Ansible require double backslashes, e.g. with \b "word boundary" anchors: https://www.regular-expressions.info/wordboundaries.html https://stackoverflow.com/questions/56869119/ansible-regular-expression-to-match-a-string-and-extract-the-line/56869801#56869801
-- name: "Time Zone / TZ: Set symlink /etc/localtime to UTC if it doesn't exist?"
+# 2022-12-30: Functionality moved to www_options/tasks/php-settings.yml
-include_tasks: tz.yml
+# - name: "Time Zone / TZ: Set symlink /etc/localtime to UTC if it doesn't exist?"
# include_tasks: tz.yml
- name: Test Gateway + Test Internet + Set new hostname/domain (hostname.yml) if nec + Set 'gui_port' to 80 or 443 for Admin Console
include_tasks: network.yml
- name: Set hostname / domain (etc) in various places
include_tasks: hostname.yml
- name: Add 'runtime' variable values to {{ iiab_ini_file }}
ini_file:
@ -74,10 +74,14 @@
value: "{{ iiab_base_ver }}" value: "{{ iiab_base_ver }}"
- option: iiab_revision - option: iiab_revision
value: "{{ iiab_revision }}" value: "{{ iiab_revision }}"
- option: iiab_remote_url
value: "{{ ansible_local.local_facts.iiab_remote_url }}"
- option: runtime_branch - option: runtime_branch
value: "{{ ansible_local.local_facts.iiab_branch }}" value: "{{ ansible_local.local_facts.iiab_branch }}"
- option: runtime_commit - option: runtime_commit
value: "{{ ansible_local.local_facts.iiab_commit }}" value: "{{ ansible_local.local_facts.iiab_commit }}"
- option: iiab_recent_tag
value: "{{ ansible_local.local_facts.iiab_recent_tag }}"
- option: runtime_date - option: runtime_date
value: "{{ ansible_date_time.iso8601 }}" value: "{{ ansible_date_time.iso8601 }}"
- option: ansible_version - option: ansible_version
@ -88,24 +92,22 @@
value: "{{ ansible_memtotal_mb }}" value: "{{ ansible_memtotal_mb }}"
- option: swap_mb - option: swap_mb
value: "{{ ansible_swaptotal_mb }}" value: "{{ ansible_swaptotal_mb }}"
- option: gw_active
value: "{{ gw_active }}"
- option: internet_available
value: "{{ internet_available }}"
- option: rpi_model
value: "{{ rpi_model }}"
- option: devicetree_model
value: "{{ devicetree_model }}"
- option: os_ver
value: "{{ os_ver }}"
- option: python_version
value: "{{ python_version }}"
- option: php_version
value: "{{ php_version }}"
- option: first_run
value: "{{ first_run }}"
-- option: local_tz # e.g. 'EDT' (summer) or 'EST' (winter) after Ansible interprets symlink /etc/localtime -- or 'UTC' if /etc/localtime doesn't exist
+# - option: local_tz # e.g. 'EDT' (summer) or 'EST' (winter) after Ansible interprets symlink /etc/localtime -- or 'UTC' if /etc/localtime doesn't exist
-value: "{{ local_tz }}"
+# value: "{{ local_tz }}"
-- option: etc_localtime.stdout # e.g. 'America/New_York' direct from symlink /etc/localtime -- or '' if /etc/localtime doesn't exist
+# - option: etc_localtime.stdout # e.g. 'America/New_York' direct from symlink /etc/localtime -- or '' if /etc/localtime doesn't exist
-value: "{{ etc_localtime.stdout }}"
+# value: "{{ etc_localtime.stdout }}"
#- option: no_NM_reload
# value: "{{ no_NM_reload }}"
#- option: is_F18
# value: "{{ is_F18 }}"
- option: FQDN_changed
value: "{{ FQDN_changed }}"


@ -1,74 +0,0 @@
- name: Do we have a gateway? If 'ip route' specifies a default route, Ansible parses details here...
debug:
var: ansible_default_ipv4
- name: "If above ansible_default_ipv4.gateway is defined, set WAN candidate 'discovered_wan_iface: {{ ansible_default_ipv4.alias }}' -- using ansible_default_ipv4.alias"
set_fact:
discovered_wan_iface: "{{ ansible_default_ipv4.alias }}"
when: ansible_default_ipv4.gateway is defined
- name: "Verify gateway active: ping -c4 {{ ansible_default_ipv4.gateway }} -- using ansible_default_ipv4.gateway"
shell: ping -c4 "{{ ansible_default_ipv4.gateway }}" | grep icmp_seq=4 | wc -l
register: gw_active_test
when: discovered_wan_iface != "none"
- name: "If gateway responded, set 'gw_active: True' and 'iiab_wan_iface: {{ discovered_wan_iface }}' -- using discovered_wan_iface"
set_fact:
iiab_wan_iface: "{{ discovered_wan_iface }}"
gw_active: True
when: discovered_wan_iface != "none" and gw_active_test.stdout == "1"
- name: 'Test for Internet access, using: {{ iiab_download_url }}/heart-beat.txt'
get_url:
url: "{{ iiab_download_url }}/heart-beat.txt"
dest: /tmp/heart-beat.txt
#timeout: "{{ download_timeout }}"
# @jvonau recommends: 100sec is too much (keep 10sec default)
ignore_errors: True
#async: 10
#poll: 2
register: internet_access_test
- name: "Set 'internet_available: True' if above download succeeded AND not disregard_network"
set_fact:
internet_available: True # Initialized to 'False' in 0-init/defaults/main.yml
when: not internet_access_test.failed and not disregard_network
- name: Remove downloaded Internet test file /tmp/heart-beat.txt
file:
path: /tmp/heart-beat.txt
state: absent
- name: "Set 'iiab_fqdn: {{ iiab_hostname }}.{{ iiab_domain }}'"
set_fact:
iiab_fqdn: "{{ iiab_hostname }}.{{ iiab_domain }}"
FQDN_changed: False
- name: Set hostname / domain (etc) in various places -- if iiab_fqdn != ansible_fqdn ({{ ansible_fqdn }})
include_tasks: hostname.yml
when: iiab_fqdn != ansible_fqdn
# 2021-07-30: FQDN_changed isn't used as in the past -- its remaining use is
# for {named, dhcpd, squid} in roles/network/tasks/main.yml -- possibly it
# should be reconsidered? See PR #2876: roles/network might become optional?
- name: "Also set 'FQDN_changed: True' -- if iiab_fqdn != ansible_fqdn ({{ ansible_fqdn }})"
set_fact:
FQDN_changed: True
when: iiab_fqdn != ansible_fqdn
# 2021-08-17: (1) iiab-gen-iptables works better if gui_port is set directly in
# default_vars.yml and/or local_vars.yml (2) Admin Console's iiab-admin.yml
# and js-menu.yml set 'adm_cons_force_ssl: False'
# - name: "Set 'gui_port: 80' for Admin Console if not adm_cons_force_ssl"
# set_fact:
# gui_port: 80
# when: not adm_cons_force_ssl
# - name: "Set 'gui_port: 443' for Admin Console if adm_cons_force_ssl"
# set_fact:
# gui_port: 443
# when: adm_cons_force_ssl


@ -1,3 +1,5 @@
# 2022-12-30: Functionality moved to www_options/tasks/php-settings.yml
- name: "'local_tz: {{ local_tz }}' was set by ansible_date_time.tz in /opt/iiab/iiab/vars/default_vars.yml -- e.g. if Ansible finds symlink /etc/localtime -> ../usr/share/zoneinfo/America/New_York -- it will simplify that to 'EDT' (in the summer) or 'EST' (in the winter)" - name: "'local_tz: {{ local_tz }}' was set by ansible_date_time.tz in /opt/iiab/iiab/vars/default_vars.yml -- e.g. if Ansible finds symlink /etc/localtime -> ../usr/share/zoneinfo/America/New_York -- it will simplify that to 'EDT' (in the summer) or 'EST' (in the winter)"
command: echo command: echo


@ -63,38 +63,28 @@
#
# 2020-11-04: Fix validation of 5 [now 4] core dependencies, for ./runrole etc
- name: Set vars_checklist for 44 + 44 + 40 vars ("XYZ_install" + "XYZ_enabled" + "XYZ_installed") to be checked
- name: Set vars_checklist for 45 + 45 + 40 vars ("XYZ_install" + "XYZ_enabled" + "XYZ_installed") to be checked
set_fact:
vars_checklist:
- hostapd
- dhcpd
- named
- dnsmasq
- bluetooth
#- wondershaper # Unmaintained
- sshd
- openvpn
- remoteit
- admin_console
#- nginx # MANDATORY
#- apache # Unmaintained - former dependency
#- mysql # MANDATORY
- squid
#- dansguardian # Unmaintained
- cups
- samba
- usb_lib
#- xo_services # Unmaintained
#- activity_server # Unmaintained
#- ejabberd_xs # Unmaintained
#- idmgr # Unmaintained
- azuracast
#- dokuwiki # Unmaintained
#- ejabberd # Unmaintained
#- elgg # Unmaintained
- gitea
- jupyterhub
- lokole
- mysql # Dependency - excluded from _installed check below
- mediawiki
- mosquitto
- nodejs # Dependency - excluded from _installed check below
@ -111,6 +101,7 @@
- osm_vector_maps
- transmission
- awstats
- matomo
- monit
- munin
- phpmyadmin
@ -122,6 +113,7 @@
- calibreweb
- calibre
- pbx
- network
- name: Assert that {{ vars_checklist | length }} "XYZ_install" vars are all... defined
assert:
@ -163,5 +155,41 @@
that: "{{ item }}_install or {{ item }}_installed is undefined" that: "{{ item }}_install or {{ item }}_installed is undefined"
fail_msg: "DISALLOWED: '{{ item }}_install: False' (e.g. in /etc/iiab/local_vars.yml) WHEN '{{ item }}_installed' is defined (e.g. in /etc/iiab/iiab_state.yml) -- IIAB DOES NOT SUPPORT UNINSTALLS -- please verify those 2 files especially, and other places variables are defined?" fail_msg: "DISALLOWED: '{{ item }}_install: False' (e.g. in /etc/iiab/local_vars.yml) WHEN '{{ item }}_installed' is defined (e.g. in /etc/iiab/iiab_state.yml) -- IIAB DOES NOT SUPPORT UNINSTALLS -- please verify those 2 files especially, and other places variables are defined?"
quiet: yes quiet: yes
when: item != 'nodejs' and item != 'postgresql' and item != 'mongodb' and item != 'yarn' # Exclude auto-installed dependencies when: item != 'mysql' and item != 'postgresql' and item != 'mongodb' and item != 'nodejs' and item != 'yarn' # Exclude auto-installed dependencies
loop: "{{ vars_checklist }}" loop: "{{ vars_checklist }}"
- name: Set vars_deprecated_list for 4+ vars ("XYZ_install") to be checked
set_fact:
vars_deprecated_list:
- dhcpd # Deprecated
- named # Deprecated
- wondershaper # Deprecated
- dansguardian # Deprecated
#- xo_services # Unmaintained
#- activity_server # Unmaintained
#- ejabberd_xs # Unmaintained
#- idmgr # Unmaintained
#- dokuwiki # Unmaintained
#- ejabberd # Unmaintained
#- elgg # Unmaintained
- name: 'DISALLOW "XYZ_install: True" if deprecated'
assert:
that: "{{ item }}_install is undefined or not {{ item }}_install"
fail_msg: "DISALLOWED: '{{ item }}_install: True' (e.g. in /etc/iiab/local_vars.yml)"
quiet: yes
loop: "{{ vars_deprecated_list }}"
# 2023-12-04: ansible-core 2.16.1 suddenly no longer allows 'assert' with
# 'with_items' below (whereas 'loop' construct above works!) BACKGROUND:
#
# 'due to mitigation of security issue CVE-2023-5764 in ansible-core 2.16.1,
# conditional expressions with embedded template blocks can fail with the
# message “Conditional is marked as unsafe, and cannot be evaluated.”'
# https://docs.ansible.com/ansible-core/2.16/porting_guides/porting_guide_core_2.16.html#playbook
#
# with_items:
# - dhcpd # Deprecated
# - named # Deprecated
# - wondershaper # Deprecated
# - dansguardian # Deprecated
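To make the two guards above concrete, here is a hedged sketch of how hypothetical /etc/iiab/local_vars.yml entries would fare against them (variable values are invented purely for illustration):

# Hypothetical /etc/iiab/local_vars.yml excerpt
dnsmasq_install: True     # OK -- dnsmasq is still in vars_checklist above
dhcpd_install: False      # OK -- the deprecation assert only rejects a True value
#dhcpd_install: True      # Would now fail the 'DISALLOW "XYZ_install: True" if deprecated' assert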

View file

@@ -14,7 +14,7 @@
- name: Download {{ iiab_download_url }}/iwlwifi-8000C-13.ucode to /lib/firmware for built-in WiFi on NUC6
get_url:
url: "{{ iiab_download_url }}/iwlwifi-8000C-13.ucode" # https://download.iiab.io/packages
dest: /lib/firmware
timeout: "{{ download_timeout }}"
when: usb_NUC6.stdout|int > 0

View file

@@ -2,7 +2,7 @@
package:
name:
- parted # 2022-03-15: RasPiOS and Ubuntu install this regardless -- so rarely nec, but just in case.
- cloud-guest-utils # 2022-04-02: For growpart command -- whereas RasPiOS's 'raspi-config --expand-rootfs' instead uses fdisk (requiring a reboot, see do_expand_rootfs() in https://github.com/RPi-Distro/raspi-config/blob/master/raspi-config). FYI Ubuntu pre-installs cloud-guest-utils, for use with cloud-init.
state: present
- name: "Install from templates: /usr/sbin/iiab-expand-rootfs, /etc/systemd/system/iiab-expand-rootfs.service"

View file

@@ -23,9 +23,21 @@
name: iiab-admin
#when: iiab_admin_install # Flag might be created in future?
- name: Copy iiab-apps-to-be-installed from {{ iiab_dir }}/scripts to /usr/bin/
copy:
src: "{{ iiab_dir }}/scripts/iiab-apps-to-be-installed" # /opt/iiab/iiab
dest: /usr/bin/
mode: '0755'
- name: Copy iiab-network from {{ iiab_dir }}/scripts to /usr/local/bin/
copy:
src: "{{ iiab_dir }}/scripts/iiab-network"
dest: /usr/local/bin/
mode: '0755'
- name: Install ~12 network/wifi/related packages + Squid if necessary + configure /etc/sysctl.conf -- full configuration LATER in 'network', after Stage 9
include_tasks: roles/network/tasks/install.yml
when: network_install and network_installed is undefined
- include_tasks: uuid.yml
- include_tasks: ubermix.yml
@@ -62,7 +74,10 @@
# when: not is_debuntu and selinux_disabled is defined and selinux_disabled.changed
- name: Install {{ iiab_env_file }} from template -- FYI this file can be run as a script if absolutely nec -- e.g. 'source /etc/iiab/iiab.env && echo $WWWROOT'
template:
src: roles/1-prep/templates/iiab.env.j2
dest: "{{ iiab_env_file }}"
- name: Recording STAGE 1 HAS COMPLETED ============================
meta: noop

View file

@@ -1,4 +1,4 @@
#!/bin/bash -xe
# Expand rootfs partition to its maximum size, if /.expand-rootfs exists.
# Used by /etc/systemd/system/iiab-expand-rootfs.service on IIAB boot.
@@ -8,14 +8,25 @@
# Verifies that rootfs is the last partition.
# RELATED:
# 1. https://github.com/iiab/iiab-factory/blob/master/box/rpi/min-sd
# 2. https://github.com/iiab/iiab-factory/blob/master/box/rpi/cp-sd
# 3. https://github.com/iiab/iiab-factory/blob/master/box/rpi/xz-json-sd
# OR https://github.com/iiab/iiab-factory/blob/master/box/rpi/exp-sd
if [ -f /.expand-rootfs ] || [ -f /.resize-rootfs ]; then
echo "$0: Expanding rootfs partition"
if [ -x /usr/bin/raspi-config ]; then # Raspberry Pi OS -- WARNING: their fdisk-centric approach of course FAILS with "Hybrid MBR" or GPT partition tables, as required by any drive > 2TB :/
# 2022-02-17: Uses do_expand_rootfs() from:
# https://github.com/RPi-Distro/raspi-config/blob/master/raspi-config
# 2023-10-05: Official new RPi instructions:
# sudo raspi-config nonint do_expand_rootfs
# https://www.raspberrypi.com/documentation/computers/configuration.html#expand-filesystem-nonint
raspi-config --expand-rootfs # REQUIRES A REBOOT
rm -f /.expand-rootfs /.resize-rootfs
reboot # In future, we might warn interactive users that a reboot is coming?
else # REQUIRES NO REBOOT; BEWARE iiab-expand-rootfs.service RACE CONDITION WITH fsck (PR #2522 & #3325)
# 2022-03-15: Borrows from above raspi-config URL's do_expand_rootfs()
ROOT_PART="$(findmnt / -o SOURCE -n)" # e.g. /dev/sda2 or /dev/mmcblk0p2
ROOT_DEV="/dev/$(lsblk -no pkname "$ROOT_PART")" # e.g. /dev/sda or /dev/mmcblk0
@@ -30,8 +41,10 @@ if [ -f /.expand-rootfs ] || [ -f /.resize-rootfs ]; then
fi
# Expand partition
growpart $ROOT_DEV $ROOT_PART_NUM || true # raspi-config instead uses fdisk (assuming MBR). They really should transition to gdisk, as required by any drive > 2TB. WARNING: growpart RC 2 is more severe than RC 1, and should possibly be handled separately in future?
rc=$? # Make Return Code visible, for 'bash -x'
resize2fs $ROOT_PART
rc=$? # Make RC visible (as above)
# 2022-03-15: Legacy code below worked with Raspberry Pi microSD cards
# but *not* with USB boot drives, internal spinning disks/SSD's, etc.
@@ -53,7 +66,7 @@ if [ -f /.expand-rootfs ] || [ -f /.resize-rootfs ]; then
# # Resize partition
# growpart /dev/$root_dev $root_part_no
# resize2fs /dev/$root_part
fi
rm -f /.expand-rootfs /.resize-rootfs
fi
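As a usage note: the script only acts when one of its two flag files exists. A minimal, hypothetical Ansible sketch (not part of the diff above; the flag file names come from the script's own test) that requests a one-time expansion on the next boot:

# Hypothetical task -- not shipped by IIAB in this form
- name: Flag rootfs for expansion by iiab-expand-rootfs.service on next boot
  file:
    path: /.expand-rootfs    # /.resize-rootfs is honored as well
    state: touch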

View file

@@ -1,14 +1,24 @@
[Unit]
Description=Root Filesystem Auto-Expander
DefaultDependencies=no
# 2022-08-08: IIAB's 4 core OS's have 'After=systemd-fsck-root.service' WITHIN
# systemd-remount-fs.service, allowing us to avoid #3325 race condition w/ fsck
After=systemd-remount-fs.service
# 2022-08-08: While dphys-swapfile.service doesn't exist on Ubuntu, Mint
# and pure Debian, the following line may still serve a purpose on RasPiOS:
Before=dphys-swapfile.service
[Service]
Environment=TERM=linux
Type=oneshot
ExecStart=/usr/sbin/iiab-expand-rootfs
# 2022-08-08: By default, systemd dangerously kills rootfs expansion after just
# 90s (1TB microSD cards take ~8 min to expand). Let's remove the time limit:
TimeoutSec=infinity
# "Standard output type syslog is obsolete"
# StandardError=syslog
# WHEREAS StandardError=journal is the default, per https://www.freedesktop.org/software/systemd/man/systemd.exec.html#StandardOutput=
RemainAfterExit=yes
[Install]
WantedBy=local-fs.target

View file

@@ -1,6 +1,6 @@
# fl.yml signifies "file layout"
- name: "File Layout - Create directories: 1 in {{ py3_dist_path }}, 2 in {{ iiab_base }}, 17 in {{ content_base }}" # iiab_base: /opt/iiab
file:
path: "{{ item }}"
# owner: root
@@ -8,11 +8,11 @@
# mode: '0755'
state: directory
with_items:
#- /etc/sysconfig/olpc-scripts/setup.d/installed/
- "{{ py3_dist_path }}/iiab" # /usr/lib/python3/dist-packages
#- "{{ yum_packages_dir }}" # /opt/iiab/yum-packages
- "{{ pip_packages_dir }}" # /opt/iiab/pip-packages
- "{{ downloads_dir }}" # /opt/iiab/downloads
#- "{{ content_base }}/downloads" # /library/downloads auto-created just below
- "{{ content_base }}/downloads/zims"
- "{{ content_base }}/downloads/maps"

View file

@@ -8,8 +8,14 @@
- include_tasks: packages.yml
- name: "Use 'sysctl' to set 'kernel.core_uses_pid: 1' in /etc/sysctl.conf"
sysctl: # Places these settings in /etc/sysctl.conf, to survive reboot
name: "{{ item.name }}"
value: "{{ item.value }}"
with_items:
#- { name: 'kernel.sysrq', value: '1' } # OS values differ, Ok?
- { name: 'kernel.core_uses_pid', value: '1' }
#- { name: 'kernel.shmmax', value: '268435456' } # OS values differ, Ok?
- include_tasks: iiab-startup.yml

View file

@@ -1,27 +1,31 @@
# 2022-03-16: 'apt show <pkg> | grep Size' revealed download sizes, on 64-bit RasPiOS with desktop.
- name: "Install 19 common packages: acpid, bzip2, cron, curl, gawk, gpg, htop, i2c-tools, logrotate, lshw, pandoc, pastebinit, plocate, rsync, sqlite3, tar, unzip, usbutils, wget"
package:
name:
- acpid # 55kB download: Daemon for ACPI (power mgmt) events
- bzip2 # 47kB download: RasPiOS installs this regardless -- 2021-04-26: Prob not used, but can't hurt?
- cron # 98kB download: RasPiOS installs this regardless -- 2022-10-13: Debian 12 needs this added (for now?)
- curl # 254kB download: RasPiOS installs this regardless -- Used to install roles/nodejs and roles/nodered
#- etckeeper # 54kB download: "nobody is really using etckeeper and it's bloating the filesystem every time apt runs" per @jvonau at https://github.com/iiab/iiab/issues/1146
#- exfat-fuse # 28kB download: 2021-07-27: Should no longer be nec with 5.4+ kernels, so let's try commenting it out
#- exfat-utils # 41kB download: Ditto! See also 'ntfs-3g' below
- gawk # 533kB download
- gpg # 884kB download: Debian 12+ (especially!) require this for apt installs of gitea, kolibri, mongodb, yarn
- htop # 109kB download: RasPiOS installs this regardless
- i2c-tools # 78kB download: Low-level bus/chip/register/EEPROM tools e.g. for RTC
- logrotate # 67kB download: RasPiOS installs this regardless
- lshw # 257kB download: For 'lshw -C network' in iiab-diagnostics
#- lynx # 505kB download: Installed by 1-prep's roles/iiab-admin/tasks/main.yml
#- make # 376kB download: 2021-07-27: Currently used by roles/pbx and no other roles
#- ntfs-3g # 379kB download: RasPiOS installs this regardless -- 2021-07-31: But this should no longer be nec with 5.4+ kernels, similar to exfat packages above -- however, see also this symlink warning: https://superuser.com/questions/1050544/mount-with-kernel-ntfs-and-not-ntfs-3g -- and upcoming kernel 5.15 improvements: https://www.phoronix.com/scan.php?page=news_item&px=New-NTFS-Likely-For-Linux-5.15
#- openssh-server # 318kB download: RasPiOS installs this regardless -- this is also installed by 1-prep's roles/sshd/tasks/main.yml to cover all OS's
- pandoc # 19kB download: For /usr/bin/iiab-refresh-wiki-docs
- pastebinit # 47kB download: For /usr/bin/iiab-diagnostics
#- mlocate # 92kB download
- plocate # 97kB download: Faster & smaller than locate & mlocate
#- python3-pip # 337kB download: 2023-03-22: Used to be installed by /opt/iiab/iiab/scripts/ansible -- which would auto-install 'python3-setuptools' and 'python3' etc
#- python3-venv # 1188kB download: 2023-03-22: Already installed by /opt/iiab/iiab/scripts/ansible -- used by roles like {calibre-web, jupyterhub, lokole} -- whereas roles/kalite uses (virtual) package 'virtualenv' for Python 2 -- all these 3+1 IIAB roles install 'python3-venv' for themselves. FYI: Debian 11 no longer auto-installs 'python3-venv' when you install 'python3'
- rsync # 351kB download: RasPiOS installs this regardless
#- screen # 551kB download: Installed by 1-prep's roles/iiab-admin/tasks/main.yml
- sqlite3 # 1054kB download

View file

@@ -1,10 +1,21 @@
.. |ss| raw:: html
<strike>
.. |se| raw:: html
</strike>
.. |nbsp| unicode:: 0xA0
:trim:
====================
3-base-server README
====================
This 3rd `stage <https://github.com/iiab/iiab/wiki/IIAB-Contributors-Guide#ansible>`_ installs base server infra that `Internet-in-a-Box (IIAB) <https://internet-in-a-box.org/>`_ requires, including:
- |ss| `MySQL <https://github.com/iiab/iiab/blob/master/roles/mysql>`_ (database underlying many/most user-facing apps). |se| |nbsp| *As of 2023-11-05, MySQL / MariaDB is NO LONGER INSTALLED by 3-base-server — instead it's installed on-demand — as a dependency of Matomo, MediaWiki, Nextcloud, PBX (for FreePBX), WordPress &/or Admin Console.* This IIAB role (roles/mysql) also installs apt package:
- **php{{ php_version }}-mysql** — which forcibly installs **php{{ php_version }}-common**
- `NGINX <https://github.com/iiab/iiab/blob/master/roles/nginx>`_ web server (with Apache in some lingering cases). This IIAB role also installs apt package:
- **php{{ php_version }}-fpm** — which forcibly installs **php{{ php_version }}-cli**, **php{{ php_version }}-common** and **libsodium23**
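A hedged illustration of the on-demand MariaDB behavior described above, using the usual "XYZ_install" / "XYZ_enabled" variable convention (values are an example only):

# Example /etc/iiab/local_vars.yml excerpt
mediawiki_install: True     # roles/mysql (MariaDB) is pulled in as a dependency
mediawiki_enabled: True
#mysql_install: True        # No longer needs to be set explicitly for apps like MediaWiki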

View file

@@ -3,10 +3,13 @@
- name: ...IS BEGINNING =====================================
meta: noop
# 2023-11-05: MySQL (actually MariaDB) had been mandatory, installed on every
# IIAB by 3-base-server. Now installed on demand -- as a dependency of Matomo,
# MediaWiki, Nextcloud, PBX (for FreePBX), WordPress &/or Admin Console.
# - name: MYSQL + CORE PHP
# include_role:
# name: mysql
# #when: mysql_install
# 2021-05-21: Apache role 'httpd' is installed as nec by any of these 6 roles:
#

View file

@@ -2,7 +2,7 @@
4-server-options README
=======================
Whereas 3-base-server installs critical packages needed by all, this 4th `stage <https://github.com/iiab/iiab/wiki/IIAB-Contributors-Guide#ansible>`_ installs a broad array of *options* ⁠— depending on which server apps will be installed in later stages ⁠— as specified in `/etc/iiab/local_vars.yml <http://FAQ.IIAB.IO#What_is_local_vars.yml_and_how_do_I_customize_it%3F>`_
This includes more networking fundamentals, that may further be configured later on.
@@ -11,7 +11,7 @@ Specifically, these might be installed:
- Python libraries
- SSH daemon
- Bluetooth for Raspberry Pi
- Instant-sharing of `USB stick content <https://wiki.iiab.io/go/FAQ#Can_teachers_display_their_own_content%3F>`_
- CUPS Printing
- Samba for Windows filesystems
- `www_options <https://github.com/iiab/iiab/blob/master/roles/www_options/tasks/main.yml>`_
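As a rough sketch (assuming the "XYZ_install" / "XYZ_enabled" variable convention used throughout IIAB; values here are purely illustrative), the kind of /etc/iiab/local_vars.yml settings this stage reads looks like:

sshd_install: True
sshd_enabled: True
cups_install: True      # CUPS Printing
cups_enabled: False     # Installed, but not enabled
samba_install: True     # Samba for Windows filesystems
samba_enabled: True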

View file

@@ -24,23 +24,6 @@
name: sshd
when: sshd_install
# UNMAINTAINED
- name: Install named / BIND
include_tasks: roles/network/tasks/named.yml
when: named_install is defined and named_install
# UNMAINTAINED
- name: Install dhcpd
include_tasks: roles/network/tasks/dhcpd.yml
when: dhcpd_install is defined and dhcpd_install
# LESS MAINTAINED
- name: Install Squid
include_tasks: roles/network/tasks/squid.yml
when: squid_install and squid_installed is undefined
- name: Install Bluetooth - only on Raspberry Pi
include_role:
name: bluetooth

View file

@@ -3,11 +3,6 @@
- name: ...IS BEGINNING ====================================
meta: noop
- name: AZURACAST
include_role:
name: azuracast
when: azuracast_install is defined and azuracast_install
# UNMAINTAINED
- name: DOKUWIKI
include_role:
@@ -36,10 +31,11 @@
name: jupyterhub
when: jupyterhub_install
# UNMAINTAINED
- name: LOKOLE
include_role:
name: lokole
when: lokole_install is defined and lokole_install
- name: MEDIAWIKI
include_role:

View file

@@ -40,10 +40,23 @@
name: pathagar
when: pathagar_install is defined and pathagar_install
# WARNING: Since March 2023, 32-bit RasPiOS can act as 64-bit on RPi 4 and
# RPi 400 (unlike RPi 3!) SEE: https://github.com/iiab/iiab/pull/3422 and #3516
- name: Run command 'dpkg --print-architecture' to identify OS architecture (CPU arch as revealed by ansible_architecture ~= ansible_machine is NO LONGER enough!)
command: dpkg --print-architecture
register: dpkg_arch
when: sugarizer_install
- name: Explain bypassing of Sugarizer install if 32-bit OS
fail: # FORCE IT RED THIS ONCE!
msg: "BYPASSING SUGARIZER INSTALL ATTEMPT, as Sugarizer Server 1.5.0 requires MongoDB 3.2+ which is NO LONGER SUPPORTED on 32-bit Raspberry Pi OS. 'dpkg --print-architecture' output for your OS: {{ dpkg_arch.stdout }}"
when: sugarizer_install and not dpkg_arch.stdout is search("64")
ignore_errors: True
- name: SUGARIZER
include_role:
name: sugarizer
when: sugarizer_install and dpkg_arch.stdout is search("64")
- name: Recording STAGE 7 HAS COMPLETED ========================
lineinfile:
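The bypass above hinges on whether the output of 'dpkg --print-architecture' contains "64". A brief, hedged illustration of how the test behaves (example outputs only, not an exhaustive list):

# dpkg_arch.stdout is "arm64" or "amd64"  ->  is search("64") is true   ->  Sugarizer role runs
# dpkg_arch.stdout is "armhf" or "i386"   ->  is search("64") is false  ->  red warning above, role is skipped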

View file

@@ -12,17 +12,17 @@
include_role:
name: awstats
when: awstats_install
- name: MATOMO
include_role:
name: matomo
when: matomo_install
- name: MONIT
include_role:
name: monit
when: monit_install
- name: MUNIN
include_role:
name: munin
when: munin_install
- name: PHPMYADMIN
include_role:
name: phpmyadmin

View file

@@ -3,16 +3,34 @@
- name: ...IS BEGINNING ====================================
meta: noop
- name: AZURACAST
include_role:
name: azuracast
when: azuracast_install
# Porting to Python 3 is complete: does this belong elsewhere?
- name: CAPTIVE PORTAL
include_role:
name: captiveportal
when: captiveportal_install
# WARNING: Since March 2023, 32-bit RasPiOS can act as 64-bit on RPi 4 and
# RPi 400 (unlike RPi 3!) SEE: https://github.com/iiab/iiab/pull/3516
- name: Run command 'dpkg --print-architecture' to identify OS architecture (CPU arch as revealed by ansible_architecture ~= ansible_machine is NO LONGER enough!)
command: dpkg --print-architecture
register: dpkg_arch
when: internetarchive_install
- name: Explain bypassing of Internet Archive install if 32-bit OS
fail: # FORCE IT RED THIS ONCE!
msg: "BYPASSING INTERNET ARCHIVE PER https://github.com/iiab/iiab/issues/3641 -- 'dpkg --print-architecture' output for your OS: {{ dpkg_arch.stdout }}"
when: internetarchive_install and not dpkg_arch.stdout is search("64")
ignore_errors: True
- name: INTERNETARCHIVE
include_role:
name: internetarchive
when: internetarchive_install and dpkg_arch.stdout is search("64")
- name: MINETEST
include_role:
@@ -37,12 +55,46 @@
name: pbx
when: pbx_install
- name: "2021-06-27 TEMPORARY CODE TO INSTALL 'php-pear' UNTIL ADMIN CONSOLE DECLARES ITS OWN DEPENDENCY FOR: https://github.com/iiab/iiab-admin-console/blob/master/roles/cmdsrv/tasks/main.yml#L19"
- name: '2023-11-05 / TEMPORARY UNTIL ADMIN CONSOLE DECLARES ITS DEPENDENCY: Install MySQL (MariaDB) if admin_console_install (for setup-feedback and record_feedback.php)'
set_fact:
mysql_install: True
mysql_enabled: True
when: admin_console_install
- name: '2023-11-05 / TEMPORARY UNTIL ADMIN CONSOLE DECLARES ITS DEPENDENCY: Install MySQL (MariaDB) if admin_console_install (for setup-feedback and record_feedback.php)'
include_role:
name: mysql
when: admin_console_install
- name: '2023-11-05 / TEMPORARY UNTIL ADMIN CONSOLE DECLARES ITS DEPENDENCY: Install MySQL (MariaDB) if admin_console_install (for setup-feedback and record_feedback.php)'
fail:
msg: "Admin Console install cannot proceed, as MySQL / MariaDB is not installed."
when: admin_console_install and mysql_installed is undefined
# 2023-11-05: Moved from Stage 8, as it acts on mysql_installed (that might be set just above!)
- name: MUNIN
include_role:
name: munin
when: munin_install
- name: Read 'disk_used_a_priori' from /etc/iiab/iiab.ini
set_fact:
df1: "{{ lookup('ansible.builtin.ini', 'disk_used_a_priori', section='summary', file=iiab_ini_file) }}"
- name: Record currently used disk space, to compare with original 'disk_used_a_priori'
shell: df -B1 --output=used / | tail -1
register: df2
- name: Add ESTIMATED 'iiab_software_disk_usage = {{ df2.stdout|int - df1|int }}' to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: summary
option: iiab_software_disk_usage
value: "{{ df2.stdout|int - df1|int }}"
- name: Recording STAGE 9 HAS COMPLETED ====================
lineinfile:
path: "{{ iiab_env_file }}"
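To make the estimate above concrete (all numbers invented for illustration): if 'disk_used_a_priori' was recorded as 4500000000 bytes and 'df -B1 --output=used /' now reports 7200000000 bytes, then:

# iiab_software_disk_usage = 7200000000 - 4500000000 = 2700000000 bytes (roughly 2.7 GB),
# written to the [summary] section of /etc/iiab/iiab.ini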

View file

@@ -1,3 +1,8 @@
- name: Record (initial) disk space used
shell: df -B1 --output=used / | tail -1
register: df1
- name: 'Install package: awstats'
package:
name: awstats
@@ -83,7 +88,7 @@
# when: awstats_enabled and not is_debuntu
- name: "Summarize logs up to now: /usr/bin/perl /usr/lib/cgi-bin/awstats.pl -config=schoolserver -update"
command: /usr/bin/perl /usr/lib/cgi-bin/awstats.pl -config=schoolserver -update
- name: Install /etc/nginx/cgi-bin.php from template
template:
@@ -93,6 +98,17 @@
# RECORD AWStats AS INSTALLED
- name: Record (final) disk space used
shell: df -B1 --output=used / | tail -1
register: df2
- name: Add 'awstats_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: awstats
option: awstats_disk_usage
value: "{{ df2.stdout|int - df1.stdout|int }}"
- name: "Set 'awstats_installed: True'" - name: "Set 'awstats_installed: True'"
set_fact: set_fact:
awstats_installed: True awstats_installed: True

View file

@@ -19,27 +19,34 @@
quiet: yes
- block:
- name: Install AWStats if 'awstats_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
include_tasks: install.yml
when: awstats_installed is undefined
- name: Enable/Disable/Restart NGINX
include_tasks: nginx.yml
- name: Add 'awstats' variable values to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: awstats
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
- option: name
value: AWStats
- option: description
value: '"AWStats (originally known as Advanced Web Statistics) is a package written in Perl which generates static or dynamic html summaries based upon web server logs."'
- option: awstats_install
value: "{{ awstats_install }}"
- option: awstats_enabled
value: "{{ awstats_enabled }}"
rescue:
- name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
fail:
msg: ""
when: not skip_role_on_error

View file

@@ -261,7 +261,7 @@ AllowToUpdateStatsFromBrowser=1
# 3 - Possible on CLI and CGI
# Default: 2
#
AllowFullYearView=3

View file

@@ -1,19 +1,47 @@
================
AzuraCast README
================
Install `AzuraCast <https://azuracast.com/>`_ with your `Internet-in-a-Box (IIAB) <https://internet-in-a-box.org/>`_ if you want a simple, self-hosted "web radio station" with a modern web UI/UX. You and your community can then schedule newscasts, podcasts, music, and even do live streaming of audio content (video streaming might also be possible in future!)
As soon as you install AzuraCast with IIAB, it can stream MP3 files (and similar files) using `LiquidSoap <https://docs.azuracast.com/en/developers/liquidsoap>`_ to help you schedule or randomize playback of MP3 songs (and similar).
Please see AzuraCast's `screenshots <https://www.google.com/search?q=azuracast+screenshot&tbm=isch>`_ and `docs <./README.rst#azuracast-docs>`_. Community implementation examples:
* https://twitter.com/internet_in_box/status/1564986581664014342
* https://youtu.be/XfiFiOi46mk
Optionally, live-streaming can also be made to work, e.g. if you install `Mixxx or BUTT <https://docs.azuracast.com/en/user-guide/streaming-software>`_ on your own. (If so, you have many options to configure streaming with `Icecast <https://icecast.org/>`_, `Shoutcast <https://www.shoutcast.com/>`_, etc.)
Requirements
------------
AzuraCast recommends `2-to-4 GB RAM minimum <https://docs.azuracast.com/en/getting-started/requirements#system-requirements>`_.
As of 2022-08-31, AzuraCast should run on Ubuntu 22.04 and **64-bit** Raspberry Pi OS: `#1772 <https://github.com/iiab/iiab/issues/1772>`_, `AzuraCast/AzuraCast#332 <https://github.com/AzuraCast/AzuraCast/issues/332>`_, `PR #2946 <https://github.com/iiab/iiab/pull/2946>`_
Other Linux distributions may also work, at your own risk, especially if Docker runs smoothly.
NOTE: AzuraCast was designed to be installed *just once* on a fresh OS. So ``./runrole --reinstall azuracast`` is not supported in general. However, if you accidentally damage your AzuraCast software, IIAB has posted `technical tips <./tasks/install.yml>`_ *(use at your own risk!)* in case of emergency.
Using It
--------
* Do a normal IIAB install (https://download.iiab.io), making sure to set both variables ``azuracast_install`` and ``azuracast_enabled`` to ``True`` when IIAB's installer prompts you to edit `/etc/iiab/local_vars.yml <http://FAQ.IIAB.IO#What_is_local_vars.yml_and_how_do_I_customize_it%3F>`_
* When the IIAB software install completes, it will ask you to reboot, and AzuraCast's console will then be available at http://box.lan:12080
* That console site will prompt you to complete AzuraCast's initial setup: user accounts, managing stations, radio streams, etc.
* Finally, check out some `how-to videos <https://www.youtube.com/watch?v=b1Rxlu5P804>`_ to learn to manage your own radio station!
NOTE: When creating a station using AzuraCast's console, its default streaming ports for ``station`` and ``autodj`` need to be in the `port range 10000-10499 <https://github.com/iiab/iiab/wiki/IIAB-Networking#list-of-ports--services>`_ (ports 12080 and 12443 may also be required!)
AzuraCast Docs
--------------
- https://docs.azuracast.com
- https://docs.azuracast.com/en/getting-started/installation/post-installation-steps
- https://docs.azuracast.com/en/getting-started/settings
- https://docs.azuracast.com/en/getting-started/updates (can *DAMAGE* AzuraCast as of 2022-09-28)
- https://docs.azuracast.com/en/user-guide/streaming-software
- https://docs.azuracast.com/en/user-guide/troubleshooting
- https://docs.azuracast.com/en/user-guide/logs
- https://docs.azuracast.com/en/administration/docker
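For convenience, the two settings mentioned under "Using It" look like this in /etc/iiab/local_vars.yml (the port values echo this role's defaults and are shown only as a reminder):

azuracast_install: True
azuracast_enabled: True
#azuracast_http_port: 12080     # AzuraCast console, per the README above
#azuracast_https_port: 12443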

View file

@@ -1,15 +1,15 @@
# A full-featured online radio station suite. Uses Docker.
# README: https://github.com/iiab/iiab/tree/master/roles/azuracast#readme
# azuracast_install: False
# azuracast_enabled: False # This var is currently IGNORED
# azuracast_http_port: 12080
# azuracast_https_port: 12443
## AzuraCast needs many ports in the 8000:8496 range by default, but IIAB
## services conflict, so this variable below sets a sane prefix.
## e.g. setting the below variable to 10 will result in port range 10000-10499
## being reserved for AzuraCast:
# azuracast_port_range_prefix: 10
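Following the pattern described in the comments above (a hedged sketch -- pick a prefix that avoids other IIAB services' ports), changing the prefix shifts the whole station port range:

# azuracast_port_range_prefix: 11    # Would reserve ports 11000-11499 for stations instead
#                                    # (console ports 12080/12443 are set separately above)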

View file

@@ -1,23 +1,49 @@
# 2022-09-29: './runrole --reinstall azuracast' is NOT supported!
#
# 1. But if you must, first completely uninstall Docker + WIPE AzuraCast data:
#
# apt purge docker-ce docker-ce-cli containerd.io docker-compose-plugin docker-scan-plugin
# rm -rf /library/docker /var/lib/docker /var/lib/containerd
#
# Per https://docs.docker.com/engine/install/ubuntu/#uninstall-docker-engine
#
# 2. REBOOT to avoid later problems with 'systemctl status docker' -- if you
# don't reboot, Ansible will fail below when 'docker.sh install' fails to
# start docker.service -- likewise if you run './docker.sh install-docker'
# manually in /opt/azuracast. Either way, 'systemctl restart docker' won't
# work for ~2 minutes. (Rebooting avoids all these hassles!)
#
# 3. Just FYI the Docker install process will rebuild its 11 core directories
# in /var/lib/docker -> /library/docker: (as 'docker.sh install' begins)
#
# buildkit containers image network overlay2 plugins runtimes swarm tmp trust volumes
#
# 4. Just FYI both MySQL passwords (MYSQL_PASSWORD & MYSQL_ROOT_PASSWORD) will
# be WIPED from /opt/azuracast/azuracast.env (and new passwords
# auto-generated below, for use inside AzuraCast's Docker container).
#
# 5. Run './runrole --reinstall azuracast' in /opt/iiab/iiab
- name: Record (initial) disk space used
shell: df -B1 --output=used / | tail -1
register: df1
- name: AzuraCast - Make config directory {{ azuracast_host_dir }}
file:
path: "{{ azuracast_host_dir }}"
state: directory
- name: AzuraCast - Install {{ azuracast_host_dir }}/.env from template
template:
src: prod.env.j2
dest: "{{ azuracast_host_dir }}/.env"
#owner: root
#group: root
mode: 0644
- name: AzuraCast - Install {{ azuracast_host_dir }}/azuracast.env for altered ports
template:
src: azuracast.env.j2
dest: "{{ azuracast_host_dir }}/azuracast.env"
#owner: root
#group: root
mode: 0644
- name: AzuraCast - Download {{ docker_sh_url }} to {{ azuracast_host_dir }}
get_url:
@@ -26,13 +52,6 @@
mode: 0755
timeout: "{{ download_timeout }}"
- name: AzuraCast - Download AzuraCast's docker-compose.yml sample from GitHub to {{ azuracast_host_dir }}
get_url:
url: "{{ docker_compose_url }}"
dest: "{{ azuracast_host_dir }}/docker-compose.yml"
mode: 0755
timeout: "{{ download_timeout }}"
#- name: AzuraCast - Make changes to docker.sh script so it runs headless
# lineinfile:
# path: "{{ azuracast_host_dir }}/docker.sh"
@@ -40,27 +59,45 @@
# line: "\\1reply='Y'"
# backrefs: yes
# 2022-09-28: https://docs.azuracast.com/en/getting-started/installation/docker
# (& testing) confirm this is done automatically by 'docker.sh install' below.
#
# - name: AzuraCast - Download AzuraCast's docker-compose.yml sample from GitHub to {{ azuracast_host_dir }}
# get_url:
# url: "{{ docker_compose_url }}"
# dest: "{{ azuracast_host_dir }}/docker-compose.yml"
# timeout: "{{ download_timeout }}"
#- name: AzuraCast - Install {{ azuracast_host_dir }}/docker-compose.override.yml from template
# template:
# src: docker-compose.override.yml.j2
# dest: "{{ azuracast_host_dir }}/docker-compose.override.yml"
#- name: Change default port number range 8xxx:8xxx to {{ azuracast_port_range_prefix }}xxx:{{ azuracast_port_range_prefix }}xxx icecast-stations in docker-compose.yml
# replace:
# path: "{{ azuracast_host_dir }}/docker-compose.yml"
# regexp: "^( *- \\')8([0-9]{3})\\:8([0-9]{3}\\'.*)$"
# replace: "\\g<1>{{ azuracast_port_range_prefix }}\\g<2>:{{ azuracast_port_range_prefix }}\\g<3>"
- name: AzuraCast - Make directory {{ docker_container_dir }}
file:
path: "{{ docker_container_dir }}"
state: directory
- name: AzuraCast - Symlink /var/lib/docker -> {{ docker_container_dir }}
file:
src: "{{ docker_container_dir }}"
path: /var/lib/docker
state: link
# 2022-09-28: "yes 'Y'" toggled whatever it found in /opt/azuracast/.env (e.g.
# AZURACAST_VERSION=stable from templates/prod.env.j2) to the opposite (e.g.
# AZURACAST_VERSION=latest). Let's not modify /opt/azuracast/.env unless nec!
#
# - name: AzuraCast - Setup for stable channel install
# shell: "yes 'Y' | /bin/bash docker.sh setup-release"
- name: AzuraCast - Setup for stable channel install # args:
shell: "yes 'Y' | /bin/bash docker.sh setup-release" # chdir: "{{ azuracast_host_dir }}"
args:
chdir: "{{ azuracast_host_dir }}"
- name: AzuraCast - Run the installer
shell: "yes '' | /bin/bash docker.sh install"
@@ -70,6 +107,17 @@
# RECORD AzuraCast AS INSTALLED
- name: Record (final) disk space used
shell: df -B1 --output=used / | tail -1
register: df2
- name: Add 'azuracast_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: azuracast
option: azuracast_disk_usage
value: "{{ df2.stdout|int - df1.stdout|int }}"
- name: "Set 'azuracast_installed: True'" - name: "Set 'azuracast_installed: True'"
set_fact: set_fact:
azuracast_installed: True azuracast_installed: True

View file

@@ -19,25 +19,32 @@
quiet: yes
- block:
- name: Install AzuraCast if 'azuracast_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
include_tasks: install.yml
when: azuracast_installed is undefined
# TODO figure out what to turn off/on for AzuraCast
# - include_tasks: enable-or-disable.yml
- name: Add 'azuracast' variable values to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: azuracast
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
- option: name
value: azuracast
- option: description
value: '"AzuraCast is simple, self-hosted web radio. Use it to schedule student newscasts, podcasts, music (e.g. MP3''s and similar) and even do live-streaming."'
- option: enabled
value: "{{ azuracast_enabled }}"
rescue:
- name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
fail:
msg: ""
when: not skip_role_on_error

View file

@@ -0,0 +1,16 @@
# Work in progress -- might never be ready, as the web interface has settings that would need to match
location /azuracast/ {
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /azuracast;
proxy_pass http://127.0.0.1:{{ azuracast_http_port }};
}
location /radio/ {
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /radio;
proxy_pass http://127.0.0.1:{{ azuracast_http_port }};
}

View file

@@ -0,0 +1,155 @@
# IIAB version for altered ports
#
# AzuraCast Customization
#
# The application environment.
# Valid options: production, development, testing
APPLICATION_ENV=production
# Manually modify the logging level.
# This allows you to log debug-level errors temporarily (for problem-solving) or reduce
# the volume of logs that are produced by your installation, without needing to modify
# whether your installation is a production or development instance.
# Valid options: debug, info, notice, warning, error, critical, alert, emergency
# LOG_LEVEL=notice
# Enable the composer "merge" functionality to combine the main application's
# composer.json file with any plugins' composer files.
# This can have performance implications, so you should only use it if
# you use one or more plugins with their own Composer dependencies.
# Valid options: true, false
COMPOSER_PLUGIN_MODE=false
# The minimum port number to use when automatically assigning ports to a station.
# By default, this matches the first forwarded port on the "stations" container.
# You can modify this variable if your station port range is different.
# Be sure to also forward the necessary ports via `docker-compose.yml`
# (and nginx, if you want to use the built-in port-80/443 proxy)!
AUTO_ASSIGN_PORT_MIN="{{ azuracast_port_range_prefix }}000"
# The maximum port number to use when automatically assigning ports to a station.
# See AUTO_ASSIGN_PORT_MIN.
AUTO_ASSIGN_PORT_MAX="{{ azuracast_port_range_prefix }}499"
#
# Database Configuration
# --
# Once the database has been installed, DO NOT CHANGE these values!
#
# The host to connect to. Leave this as the default value unless you're connecting
# to an external database server.
# Default: mariadb
MYSQL_HOST=mariadb
# The port to connect to. Leave this as the default value unless you're connecting
# to an external database server.
# Default: 3306
MYSQL_PORT=3306
# The username AzuraCast will use to connect to the database.
# Default: azuracast
MYSQL_USER=azuracast
# The password AzuraCast will use to connect to the database.
# By default, the database is not exposed to the Internet at all and this is only
# an internal password used by the service itself.
# Default: azur4c457
MYSQL_PASSWORD=azur4c457
# The name of the AzuraCast database.
# Default: azuracast
MYSQL_DATABASE=azuracast
# Automatically generate a random root password upon the first database spin-up.
# This password will be visible in the mariadb container's logs.
# Default: yes
MYSQL_RANDOM_ROOT_PASSWORD=yes
# Log slower queries for the purpose of diagnosing issues. Only turn this on when
# you need to, by uncommenting this and switching it to 1.
# To read the slow query log once enabled, run:
# docker-compose exec mariadb slow_queries
# Default: 0
MYSQL_SLOW_QUERY_LOG=0
# Set the amount of allowed connections to the database. This value should be increased
# if you are seeing the `Too many connections` error in the logs.
# Default: 100
MYSQL_MAX_CONNECTIONS=100
#
# Redis Configuration
#
# Uncomment these fields if you are using a third-party Redis host instead of the one provided with AzuraCast.
# Do not modify these fields if you are using the standard AzuraCast Redis host.
#
# Whether to use the Redis cache; if set to false, will disable Redis and use flatfile cache instead.
# Default: true
# ENABLE_REDIS=true
# Name of the Redis host.
# Default: redis
# REDIS_HOST=redis
# Port to connect to on the Redis host.
# Default: 6379
# REDIS_PORT=6379
# Database index to use on the Redis host.
# Default: 1
# REDIS_DB=1
#
# Advanced Configuration
#
# PHP's maximum POST body size and max upload filesize.
# PHP_MAX_FILE_SIZE=25M
# PHP's maximum memory limit.
# PHP_MEMORY_LIMIT=128M
# PHP's maximum script execution time (in seconds).
# PHP_MAX_EXECUTION_TIME=30
# The maximum execution time (and lock timeout) for the 15-second, 1-minute and 5-minute synchronization tasks.
# SYNC_SHORT_EXECUTION_TIME=600
# The maximum execution time (and lock timeout) for the 1-hour synchronization task.
# SYNC_LONG_EXECUTION_TIME=1800
# Maximum number of PHP-FPM worker processes to spawn.
# PHP_FPM_MAX_CHILDREN=5
#
# PHP-SPX profiling extension Configuration
#
# These environment variables allow you to enable and configure the PHP-SPX profiling extension
# which can be helpful when debugging resource issues in AzuraCast.
#
# The profiling dashboard can be accessed by visiting https://yourdomain.com/?SPX_KEY=dev&SPX_UI_URI=/
# If you change the PROFILING_EXTENSION_HTTP_KEY variable change the value for SPX_KEY accordingly.
#
# Enable the profiling extension.
# Profiling data can be viewed by visiting http://your-azuracast-site/?SPX_KEY=dev&SPX_UI_URI=/
# Default: 0
# PROFILING_EXTENSION_ENABLED=0
# Profile ALL requests made to this account.
# This will have significant performance impact on your installation and should only be used in test circumstances.
# Default: 0
# PROFILING_EXTENSION_ALWAYS_ON=0
# Configure the value for the SPX_KEY parameter needed to access the profiling dashboard
# Default: dev
# PROFILING_EXTENSION_HTTP_KEY=dev
# Configure the IP whitelist for the profiling dashboard
# By default only localhost is allowed to access this page.
# Uncomment this line to enable access for you.
# Default: 127.0.0.1
# PROFILING_EXTENSION_HTTP_IP_WHITELIST=*

View file

@@ -0,0 +1,155 @@
# https://github.com/AzuraCast/AzuraCast/blob/main/azuracast.sample.env
#
# AzuraCast Customization
#
# The application environment.
# Valid options: production, development, testing
APPLICATION_ENV=production
# Manually modify the logging level.
# This allows you to log debug-level errors temporarily (for problem-solving) or reduce
# the volume of logs that are produced by your installation, without needing to modify
# whether your installation is a production or development instance.
# Valid options: debug, info, notice, warning, error, critical, alert, emergency
# LOG_LEVEL=notice
# Enable the composer "merge" functionality to combine the main application's
# composer.json file with any plugins' composer files.
# This can have performance implications, so you should only use it if
# you use one or more plugins with their own Composer dependencies.
# Valid options: true, false
COMPOSER_PLUGIN_MODE=false
# The minimum port number to use when automatically assigning ports to a station.
# By default, this matches the first forwarded port on the "stations" container.
# You can modify this variable if your station port range is different.
# Be sure to also forward the necessary ports via `docker-compose.yml`
# (and nginx, if you want to use the built-in port-80/443 proxy)!
AUTO_ASSIGN_PORT_MIN=8000
# The maximum port number to use when automatically assigning ports to a station.
# See AUTO_ASSIGN_PORT_MIN.
AUTO_ASSIGN_PORT_MAX=8499
#
# Database Configuration
# --
# Once the database has been installed, DO NOT CHANGE these values!
#
# The host to connect to. Leave this as the default value unless you're connecting
# to an external database server.
# Default: mariadb
MYSQL_HOST=mariadb
# The port to connect to. Leave this as the default value unless you're connecting
# to an external database server.
# Default: 3306
MYSQL_PORT=3306
# The username AzuraCast will use to connect to the database.
# Default: azuracast
MYSQL_USER=azuracast
# The password AzuraCast will use to connect to the database.
# By default, the database is not exposed to the Internet at all and this is only
# an internal password used by the service itself.
# Default: azur4c457
MYSQL_PASSWORD=azur4c457
# The name of the AzuraCast database.
# Default: azuracast
MYSQL_DATABASE=azuracast
# Automatically generate a random root password upon the first database spin-up.
# This password will be visible in the mariadb container's logs.
# Default: yes
MYSQL_RANDOM_ROOT_PASSWORD=yes
# Log slower queries for the purpose of diagnosing issues. Only turn this on when
# you need to, by uncommenting this and switching it to 1.
# To read the slow query log once enabled, run:
# docker-compose exec mariadb slow_queries
# Default: 0
MYSQL_SLOW_QUERY_LOG=0
# Set the amount of allowed connections to the database. This value should be increased
# if you are seeing the `Too many connections` error in the logs.
# Default: 100
MYSQL_MAX_CONNECTIONS=100
#
# Redis Configuration
#
# Uncomment these fields if you are using a third-party Redis host instead of the one provided with AzuraCast.
# Do not modify these fields if you are using the standard AzuraCast Redis host.
#
# Whether to use the Redis cache; if set to false, will disable Redis and use flatfile cache instead.
# Default: true
# ENABLE_REDIS=true
# Name of the Redis host.
# Default: redis
# REDIS_HOST=redis
# Port to connect to on the Redis host.
# Default: 6379
# REDIS_PORT=6379
# Database index to use on the Redis host.
# Default: 1
# REDIS_DB=1
#
# Advanced Configuration
#
# PHP's maximum POST body size and max upload filesize.
# PHP_MAX_FILE_SIZE=25M
# PHP's maximum memory limit.
# PHP_MEMORY_LIMIT=128M
# PHP's maximum script execution time (in seconds).
# PHP_MAX_EXECUTION_TIME=30
# The maximum execution time (and lock timeout) for the 15-second, 1-minute and 5-minute synchronization tasks.
# SYNC_SHORT_EXECUTION_TIME=600
# The maximum execution time (and lock timeout) for the 1-hour synchronization task.
# SYNC_LONG_EXECUTION_TIME=1800
# Maximum number of PHP-FPM worker processes to spawn.
# PHP_FPM_MAX_CHILDREN=5
#
# PHP-SPX profiling extension Configuration
#
# These environment variables allow you to enable and configure the PHP-SPX profiling extension
# which can be helpful when debugging resource issues in AzuraCast.
#
# The profiling dashboard can be accessed by visiting https://yourdomain.com/?SPX_KEY=dev&SPX_UI_URI=/
# If you change the PROFILING_EXTENSION_HTTP_KEY variable change the value for SPX_KEY accordingly.
#
# Enable the profiling extension.
# Profiling data can be viewed by visiting http://your-azuracast-site/?SPX_KEY=dev&SPX_UI_URI=/
# Default: 0
# PROFILING_EXTENSION_ENABLED=0
# Profile ALL requests made to this account.
# This will have significant performance impact on your installation and should only be used in test circumstances.
# Default: 0
# PROFILING_EXTENSION_ALWAYS_ON=0
# Configure the value for the SPX_KEY parameter needed to access the profiling dashboard
# Default: dev
# PROFILING_EXTENSION_HTTP_KEY=dev
# Configure the IP whitelist for the profiling dashboard
# By default only localhost is allowed to access this page.
# Uncomment this line to enable access for you.
# Default: 127.0.0.1
# PROFILING_EXTENSION_HTTP_IP_WHITELIST=*
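Note that edits to this env file only take effect once the Docker containers are recreated. A minimal sketch, assuming the file sits alongside docker.sh and docker-compose.yml in the AzuraCast install directory (commonly /var/azuracast; adjust the path if yours differs):

   cd /var/azuracast      # hypothetical install directory
   ./docker.sh down       # stop and remove the containers
   ./docker.sh up         # recreate them so the new values are read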

View file

@ -0,0 +1,54 @@
# This file was automatically generated by AzuraCast and modified for IIAB
# You can modify it as necessary. To apply changes, restart the Docker containers.
# Remove the leading "#" symbol from lines to uncomment them.
# (Docker Compose) All Docker containers are prefixed by this name. Do not change this after installation.
# Default: azuracast
COMPOSE_PROJECT_NAME=azuracast
# (Docker Compose) The amount of time to wait before a Docker Compose operation fails. Increase this on lower performance computers.
# Default: 300
COMPOSE_HTTP_TIMEOUT=300
# Release Channel
# Valid options: latest, stable
# Default: latest
AZURACAST_VERSION=stable
NGINX_TIMEOUT=1800
# HTTP Port
# The main port AzuraCast listens to for insecure HTTP connections.
# Default: 80
AZURACAST_HTTP_PORT={{ azuracast_http_port }}
# HTTPS Port
# The main port AzuraCast listens to for secure HTTPS connections.
# Default: 443
AZURACAST_HTTPS_PORT={{ azuracast_https_port }}
# SFTP Port
# The port AzuraCast listens to for SFTP file management connections.
# Default: 2022
AZURACAST_SFTP_PORT=2022
# Station Ports
# The ports AzuraCast should listen to for station broadcasts and incoming DJ
# connections.
# Default: 8000,8005,8006,8010,8015,8016,8020,8025,8026,8030,8035,8036,8040,8045,8046,8050,8055,8056,8060,8065,8066,8070,8075,8076,8090,8095,8096,8100,8105,8106,8110,8115,8116,8120,8125,8126,8130,8135,8136,8140,8145,8146,8150,8155,8156,8160,8165,8166,8170,8175,8176,8180,8185,8186,8190,8195,8196,8200,8205,8206,8210,8215,8216,8220,8225,8226,8230,8235,8236,8240,8245,8246,8250,8255,8256,8260,8265,8266,8270,8275,8276,8280,8285,8286,8290,8295,8296,8300,8305,8306,8310,8315,8316,8320,8325,8326,8330,8335,8336,8340,8345,8346,8350,8355,8356,8360,8365,8366,8370,8375,8376,8380,8385,8386,8390,8395,8396,8400,8405,8406,8410,8415,8416,8420,8425,8426,8430,8435,8436,8440,8445,8446,8450,8455,8456,8460,8465,8466,8470,8475,8476,8480,8485,8486,8490,8495,8496
AZURACAST_STATION_PORTS=10000,10005,10006,10010,10015,10016,10020,10025,10026,10030,10035,10036,10040,10045,10046,10050,10055,10056,10060,10065,10066,10070,10075,10076,10080,10085,10086,10090,10095,10096,10100,10105,10106,10110,10115,10116,10120,10125,10126,10130,10135,10136,10140,10145,10146,10150,10155,10156,10160,10165,10166,10170,10175,10176,10180,10185,10186,10190,10195,10196,10200,10205,10206,10210,10215,10216,10220,10225,10226,10230,10235,10236,10240,10245,10246,10250,10255,10256,10260,10265,10266,10270,10275,10276,10280,10285,10286,10290,10295,10296,10300,10305,10306,10310,10315,10316,10320,10325,10326,10330,10335,10336,10340,10345,10346,10350,10355,10356,10360,10365,10366,10370,10375,10376,10380,10385,10386,10390,10395,10396,10400,10405,10406,10410,10415,10416,10420,10425,10426,10430,10435,10436,10440,10445,10446,10450,10455,10456,10460,10465,10466,10470,10475,10476,10480,10485,10486,10490,10495,10496
# Docker User UID
# Set the UID of the user running inside the Docker containers. Matching this
# with your host UID can fix permission issues.
# Default: 1000
AZURACAST_PUID=1000
# Docker User GID
# Set the GID of the user running inside the Docker containers. Matching this
# with your host GID can fix permission issues.
# Default: 1000
AZURACAST_PGID=1000
# Advanced: Use Privileged Docker Settings
# Default: true
AZURACAST_COMPOSE_PRIVILEGED=true

View file

@ -0,0 +1,161 @@
#
# AzuraCast Customization
#
# The application environment.
# Valid options: production, development, testing
APPLICATION_ENV=production
# Manually modify the logging level.
# This allows you to log debug-level errors temporarily (for problem-solving) or reduce
# the volume of logs that are produced by your installation, without needing to modify
# whether your installation is a production or development instance.
# Valid options: debug, info, notice, warning, error, critical, alert, emergency
# LOG_LEVEL=notice
# Enable the composer "merge" functionality to combine the main application's
# composer.json file with any plugins' composer files.
# This can have performance implications, so you should only use it if
# you use one or more plugins with their own Composer dependencies.
# Valid options: true, false
COMPOSER_PLUGIN_MODE=false
# The minimum port number to use when automatically assigning ports to a station.
# By default, this matches the first forwarded port on the "stations" container.
# You can modify this variable if your station port range is different.
# Be sure to also forward the necessary ports via `docker-compose.yml`
# (and nginx, if you want to use the built-in port-80/443 proxy)!
AUTO_ASSIGN_PORT_MIN=8000
# The maximum port number to use when automatically assigning ports to a station.
# See AUTO_ASSIGN_PORT_MIN.
AUTO_ASSIGN_PORT_MAX=8499
# This allows you to debug Slim Application Errors you may encounter
# By default, this is disabled to prevent users from seeing privileged information
# Please report any Slim Application Error logs to the development team on GitHub
# Valid options: true, false
SHOW_DETAILED_ERRORS=false
#
# Database Configuration
# --
# Once the database has been installed, DO NOT CHANGE these values!
#
# The host to connect to. Leave this as the default value unless you're connecting
# to an external database server.
# Default: localhost
# MYSQL_HOST=localhost
# The port to connect to. Leave this as the default value unless you're connecting
# to an external database server.
# Default: 3306
# MYSQL_PORT=3306
# The username AzuraCast will use to connect to the database.
# Default: azuracast
# MYSQL_USER=azuracast
# The password AzuraCast will use to connect to the database.
# By default, the database is not exposed to the Internet at all and this is only
# an internal password used by the service itself.
# Default: azur4c457
MYSQL_PASSWORD=azur4c457
# The name of the AzuraCast database.
# Default: azuracast
# MYSQL_DATABASE=azuracast
# Automatically generate a random root password upon the first database spin-up.
# This password will be visible in the mariadb container's logs.
# Default: yes
MYSQL_RANDOM_ROOT_PASSWORD=yes
# Log slower queries for the purpose of diagnosing issues. Only turn this on when
# you need to, by uncommenting this and switching it to 1.
# To read the slow query log once enabled, run:
# docker-compose exec mariadb slow_queries
# Default: 0
# MYSQL_SLOW_QUERY_LOG=0
# Set the maximum number of allowed connections to the database. Increase this value
# if you are seeing the `Too many connections` error in the logs.
# Default: 100
# MYSQL_MAX_CONNECTIONS=100
#
# Redis Configuration
#
# Uncomment these fields if you are using a third-party Redis host instead of the one provided with AzuraCast.
# Do not modify these fields if you are using the standard AzuraCast Redis host.
#
# Whether to use the Redis cache; if set to false, will disable Redis and use flatfile cache instead.
# Default: true
# ENABLE_REDIS=true
# Name of the Redis host.
# Default: localhost
# REDIS_HOST=localhost
# Port to connect to on the Redis host.
# Default: 6379
# REDIS_PORT=6379
# Database index to use on the Redis host.
# Default: 1
# REDIS_DB=1
#
# Advanced Configuration
#
# PHP's maximum POST body size and max upload filesize.
# PHP_MAX_FILE_SIZE=25M
# PHP's maximum memory limit.
# PHP_MEMORY_LIMIT=128M
# PHP's maximum script execution time (in seconds).
# PHP_MAX_EXECUTION_TIME=30
# The maximum execution time (and lock timeout) for the 15-second, 1-minute and 5-minute synchronization tasks.
# SYNC_SHORT_EXECUTION_TIME=600
# The maximum execution time (and lock timeout) for the 1-hour synchronization task.
# SYNC_LONG_EXECUTION_TIME=1800
# Maximum number of PHP-FPM worker processes to spawn.
# PHP_FPM_MAX_CHILDREN=5
#
# PHP-SPX profiling extension Configuration
#
# These environment variables allow you to enable and configure the PHP-SPX profiling extension
# which can be helpful when debugging resource issues in AzuraCast.
#
# The profiling dashboard can be accessed by visiting https://yourdomain.com/?SPX_KEY=dev&SPX_UI_URI=/
# If you change the PROFILING_EXTENSION_HTTP_KEY variable, change the value for SPX_KEY accordingly.
#
# Enable the profiling extension.
# Profiling data can be viewed by visiting http://your-azuracast-site/?SPX_KEY=dev&SPX_UI_URI=/
# Default: 0
# PROFILING_EXTENSION_ENABLED=0
# Profile ALL requests made to this account.
# This will have significant performance impact on your installation and should only be used in test circumstances.
# Default: 0
# PROFILING_EXTENSION_ALWAYS_ON=0
# Configure the value for the SPX_KEY parameter needed to access the profiling dashboard
# Default: dev
# PROFILING_EXTENSION_HTTP_KEY=dev
# Configure the IP whitelist for the profiling dashboard
# By default only localhost is allowed to access this page.
# Uncomment this line to enable access for you.
# Default: 127.0.0.1
# PROFILING_EXTENSION_HTTP_IP_WHITELIST=*

View file

@ -0,0 +1,214 @@
#
# AzuraCast Docker Compose Configuration File
#
# When updating, you will be prompted to replace this file with a new
# version; you should do this whenever possible to take advantage of
# new updates.
#
# If you need to customize this file, you can create a new file named:
# docker-compose.override.yml
# with any changes you need to make.
#
services:
web:
container_name: azuracast
image: "ghcr.io/azuracast/azuracast:${AZURACAST_VERSION:-latest}"
# Want to customize the HTTP/S ports? Follow the instructions here:
# https://docs.azuracast.com/en/administration/docker#using-non-standard-ports
ports:
- '${AZURACAST_HTTP_PORT:-80}:80'
- '${AZURACAST_HTTPS_PORT:-443}:443'
- '${AZURACAST_SFTP_PORT:-2022}:2022'
# This default mapping is the outgoing and incoming ports for the first 50 stations.
# You can override this port mapping in your own docker-compose.override.yml file.
# For instructions, see:
# https://docs.azuracast.com/en/administration/docker#expanding-the-station-port-range
- '8000:8000'
- '8005:8005'
- '8006:8006'
- '8010:8010'
- '8015:8015'
- '8016:8016'
- '8020:8020'
- '8025:8025'
- '8026:8026'
- '8030:8030'
- '8035:8035'
- '8036:8036'
- '8040:8040'
- '8045:8045'
- '8046:8046'
- '8050:8050'
- '8055:8055'
- '8056:8056'
- '8060:8060'
- '8065:8065'
- '8066:8066'
- '8070:8070'
- '8075:8075'
- '8076:8076'
- '8090:8090'
- '8095:8095'
- '8096:8096'
- '8100:8100'
- '8105:8105'
- '8106:8106'
- '8110:8110'
- '8115:8115'
- '8116:8116'
- '8120:8120'
- '8125:8125'
- '8126:8126'
- '8130:8130'
- '8135:8135'
- '8136:8136'
- '8140:8140'
- '8145:8145'
- '8146:8146'
- '8150:8150'
- '8155:8155'
- '8156:8156'
- '8160:8160'
- '8165:8165'
- '8166:8166'
- '8170:8170'
- '8175:8175'
- '8176:8176'
- '8180:8180'
- '8185:8185'
- '8186:8186'
- '8190:8190'
- '8195:8195'
- '8196:8196'
- '8200:8200'
- '8205:8205'
- '8206:8206'
- '8210:8210'
- '8215:8215'
- '8216:8216'
- '8220:8220'
- '8225:8225'
- '8226:8226'
- '8230:8230'
- '8235:8235'
- '8236:8236'
- '8240:8240'
- '8245:8245'
- '8246:8246'
- '8250:8250'
- '8255:8255'
- '8256:8256'
- '8260:8260'
- '8265:8265'
- '8266:8266'
- '8270:8270'
- '8275:8275'
- '8276:8276'
- '8280:8280'
- '8285:8285'
- '8286:8286'
- '8290:8290'
- '8295:8295'
- '8296:8296'
- '8300:8300'
- '8305:8305'
- '8306:8306'
- '8310:8310'
- '8315:8315'
- '8316:8316'
- '8320:8320'
- '8325:8325'
- '8326:8326'
- '8330:8330'
- '8335:8335'
- '8336:8336'
- '8340:8340'
- '8345:8345'
- '8346:8346'
- '8350:8350'
- '8355:8355'
- '8356:8356'
- '8360:8360'
- '8365:8365'
- '8366:8366'
- '8370:8370'
- '8375:8375'
- '8376:8376'
- '8380:8380'
- '8385:8385'
- '8386:8386'
- '8390:8390'
- '8395:8395'
- '8396:8396'
- '8400:8400'
- '8405:8405'
- '8406:8406'
- '8410:8410'
- '8415:8415'
- '8416:8416'
- '8420:8420'
- '8425:8425'
- '8426:8426'
- '8430:8430'
- '8435:8435'
- '8436:8436'
- '8440:8440'
- '8445:8445'
- '8446:8446'
- '8450:8450'
- '8455:8455'
- '8456:8456'
- '8460:8460'
- '8465:8465'
- '8466:8466'
- '8470:8470'
- '8475:8475'
- '8476:8476'
- '8480:8480'
- '8485:8485'
- '8486:8486'
- '8490:8490'
- '8495:8495'
- '8496:8496'
env_file: azuracast.env
environment:
LANG: ${LANG:-en_US.UTF-8}
AZURACAST_DC_REVISION: 14
AZURACAST_VERSION: ${AZURACAST_VERSION:-latest}
AZURACAST_SFTP_PORT: ${AZURACAST_SFTP_PORT:-2022}
NGINX_TIMEOUT: ${NGINX_TIMEOUT:-1800}
LETSENCRYPT_HOST: ${LETSENCRYPT_HOST:-}
LETSENCRYPT_EMAIL: ${LETSENCRYPT_EMAIL:-}
PUID: ${AZURACAST_PUID:-1000}
PGID: ${AZURACAST_PGID:-1000}
volumes:
- www_uploads:/var/azuracast/uploads
- station_data:/var/azuracast/stations
- shoutcast2_install:/var/azuracast/servers/shoutcast2
- stereo_tool_install:/var/azuracast/servers/stereo_tool
- geolite_install:/var/azuracast/geoip
- sftpgo_data:/var/azuracast/sftpgo/persist
- backups:/var/azuracast/backups
- acme:/var/azuracast/acme
- db_data:/var/lib/mysql
restart: unless-stopped
ulimits: &default-ulimits
nofile:
soft: 65536
hard: 65536
logging: &default-logging
options:
max-size: "1m"
max-file: "5"
volumes:
db_data: { }
acme: { }
shoutcast2_install: { }
stereo_tool_install: { }
geolite_install: { }
sftpgo_data: { }
station_data: { }
www_uploads: { }
backups: { }
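The long port list above publishes only the first 50 stations' broadcast/DJ ports. Per the header comment, site-specific changes belong in docker-compose.override.yml rather than in this file; a minimal bash sketch follows, where the extra '8500' mapping is purely an illustrative value:

   # Run from the directory holding docker-compose.yml; port 8500 is a hypothetical example.
   cat > docker-compose.override.yml <<'EOF'
   services:
     web:
       ports:
         - '8500:8500'
   EOF
   docker-compose up -d    # recreate the 'web' container so the extra port is published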

View file

@ -0,0 +1,840 @@
#!/usr/bin/env bash
# shellcheck disable=SC2145,SC2178,SC2120,SC2162
# Functions to manage .env files
__dotenv=
__dotenv_file=
__dotenv_cmd=.env
.env() {
REPLY=()
[[ $__dotenv_file || ${1-} == -* ]] || .env.--file .env || return
if declare -F -- ".env.${1-}" >/dev/null; then
.env."$@"
return
fi
return 64
}
.env.-f() { .env.--file "$@"; }
.env.get() {
.env::arg "get requires a key" "$@" &&
[[ "$__dotenv" =~ ^(.*(^|$'\n'))([ ]*)"$1="(.*)$ ]] &&
REPLY=${BASH_REMATCH[4]%%$'\n'*} && REPLY=${REPLY%"${REPLY##*[![:space:]]}"}
}
.env.parse() {
local line key
while IFS= read -r line; do
line=${line#"${line%%[![:space:]]*}"} # trim leading whitespace
line=${line%"${line##*[![:space:]]}"} # trim trailing whitespace
if [[ ! "$line" || "$line" == '#'* ]]; then continue; fi
if (($#)); then
for key; do
if [[ $key == "${line%%=*}" ]]; then
REPLY+=("$line")
break
fi
done
else
REPLY+=("$line")
fi
done <<<"$__dotenv"
((${#REPLY[@]}))
}
.env.export() { ! .env.parse "$@" || export "${REPLY[@]}"; }
.env.set() {
.env::file load || return
local key saved=$__dotenv
while (($#)); do
key=${1#+}
key=${key%%=*}
if .env.get "$key"; then
REPLY=()
if [[ $1 == +* ]]; then
shift
continue # skip if already found
elif [[ $1 == *=* ]]; then
__dotenv=${BASH_REMATCH[1]}${BASH_REMATCH[3]}$1$'\n'${BASH_REMATCH[4]#*$'\n'}
else
__dotenv=${BASH_REMATCH[1]}${BASH_REMATCH[4]#*$'\n'}
continue # delete all occurrences
fi
elif [[ $1 == *=* ]]; then
__dotenv+="${1#+}"$'\n'
fi
shift
done
[[ $__dotenv == "$saved" ]] || .env::file save
}
.env.puts() { echo "${1-}" >>"$__dotenv_file" && __dotenv+="$1"$'\n'; }
.env.generate() {
.env::arg "key required for generate" "$@" || return
.env.get "$1" && return || REPLY=$("${@:2}") || return
.env::one "generate: ouptut of '${*:2}' has more than one line" "$REPLY" || return
.env.puts "$1=$REPLY"
}
.env.--file() {
.env::arg "filename required for --file" "$@" || return
__dotenv_file=$1
.env::file load || return
(($# < 2)) || .env "${@:2}"
}
.env::arg() { [[ "${2-}" ]] || {
echo "$__dotenv_cmd: $1" >&2
return 64
}; }
.env::one() { [[ "$2" != *$'\n'* ]] || .env::arg "$1"; }
.env::file() {
local REPLY=$__dotenv_file
case "$1" in
load)
__dotenv=
! [[ -f "$REPLY" ]] || __dotenv="$(<"$REPLY")"$'\n' || return
;;
save)
if [[ -L "$REPLY" ]] && declare -F -- realpath.resolved >/dev/null; then
realpath.resolved "$REPLY"
fi
{ [[ ! -f "$REPLY" ]] || cp -p "$REPLY" "$REPLY.bak"; } &&
printf %s "$__dotenv" >"$REPLY.bak" && mv "$REPLY.bak" "$REPLY"
;;
esac
}
# Shortcut to convert semver version (x.yyy.zzz) into a comparable number.
version-number() {
echo "$@" | awk -F. '{ printf("%03d%03d%03d\n", $1,$2,$3); }'
}
# Get the current release channel for AzuraCast
get-release-channel() {
local AZURACAST_VERSION="latest"
if [[ -f .env ]]; then
.env --file .env get AZURACAST_VERSION
AZURACAST_VERSION="${REPLY:-latest}"
fi
echo "$AZURACAST_VERSION"
}
get-release-branch-name() {
if [[ $(get-release-channel) == "stable" ]]; then
echo "stable"
else
echo "main"
fi
}
# This is a general-purpose function to ask Yes/No questions in Bash, either
# with or without a default answer. It keeps repeating the question until it
# gets a valid answer.
ask() {
# https://djm.me/ask
local prompt default reply
while true; do
if [[ "${2:-}" == "Y" ]]; then
prompt="Y/n"
default=Y
elif [[ "${2:-}" == "N" ]]; then
prompt="y/N"
default=N
else
prompt="y/n"
default=
fi
# Ask the question (not using "read -p" as it uses stderr not stdout)
echo -n "$1 [$prompt] "
read reply
# Default?
if [[ -z "$reply" ]]; then
reply=${default}
fi
# Check if the reply is valid
case "$reply" in
Y* | y*) return 0 ;;
N* | n*) return 1 ;;
esac
done
}
# Generate a prompt to set an environment file value.
envfile-set() {
local VALUE INPUT
.env --file .env
.env get "$1"
VALUE=${REPLY:-$2}
echo -n "$3 [$VALUE]: "
read INPUT
VALUE=${INPUT:-$VALUE}
.env set "${1}=${VALUE}"
}
#
# Configure the ports used by AzuraCast.
#
setup-ports() {
envfile-set "AZURACAST_HTTP_PORT" "80" "Port to use for HTTP connections"
envfile-set "AZURACAST_HTTPS_PORT" "443" "Port to use for HTTPS connections"
envfile-set "AZURACAST_SFTP_PORT" "2022" "Port to use for SFTP connections"
}
#
# Configure release mode settings.
#
setup-release() {
if [[ ! -f .env ]]; then
curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/main/sample.env -o .env
fi
local OLD_RELEASE_CHANNEL
.env --file .env get AZURACAST_VERSION
OLD_RELEASE_CHANNEL="${REPLY:-latest}"
local AZURACAST_VERSION="${OLD_RELEASE_CHANNEL}"
if [[ $AZURACAST_VERSION == "latest" ]]; then
if ask "Your current release channel is 'Rolling Release'. Switch to 'Stable' release channel?" N; then
AZURACAST_VERSION="stable"
fi
elif [[ $AZURACAST_VERSION == "stable" ]]; then
if ask "Your current release channel is 'Stable'. Switch to 'Rolling Release' release channel?" N; then
AZURACAST_VERSION="latest"
fi
fi
.env --file .env set AZURACAST_VERSION=${AZURACAST_VERSION}
if [[ $AZURACAST_VERSION != $OLD_RELEASE_CHANNEL ]]; then
if ask "You should update the Docker Utility Script after changing release channels. Automatically update it now?" Y; then
update-self
fi
fi
}
check-install-requirements() {
local CURRENT_OS CURRENT_ARCH REQUIRED_COMMANDS SCRIPT_DIR
set -e
echo "Checking installation requirements for AzuraCast..."
CURRENT_OS=$(uname -s)
if [[ $CURRENT_OS == "Linux" ]]; then
echo -en "\e[32m[PASS]\e[0m Operating System: ${CURRENT_OS}\n"
else
echo -en "\e[41m[FAIL]\e[0m Operating System: ${CURRENT_OS}\n"
echo " You are running an unsupported operating system."
echo " Automated AzuraCast installation is not currently supported on this"
echo " operating system."
exit 1
fi
CURRENT_ARCH=$(uname -m)
if [[ $CURRENT_ARCH == "x86_64" ]]; then
echo -en "\e[32m[PASS]\e[0m Architecture: ${CURRENT_ARCH}\n"
elif [[ $CURRENT_ARCH == "aarch64" ]]; then
echo -en "\e[32m[PASS]\e[0m Architecture: ${CURRENT_ARCH}\n"
else
echo -en "\e[41m[FAIL]\e[0m Architecture: ${CURRENT_ARCH}\n"
echo " You are running an unsupported processor architecture."
echo " Automated AzuraCast installation is not currently supported on this "
echo " operating system."
exit 1
fi
REQUIRED_COMMANDS=(curl awk)
for COMMAND in "${REQUIRED_COMMANDS[@]}" ; do
if [[ $(command -v "$COMMAND") ]]; then
echo -en "\e[32m[PASS]\e[0m Command Present: ${COMMAND}\n"
else
echo -en "\e[41m[FAIL]\e[0m Command Present: ${COMMAND}\n"
echo " ${COMMAND} does not appear to be installed."
echo " Install ${COMMAND} using your host's package manager,"
echo " then continue installing using this script."
exit 1
fi
done
if [[ $EUID -ne 0 ]]; then
if [[ $(command -v sudo) ]]; then
echo -en "\e[32m[PASS]\e[0m User Permissions\n"
else
echo -en "\e[41m[FAIL]\e[0m User Permissions\n"
echo " You are not currently the root user, and "
echo " 'sudo' does not appear to be installed."
echo " Install sudo using your host's package manager,"
echo " then continue installing using this script."
exit 1
fi
else
echo -en "\e[32m[PASS]\e[0m User Permissions\n"
fi
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
if [[ $SCRIPT_DIR == "/var/azuracast" ]]; then
echo -en "\e[32m[PASS]\e[0m Installation Directory\n"
else
echo -en "\e[93m[WARN]\e[0m Installation Directory\n"
echo " AzuraCast is not installed in /var/azuracast, as is recommended"
echo " for most installations. This will not prevent AzuraCast from"
echo " working, but you will need to update any instructions in our"
echo " documentation to reflect your current directory:"
echo " $SCRIPT_DIR"
fi
echo -en "\e[32m[PASS]\e[0m All requirements met!\n"
set +e
}
install-docker() {
set -e
curl -fsSL get.docker.com -o get-docker.sh
sh get-docker.sh
rm get-docker.sh
if [[ $EUID -ne 0 ]]; then
sudo usermod -aG docker "$(whoami)"
echo "You must log out or restart to apply necessary Docker permissions changes."
echo "Restart, then continue installing using this script."
exit 1
fi
set +e
}
install-docker-compose() {
set -e
echo "Installing Docker Compose..."
curl -fsSL -o docker-compose https://github.com/docker/compose/releases/download/v2.4.1/docker-compose-linux-$(uname -m)
ARCHITECTURE=amd64
if [ "$(uname -m)" = "aarch64" ]; then
ARCHITECTURE=arm64
fi
curl -fsSL -o docker-compose-switch https://github.com/docker/compose-switch/releases/download/v1.0.4/docker-compose-linux-${ARCHITECTURE}
if [[ $EUID -ne 0 ]]; then
sudo chmod a+x ./docker-compose
sudo chmod a+x ./docker-compose-switch
sudo mv ./docker-compose /usr/libexec/docker/cli-plugins/docker-compose
sudo mv ./docker-compose-switch /usr/local/bin/docker-compose
else
chmod a+x ./docker-compose
chmod a+x ./docker-compose-switch
mv ./docker-compose /usr/libexec/docker/cli-plugins/docker-compose
mv ./docker-compose-switch /usr/local/bin/docker-compose
fi
echo "Docker Compose updated!"
set +e
}
run-installer() {
local AZURACAST_RELEASE_BRANCH
AZURACAST_RELEASE_BRANCH=$(get-release-branch-name)
if [[ ! -f .env ]]; then
curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/$AZURACAST_RELEASE_BRANCH/sample.env -o .env
fi
if [[ ! -f azuracast.env ]]; then
curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/$AZURACAST_RELEASE_BRANCH/azuracast.sample.env -o azuracast.env
fi
if [[ ! -f docker-compose.yml ]]; then
curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/$AZURACAST_RELEASE_BRANCH/docker-compose.sample.yml -o docker-compose.yml
fi
touch docker-compose.new.yml
local dc_config_test=$(docker-compose -f docker-compose.new.yml config 2>/dev/null)
if [ $? -ne 0 ]; then
if ask "Docker Compose needs to be updated to continue. Update to latest version?" Y; then
install-docker-compose
fi
fi
curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/$AZURACAST_RELEASE_BRANCH/docker-compose.installer.yml -o docker-compose.installer.yml
docker-compose -p azuracast_installer -f docker-compose.installer.yml pull
docker-compose -p azuracast_installer -f docker-compose.installer.yml run --rm installer install "$@"
rm docker-compose.installer.yml
}
#
# Run the initial installer of Docker and AzuraCast.
# Usage: ./docker.sh install
#
install() {
check-install-requirements
if [[ $(command -v docker) && $(docker --version) ]]; then
echo "Docker is already installed! Continuing..."
else
if ask "Docker does not appear to be installed. Install Docker now?" Y; then
install-docker
fi
fi
if [[ $(command -v docker-compose) ]]; then
echo "Docker Compose is already installed. Continuing..."
else
if ask "Docker Compose does not appear to be installed. Install Docker Compose now?" Y; then
install-docker-compose
fi
fi
setup-release
run-installer "$@"
# Installer creates a file at docker-compose.new.yml; copy it to the main spot.
if [[ -s docker-compose.new.yml ]]; then
if [[ -f docker-compose.yml ]]; then
rm docker-compose.yml
fi
mv docker-compose.new.yml docker-compose.yml
fi
# If this script is running as a non-root user, set the PUID/PGID in the environment vars appropriately.
if [[ $EUID -ne 0 ]]; then
.env --file .env set AZURACAST_PUID="$(id -u)"
.env --file .env set AZURACAST_PGID="$(id -g)"
fi
docker-compose pull
docker-compose run --rm web -- azuracast_install "$@"
docker-compose up -d
exit
}
install-dev() {
if [[ $(command -v docker) && $(docker --version) ]]; then
echo "Docker is already installed! Continuing..."
else
if ask "Docker does not appear to be installed. Install Docker now?" Y; then
install-docker
fi
fi
if [[ $(command -v docker-compose) ]]; then
echo "Docker Compose is already installed. Continuing..."
else
if ask "Docker Compose does not appear to be installed. Install Docker Compose now?" Y; then
install-docker-compose
fi
fi
if [[ ! -f docker-compose.yml ]]; then
cp docker-compose.sample.yml docker-compose.yml
fi
if [[ ! -f docker-compose.override.yml ]]; then
cp docker-compose.dev.yml docker-compose.override.yml
fi
if [[ ! -f .env ]]; then
cp dev.env .env
fi
if [[ ! -f azuracast.env ]]; then
cp azuracast.dev.env azuracast.env
echo "Customize azuracast.env file now before continuing. Re-run this command to continue installation."
exit
fi
# If this script is running as a non-root user, set the PUID/PGID in the environment vars appropriately.
if [[ $EUID -ne 0 ]]; then
.env --file .env set AZURACAST_PUID="$(id -u)"
.env --file .env set AZURACAST_PGID="$(id -g)"
fi
chmod 777 ./frontend/ ./web/ ./vendor/ \
./web/static/ ./web/static/api/ \
./web/static/dist/ ./web/static/img/
docker-compose build
docker-compose run --rm web -- azuracast_install "$@"
docker-compose -p azuracast_frontend -f docker-compose.frontend.yml build
docker-compose -p azuracast_frontend -f docker-compose.frontend.yml run --rm frontend npm run build
docker-compose up -d
exit
}
#
# Update the Docker images and codebase.
# Usage: ./docker.sh update
#
update() {
echo "[NOTICE] Before you continue, please make sure you have a recent snapshot of your system and or backed it up."
if ask "Are you ready to continue with the update?" Y; then
# Check for a new Docker Utility Script.
local AZURACAST_RELEASE_BRANCH
AZURACAST_RELEASE_BRANCH=$(get-release-branch-name)
curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/$AZURACAST_RELEASE_BRANCH/docker.sh -o docker.new.sh
local UTILITY_FILES_MATCH
UTILITY_FILES_MATCH="$(
cmp --silent docker.sh docker.new.sh
echo $?
)"
local UPDATE_UTILITY=0
if [[ ${UTILITY_FILES_MATCH} -ne 0 ]]; then
if ask "The Docker Utility Script has changed since your version. Update to latest version?" Y; then
UPDATE_UTILITY=1
fi
fi
if [[ ${UPDATE_UTILITY} -ne 0 ]]; then
mv docker.new.sh docker.sh
chmod a+x docker.sh
echo "A new Docker Utility Script has been downloaded."
echo "Please re-run the update process to continue."
exit
else
rm docker.new.sh
fi
run-installer --update "$@"
# Check for updated Docker Compose config.
local COMPOSE_FILES_MATCH
if [[ ! -s docker-compose.new.yml ]]; then
curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/$AZURACAST_RELEASE_BRANCH/docker-compose.sample.yml -o docker-compose.new.yml
fi
COMPOSE_FILES_MATCH="$(
cmp --silent docker-compose.yml docker-compose.new.yml
echo $?
)"
if [[ ${COMPOSE_FILES_MATCH} -ne 0 ]]; then
docker-compose -f docker-compose.new.yml pull
docker-compose down
cp docker-compose.yml docker-compose.backup.yml
mv docker-compose.new.yml docker-compose.yml
else
rm docker-compose.new.yml
docker-compose pull
docker-compose down
fi
docker-compose run --rm web -- azuracast_update "$@"
docker-compose up -d
if ask "Clean up all stopped Docker containers and images to save space?" Y; then
docker system prune -f
fi
echo "Update complete!"
fi
exit
}
#
# Update this Docker utility script.
# Usage: ./docker.sh update-self
#
update-self() {
local AZURACAST_RELEASE_BRANCH
AZURACAST_RELEASE_BRANCH=$(get-release-branch-name)
curl -fsSL https://raw.githubusercontent.com/AzuraCast/AzuraCast/$AZURACAST_RELEASE_BRANCH/docker.sh -o docker.sh
chmod a+x docker.sh
echo "New Docker utility script downloaded."
exit
}
#
# Run a CLI command inside the Docker container.
# Usage: ./docker.sh cli [command]
#
cli() {
docker-compose exec --user="azuracast" web azuracast_cli "$@"
exit
}
#
# Enter the bash terminal of the running web container.
# Usage: ./docker.sh bash
#
bash() {
docker-compose exec --user="azuracast" web bash
exit
}
#
# Enter the MariaDB database management terminal with the correct credentials.
#
db() {
local MYSQL_HOST MYSQL_PORT MYSQL_USER MYSQL_PASSWORD MYSQL_DATABASE
.env --file azuracast.env get MYSQL_HOST
MYSQL_HOST="${REPLY:-localhost}"
.env --file azuracast.env get MYSQL_PORT
MYSQL_PORT="${REPLY:-3306}"
.env --file azuracast.env get MYSQL_USER
MYSQL_USER="${REPLY:-azuracast}"
.env --file azuracast.env get MYSQL_PASSWORD
MYSQL_PASSWORD="${REPLY:-azur4c457}"
.env --file azuracast.env get MYSQL_DATABASE
MYSQL_DATABASE="${REPLY:-azuracast}"
docker-compose exec --user="mysql" web mysql --user=${MYSQL_USER} --password=${MYSQL_PASSWORD} \
--host=${MYSQL_HOST} --port=${MYSQL_PORT} --database=${MYSQL_DATABASE}
exit
}
#
# Back up the Docker volumes to a .tar.gz file.
# Usage:
# ./docker.sh backup [/custom/backup/dir/custombackupname.zip]
#
backup() {
local BACKUP_PATH BACKUP_DIR BACKUP_FILENAME BACKUP_EXT
BACKUP_PATH=$(readlink -f ${1:-"./backup.tar.gz"})
BACKUP_DIR=$(dirname -- "$BACKUP_PATH")
BACKUP_FILENAME=$(basename -- "$BACKUP_PATH")
BACKUP_EXT="${BACKUP_FILENAME##*.}"
shift
# Prepare permissions
if [[ $EUID -ne 0 ]]; then
.env --file .env set AZURACAST_PUID="$(id -u)"
.env --file .env set AZURACAST_PGID="$(id -g)"
fi
docker-compose exec --user="azuracast" web azuracast_cli azuracast:backup "/var/azuracast/backups/${BACKUP_FILENAME}" "$@"
# Move from Docker volume to local filesystem
docker run --rm -v "azuracast_backups:/backup_src" \
-v "$BACKUP_DIR:/backup_dest" \
busybox mv "/backup_src/${BACKUP_FILENAME}" "/backup_dest/${BACKUP_FILENAME}"
}
#
# Restore an AzuraCast backup into Docker.
# Usage:
# ./docker.sh restore [/custom/backup/dir/custombackupname.zip]
#
restore() {
if [[ ! -f .env ]] || [[ ! -f azuracast.env ]]; then
echo "AzuraCast hasn't been installed yet on this server."
echo "You should run './docker.sh install' first before restoring."
exit 1
fi
if ask "Restoring will remove any existing AzuraCast installation data, replacing it with your backup. Continue?" Y; then
if [[ $1 != "" ]]; then
local BACKUP_PATH BACKUP_DIR BACKUP_FILENAME BACKUP_EXT
BACKUP_PATH=$(readlink -f ${1:-"./backup.tar.gz"})
BACKUP_DIR=$(dirname -- "$BACKUP_PATH")
BACKUP_FILENAME=$(basename -- "$BACKUP_PATH")
BACKUP_EXT="${BACKUP_FILENAME##*.}"
shift
if [[ ! -f ${BACKUP_PATH} ]]; then
echo "File '${BACKUP_PATH}' does not exist. Nothing to restore."
exit 1
fi
docker-compose down -v
docker volume create azuracast_backups
# Move from local filesystem to Docker volume
docker run --rm -v "$BACKUP_DIR:/backup_src" \
-v "azuracast_backups:/backup_dest" \
busybox mv "/backup_src/${BACKUP_FILENAME}" "/backup_dest/${BACKUP_FILENAME}"
# Prepare permissions
if [[ $EUID -ne 0 ]]; then
.env --file .env set AZURACAST_PUID="$(id -u)"
.env --file .env set AZURACAST_PGID="$(id -g)"
fi
docker-compose run --rm web -- azuracast_restore "/var/azuracast/backups/${BACKUP_FILENAME}" "$@"
# Move file back from volume to local filesystem
docker run --rm -v "azuracast_backups:/backup_src" \
-v "$BACKUP_DIR:/backup_dest" \
busybox mv "/backup_src/${BACKUP_FILENAME}" "/backup_dest/${BACKUP_FILENAME}"
docker-compose down
docker-compose up -d
else
docker-compose down
# Remove all volumes except the backup volume.
docker volume rm -f $(docker volume ls | grep -v "azuracast_backups" | awk 'NR>1 {print $2}')
docker-compose run --rm web -- azuracast_restore "$@"
docker-compose down
docker-compose up -d
fi
fi
exit
}
#
# Restore the Docker volumes from a legacy backup format .tar.gz file.
# Usage:
# ./docker.sh restore-legacy [/custom/backup/dir/custombackupname.tar.gz]
#
restore-legacy() {
local APP_BASE_DIR BACKUP_PATH BACKUP_DIR BACKUP_FILENAME
APP_BASE_DIR=$(pwd)
BACKUP_PATH=${1:-"./backup.tar.gz"}
BACKUP_DIR=$(cd "$(dirname "$BACKUP_PATH")" && pwd)
BACKUP_FILENAME=$(basename "$BACKUP_PATH")
cd "$APP_BASE_DIR" || exit
if [ -f "$BACKUP_PATH" ]; then
docker-compose down
docker volume rm azuracast_db_data azuracast_station_data
docker volume create azuracast_db_data
docker volume create azuracast_station_data
docker run --rm -v "$BACKUP_DIR:/backup" \
-v azuracast_db_data:/azuracast/db \
-v azuracast_station_data:/azuracast/stations \
busybox tar zxvf "/backup/$BACKUP_FILENAME"
docker-compose up -d
else
echo "File $BACKUP_PATH does not exist in this directory. Nothing to restore."
exit 1
fi
exit
}
#
# DEVELOPER TOOL:
# Access the static console as a developer.
# Usage: ./docker.sh static [static_container_command]
#
static() {
docker-compose -f docker-compose.frontend.yml down -v
docker-compose -f docker-compose.frontend.yml build
docker-compose --env-file=.env -f docker-compose.frontend.yml run --rm frontend "$@"
exit
}
#
# Stop all Docker containers and remove related volumes.
# Usage: ./docker.sh uninstall
#
uninstall() {
if ask "This operation is destructive and will wipe your existing Docker containers. Continue?" N; then
docker-compose down -v
docker-compose rm -f
docker volume prune -f
echo "All AzuraCast Docker containers and volumes were removed."
echo "To remove *all* Docker containers and volumes, run:"
echo " docker stop \$(docker ps -a -q)"
echo " docker rm \$(docker ps -a -q)"
echo " docker volume prune -f"
echo ""
fi
exit
}
#
# LetsEncrypt: Now managed via the Web UI.
#
setup-letsencrypt() {
echo "LetsEncrypt is now managed from within the web interface."
}
letsencrypt-create() {
setup-letsencrypt
exit
}
#
# Utility script to facilitate switching ports.
# Usage: ./docker.sh change-ports
#
change-ports() {
setup-ports
docker-compose down
docker-compose up -d
}
#
# Helper scripts for basic Docker Compose functions
#
up() {
echo "Starting up AzuraCast services..."
docker-compose up -d
}
down() {
echo "Shutting down AzuraCast services..."
docker-compose down
}
restart() {
down
up
}
# Ensure we're in the same directory as this script.
cd "$( dirname "${BASH_SOURCE[0]}" )" || exit
"$@"

View file

@ -0,0 +1 @@
The three files found here are mentioned in docker.sh's run-installer(), with 'sample.' added to each filename.

View file

@ -0,0 +1,11 @@
COMPOSE_PROJECT_NAME=azuracast
AZURACAST_HTTP_PORT=80
AZURACAST_HTTPS_PORT=443
AZURACAST_SFTP_PORT=2022
AZURACAST_PUID=1000
AZURACAST_PGID=1000
NGINX_TIMEOUT=1800
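These port values need not be edited by hand later: the Docker utility script's change-ports helper rewrites .env and recreates the containers. A sketch, run from the AzuraCast install directory:

   ./docker.sh change-ports    # prompts for new HTTP, HTTPS and SFTP ports, then restarts the stack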

View file

@ -13,60 +13,94 @@
Calibre-Web README
==================

This Ansible role installs
`Calibre-Web <https://github.com/janeczku/calibre-web#readme>`_ as a modern
client-server alternative to Calibre, for your
`Internet-in-a-Box (IIAB) <https://internet-in-a-box.org>`_.

Calibre-Web provides a clean web interface for students to browse, read and
download e-books using a
`Calibre-compatible database <https://manual.calibre-ebook.com/db_api.html>`_.
Teachers upload e-books, adjust e-book metadata, and create custom "bookshelf"
collections — to help students build the best local community library!

.. image:: https://www.yankodesign.com/images/design_news/2019/05/221758/luo_beetle_library_8.jpg

🍒 GURU TIPS 🍒

* Calibre-Web takes advantage of Calibre's own `/usr/bin/ebook-convert
  <https://manual.calibre-ebook.com/generated/en/ebook-convert.html>`_ program
  if that's installed — so consider also installing
  `Calibre <https://calibre-ebook.com/whats-new>`_ during your IIAB
  installation — *if you tolerate the weighty ~1 GB (of graphical OS libraries)
  that Calibre mandates!*
* If you choose to also install Calibre (e.g. by running
  ``sudo apt install calibre``) then you'll get useful e-book
  importing/organizing tools like
  `/usr/bin/calibredb <https://manual.calibre-ebook.com/generated/en/calibredb.html>`_.

Install It
----------

Install Calibre-Web by setting these 2 variables in
`/etc/iiab/local_vars.yml <https://wiki.iiab.io/go/FAQ#What_is_local_vars.yml_and_how_do_I_customize_it%3F>`_::

   calibreweb_install: True
   calibreweb_enabled: True

Then install IIAB (`download.iiab.io <https://download.iiab.io>`_). Or if
IIAB's already installed, run::

   cd /opt/iiab/iiab
   sudo ./runrole calibre-web

NOTE: Calibre-Web's Ansible role (playbook) in
`/opt/iiab/iiab/roles <https://github.com/iiab/iiab/tree/master/roles>`_ is
``calibre-web`` which contains a hyphen — *whereas its Ansible variables*
``calibreweb_*`` *do NOT contain a hyphen!*

Using It
--------

Try Calibre-Web on your own IIAB by browsing to http://box/books (or
http://box.lan/books).

*Students* access it without a password (to read and download books).

*Teachers* add and arrange books using an administrative account, by clicking
**Guest** then logging in with::

   Username: Admin
   Password: changeme

🍒 GURU TIPS 🍒

* If Calibre-Web's configuration file (app.db) goes missing, the administrative
  account will revert to::

   Username: admin
   Password: admin123

* If you lose your password, you can change it with the
  ``-s [username]:[newpassword]`` command-line option:
  https://github.com/janeczku/calibre-web/wiki/FAQ#what-do-i-do-if-i-lose-my-admin-password

Configuration
-------------

To configure Calibre-Web browse to http://box/books then click **Guest** to log
in as user **Admin** (default passwords above!)

Then click the leftmost **Admin** button to administer — considering all 3
**Configuration** buttons further below.

These critical settings are stored in::

   /library/calibre-web/config/app.db

Whereas your e-book metadata is stored in a Calibre-style database::

   /library/calibre-web/metadata.db
@ -74,26 +108,56 @@ See also::
   /library/calibre-web/metadata_db_prefs_backup.json

Finally, take note of Calibre-Web's
`FAQ <https://github.com/janeczku/calibre-web/wiki/FAQ>`_ and official docs on
its
`Runtime Configuration Options <https://github.com/janeczku/calibre-web/wiki/Configuration>`_
and
`Command Line Interface <https://github.com/janeczku/calibre-web/wiki/Command-Line-Interface>`_.

Backend
-------

You can manage the backend Calibre-Web server with systemd commands like::

   systemctl status calibre-web
   systemctl stop calibre-web
   systemctl restart calibre-web

Run all commands
`as root <https://unix.stackexchange.com/questions/3063/how-do-i-run-a-command-as-the-system-administrator-root>`_.

Errors and warnings can be seen if you run::

   journalctl -u calibre-web

Log verbosity level can be
`adjusted <https://github.com/janeczku/calibre-web/wiki/Configuration#logfile-configuration>`_
within Calibre-Web's **Configuration > Basic Configuration > Logfile
Configuration**.

Finally, http://box/live/stats (Calibre-Web's **About** page) can be a very
useful list of ~42 `Calibre-Web dependencies <https://github.com/janeczku/calibre-web/wiki/Dependencies-in-Calibre-Web-Linux-and-Windows>`_
(mostly Python packages, and the version number of each that's installed).

Back Up Everything
------------------

Please back up the entire folder ``/library/calibre-web`` before upgrading —
as it contains your Calibre-Web content **and** configuration settings!

Upgrading
---------

"Reinstalling" Calibre-Web automatically installs the latest version — if your
Internet-in-a-Box (IIAB) is online.

But first: back up your content **and** configuration settings, as explained above.

**Also move your /library/calibre-web/config/app.db AND/OR
/library/calibre-web/metadata.db out of the way — if you're sure you want to
fully reset your Calibre-Web settings (to install defaults) AND/OR remove all
e-book metadata! Then run, as root**::

   cd /opt/iiab/iiab
   ./runrole --reinstall calibre-web
@ -104,7 +168,7 @@ manually::
   cd /usr/local/calibre-web-py3
   git pull

This older way is *no longer recommended*::

   cd /opt/iiab/iiab
   ./iiab-install --reinstall    # OR: ./iiab-configure

View file

@ -14,8 +14,10 @@
# All above are set in: github.com/iiab/iiab/blob/master/vars/default_vars.yml
# If nec, change them by editing /etc/iiab/local_vars.yml prior to installing!
calibreweb_repo_url: https://github.com/janeczku/calibre-web
calibreweb_version: master    # WAS: master, 0.6.4, 0.6.5, 0.6.6, 0.6.7, 0.6.8, 0.6.9
calibreweb_venv_wipe: False    # 2023-12-04: NEW default TDD (Test-Driven Dev!)
calibreweb_venv_path: /usr/local/calibre-web-py3
calibreweb_exec_path: "{{ calibreweb_venv_path }}/cps.py"
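As the comment above notes, override these defaults in /etc/iiab/local_vars.yml rather than editing this file. A minimal sketch, run as root, where the pinned version and fork URL are purely illustrative values:

   cat >> /etc/iiab/local_vars.yml <<'EOF'
   calibreweb_version: 0.6.21
   calibreweb_repo_url: https://github.com/iiab/calibre-web
   EOF
   cd /opt/iiab/iiab
   ./runrole --reinstall calibre-web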

Binary file not shown.

View file

@ -0,0 +1,52 @@
- name: Enable & Restart 'calibre-web' systemd service, if calibreweb_enabled
systemd:
name: calibre-web
daemon_reload: yes
enabled: yes
state: restarted
when: calibreweb_enabled
- name: Disable & Stop 'calibre-web' systemd service, if not calibreweb_enabled
systemd:
name: calibre-web
enabled: no
state: stopped
when: not calibreweb_enabled
# TO DO: restore http://box/libros & http://box/livres etc, alongside English (#2195)
# RELATED: https://github.com/janeczku/calibre-web/wiki/Setup-Reverse-Proxy
- name: Enable http://box{{ calibreweb_url1 }} via NGINX, by installing {{ nginx_conf_dir }}/calibre-web-nginx.conf from template # http://box/books
template:
src: calibre-web-nginx.conf.j2
dest: "{{ nginx_conf_dir }}/calibre-web-nginx.conf" # /etc/nginx/conf.d
when: calibreweb_enabled
- name: If enabling with Calibre-Web enhanced for large audio/video "books" too, also append onto calibre-web-nginx.conf AND symlink /library/www/html/calibre-web -> /library/calibre-web (WIP)
shell: |
if [ -f {{ calibreweb_venv_path }}/scripts/calibre-web-nginx.conf ]; then
cat {{ calibreweb_venv_path }}/scripts/calibre-web-nginx.conf >> {{ nginx_conf_dir }}/calibre-web-nginx.conf
# 2023-12-05: Not needed as a result of PR iiab/calibre-web#57
# ln -sf {{ calibreweb_home }} {{ doc_root }}/calibre-web
fi
when: calibreweb_enabled
- name: Disable http://box{{ calibreweb_url1 }} via NGINX, by removing {{ nginx_conf_dir }}/calibre-web-nginx.conf
file:
path: "{{ nginx_conf_dir }}/calibre-web-nginx.conf"
state: absent
when: not calibreweb_enabled
- name: If disabling, also remove symlink /library/www/html/calibre-web (WIP)
file:
path: "{{ doc_root }}/calibre-web" # /library/www/html
state: absent
when: not calibreweb_enabled
- name: Restart 'nginx' systemd service
systemd:
name: nginx
state: restarted

View file

@ -1,10 +1,24 @@
- name: "Install packages: imagemagick, python3-venv" - name: Record (initial) disk space used
shell: df -B1 --output=used / | tail -1
register: df1
- name: "Install packages: imagemagick, python3-netifaces"
package: package:
name: name:
- imagemagick - imagemagick
- python3-venv - python3-netifaces
state: present state: present
# https://github.com/iiab/iiab/pull/3496#issuecomment-1475094542
#- name: "Install packages: python3-dev, gcc to compile 'netifaces'"
# package:
# name:
# - python3-dev # header files
# - gcc # compiler
# state: present
# when: python_version is version('3.10', '>=')
- name: Allow ImageMagick to read PDFs, per /etc/ImageMagick-6/policy.xml, to create book cover thumbnails - name: Allow ImageMagick to read PDFs, per /etc/ImageMagick-6/policy.xml, to create book cover thumbnails
lineinfile: lineinfile:
path: /etc/ImageMagick-6/policy.xml path: /etc/ImageMagick-6/policy.xml
@ -13,6 +27,17 @@
    line: ' <policy domain="coder" rights="read" pattern="PDF" />'
    state: present

- name: "Remove previous virtual environment {{ calibreweb_venv_path }} -- if 'calibreweb_venv_wipe: True'"
  file:
    path: "{{ calibreweb_venv_path }}"
    state: absent
  when: calibreweb_venv_wipe

- name: Does {{ calibreweb_venv_path }} exist?
  stat:
    path: "{{ calibreweb_venv_path }}"    # /usr/local/calibre-web-py3
  register: calibreweb_venv

- name: "Create 3 Calibre-Web folders to store data and config files: {{ calibreweb_home }}, {{ calibreweb_venv_path }}, {{ calibreweb_config }} (all set to {{ calibreweb_user }}:{{ apache_user }}) (default to 0755)"
  file:
    state: directory
@ -22,32 +47,60 @@
  with_items:
    - "{{ calibreweb_home }}"      # /library/calibre-web
    - "{{ calibreweb_config }}"    # /library/calibre-web/config
    - "{{ calibreweb_venv_path }}"

# FYI since May 2021, Calibre-Web (major releases) can be installed with pip:
# https://pypi.org/project/calibreweb/
# https://github.com/janeczku/calibre-web/issues/456
# https://github.com/janeczku/calibre-web/issues/677
# https://github.com/janeczku/calibre-web/pull/927
# https://github.com/janeczku/calibre-web/pull/1459

- name: "Clone i.e. download Calibre-Web ({{ calibreweb_version }}) from {{ calibreweb_repo_url }} to {{ calibreweb_venv_path }} (~94 MB initially, ~115+ MB later) -- if {{ calibreweb_venv_path }} created just above"
  git:
    repo: "{{ calibreweb_repo_url }}"    # e.g. https://github.com/janeczku/calibre-web
    dest: "{{ calibreweb_venv_path }}"
    force: yes
    #depth: 1    # 2023-11-04: Full clone for now, to help @deldesir & wider community testing
    version: "{{ calibreweb_version }}"    # e.g. master, 0.6.21
  when: not calibreweb_venv.stat.exists

- name: If Calibre-Web is being enhanced with audio/video "books" too, install/upgrade additional prereqs (CAN TAKE 3+ MINUTES, WIP)
  shell: |
    if [ -f {{ calibreweb_venv_path }}/scripts/lb-wrapper ]; then
        apt install ffmpeg pipx -y
        if lb --version; then
            pipx upgrade --include-injected xklb
        else
            pipx install xklb
            ln -sf /root/.local/bin/lb /usr/local/bin/lb
            ln -sf /root/.local/pipx/venvs/xklb/bin/yt-dlp /usr/local/bin/yt-dlp
        fi
        cp {{ calibreweb_venv_path }}/scripts/lb-wrapper {{ calibreweb_venv_path }}/scripts/lb-wrapper.greedy /usr/local/bin/
        chmod a+x /usr/local/bin/lb-wrapper /usr/local/bin/lb-wrapper.greedy
    fi

- name: Download Calibre-Web dependencies from 'requirements.txt' into python3 virtual environment {{ calibreweb_venv_path }}
  pip:
    requirements: "{{ calibreweb_venv_path }}/requirements.txt"
    virtualenv: "{{ calibreweb_venv_path }}"    # /usr/local/calibre-web-py3
    #virtualenv_site_packages: no
    virtualenv_command: python3 -m venv --system-site-packages {{ calibreweb_venv_path }}
    extra_args: --prefer-binary    # 2023-10-01: Lifesaver when recent wheels (e.g. piwheels.org) are inevitably not yet built! SEE #3560

# 2023-10-11: RasPiOS Bookworm doc for Python with venv (PEP 668 now enforced!)
# https://www.raspberrypi.com/documentation/computers/os.html#python-on-raspberry-pi
# https://www.raspberrypi.com/documentation/computers/os.html#using-pip-with-virtual-environments

# VIRTUALENV EXAMPLE COMMANDS:
# cd /usr/local/calibre-web-py3
# source bin/activate    (prepends '/usr/local/calibre-web-py3/bin' to yr PATH)
# python3 -m pip list    ('pip list' sufficient *IF* path set above!)
# python3 -m pip freeze > /tmp/requirements.txt
# python3 -m pip install -r requirements.txt
# deactivate
# https://pip.pypa.io/en/stable/user_guide/#requirements-files
# https://pip.pypa.io/en/latest/reference/requirements-file-format/

- name: Install /etc/systemd/system/calibre-web.service from template
  template:
@ -88,6 +141,17 @@
# RECORD Calibre-Web AS INSTALLED

- name: Record (final) disk space used
  shell: df -B1 --output=used / | tail -1
  register: df2

- name: Add 'calibreweb_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
  ini_file:
    path: "{{ iiab_ini_file }}"    # /etc/iiab/iiab.ini
    section: calibre-web
    option: calibreweb_disk_usage
    value: "{{ df2.stdout|int - df1.stdout|int }}"

- name: "Set 'calibreweb_installed: True'"
  set_fact:
    calibreweb_installed: True

View file

@ -19,56 +19,47 @@
    quiet: yes

- block:

  - name: Install Calibre-Web if 'calibreweb_installed' not defined, e.g. in {{ iiab_state_file }}    # /etc/iiab/iiab_state.yml
    include_tasks: install.yml
    when: calibreweb_installed is undefined

  - include_tasks: enable-or-disable.yml

  - name: Add 'calibre-web' variable values to {{ iiab_ini_file }}
    ini_file:
      path: "{{ iiab_ini_file }}"    # /etc/iiab/iiab.ini
      section: calibre-web
      option: "{{ item.option }}"
      value: "{{ item.value | string }}"
    with_items:
      - option: name
        value: Calibre-Web
      - option: description
        value: '"Calibre-Web is a web app providing a clean interface for browsing, reading and downloading e-books."'
      - option: calibreweb_install
        value: "{{ calibreweb_install }}"
      - option: calibreweb_enabled
        value: "{{ calibreweb_enabled }}"
      - option: calibreweb_url1
        value: "{{ calibreweb_url1 }}"
      - option: calibreweb_url2
        value: "{{ calibreweb_url2 }}"
      - option: calibreweb_url3
        value: "{{ calibreweb_url3 }}"
      - option: calibreweb_path
        value: "{{ calibreweb_venv_path }}"
      - option: calibreweb_home
        value: "{{ calibreweb_home }}"
      - option: calibreweb_port
        value: "{{ calibreweb_port }}"
      - option: calibreweb_settings_database
        value: "{{ calibreweb_settings_database }}"

  rescue:

  - name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
    fail:
      msg: ""
    when: not skip_role_on_error

View file

@ -1,19 +0,0 @@
# TO DO: restore http://box/libros & http://box/livres etc, alongside English (#2195)
# RELATED: https://github.com/janeczku/calibre-web/wiki/Setup-Reverse-Proxy
- name: Enable http://box{{ calibreweb_url1 }} via NGINX, by installing {{ nginx_conf_dir }}/calibre-web-nginx.conf from template # http://box/books
template:
src: calibre-web-nginx.conf.j2
dest: "{{ nginx_conf_dir }}/calibre-web-nginx.conf" # /etc/nginx/conf.d
when: calibreweb_enabled
- name: Disable http://box{{ calibreweb_url1 }} via NGINX, by removing {{ nginx_conf_dir }}/calibre-web-nginx.conf
file:
path: "{{ nginx_conf_dir }}/calibre-web-nginx.conf" # /etc/nginx/conf.d
state: absent
when: not calibreweb_enabled
- name: Restart 'nginx' systemd service
systemd:
name: nginx
state: restarted

View file

@ -5,7 +5,7 @@ location {{ calibreweb_url1 }}/ {
    proxy_set_header Host $http_host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Scheme $scheme;
    proxy_set_header X-Script-Name "{{ calibreweb_url1 }}";
    proxy_pass http://127.0.0.1:8083;
}
@ -14,7 +14,7 @@ location {{ calibreweb_url2 }}/ {
proxy_set_header Host $http_host; proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme; proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name {{ calibreweb_url2 }}; proxy_set_header X-Script-Name "{{ calibreweb_url2 }}";
proxy_pass http://127.0.0.1:8083; proxy_pass http://127.0.0.1:8083;
} }
@ -23,6 +23,6 @@ location {{ calibreweb_url3 }}/ {
proxy_set_header Host $http_host; proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme; proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name {{ calibreweb_url3 }}; proxy_set_header X-Script-Name "{{ calibreweb_url3 }}";
proxy_pass http://127.0.0.1:8083; proxy_pass http://127.0.0.1:8083;
} }
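A quick way to smoke-test the proxied paths after NGINX reloads, assuming the default calibreweb_url1/2/3 values of /books, /libros and /livres and running directly on the IIAB server:

```
for path in /books /libros /livres; do
    # -s silent, -o discard the body, -w print just the HTTP status code
    code=$(curl -s -o /dev/null -w '%{http_code}' "http://localhost${path}/")
    echo "GET ${path}/ -> HTTP ${code}"
done
# Calibre-Web itself should also answer on the backend port NGINX proxies to:
curl -s -o /dev/null -w 'backend 127.0.0.1:8083 -> HTTP %{http_code}\n' http://127.0.0.1:8083/
```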

View file

@ -34,11 +34,11 @@ calibre_userdb: "{{ calibre_dbpath }}/users.sqlite"
# calibre-server --manage-users --userdb /library/calibre/users.sqlite

calibre_sample_book: "Metamorphosis-jackson.epub"
# Must be downloadable from https://download.iiab.io/packages

calibre_src_url: "https://raw.githubusercontent.com/kovidgoyal/calibre/master/setup/linux-installer.py"
calibre_deb_url: "{{ iiab_download_url }}" # https://download.iiab.io/packages
# Above URL must offer both .deb files below: (for scripts/calibre-install-pinned-rpi.sh to run)
calibre_deb_pin_version: 3.33.1+dfsg-1 # for calibre_3.33.1+dfsg-1_all.deb (24M, 2018-10-21)
calibre_bin_deb_pin_version: "{{ calibre_deb_pin_version }}" # for calibre-bin_3.33.1+dfsg-1_armhf.deb (706K, 2018-10-23)
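To confirm a mirror actually offers both pinned .deb files before pointing calibre_deb_url at it, a small hedged check (filenames taken from the comments above; adjust BASE_URL for your own mirror):

```
BASE_URL="https://download.iiab.io/packages"    # Adjust to your own mirror
for deb in calibre_3.33.1+dfsg-1_all.deb calibre-bin_3.33.1+dfsg-1_armhf.deb; do
    if curl -sIf "$BASE_URL/$deb" > /dev/null; then
        echo "OK      $deb"
    else
        echo "MISSING $deb"
    fi
done
```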

View file

@ -0,0 +1,29 @@
# http://box:8080 & http://box:8080/mobile WORK BUT OTHER URL'S LIKE http://box/calibre ARE A MESS (BOOKS RARELY DISPLAY)
#
# 2018-08-27 POSSIBLE FIX...CONSIDER THIS ProxyPass / ProxyPassReverse TECHNIQUE:
# https://github.com/iiab/iiab/tree/master/roles/calibre-web/templates/calibre-web.conf.j2
# (anyway this works great for calibre-web, allowing http://box/books
# to work even better than http://box:8083 when box == 192.168.0.x !)
#
#- name: Attempt to enable http://box/calibre via Apache (UNTESTED)
# command: a2ensite calibre.conf
# when: apache_installed and calibre_enabled
#
#- name: Attempt to disable http://box/calibre via Apache (UNTESTED)
# command: a2dissite calibre.conf
# when: apache_installed and not calibre_enabled
- name: Enable & (Re)Start 'calibre-serve' service, if calibre_enabled
systemd:
daemon_reload: yes
name: calibre-serve
enabled: yes
state: restarted
when: calibre_enabled
- name: Disable & Stop 'calibre-serve' service, if not calibre_enabled
systemd:
name: calibre-serve
enabled: no
state: stopped
when: not calibre_enabled
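For manual testing outside Ansible, the two tasks above map onto plain systemctl calls roughly like this (CALIBRE_ENABLED stands in for the calibre_enabled variable):

```
CALIBRE_ENABLED=true    # Stand-in for the calibre_enabled variable

if [ "$CALIBRE_ENABLED" = "true" ]; then
    sudo systemctl daemon-reload
    sudo systemctl enable calibre-serve
    sudo systemctl restart calibre-serve
else
    sudo systemctl disable calibre-serve
    sudo systemctl stop calibre-serve
fi
systemctl status calibre-serve --no-pager | head -5
```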

View file

@ -1,4 +1,9 @@
- name: Record (initial) disk space used
  shell: df -B1 --output=used / | tail -1
  register: df1

# 1. APT INSTALL CALIBRE 4.12+ or 5.12+ (calibre, calibredb, calibre-server etc) ON ALL OS'S

- name: "Install OS's latest packages: calibre, calibre-bin"
  package:

@ -79,6 +84,17 @@

# 5. RECORD Calibre AS INSTALLED

- name: Record (final) disk space used
  shell: df -B1 --output=used / | tail -1
  register: df2

- name: Add 'calibre_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
  ini_file:
    path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
    section: calibre
    option: calibre_disk_usage
    value: "{{ df2.stdout|int - df1.stdout|int }}"

- name: "Set 'calibre_installed: True'"
  set_fact:
    calibre_installed: True
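The df bookkeeping above reduces to a simple before/after subtraction; the same measurement can be reproduced by hand:

```
df1=$(df -B1 --output=used / | tail -1)    # bytes used before the install
# ... the install steps would run here ...
df2=$(df -B1 --output=used / | tail -1)    # bytes used after the install
echo "calibre_disk_usage = $(( df2 - df1 )) bytes"
```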

View file

@ -19,65 +19,37 @@
quiet: yes

- block:

  - name: Install Calibre if 'calibre_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
    include_tasks: install.yml
    when: calibre_installed is undefined

  - include_tasks: enable-or-disable.yml

  - name: Add 'calibre' variable values to {{ iiab_ini_file }}
    ini_file:
      path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
      section: calibre
      option: "{{ item.option }}"
      value: "{{ item.value | string }}"
    with_items:
      - option: name
        value: Calibre
      - option: description
        value: '"Calibre is an extremely popular personal library system for e-books."'
      - option: calibre_src_url
        value: "{{ calibre_src_url }}"
      - option: calibre_dbpath
        value: "{{ calibre_dbpath }}"
      - option: calibre_port
        value: "{{ calibre_port }}"
      - option: calibre_enabled
        value: "{{ calibre_enabled }}"

  rescue:

  - name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
    fail:
      msg: ""
    when: not skip_role_on_error

View file

@ -1,4 +1,4 @@
_Please Also See: http://FAQ.IIAB.IO > ["Captive Portal Administration: What tips & tricks exist?"](https://wiki.iiab.io/go/FAQ#Captive_Portal_Administration:_What_tips_&_tricks_exist%3F)_

## Theory of Operation

View file

@ -1,3 +1,8 @@
- name: Record (initial) disk space used
shell: df -B1 --output=used / | tail -1
register: df1
- name: "Install packages: python3-dateutil, python3-jinja2" - name: "Install packages: python3-dateutil, python3-jinja2"
package: package:
name: name:
@ -26,7 +31,7 @@
mode: "{{ item.mode }}" mode: "{{ item.mode }}"
with_items: with_items:
- { src: roles/captiveportal/templates/checkurls, dest: /opt/iiab/captiveportal/, mode: '0644' } - { src: roles/captiveportal/templates/checkurls, dest: /opt/iiab/captiveportal/, mode: '0644' }
- { src: roles/captiveportal/templates/iiab-divert-to-nginx, dest: /usr/sbin/, mode: '0755' } - { src: roles/captiveportal/templates/iiab-divert-to-nginx.j2, dest: /usr/sbin/iiab-divert-to-nginx, mode: '0755' }
- { src: roles/captiveportal/templates/iiab-make-cp-servers.py, dest: /usr/sbin/, mode: '0755' } - { src: roles/captiveportal/templates/iiab-make-cp-servers.py, dest: /usr/sbin/, mode: '0755' }
- name: Install /opt/iiab/captiveportal/capture-wsgi.py from template, mode '0755' (creates the server) - name: Install /opt/iiab/captiveportal/capture-wsgi.py from template, mode '0755' (creates the server)
@ -51,6 +56,17 @@
# RECORD Captive Portal AS INSTALLED # RECORD Captive Portal AS INSTALLED
- name: Record (final) disk space used
shell: df -B1 --output=used / | tail -1
register: df2
- name: Add 'captiveportal_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: captiveportal
option: captiveportal_disk_usage
value: "{{ df2.stdout|int - df1.stdout|int }}"
- name: "Set 'captiveportal_installed: True'" - name: "Set 'captiveportal_installed: True'"
set_fact: set_fact:
captiveportal_installed: True captiveportal_installed: True

View file

@ -19,27 +19,33 @@
quiet: yes

- block:

  - name: Install Captive Portal if 'captiveportal_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
    include_tasks: install.yml
    when: captiveportal_installed is undefined

  - include_tasks: enable-or-disable.yml

  - name: Add 'captiveportal' variable values to {{ iiab_ini_file }}
    ini_file:
      path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
      section: captiveportal
      option: "{{ item.option }}"
      value: "{{ item.value | string }}"
    with_items:
      - option: name
        value: Captive Portal
      - option: description
        value: '"Captive Portal tries to open the browser automatically, so users don''t have to type in URL''s like http://box.lan in support of kiosk-like situations, in multilingual and less literate communities."'
      - option: captiveportal_install
        value: "{{ captiveportal_install }}"
      - option: captiveportal_enabled
        value: "{{ captiveportal_enabled }}"

  rescue:

  - name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
    fail:
      msg: ""
    when: not skip_role_on_error

View file

@ -1,4 +1,4 @@
#!/bin/bash -x
awk '{print("address=/" $1 "/{{ lan_ip }}")}' /opt/iiab/captiveportal/checkurls > /etc/dnsmasq.d/capture
echo "#following tells windows 7 that captive portal is active" >> /etc/dnsmasq.d/capture
echo "address=/dns.msftncsi.com/131.107.255.255" >> /etc/dnsmasq.d/capture

View file

@ -2,13 +2,13 @@
[CUPS](https://en.wikipedia.org/wiki/CUPS) (also known as the "Common UNIX Printing System") is the standards-based, open source printing system for Linux and macOS.

It allows your [Internet-in-a-Box (IIAB)](https://internet-in-a-box.org) to act as a print server.

This can be useful if a printer is attached to your IIAB &mdash; so student/teacher print jobs from client computers and phones can be processed &mdash; and then sent to the appropriate printer.

## Using it

Make sure your IIAB was installed with these 2 lines in [/etc/iiab/local_vars.yml](http://faq.iiab.io/#What_is_local_vars.yml_and_how_do_I_customize_it%3F) :

```
cups_install: True

View file

@ -2,6 +2,11 @@
# (OR ANY MEMBER OF LINUX GROUP 'lpadmin') AS SET UP BELOW...

- name: Record (initial) disk space used
  shell: df -B1 --output=used / | tail -1
  register: df1

- name: Install 'cups' package
  package:
    name: cups

@ -49,7 +54,7 @@
  blockinfile:
    path: /etc/cups/cupsd.conf
    insertafter: '^<Location /admin>$'
    block: |2 # |n MEANS: Set the block's left edge n CHARACTERS TO THE RIGHT of *this line's* indentation -- where n is {1..9} -- instead of setting its left edge to the 1st non-blank line's indentation below. Also surround block with comment lines: "# BEGIN ANSIBLE MANAGED BLOCK", "# END ANSIBLE MANAGED BLOCK"
      AuthType Default
      Require user @SYSTEM

@ -76,14 +81,14 @@
    name: cups
    state: started

# - name: "Authorize Nearby IP Addresses: Run 'cupsctl --remote-admin --share-printers --user-cancel-any' to enable http://192.168.0.x:631 AND http://{{ lan_ip }}:631 (if cups_enabled) -- REPEATED USE OF 'cupsctl' COMMANDS CAN *DAMAGE* /etc/cups/cupsd.conf BY ADDING DUPLICATE LINES (AND WORSE!) -- SO PLEASE ALSO MANUALLY RUN 'sudo cupsctl' AND 'sudo cupsd -t' TO VERIFY /etc/cups/cupsd.conf"
#   command: cupsctl --remote-admin --share-printers --user-cancel-any

# 2021-07-11: BOTH FLAGS *CANNOT* BE USED TOGETHER -- CHOOSE ONE OR THE OTHER:
# (1) '--remote-admin' AS ABOVE, OR (2) '--remote-any' AS BELOW.
# (RUN 'cupsctl' WITHOUT PARAMETERS TO CONFIRM THIS!)

- name: "Authorize All IP Addresses: Run 'cupsctl --remote-any --share-printers --user-cancel-any' to enable http://192.168.0.x:631 AND http://{{ lan_ip }}:631 AND http://10.8.0.y:631 (if cups_enabled) -- REPEATED USE OF 'cupsctl' COMMANDS CAN *DAMAGE* /etc/cups/cupsd.conf BY ADDING DUPLICATE LINES (AND WORSE!) -- SO PLEASE ALSO MANUALLY RUN 'sudo cupsctl' AND 'sudo cupsd -t' TO VERIFY /etc/cups/cupsd.conf"
  command: cupsctl --remote-any --share-printers --user-cancel-any

# 2021-07-11: In theory 'cupsctl' stanzas could be put in enable-or-disable.yml

@ -96,7 +101,7 @@
#   command: cupsctl --no-remote-admin --no-remote-any --no-share-printers --no-user-cancel-any --no-debug-logging
#   when: not cups_enabled

# - name: "2021-07-14: EXPERIMENTALLY ADD DIRECTIVES TO /etc/cups/cupsd.conf followed by 'systemctl restart cups'. As should no longer be nec thanks to NEW cups/templates/cups.conf for /etc/nginx/conf.d/cups.conf (followed by 'systemctl restart nginx'). Which FIXED URL'S LIKE: http://box/print, http://box.lan/print, http://192.168.0.x/print, http://{{ lan_ip }}/print and http://10.8.0.x/print (WITH OR WITHOUT THE TRAILING SLASH!) RECAP: (1) So be it that these 2 URL'S STILL DON'T WORK: http://box:631, http://box.lan:631 (due to CUPS' internal web server's overly stringent hostname checks, i.e. '400 Bad Request' and 'Request from \"localhost\" using invalid Host: field \"box[.lan]:631\".' in /var/log/cups/error_log) -- (2) While these 2 URL'S STILL DO WORK: http://localhost:631, http://127.0.0.1:631 -- (3) Whereas these 3 URL'S MAY WORK, DEPENDING ON 'cupsctl' COMMAND(S) ABOVE: http://192.168.0.x:631, http://{{ lan_ip }}:631, http://10.8.0.x:631"
#   lineinfile:
#     path: /etc/cups/cupsd.conf
#     line: "{{ item }}"

@ -105,7 +110,7 @@
#     - "HostNameLookups On" # More False Leads: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=530027
#     - "ServerAlias *"
#     - "#ServerName {{ iiab_hostname }}.{{ iiab_domain }}" # box.lan
#     - "#Listen {{ lan_ip }}:631" # e.g. 10.10.10.10
#     - "#Listen 127.0.0.1:631"
#     - "#Listen 0.0.0.0:631"
#     - "#Listen *:631"

@ -124,6 +129,17 @@
# RECORD CUPS AS INSTALLED

- name: Record (final) disk space used
  shell: df -B1 --output=used / | tail -1
  register: df2

- name: Add 'cups_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
  ini_file:
    path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
    section: cups
    option: cups_disk_usage
    value: "{{ df2.stdout|int - df1.stdout|int }}"

- name: "Set 'cups_installed: True'"
  set_fact:
    cups_installed: True
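Since the task name warns that repeated cupsctl runs can damage cupsd.conf, the manual equivalent plus the two recommended sanity checks looks like this:

```
sudo cupsctl --remote-any --share-printers --user-cancel-any
sudo cupsctl                   # Review the resulting settings
sudo cupsd -t                  # Syntax-check /etc/cups/cupsd.conf
sudo systemctl restart cups
```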

View file

@ -23,26 +23,33 @@
quiet: yes

- block:

  - name: Install CUPS if 'cups_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
    include_tasks: install.yml
    when: cups_installed is undefined

  - include_tasks: enable-or-disable.yml

  - name: Add 'cups' variable values to {{ iiab_ini_file }}
    ini_file:
      path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
      section: cups
      option: "{{ item.option }}"
      value: "{{ item.value | string }}"
    with_items:
      - option: name
        value: CUPS
      - option: description
        value: '"CUPS (Common UNIX Printing System) is a modular printing system that allows a computer to act as a print server. A computer running CUPS is a host that can accept print jobs from client computers, process them, and send them to the appropriate printer."'
      - option: cups_install
        value: "{{ cups_install }}"
      - option: cups_enabled
        value: "{{ cups_enabled }}"

  rescue:

  - name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
    fail:
      msg: ""
    when: not skip_role_on_error

View file

@ -21,7 +21,7 @@ location ~ ^/print(|/.*)$ { # '~' -> '~*' for case-insensitive regex
        return 301 http://localhost:631;
    }
    return 301 http://$host:631; # For 192.168.0.x, 10.10.10.10, 172.18.96.1, 10.8.0.y ETC
}
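The location block above only redirects to CUPS' own web server on port 631; from the IIAB box itself the behavior can be confirmed with curl (box-specific hostnames aside):

```
curl -sI http://localhost/print | grep -iE '^(HTTP|Location)'
# Expect a 301 with Location: http://localhost:631
curl -s -o /dev/null -w 'CUPS web UI -> HTTP %{http_code}\n' http://localhost:631/
```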

View file

@ -1,27 +1,48 @@
# 2023-02-25: MONITOR FIRMWARE UPDATES in 3 places especially...
#
# 1. apt changelog firmware-brcm80211
# https://github.com/RPi-Distro/firmware-nonfree -> debian/config/brcm80211 (brcm, cypress)
# https://archive.raspberrypi.org/debian/dists/bullseye/main/binary-arm64/Packages (1.1MB text file, look inside for summary of latest firmware-brcm80211)
# https://archive.raspberrypi.org/debian/pool/main/f/firmware-nonfree/ -> firmware-brcm80211_* e.g.:
# https://archive.raspberrypi.org/debian/pool/main/f/firmware-nonfree/firmware-brcm80211_20190114-1+rpt11_all.deb from 2021-01-25
# https://archive.raspberrypi.org/debian/pool/main/f/firmware-nonfree/firmware-brcm80211_20210315-3+rpt4_all.deb from 2021-12-06
# https://archive.raspberrypi.org/debian/pool/main/f/firmware-nonfree/firmware-brcm80211_20221012-1~bpo11+1+rpt1_all.deb from 2022-11-17
# 2. apt changelog linux-firmware-raspi
# https://packages.ubuntu.com/search?keywords=linux-firmware-raspi
# 3. https://github.com/moodlebox/moodlebox/blob/main/roles/accesspoint/tasks/main.yml

#- name: Back up 4 OS-provided WiFi firmware files (incl symlink contents) to /lib/firmware/cypress/*.orig
- name: Back up 4 OS-provided WiFi firmware files (replicate any symlinks) to /lib/firmware/cypress/*.orig -- /usr/bin/iiab-check-firmware will later do similar (e.g. as firmware install completes) -- moving 2-or-4 of these to <ORIGINAL FILENAME>.YYYY-MM-DD-HH:MM:SS ("doubly timestamping" to preserve BOTH last-modif & moving date)
  # copy:
  #   src: /lib/firmware/cypress/{{ item }}
  #   dest: /lib/firmware/cypress/{{ item }}.orig
  #   #local_follow: False # FAILS TO PRESERVE LINKS (ansible/ansible#74777) e.g. /lib/firmware/cypress/cyfmac43455-sdio.bin -> /etc/alternatives/cyfmac43455-sdio.bin -> ...
  # 2023-05-01 CLARIF OF BELOW:
  # 1) Even if 'mv' fails, no matter it'll continue to 'cp' below
  # 2) 'cp -P' == 'cp --no-dereference' sufficient to replicate these symlinks and files ('cp -d' & 'cp -a' are incrementally stronger, and so probably can't hurt)
  shell: |
    mv /lib/firmware/cypress/{{ item }}.orig /lib/firmware/cypress/{{ item }}.orig.$(date +%F-%T)
    cp -a /lib/firmware/cypress/{{ item }} /lib/firmware/cypress/{{ item }}.orig
  with_items:
    - cyfmac43430-sdio.bin
    - cyfmac43430-sdio.clm_blob
    - cyfmac43455-sdio.bin
    - cyfmac43455-sdio.clm_blob
  #ignore_errors: yes # 2023-02-25: Let's INTENTIONALLY surface any errors, e.g. if any future RasPiOS or Ubuntu-on-Rpi lack some of the above 4 files/links?

- name: Download higher-capacity firmwares (for RPi internal WiFi, per https://github.com/iiab/iiab/issues/823#issuecomment-662285202 and https://github.com/iiab/iiab/issues/2853)
  get_url:
    url: "{{ iiab_download_url }}/{{ item }}"
    dest: /lib/firmware/cypress/
    timeout: "{{ download_timeout }}"
  with_items:
    - brcmfmac43455-sdio.bin_2021-11-30_minimal # 19 -- SAME AS RASPIOS & UBUNTU'S https://github.com/RPi-Distro/firmware-nonfree/blob/feeeda21e930c2e182484e8e1269b61cca2a8451/debian/config/brcm80211/cypress/cyfmac43455-sdio-minimal.bin
    - brcmfmac43455-sdio.bin_2021-10-05_3rd-trial-minimal # 24 -- from https://github.com/iiab/iiab/issues/2853#issuecomment-934293015
    - brcmfmac43455-sdio.clm_blob_2021-11-17_rpi # Works w/ both above -- SAME AS RASPIOS & UBUNTU'S https://github.com/RPi-Distro/firmware-nonfree/blob/dc406650e840705957f8403efeacf71d2d7543b3/debian/config/brcm80211/cypress/cyfmac43455-sdio.clm_blob
    - brcmfmac43455-sdio.bin_2015-03-01_7.45.18.0_ub19.10.1 # 32 -- from https://github.com/iiab/iiab/issues/823#issuecomment-662285202
    - brcmfmac43455-sdio.clm_blob_2018-02-26_rpi
    - brcmfmac43430-sdio.bin_2018-09-11_7.45.98.65 # 30 -- from https://github.com/iiab/iiab/issues/823#issuecomment-662285202
    - brcmfmac43430-sdio.clm_blob_2018-09-11_7.45.98.65

# RECORD firmware AS DOWNLOADED
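A hedged availability check for the files listed above, against the usual {{ iiab_download_url }} base (https://download.iiab.io/packages per the comments elsewhere in this changeset; substitute your own mirror if different):

```
BASE_URL="https://download.iiab.io/packages"    # i.e. {{ iiab_download_url }}
for fw in \
    brcmfmac43455-sdio.bin_2021-11-30_minimal \
    brcmfmac43455-sdio.bin_2021-10-05_3rd-trial-minimal \
    brcmfmac43455-sdio.clm_blob_2021-11-17_rpi \
    brcmfmac43455-sdio.bin_2015-03-01_7.45.18.0_ub19.10.1 \
    brcmfmac43455-sdio.clm_blob_2018-02-26_rpi \
    brcmfmac43430-sdio.bin_2018-09-11_7.45.98.65 \
    brcmfmac43430-sdio.clm_blob_2018-09-11_7.45.98.65; do
    curl -sIf "$BASE_URL/$fw" > /dev/null && echo "OK      $fw" || echo "MISSING $fw"
done
```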

View file

@ -4,61 +4,69 @@
# Set 2 symlinks for RPi 3 B+ and 4 (43455)

# COMPARE: update-alternatives --display cyfmac43455-sdio.bin
# https://github.com/moodlebox/moodlebox/blob/main/roles/accesspoint/tasks/main.yml#L3-L6

- name: Populate rpi3bplus_rpi4_wifi_firmwares dictionary (lookup table for operator-chosen .bin and .clm_blob files in /lib/firmware/cypress)
  set_fact:
    rpi3bplus_rpi4_wifi_firmwares: # Dictionary keys (left side) are always strings, e.g. "19"
      os:
        - cyfmac43455-sdio.bin.orig # 2023-02-25: 7.45.241 from 2021-11-01 on Ubuntu 22.04.2 too (cyfmac43455-sdio-standard.bin)
        - cyfmac43455-sdio.clm_blob.orig # On Ubuntu 22.04.2 too (brcmfmac43455-sdio.clm_blob_2021-11-17_rpi)
      ub:
        - cyfmac43455-sdio.bin.distrib # 2023-02-25: STALE 7.45.234 from 2021-04-15; on Ubuntu 22.04.2 NOT RasPiOS
        - cyfmac43455-sdio.clm_blob.distrib # 4.7K instead of 2.7K w/ above "os"
      19:
        - brcmfmac43455-sdio.bin_2021-11-30_minimal # On Ubuntu 22.04.2 too (cyfmac43455-sdio-minimal.bin)
        - brcmfmac43455-sdio.clm_blob_2021-11-17_rpi # On Ubuntu 22.04.2 too (cyfmac43455-sdio.clm_blob)
      24:
        - brcmfmac43455-sdio.bin_2021-10-05_3rd-trial-minimal
        - brcmfmac43455-sdio.clm_blob_2021-11-17_rpi # On Ubuntu 22.04.2 too (cyfmac43455-sdio.clm_blob)
      32:
        - brcmfmac43455-sdio.bin_2015-03-01_7.45.18.0_ub19.10.1
        - brcmfmac43455-sdio.clm_blob_2018-02-26_rpi # 14K instead of 2.7K w/ above "os"

- name: Symlink /lib/firmware/cypress/cyfmac43455-sdio.bin.iiab -> {{ rpi3bplus_rpi4_wifi_firmwares[rpi3bplus_rpi4_wifi_firmware][0] }} (as rpi3bplus_rpi4_wifi_firmware is "{{ rpi3bplus_rpi4_wifi_firmware }}")
  file:
    src: "{{ rpi3bplus_rpi4_wifi_firmwares[rpi3bplus_rpi4_wifi_firmware][0] }}"
    path: /lib/firmware/cypress/cyfmac43455-sdio.bin.iiab
    state: link
    force: yes

- name: Symlink /lib/firmware/cypress/cyfmac43455-sdio.clm_blob.iiab -> {{ rpi3bplus_rpi4_wifi_firmwares[rpi3bplus_rpi4_wifi_firmware][1] }} (as rpi3bplus_rpi4_wifi_firmware is "{{ rpi3bplus_rpi4_wifi_firmware }}")
  file:
    src: "{{ rpi3bplus_rpi4_wifi_firmwares[rpi3bplus_rpi4_wifi_firmware][1] }}"
    path: /lib/firmware/cypress/cyfmac43455-sdio.clm_blob.iiab
    state: link
    force: yes

# Set 2 symlinks for RPi Zero W and 3 (43430)

- name: Populate rpizerow_rpi3_wifi_firmwares dictionary (lookup table for operator-chosen .bin and .clm_blob files in /lib/firmware/cypress)
  set_fact:
    rpizerow_rpi3_wifi_firmwares:
      os:
        - cyfmac43430-sdio.bin.orig # 2023-02-25: 7.45.98 from 2021-07-19 on Ubuntu 22.04.2 too
        - cyfmac43430-sdio.clm_blob.orig # On Ubuntu 22.04.2 too
      ub:
        - cyfmac43430-sdio.bin.distrib # 2023-02-25: STALE 7.45.98.118 from 2021-03-30; on Ubuntu 22.04.2 NOT RasPiOS
        - cyfmac43430-sdio.clm_blob.distrib # Identical to above 4.7K cyfmac43430-sdio.clm_blob
      30:
        - brcmfmac43430-sdio.bin_2018-09-11_7.45.98.65
        - brcmfmac43430-sdio.clm_blob_2018-09-11_7.45.98.65 # 14K instead of 4.7K w/ above "os" & "ub"

- name: Symlink /lib/firmware/cypress/cyfmac43430-sdio.bin.iiab -> {{ rpizerow_rpi3_wifi_firmwares[rpizerow_rpi3_wifi_firmware][0] }} (as rpizerow_rpi3_wifi_firmware is "{{ rpizerow_rpi3_wifi_firmware }}")
  file:
    src: "{{ rpizerow_rpi3_wifi_firmwares[rpizerow_rpi3_wifi_firmware][0] }}"
    path: /lib/firmware/cypress/cyfmac43430-sdio.bin.iiab
    state: link
    force: yes

- name: Symlink /lib/firmware/cypress/cyfmac43430-sdio.clm_blob.iiab -> {{ rpizerow_rpi3_wifi_firmwares[rpizerow_rpi3_wifi_firmware][1] }} (as rpizerow_rpi3_wifi_firmware is "{{ rpizerow_rpi3_wifi_firmware }}")
  file:
    src: "{{ rpizerow_rpi3_wifi_firmwares[rpizerow_rpi3_wifi_firmware][1] }}"
    path: /lib/firmware/cypress/cyfmac43430-sdio.clm_blob.iiab
    state: link
    force: yes

@ -73,7 +81,7 @@
    - { src: 'iiab-check-firmware.service', dest: '/etc/systemd/system/', mode: '0644' }
    - { src: 'iiab-firmware-warn.sh', dest: '/etc/profile.d/', mode: '0644' }

- name: Enable & (Re)Start iiab-check-firmware.service (also runs on each boot) -- finalizing 2-or-4 symlink chains e.g. /lib/firmware/cypress/X.{bin|blob} -> /lib/firmware/cypress/X.{bin|blob}.iiab -> CHOSEN-FIRMWARE-FILE-OR-LINK
  systemd:
    name: iiab-check-firmware.service
    daemon_reload: yes
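Once iiab-check-firmware has added the final links, the chain described in the task name can be inspected by hand (43455 shown; the 43430 files follow the same pattern):

```
ls -l /lib/firmware/cypress/cyfmac43455-sdio.bin \
      /lib/firmware/cypress/cyfmac43455-sdio.bin.iiab
readlink -f /lib/firmware/cypress/cyfmac43455-sdio.bin    # Final target after following every link
```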

View file

@ -3,22 +3,26 @@
# client devices that can access your Raspberry Pi's internal WiFi hotspot.
# If IIAB's already installed, you should then run 'cd /opt/iiab/iiab' and
# then 'sudo ./runrole firmware' (DO RUN iiab-check-firmware FOR MORE TIPS!)

# 2018-2023 Background & Progress:
#
# Raspberry Pi 3 used to support 32 WiFi connections but is now limited to [4-10]
# https://github.com/iiab/iiab/issues/823#issuecomment-662285202
# Opinions about Pi 4B/3B+ WiFi features [practical AP firmware for schools!]
# https://github.com/iiab/iiab/issues/2853#issuecomment-957836892
# RPi WiFi hotspot firmware reliability fix, incl new/better choices for 3B+ & 4
# https://github.com/iiab/iiab/pull/3103
# Set WiFi firmware in /lib/firmware/cypress due to RasPiOS & Ubuntu changes
# https://github.com/iiab/iiab/pull/3482

# RISK: What USB 3.0 stick/drive patterns degrade a Raspberry Pi's 2.4GHz WiFi?
# https://github.com/iiab/iiab/issues/2638

# ► SEE "MONITOR FIRMWARE UPDATES in 3 places especially" in tasks/download.yml ◄

- name: Install firmware (for RPi internal WiFi)
  include_tasks: install.yml
  when: firmware_installed is undefined

# Two variables are placed in /etc/iiab/iiab_state.yml:
#

View file

@ -1,10 +1,24 @@
#!/bin/bash

# The 1st time /usr/bin/iiab-check-firmware runs (at the end of
# firmware/tasks/install.yml) 2-4 lynchpin top links are put in place,
# finalizing symlink chains like:
#
# /lib/firmware/cypress/X.{bin|blob} ->
# /lib/firmware/cypress/X.{bin|blob}.iiab ->
# CHOSEN-FIRMWARE-FILE-OR-LINK
#
# Also backing up top-of-chain originals (file or link!) by moving these to:
#
# /lib/firmware/cypress/<ORIGINAL FILENAME>.YYYY-MM-DD-HH:MM:SS
#
# NOTE these are "doubly timestamped" to preserve BOTH last-modif & moving date.

# 2023-02-25: bash scripts using default_vars.yml &/or local_vars.yml
# https://github.com/iiab/iiab-factory/blob/master/iiab
# https://github.com/iiab/iiab/blob/master/roles/firmware/templates/iiab-check-firmware#L10-14
# https://github.com/iiab/iiab/blob/master/roles/network/templates/gateway/iiab-gen-iptables#L48-L52
# https://github.com/iiab/maps/blob/master/osm-source/pages/viewer/scripts/iiab-install-map-region#L23-L39
# https://github.com/iiab/iiab/blob/master/roles/openvpn/templates/iiab-support READS AND WRITES, INCL NON-BOOLEAN

iiab_var_value() {

@ -14,29 +28,29 @@ iiab_var_value() {
}

link_fw() {
    if [[ $(readlink /lib/firmware/cypress/$1) != $1.iiab ]] ; then
        echo
        mv /lib/firmware/cypress/$1 /lib/firmware/cypress/$1.$(date +%F-%T)
        ln -s $1.iiab /lib/firmware/cypress/$1
        echo -e "\e[1mSymlinked /lib/firmware/cypress/$1 -> $1.iiab\e[0m"
        touch /tmp/.fw_modified
    fi
}

if [[ $(iiab_var_value rpi3bplus_rpi4_wifi_firmware) != "os" ]] ; then
    link_fw cyfmac43455-sdio.bin
    link_fw cyfmac43455-sdio.clm_blob
fi

if [[ $(iiab_var_value rpizerow_rpi3_wifi_firmware) != "os" ]] ; then
    link_fw cyfmac43430-sdio.bin
    link_fw cyfmac43430-sdio.clm_blob
fi

if [ -f /tmp/.fw_modified ]; then
    bash /etc/profile.d/iiab-firmware-warn.sh
else
    echo -e "\n\e[1mWiFi Firmware links in /lib/firmware/cypress appear \e[92mCORRECT\e[0m\e[1m, per iiab/iiab#3482\e[0m"
    echo
    echo -e "\e[100;1m(No reboot appears necessary!)\e[0m"
    echo

@ -44,10 +58,11 @@ else
    echo -e "settings in /etc/iiab/local_vars.yml, please then run:"
    echo
    echo -e "    cd /opt/iiab/iiab"
    echo -e "    sudo iiab-hotspot-off    # NO LONGER NEC? eg to restore 'wifi_up_down: True'"
    echo -e "    sudo ./runrole --reinstall firmware"
    echo -e "    sudo ./iiab-network      # SOMETIMES NECESSARY"
    echo -e "    sudo iiab-hotspot-on     # NO LONGER NEC? eg to restore 'wifi_up_down: True'"
    echo -e "    sudo reboot\n"
    #echo
    #echo -e "Disconnect your power cord before rebooting, for better WiFi firmware results.\n"
fi

View file

@ -1,9 +1,9 @@
#!/bin/bash

if [ -f /tmp/.fw_modified ]; then
    echo -e "\n\e[41;1mWiFi Firmware link(s) modified, per iiab/iiab#3482: PLEASE REBOOT!\e[0m"
    echo
    echo -e "If you want this warning to stop, reboot to remove /tmp/.fw_modified\n"
fi

# \e[1m = bright white    \e[100;1m = bright white, on gray    \n\e[41;1m = bright white, on red

View file

@ -9,7 +9,7 @@
# Info needed to install Gitea:
gitea_version: "1.21" # 2022-01-30: Grabs latest from this MAJOR/MINOR release branch. Rather than exhaustively hard-coding point releases (e.g. 1.14.5) every few weeks. Quotes nec if trailing zero.
iset_suffixes:
  i386: 386
  x86_64: amd64

@ -17,9 +17,9 @@ iset_suffixes:
  armv6l: arm-6
  armv7l: arm-6 # "arm-7" used to work, but no longer since 2019-04-20's Gitea 1.8.0: https://github.com/iiab/iiab/issues/1673 https://github.com/iiab/iiab/pull/1713 -- 2019-07-31: ARM7 support will return at some point, according to: https://github.com/go-gitea/gitea/pull/7037#issuecomment-516735216 (what about ARM8 support for RPi 4?)
gitea_iset_suffix: "{{ iset_suffixes[ansible_machine] | default('unknown') }}" # A bit safer than ansible_architecture (see kiwix/defaults/main.yml)
gitea_download_url: "https://dl.gitea.com/gitea/{{ gitea_version }}/gitea-{{ gitea_version }}-linux-{{ gitea_iset_suffix }}"
gitea_integrity_url: "{{ gitea_download_url }}.asc"
gitea_root_directory: "{{ content_base }}/gitea" # /library/gitea
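The resulting download URL can be previewed (and HEAD-checked) outside Ansible; the case statement mirrors iset_suffixes above, with uname -m standing in for ansible_machine and the aarch64 mapping added as an assumption:

```
GITEA_VERSION=1.21
case "$(uname -m)" in
    i386|i686)     SUFFIX=386 ;;
    x86_64)        SUFFIX=amd64 ;;
    aarch64)       SUFFIX=arm64 ;;   # Assumption: not shown in the excerpt above
    armv6l|armv7l) SUFFIX=arm-6 ;;
    *)             SUFFIX=unknown ;;
esac
URL="https://dl.gitea.com/gitea/${GITEA_VERSION}/gitea-${GITEA_VERSION}-linux-${SUFFIX}"
echo "$URL"
curl -sIf "$URL" > /dev/null && echo "reachable" || echo "not reachable (or unsupported arch)"
```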

View file

@ -1,3 +1,19 @@
- name: Enable & Restart 'gitea' systemd service, if gitea_enabled
systemd:
name: gitea
daemon_reload: yes
enabled: yes
state: restarted
when: gitea_enabled
- name: Disable & Stop 'gitea' systemd service, if not gitea_enabled
systemd:
name: gitea
enabled: no
state: stopped
when: not gitea_enabled
- name: Enable http://box{{ gitea_url }} via NGINX, by installing {{ nginx_conf_dir }}/gitea-nginx.conf from template
template:
src: gitea-nginx.conf.j2

View file

@ -1,3 +1,8 @@
- name: Record (initial) disk space used
shell: df -B1 --output=used / | tail -1
register: df1
# 1. Prepare to install Gitea: create user and directory structure

- name: Shut down existing Gitea instance (if we're reinstalling)

@ -43,10 +48,10 @@
msg: "Could not find a binary for the CPU architecture \"{{ ansible_architecture }}\""
when: gitea_iset_suffix == "unknown"

- name: Download Gitea binary {{ gitea_download_url }} to {{ gitea_install_path }} (0775, ~134 MB, SLOW DOWNLOAD CAN TAKE ~15 MIN)
get_url:
url: "{{ gitea_download_url }}"
dest: "{{ gitea_install_path }}" # e.g. /library/gitea/bin/gitea-1.21
mode: 0775
timeout: "{{ download_timeout }}"

@ -56,16 +61,16 @@
dest: "{{ gitea_checksum_path }}"
timeout: "{{ download_timeout }}"

- name: Verify Gitea binary with GPG signature ("BAD signature" FALSE ALARMS continue as of 2023-07-16, despite their claims at https://docs.gitea.com/installation/install-from-binary#verify-gpg-signature)
shell: |
gpg --keyserver keys.openpgp.org --recv {{ gitea_gpg_key }}
gpg --verify {{ gitea_checksum_path }} {{ gitea_install_path }}
ignore_errors: yes

- name: Symlink {{ gitea_link_path }} -> {{ gitea_install_path }}
file:
src: "{{ gitea_install_path }}"
path: "{{ gitea_link_path }}" # /library/gitea/gitea
owner: gitea
group: gitea
state: link

@ -105,6 +110,17 @@
# 5. RECORD Gitea AS INSTALLED

- name: Record (final) disk space used
shell: df -B1 --output=used / | tail -1
register: df2

- name: Add 'gitea_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: gitea
option: gitea_disk_usage
value: "{{ df2.stdout|int - df1.stdout|int }}"

- name: "Set 'gitea_installed: True'"
set_fact:
gitea_installed: True
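The GPG step by hand, using the same keyserver the task now uses. GITEA_GPG_KEY stands in for {{ gitea_gpg_key }}; the fingerprint below is Gitea's published release-signing key, but verify it independently, and adjust the /library/gitea/bin paths to match your install:

```
GITEA_GPG_KEY=7C9E68152594688862D62AF62D9AE806EC1592E2    # Gitea's published signing key -- verify independently
gpg --keyserver keys.openpgp.org --recv "$GITEA_GPG_KEY"
gpg --verify /library/gitea/bin/gitea-1.21.asc /library/gitea/bin/gitea-1.21
```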

View file

@ -19,46 +19,37 @@
quiet: yes

- block:

  - name: Install Gitea {{ gitea_version }} if 'gitea_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
    include_tasks: install.yml
    when: gitea_installed is undefined

  - include_tasks: enable-or-disable.yml

  - name: Add 'gitea' variable values to {{ iiab_ini_file }}
    ini_file:
      path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
      section: gitea
      option: "{{ item.option }}"
      value: "{{ item.value | string }}"
    with_items:
      - option: name
        value: Gitea
      - option: description
        value: '"Gitea is like GitHub for more offline communities: Git with a cup of tea"'
      - option: gitea_install
        value: "{{ gitea_install }}"
      - option: gitea_enabled
        value: "{{ gitea_enabled }}"
      - option: gitea_run_directory
        value: "{{ gitea_run_directory }}"
      - option: gitea_url
        value: "{{ gitea_url }}"

  rescue:

  - name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
    fail:
      msg: ""
    when: not skip_role_on_error

View file

@ -2,7 +2,8 @@
; Copy required sections to your own app.ini (default is custom/conf/app.ini)
; and modify as needed.
; see https://docs.gitea.com/administration/config-cheat-sheet for additional documentation.
; https://docs.gitea.com/next/administration/config-cheat-sheet

; App name that shows in every page title
APP_NAME = {{ gitea_display_name }}

@ -23,9 +24,11 @@ DEFAULT_PRIVATE = last
; Global limit of repositories per user, applied at creation time. -1 means no limit
MAX_CREATION_LIMIT = -1
; Mirror sync queue length, increase if mirror syncing starts hanging
; 2023-07-16 ERROR: MIRROR_QUEUE_LENGTH = 1000
; `[repository].MIRROR_QUEUE_LENGTH`. Use new options in `[queue.mirror]`
; Patch test queue length, increase if pull request patch testing starts hanging
; 2023-07-16 ERROR: PULL_REQUEST_QUEUE_LENGTH = 1000
; `[repository].PULL_REQUEST_QUEUE_LENGTH`. Use new options in `[queue.pr_patch_checker]`
; Preferred Licenses to place at the top of the List
; The name here must match the filename in conf/license or custom/conf/license
PREFERRED_LICENSES = Apache License 2.0,MIT License

@ -201,13 +204,22 @@ PPROF_DATA_PATH = data/tmp/pprof
LANDING_PAGE = home
; Enables git-lfs support. true or false, default is false.
LFS_START_SERVER = false
; LFS authentication secret, change this yourself
LFS_JWT_SECRET =
; LFS authentication validity period (in time.Duration), pushes taking longer than this may fail.
LFS_HTTP_AUTH_EXPIRY = 20m

; lfs [Large File Storage] storage will override storage
;
[lfs]
;STORAGE_TYPE = local
;
; Where your lfs files reside, default is data/lfs.
PATH = {{ gitea_lfs_root }}
;
; override the minio base path if storage type is minio
;MINIO_BASE_PATH = lfs/

; Define allowed algorithms and their minimum key length (use -1 to disable a type)
[ssh.minimum_key_sizes]
ED25519 = 256

@ -240,7 +252,8 @@ ISSUE_INDEXER_PATH = indexers/issues.bleve
; repo indexer by default disabled, since it uses a lot of disk space
REPO_INDEXER_ENABLED = false
REPO_INDEXER_PATH = indexers/repos.bleve
; 2023-07-16 ERROR: UPDATE_BUFFER_LEN = 20
; `[indexer].UPDATE_BUFFER_LEN`. Use new options in `[queue.issue_indexer]`
MAX_FILE_SIZE = 1048576

[admin]

@ -360,7 +373,8 @@ PAGING_NUM = 10
[mailer]
ENABLED = false
; Buffer length of channel, keep it as it is if you don't know what it is.
; 2023-07-16 ERROR: SEND_BUFFER_LEN = 100
; `[mailer].SEND_BUFFER_LEN`. Use new options in `[queue.mailer]`
; Name displayed in mail title
SUBJECT = %(APP_NAME)s
; Mail server
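A quick way to find any of these four removed options lingering in an existing app.ini before upgrading (the path below is a guess based on gitea_root_directory; point it at your real custom/conf/app.ini):

```
grep -nE 'MIRROR_QUEUE_LENGTH|PULL_REQUEST_QUEUE_LENGTH|UPDATE_BUFFER_LEN|SEND_BUFFER_LEN' \
    /library/gitea/custom/conf/app.ini    # Path is a guess; use your actual app.ini location
```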

View file

@ -13,7 +13,7 @@
iiab-admin README
=================

`Internet-in-a-Box <https://internet-in-a-box.org>`_ (IIAB) encourages you to pay attention to the security of your learning community.

This Ansible playbook is one of the very first that runs when you install IIAB, and we hope reading this helps you understand your choices:

@ -21,11 +21,11 @@ Configure user 'iiab-admin'
---------------------------

* `admin-user.yml <tasks/admin-user.yml>`_ configures a Linux user that will give you access to IIAB's Admin Console (http://box.lan/admin) after IIAB is installed — and can also help you at the command-line with IIAB community support commands like {iiab-diagnostics, iiab-hotspot-on, iiab-check-firmware, etc}.
* If initial creation of the user and password was somehow not already taken care of by IIAB's 1-line installer (https://download.iiab.io) or by your underlying OS, that too will be taken care of here.
* By default this user is ``iiab-admin`` with password ``g0adm1n``
* *Do change the default password if you haven't yet, by running:* **sudo passwd iiab-admin**
* After IIAB is installed, you can also change the password by logging into Admin Console (http://box.lan/admin) > Utilities > Change Password.
* If you prefer to use a pre-existing user like ``pi`` or ``ubuntu`` (or any other username) customize the variable ``iiab_admin_user`` in your `/etc/iiab/local_vars.yml <https://wiki.iiab.io/go/FAQ#What_is_local_vars.yml_and_how_do_I_customize_it%3F>`_ (preferably do this prior to installing IIAB!)
* You can set ``iiab_admin_can_sudo: False`` if you want a strict security lockdown (if you're really sure you won't need IIAB community support commands like `/usr/bin/iiab-diagnostics <../../scripts/iiab-diagnostics.README.md>`_, `/usr/bin/iiab-hotspot-on <../network/templates/network/iiab-hotspot-on>`_, `iiab-check-firmware <../firmware/templates/iiab-check-firmware>`_, etc!)
* You can also set ``iiab_admin_user_install: False`` if you're sure you know how to do all this `account and sudo configuration <tasks/admin-user.yml>`_ manually.

@ -36,14 +36,14 @@ Security

#. ``iiab-admin`` (specified by ``admin_console_group`` in `/opt/iiab/iiab/vars/default_vars.yml <../../vars/default_vars.yml>`_ and `/opt/iiab/iiab-admin-console/vars/default_vars.yml <https://github.com/iiab/iiab-admin-console/blob/master/vars/default_vars.yml>`_)
#. ``sudo``

* Please read much more about what escalated (root) actions are authorized when you log into IIAB's Admin Console, and how this works: https://github.com/iiab/iiab-admin-console/blob/master/Authentication.md
* If your IIAB includes OpenVPN, ``/root/.ssh/authorized_keys`` should be installed by `roles/openvpn/tasks/install.yml <../openvpn/tasks/install.yml>`_ to facilitate remote community support. Feel free to remove this as mentioned here: https://wiki.iiab.io/go/Security
* Auto-checking for the default/published password (as specified by ``iiab_admin_published_pwd`` in `/opt/iiab/iiab/vars/default_vars.yml <../../vars/default_vars.yml>`_) is implemented in `/etc/profile.d <templates/sshpwd-profile-iiab.sh>`_ (and `/etc/xdg/lxsession/LXDE-pi <templates/sshpwd-lxde-iiab.sh>`_ when it exists, i.e. on Raspberry Pi OS with desktop).

Example
=======

* If you later change your mind about ``sudo`` privileges for user 'iiab-admin' (as specified by ``iiab_admin_user``) then do this:
#. Go ahead and change the value of ``iiab_admin_can_sudo`` (to either True or False) in `/etc/iiab/local_vars.yml <http://wiki.laptop.org/go/IIAB/FAQ#What_is_local_vars.yml_and_how_do_I_customize_it.3F>`_ #. Go ahead and change the value of ``iiab_admin_can_sudo`` (to either True or False) in `/etc/iiab/local_vars.yml <https://wiki.iiab.io/go/FAQ#What_is_local_vars.yml_and_how_do_I_customize_it%3F>`_
#. Make sure that ``iiab_admin_user_install: True`` is also set. #. Make sure that ``iiab_admin_user_install: True`` is also set.
#. Then re-run this Ansible playbook, by running ``cd /opt/iiab/iiab`` followed by ``sudo ./runrole --reinstall iiab-admin`` #. Then re-run this Ansible playbook, by running ``cd /opt/iiab/iiab`` followed by ``sudo ./runrole --reinstall iiab-admin``
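For instance, a minimal sketch of the relevant `/etc/iiab/local_vars.yml` lines for that scenario (the False here is only an example, pick the value you actually want), after which you re-run the role exactly as in the step just above:
```
# /etc/iiab/local_vars.yml
iiab_admin_user_install: True
iiab_admin_can_sudo: False
```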


@ -2,6 +2,11 @@
# https://github.com/iiab/iiab/blob/master/roles/iiab-admin/README.rst # https://github.com/iiab/iiab/blob/master/roles/iiab-admin/README.rst
- name: Record (initial) disk space used
shell: df -B1 --output=used / | tail -1
register: df1
- name: "Install text-mode packages, useful during remote access: lynx, screen" - name: "Install text-mode packages, useful during remote access: lynx, screen"
package: package:
name: name:
@ -23,7 +28,7 @@
# (1) by the OS installer # (1) by the OS installer
# (2) by the OS's graphical desktop tools # (2) by the OS's graphical desktop tools
# (3) at the command-line: sudo passwd iiab-admin # (3) at the command-line: sudo passwd iiab-admin
# (4) by IIAB's 1-line installer: http://download.iiab.io # (4) by IIAB's 1-line installer: https://download.iiab.io
# (5) by this role: roles/iiab-admin/tasks/admin-user.yml # (5) by this role: roles/iiab-admin/tasks/admin-user.yml
# (6) by IIAB's Admin Console during installation # (6) by IIAB's Admin Console during installation
# ...and/or... # ...and/or...
@ -35,6 +40,17 @@
# RECORD iiab-admin AS INSTALLED # RECORD iiab-admin AS INSTALLED
- name: Record (final) disk space used
shell: df -B1 --output=used / | tail -1
register: df2
- name: Add 'iiab_admin_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: iiab-admin
option: iiab_admin_disk_usage
value: "{{ df2.stdout|int - df1.stdout|int }}"
- name: "Set 'iiab_admin_installed: True'" - name: "Set 'iiab_admin_installed: True'"
set_fact: set_fact:
iiab_admin_installed: True iiab_admin_installed: True


@ -1,31 +1,37 @@
- name: Install /etc/profile.d/sshpwd-profile-iiab.sh from template, to issue warnings (during shell/ssh logins) if iiab-admin password is the default # 2022-07-22: SIMILAR TO roles/www_options/tasks/main.yml FOR browser
# AND roles/network/tasks/netwarn.yml FOR iiab-network
- name: Install /etc/profile.d/iiab-pwdwarn-profile.sh from template, to issue warnings (during shell/ssh logins) if iiab-admin password is the default
template: template:
src: sshpwd-profile-iiab.sh.j2 src: iiab-pwdwarn-profile.sh.j2
dest: /etc/profile.d/sshpwd-profile-iiab.sh dest: /etc/profile.d/iiab-pwdwarn-profile.sh
mode: '0644' mode: '0644'
- name: Is /etc/xdg/lxsession/LXDE-pi a directory? - name: Does /home/{{ iiab_admin_user }}/.config/wayfire.ini exist?
stat: stat:
path: /etc/xdg/lxsession/LXDE-pi path: /home/{{ iiab_admin_user }}/.config/wayfire.ini
register: lx register: wayfire_ini
- name: "If so, install from template: /etc/xdg/lxsession/LXDE-pi/sshpwd-lxde-iiab.sh" - name: "If so, install from template: /usr/local/sbin/iiab-pwdwarn-wayfire"
template: template:
src: sshpwd-lxde-iiab.sh.j2 src: iiab-pwdwarn-wayfire.j2
dest: /etc/xdg/lxsession/LXDE-pi/sshpwd-lxde-iiab.sh dest: /usr/local/sbin/iiab-pwdwarn-wayfire
mode: '0755' mode: '0755'
when: lx.stat.isdir is defined and lx.stat.isdir # and is_raspbian when: wayfire_ini.stat.exists
# 2019-03-07: This popup (/etc/xdg/lxsession/LXDE-pi/sshpwd-lxde-iiab.sh) does # 2019-03-07: This pop-up (/etc/xdg/lxsession/LXDE-pi/sshpwd-lxde-iiab.sh) did
# not actually appear when triggered by /etc/xdg/autostart/pprompt-iiab.desktop # not actually appear when triggered by /etc/xdg/autostart/pprompt-iiab.desktop
# (or pprompt.desktop as Raspbian has working since 2018-11-13!) Too bad as it # (or pprompt.desktop as Raspbian has working since 2018-11-13!) Too bad as it
# would be really nice to standardize this popup across Ubermix & all distros.. # would be really nice to standardize pop-ups across Ubermix & all distros...
# Is this a permissions/security issue presumably? Official autostart spec is: # Is this a permissions/security issue presumably? Official autostart spec is:
# https://specifications.freedesktop.org/autostart-spec/autostart-spec-latest.html # https://specifications.freedesktop.org/autostart-spec/autostart-spec-latest.html
# Raspbian's 2016-2018 evolution here: https://github.com/iiab/iiab/issues/1537 # Raspbian's 2016-2018 evolution here: https://github.com/iiab/iiab/issues/1537
- name: ...and put a line in /etc/xdg/lxsession/LXDE-pi/autostart to trigger popups - name: ...and put a line in /home/{{ iiab_admin_user }}/.config/wayfire.ini to trigger pop-ups
lineinfile: ini_file:
path: /etc/xdg/lxsession/LXDE-pi/autostart path: /home/{{ iiab_admin_user }}/.config/wayfire.ini # iiab-admin
line: "@/etc/xdg/lxsession/LXDE-pi/sshpwd-lxde-iiab.sh" section: autostart
when: lx.stat.isdir is defined and lx.stat.isdir # and is_raspbian option: iiab-pwdwarn-wayfire
value: /usr/local/sbin/iiab-pwdwarn-wayfire
when: wayfire_ini.stat.exists
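Assuming the default `iiab_admin_user` of `iiab-admin`, the `ini_file` task above leaves a fragment like this in `/home/iiab-admin/.config/wayfire.ini`, which Wayfire runs at session start:
```
[autostart]
iiab-pwdwarn-wayfire = /usr/local/sbin/iiab-pwdwarn-wayfire
```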


@ -16,7 +16,8 @@
check_user_pwd() { check_user_pwd() {
#[ $(id -un) = "root" ] || return 2 #[ $(id -un) = "root" ] || return 2
#[ $(id -un) = "root" ] || [ $(id -un) = "iiab-admin" ] || return 2 #[ $(id -un) = "root" ] || [ $(id -un) = "iiab-admin" ] || return 2
[ -r /etc/shadow ] || return 2 # FORCE ERROR if /etc/shadow not readable
#[ -r /etc/shadow ] || return 2 # FORCE ERROR if /etc/shadow not readable
# *BUT* overall bash script still returns exit code 0 ("success"). # *BUT* overall bash script still returns exit code 0 ("success").
#id -u $1 > /dev/null 2>&1 || return 2 # Not needed if return 1 is good #id -u $1 > /dev/null 2>&1 || return 2 # Not needed if return 1 is good
@ -25,7 +26,10 @@ check_user_pwd() {
# 2021-08-28: New OS's use 'yescrypt' so use Perl instead of Python (#2949) # 2021-08-28: New OS's use 'yescrypt' so use Perl instead of Python (#2949)
# This also helps avoid parsing the (NEW) 4th sub-field in $y$j9T$SALT$HASH # This also helps avoid parsing the (NEW) 4th sub-field in $y$j9T$SALT$HASH
field2=$(grep "^$1:" /etc/shadow | cut -d: -f2)
# 2022-09-21 #3368: Sets field2 to "" if sudo -n fails to read /etc/shadow
# 2022-10-18 #3404: Redirect stderr to /dev/null, to avoid Mint pop-up
field2=$(sudo -n grep "^$1:" /etc/shadow 2> /dev/null | cut -d: -f2)
[[ $(perl -e "print crypt('$2', '$field2')") == $field2 ]] [[ $(perl -e "print crypt('$2', '$field2')") == $field2 ]]
# # $meth (hashing method) is typically '6' which implies 5000 rounds # # $meth (hashing method) is typically '6' which implies 5000 rounds
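Stripped of its Jinja2 templating, the heart of that check is small enough to run by hand. A sketch using the published defaults (`iiab-admin` / `g0adm1n`) named in the README above:
```
# Is the published password still in use? ('sudo -n' fails silently and
# leaves field2 empty if it would need to prompt for a password)
field2=$(sudo -n grep "^iiab-admin:" /etc/shadow 2>/dev/null | cut -d: -f2)
if [[ -n $field2 && $(perl -e "print crypt('g0adm1n', '$field2')") == $field2 ]]; then
    echo "SECURITY RISK: change it with 'sudo passwd iiab-admin'"
fi
```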


@ -19,14 +19,18 @@ check_user_pwd() {
# enough when user does not exist. Or uncomment to FORCE ERROR CODE 2. # enough when user does not exist. Or uncomment to FORCE ERROR CODE 2.
# Either way, overall bash script still returns exit code 0 ("success") # Either way, overall bash script still returns exit code 0 ("success")
# sudo works below (unlike in sshpwd-profile-iiab.sh) b/c RaspiOS ships w/ # sudo works below (unlike in sshpwd-profile-iiab.sh) b/c RasPiOS ships w/
# /etc/sudoers.d/010_pi-nopasswd containing "pi ALL=(ALL) NOPASSWD: ALL" # /etc/sudoers.d/010_pi-nopasswd containing "pi ALL=(ALL) NOPASSWD: ALL"
# (read access to /etc/shadow is otherwise restricted to just root and # (read access to /etc/shadow is otherwise restricted to just root and
# group www-data i.e. Apache, NGINX get special access). SEE: #2431, #2561 # group www-data i.e. Apache, NGINX get special access). SEE: #2431, #2561
# 2021-08-28: New OS's use 'yescrypt' so use Perl instead of Python (#2949) # 2021-08-28: New OS's use 'yescrypt' so use Perl instead of Python (#2949)
# This also helps avoid parsing the (NEW) 4th sub-field in $y$j9T$SALT$HASH # This also helps avoid parsing the (NEW) 4th sub-field in $y$j9T$SALT$HASH
field2=$(grep "^$1:" /etc/shadow | cut -d: -f2)
# 2022-09-21 #3368: Sets field2 to "" if sudo -n fails to read /etc/shadow
# 2022-10-18 #3404: Redirect stderr to /dev/null, as RasPiOS might one day
# force an annoying pop-up, as Mint did (due to sshpwd-profile-iiab.sh.j2)
field2=$(sudo -n grep "^$1:" /etc/shadow 2>/dev/null | cut -d: -f2)
[[ $(perl -e "print crypt('$2', '$field2')") == $field2 ]] [[ $(perl -e "print crypt('$2', '$field2')") == $field2 ]]
# # $meth (hashing method) is typically '6' which implies 5000 rounds # # $meth (hashing method) is typically '6' which implies 5000 rounds
@ -37,8 +41,8 @@ check_user_pwd() {
# [ $(python3 -c "import crypt; print(crypt.crypt('$2', '\$$meth\$$salt'))") == "\$$meth\$$salt\$$hash" ] # [ $(python3 -c "import crypt; print(crypt.crypt('$2', '\$$meth\$$salt'))") == "\$$meth\$$salt\$$hash" ]
} }
#grep -q "^PasswordAuthentication\s\+no\b" /etc/ssh/sshd_config && return # grep -q "^PasswordAuthentication\s\+no\b" /etc/ssh/sshd_config && return
#systemctl is-active {{ sshd_service }} || return # systemctl is-active ssh || return # #3444: Or use Ansible var sshd_service
if check_user_pwd "{{ iiab_admin_user }}" "{{ iiab_admin_published_pwd }}" ; then # iiab-admin g0adm1n if check_user_pwd "{{ iiab_admin_user }}" "{{ iiab_admin_published_pwd }}" ; then # iiab-admin g0adm1n
zenity --warning --width=600 --text="Published password in use by user '{{ iiab_admin_user }}'.\n\nTHIS IS A SECURITY RISK - please change its password using IIAB's Admin Console (http://box.lan/admin) -> Utilities -> Change Password.\n\nSee 'What are the default passwords?' at http://FAQ.IIAB.IO" zenity --warning --width=600 --text="Published password in use by user '{{ iiab_admin_user }}'.\n\nTHIS IS A SECURITY RISK - please change its password using IIAB's Admin Console (http://box.lan/admin) -> Utilities -> Change Password.\n\nSee 'What are the default passwords?' at http://FAQ.IIAB.IO"


@ -8,7 +8,7 @@ Access to our library of millions of books, journals, audio and video recordings
This Ansible role installs the Internet Archive's dweb-mirror project on This Ansible role installs the Internet Archive's dweb-mirror project on
Internet-in-a-Box (IIAB). Use this to build up a dynamic offline library Internet-in-a-Box (IIAB). Use this to build up a dynamic offline library
arising from the materials you can explore at http://dweb.archive.org arising from the materials you can explore at https://dweb.archive.org
The Offline Internet Archive server: The Offline Internet Archive server:
@ -248,7 +248,7 @@ and just checks the content is up to date.
## Managing collections on Internet Archive ## Managing collections on Internet Archive
You can create and manage your own collections on the [Internet Archive site](http://www.archive.org). You can create and manage your own collections on the [Internet Archive site](https://www.archive.org).
Other people can then crawl those collections. Other people can then crawl those collections.
First get in touch with Mitra Ardron at `mitra@archive.org`, as processes may have changed since this is written. First get in touch with Mitra Ardron at `mitra@archive.org`, as processes may have changed since this is written.


@ -1,3 +1,19 @@
- name: Enable & Restart 'internetarchive' systemd service, if internetarchive_enabled
systemd:
name: internetarchive
daemon_reload: yes
enabled: yes
state: restarted
when: internetarchive_enabled
- name: Disable & Stop 'internetarchive' systemd service, if not internetarchive_enabled
systemd:
name: internetarchive
enabled: no
state: stopped
when: not internetarchive_enabled
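The pair of stanzas above is the usual enable-or-disable pattern. After a run, plain systemd commands confirm the outcome, and if enabled the mirror should answer on the NGINX path configured just below:
```
systemctl is-enabled internetarchive
systemctl is-active internetarchive
curl -I http://box/archive     # from a machine that resolves the IIAB hostname
```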
- name: Enable http://box/archive via NGINX, by installing {{ nginx_conf_dir }}/internetarchive-nginx.conf from template - name: Enable http://box/archive via NGINX, by installing {{ nginx_conf_dir }}/internetarchive-nginx.conf from template
template: template:
src: internetarchive-nginx.conf.j2 # TO DO: roles/internetarchive/templates/internetarchive-nginx.conf.j2 src: internetarchive-nginx.conf.j2 # TO DO: roles/internetarchive/templates/internetarchive-nginx.conf.j2


@ -9,10 +9,10 @@
include_role: include_role:
name: nodejs name: nodejs
- name: Assert that 10.x <= nodejs_version ({{ nodejs_version }}) <= 16.x - name: Assert that 10.x <= nodejs_version ({{ nodejs_version }}) <= 20.x
assert: assert:
that: nodejs_version is version('10.x', '>=') and nodejs_version is version('16.x', '<=') that: nodejs_version is version('10.x', '>=') and nodejs_version is version('20.x', '<=')
fail_msg: "Internet Archive install cannot proceed, as it currently requires Node.js 10.x - 16.x, and your nodejs_version is set to {{ nodejs_version }}. Please check the value of nodejs_version in /opt/iiab/iiab/vars/default_vars.yml and possibly also /etc/iiab/local_vars.yml" fail_msg: "Internet Archive install cannot proceed, as it currently requires Node.js 10.x - 20.x, and your nodejs_version is set to {{ nodejs_version }}. Please check the value of nodejs_version in /opt/iiab/iiab/vars/default_vars.yml and possibly also /etc/iiab/local_vars.yml"
quiet: yes quiet: yes
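If that assertion trips, the pin lives in the usual variable. A sketch of the override, using the same `NN.x` spelling the check compares against:
```
# /etc/iiab/local_vars.yml
nodejs_version: 20.x
```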
- name: "Set 'yarn_install: True' and 'yarn_enabled: True'" - name: "Set 'yarn_install: True' and 'yarn_enabled: True'"
@ -30,6 +30,11 @@
state: present state: present
- name: Record (initial) disk space used
shell: df -B1 --output=used / | tail -1
register: df1
# 2. CREATE 2 DIRS, WIPE /opt/iiab/internetarchive/node_modules & RUN YARN # 2. CREATE 2 DIRS, WIPE /opt/iiab/internetarchive/node_modules & RUN YARN
- name: mkdir {{ internetarchive_dir }} - name: mkdir {{ internetarchive_dir }}
@ -64,6 +69,17 @@
# 4. RECORD Internet Archive AS INSTALLED # 4. RECORD Internet Archive AS INSTALLED
- name: Record (final) disk space used
shell: df -B1 --output=used / | tail -1
register: df2
- name: Add 'internetarchive_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: internetarchive
option: internetarchive_disk_usage
value: "{{ df2.stdout|int - df1.stdout|int }}"
- name: "Set 'internetarchive_installed: True'" - name: "Set 'internetarchive_installed: True'"
set_fact: set_fact:
internetarchive_installed: True internetarchive_installed: True


@ -19,76 +19,60 @@
quiet: yes quiet: yes
# 2020-02-11: @mitra42 & @holta agree (#2247) that the following 2-stanza - block:
# "UPDATE internetarchive" block should run whenever one isn't installing
# (or reinstalling) internetarchive, for now. We're aware this means slowness
# during "./runrole internetarchive" but that's very intentional for now -- as
# it leads to more testing of more recent versions of internetarchive, which
# is strongly desired. Finally, these current norms can and probably will be
# changed in future, when broader IIAB norms develop around "./runrole
# --upgrade internetarchive" or "./runrole --update internetarchive" or such,
# as may evolve @ https://github.com/iiab/iiab/pull/2238#discussion_r376168178
- block: # BEGIN 2-STANZA BLOCK # 2020-02-11: @mitra42 & @holta agree (#2247) that the following 2-stanza
# "UPDATE internetarchive" portion should run whenever one isn't installing
# (or reinstalling) internetarchive, for now. We're aware this means slowness
# during "./runrole internetarchive" but that's very intentional for now -- as
# it leads to more testing of more recent versions of internetarchive, which
# is strongly desired. Finally, these current norms can and probably will be
# changed in future, when broader IIAB norms develop around "./runrole
# --upgrade internetarchive" or "./runrole --update internetarchive" or such,
# as may evolve @ https://github.com/iiab/iiab/pull/2238#discussion_r376168178
- name: "UPGRADE: Stop 'internetarchive' systemd service, if internetarchive_installed is defined" - name: "UPGRADE: Stop 'internetarchive' systemd service, if internetarchive_installed is defined"
systemd: systemd:
name: internetarchive name: internetarchive
daemon_reload: yes daemon_reload: yes
state: stopped state: stopped
when: internetarchive_installed is defined
- name: "UPGRADE: Run 'yarn upgrade' in {{ internetarchive_dir }}, if internetarchive_installed is defined" - name: "UPGRADE: Run 'yarn upgrade' in {{ internetarchive_dir }}, if internetarchive_installed is defined"
shell: yarn config set child-concurrency 1 && yarn install && yarn upgrade shell: yarn config set child-concurrency 1 && yarn install && yarn upgrade
args: args:
chdir: "{{ internetarchive_dir }}" chdir: "{{ internetarchive_dir }}"
when: internetarchive_installed is defined
when: internetarchive_installed is defined # END 2-STANZA BLOCK # "ELSE" INSTALL...
# "ELSE" INSTALL... - name: Install Internet Archive if 'internetarchive_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
include_tasks: install.yml
- name: Install Internet Archive if 'internetarchive_installed' not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml when: internetarchive_installed is undefined
include_tasks: install.yml
when: internetarchive_installed is undefined
# ENABLE/DISABLE/RESTART SYSTEMD SERVICE & WEB SERVERS AS NEC ? - include_tasks: enable-or-disable.yml
- name: Enable & Restart 'internetarchive' systemd service, if internetarchive_enabled
systemd:
name: internetarchive
daemon_reload: yes
enabled: yes
state: restarted
when: internetarchive_enabled
- name: Disable & Stop 'internetarchive' systemd service, if not internetarchive_enabled
systemd:
name: internetarchive
enabled: no
state: stopped
when: not internetarchive_enabled
# - name: Enable/Disable/Restart Apache if primary
# include_tasks: apache.yml
# when: apache_installed is defined and not nginx_enabled
- name: Enable/Disable/Restart NGINX if primary
include_tasks: nginx.yml
#when: nginx_enabled
- name: Add 'internetarchive' variable values to {{ iiab_ini_file }} - name: Add 'internetarchive' variable values to {{ iiab_ini_file }}
ini_file: ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: internetarchive section: internetarchive
option: "{{ item.option }}" option: "{{ item.option }}"
value: "{{ item.value | string }}" value: "{{ item.value | string }}"
with_items: with_items:
- option: name - option: name
value: Internet Archive value: Internet Archive
- option: description - option: description
value: '"Take the Internet Archive experience and materials offline, in a decentralized way!"' value: '"Take the Internet Archive experience and materials offline, in a decentralized way!"'
- option: internetarchive_install - option: internetarchive_install
value: "{{ internetarchive_install }}" value: "{{ internetarchive_install }}"
- option: internetarchive_enabled - option: internetarchive_enabled
value: "{{ internetarchive_enabled }}" value: "{{ internetarchive_enabled }}"
rescue:
- name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
fail:
msg: ""
when: not skip_role_on_error
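The `rescue:` above is what lets a failing role be skipped instead of aborting the whole playbook, but only when `skip_role_on_error` allows it. Assuming you override that flag the same way as other IIAB variables:
```
# /etc/iiab/local_vars.yml
skip_role_on_error: True
```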


@ -1,5 +1,7 @@
## JupyterHub programming environment with student Notebooks ## JupyterHub programming environment with student Notebooks
### CAUTION: Internet-in-a-Box (IIAB) does not support JupyterHub on 32-bit OS's, where installation will likely fail ([#3639](https://github.com/iiab/iiab/issues/3639)).
#### Secondary schools may want to consider JupyterHub to integrate coding with dynamic interactive graphing — A New Way to Think About Programming — allowing students to integrate science experiment results and program output within their own blog-like "Jupyter Notebooks." #### Secondary schools may want to consider JupyterHub to integrate coding with dynamic interactive graphing — A New Way to Think About Programming — allowing students to integrate science experiment results and program output within their own blog-like "Jupyter Notebooks."
* Jupyter Notebooks are widely used in the scientific community: * Jupyter Notebooks are widely used in the scientific community:
@ -9,10 +11,11 @@
* [JupyterHub changelog](https://jupyterhub.readthedocs.io/en/stable/changelog.html#changelog) * [JupyterHub changelog](https://jupyterhub.readthedocs.io/en/stable/changelog.html#changelog)
* Students create their own accounts on first use — e.g. at http://box.lan/jupyterhub — just as if they're logging in regularly (unfortunately the login screen doesn't make that clear, but the teacher _does not_ need to be involved!) * Students create their own accounts on first use — e.g. at http://box.lan/jupyterhub — just as if they're logging in regularly (unfortunately the login screen doesn't make that clear, but the teacher _does not_ need to be involved!)
* A student can then sign in with their username and password, to gain access to their files (Jupyter Notebooks). * A student can then sign in with their username and password, to gain access to their files (Jupyter Notebooks).
* The teacher should set and protect JupyterHub's overall `Admin` password, just in case. As with student accounts, the login screen doesn't make that clear — so just log in with username `Admin` — using any password that you want to become permanent. * The teacher should set and protect JupyterHub's overall `Admin` password, just in case. As with student accounts, the login screen unfortunately doesn't make that clear — so just log in with username `Admin` — using any password that you want to become permanent.
* Individual student folders are created in `/var/lib/private/` on the Internet-in-a-Box (IIAB) server: * Individual student folders are created in `/var/lib/private/` on your Internet-in-a-Box (IIAB) server:
* A student will only be able to see their own work — they do not have privileges outside of their own folder. * A student will only be able to see their own work — they do not have privileges outside of their own folder.
* Students may upload Jupyter Notebooks to the IIAB server, and download the current state of their work via a normal browser. * Students may upload Jupyter Notebooks to the IIAB server, and download the current state of their work via a normal browser.
* Linux administrators can read more about JupyterHub's [Local Users](https://github.com/jupyterhub/systemdspawner#local-users) and [c.SystemdSpawner.dynamic_users = True](https://github.com/jupyterhub/systemdspawner#dynamic_users)
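In `jupyterhub_config.py` terms, that per-student isolation comes from the systemdspawner setting named in the link above. Roughly (a sketch, not a copy of IIAB's shipped template):
```
c.JupyterHub.spawner_class = 'systemdspawner.SystemdSpawner'
c.SystemdSpawner.dynamic_users = True   # per-user state lands under /var/lib/private/
```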
### Settings ### Settings
@ -26,10 +29,11 @@ In some rare circumstances, it may be necessary to restart JupyterHub's systemd
sudo systemctl restart jupyterhub sudo systemctl restart jupyterhub
``` ```
FYI `/opt/iiab/jupyterhub` is a Python 3 virtual environment, that can be activated with the usual formula: FYI `/opt/iiab/jupyterhub` is a Python 3 virtual environment, that can be activated (and deactivated) with the usual:
``` ```
source /opt/iiab/jupyterhub/bin/activate source /opt/iiab/jupyterhub/bin/activate
(jupyterhub) root@box:~# deactivate
``` ```
Passwords are hashed using 4096 rounds of the latest Blowfish (bcrypt's $2b$ algorithm) and stored in: Passwords are hashed using 4096 rounds of the latest Blowfish (bcrypt's $2b$ algorithm) and stored in:
@ -42,19 +46,19 @@ Passwords are hashed using 4096 rounds of the latest Blowfish (bcrypt's $2b$ alg
Users can change their password by logging in, and then visiting URL: http://box.lan/jupyterhub/auth/change-password Users can change their password by logging in, and then visiting URL: http://box.lan/jupyterhub/auth/change-password
NOTE: This is the only way to change the password for user 'Admin', because Control Panel > Admin (below) does not permit deletion of this account. NOTE: This is the only way to change the password for user `Admin`, because **File > Hub Control Panel > Admin** (below) does not permit deletion of this account.
### Control Panel > Admin page, to manage other accounts ### File > Hub Control Panel > Admin, to manage accounts
The `Admin` user (and any users given `Admin` privilege) can reset user passwords by deleting the user from JupyterHub's **Admin** page (below). This logs the user out, but does not remove any of their data or home directories. The user can then set a new password in the usual way — simply by logging in. Example: The `Admin` user (and any users given `Admin` privilege) can reset user passwords by deleting the user from JupyterHub's **Admin** page (below). This logs the user out, but does not remove any of their data or home directories. The user can then set a new password in the usual way — simply by logging in. Example:
1. As a user with `Admin` privilege, click **Control Panel** in the top right of your JupyterHub: 1. As a user with `Admin` privilege, click **File > Hub Control Panel** in your JupyterHub:
![Control panel button in notebook, top right](control-panel-button1.png) ![image](https://user-images.githubusercontent.com/2458907/217602766-ab6a9d3c-9f92-496e-a0e8-6c18a084e960.png)
2. In the Control Panel, open the **Admin** link in the top left: 2. At the top of the Control Panel, click **Admin**:
![Admin button in control panel, top left](admin-access-button1.png) ![image](https://user-images.githubusercontent.com/2458907/217602473-f4f9fd40-b4c1-45e1-88c5-54c6d4b604ff.png)
This opens up the JupyterHub Admin page, where you can add / delete users, start / stop people's servers and see who is online. This opens up the JupyterHub Admin page, where you can add / delete users, start / stop people's servers and see who is online.
@ -70,8 +74,22 @@ The `Admin` user (and any users given `Admin` privilege) can reset user password
_WARNING: If on login users see "500 : Internal Server Error", you may need to remove ALL files of the form_ `/run/jupyter-johndoe-singleuser` _WARNING: If on login users see "500 : Internal Server Error", you may need to remove ALL files of the form_ `/run/jupyter-johndoe-singleuser`
### Logging
To see JupyterHub's (typically very long!) log, run:
```
journalctl -u jupyterhub
```
Sometimes other logs might also be available, e.g.:
```
journalctl -u jupyter-admin-singleuser
```
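If the log is too long to page through, journald's standard filters help, e.g. to follow new entries live:
```
journalctl -u jupyterhub -f
```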
### PAWS/Jupyter Notebooks for Python Beginners ### PAWS/Jupyter Notebooks for Python Beginners
While PAWS is a little bit off topic, if you have an interest in Wikipedia, please do see this 23m 42s video ["Intro to PAWS/Jupyter notebooks for Python beginners"](https://www.youtube.com/watch?v=AUZkioRI-aA&list=PLeoTcBlDanyNQXBqI1rVXUqUTSSiuSIXN&index=8) by Chico Venancio, from 2021-06-01. While PAWS is a little bit off topic, if you have an interest in Wikipedia, please do see this 23m 42s video ["Intro to PAWS/Jupyter notebooks for Python beginners"](https://www.youtube.com/watch?v=AUZkioRI-aA&list=PLeoTcBlDanyNQXBqI1rVXUqUTSSiuSIXN&index=8) by Chico Venancio, from 2021-06-01.
He explains PAWS as a "powerful Python execution environment http://paws.wmcloud.org [allowing] ordinary folks to write interactive scripts to work with Wikimedia content." He explains PAWS as a "powerful Python execution environment https://paws.wmcloud.org = https://wikitech.wikimedia.org/wiki/PAWS [allowing] ordinary folks to write interactive scripts to work with Wikimedia content."


@ -13,11 +13,21 @@
when: nodejs_installed is undefined when: nodejs_installed is undefined
- name: "Install package: python3-venv" - name: Record (initial) disk space used
shell: df -B1 --output=used / | tail -1
register: df1
- name: "Install package: python3-psutil"
package: package:
name: python3-venv name: python3-psutil
state: present state: present
- name: Remove previous virtual environment {{ jupyterhub_venv }}
file:
path: "{{ jupyterhub_venv }}"
state: absent
- name: Make 3 directories to hold JupyterHub config - name: Make 3 directories to hold JupyterHub config
file: file:
state: directory state: directory
@ -33,21 +43,33 @@
global: yes global: yes
state: latest state: latest
- name: "pip install 7 packages into virtual environment: {{ jupyterhub_venv }} (~229 MB)" - name: "pip install 3 packages into virtual environment: {{ jupyterhub_venv }} (~326 MB total, after 2 Ansible calls)"
pip: pip:
name: name:
- pip - pip
- wheel - wheel
- ipywidgets
- jupyterhub - jupyterhub
virtualenv: "{{ jupyterhub_venv }}" # /opt/iiab/jupyterhub
virtualenv_site_packages: no
virtualenv_command: python3 -m venv --system-site-packages "{{ jupyterhub_venv }}" # 2021-07-29: This works on RasPiOS 10, Debian 11, Ubuntu 20.04 and Mint 20 -- however if you absolutely must use the older Debian 10 -- you can work around errors "can't find Rust compiler" and "This package requires Rust >=1.41.0" if you (1) revert this line to 'virtualenv_command: virtualenv' AND (2) uncomment the line just below
#virtualenv_python: python3 # 2021-07-29: Was needed when above line was 'virtualenv_command: virtualenv' (generally for Python 2)
extra_args: "--no-cache-dir --prefer-binary" # 2021-11-30, 2022-07-07: The "--pre" flag had earlier been needed, for beta-like pre-releases of JupyterHub 2.0.0
# 2022-07-07: Attempting to "pip install" all 7 together (3 above + 4 below)
# fails on OS's like 64-bit RasPiOS (but interestingly works on Ubuntu 22.04!)
# https://github.com/iiab/iiab/issues/3283
- name: Break up jupyterhub/jupyterlab pip installs into 2 parts (3 packages above + 4 packages here) due to mutual dependency deadlock on some OS's
pip:
name:
- jupyterlab - jupyterlab
- jupyterhub_firstuseauthenticator - jupyterhub_firstuseauthenticator
- jupyterhub-systemdspawner - jupyterhub-systemdspawner
virtualenv: "{{ jupyterhub_venv }}" # /opt/iiab/jupyterhub - ipywidgets
virtualenv: "{{ jupyterhub_venv }}"
virtualenv_site_packages: no virtualenv_site_packages: no
virtualenv_command: python3 -m venv "{{ jupyterhub_venv }}" # 2021-07-29: This works on RaspiOS 10, Debian 11, Ubuntu 20.04 and Mint 20 -- however if you absolutely must use the older Debian 10 -- you can work around errors "can't find Rust compiler" and "This package requires Rust >=1.41.0" if you (1) revert this line to 'virtualenv_command: virtualenv' AND (2) uncomment the line just below virtualenv_command: python3 -m venv --system-site-packages "{{ jupyterhub_venv }}"
#virtualenv_python: python3 # 2021-07-29: Was needed when above line was 'virtualenv_command: virtualenv' (generally for Python 2) extra_args: "--no-cache-dir --prefer-binary" # 2023-10-01: Lifesaver when recent wheels (e.g. piwheels.org) are inevitably not yet built! SEE #3560
extra_args: "--no-cache-dir --pre" # 2021-11-30: The "--pre" flag should likely be removed after JupyterHub 2.0.0 is released.
- name: "Install from template: {{ jupyterhub_venv }}/etc/jupyterhub/jupyterhub_config.py" - name: "Install from template: {{ jupyterhub_venv }}/etc/jupyterhub/jupyterhub_config.py"
template: template:
@ -59,20 +81,21 @@
src: jupyterhub.service.j2 src: jupyterhub.service.j2
dest: /etc/systemd/system/jupyterhub.service dest: /etc/systemd/system/jupyterhub.service
- name: Install {{ jupyterhub_venv }}/bin/getsite.py from template, to fetch site_packages path, e.g. {{ jupyterhub_venv }}/lib/python{{ python_ver }}/site-packages # 2022-07-07: No longer needed, thx to upstream fixes
template: # - name: Install {{ jupyterhub_venv }}/bin/getsite.py from template, to fetch site_packages path, e.g. {{ jupyterhub_venv }}/lib/python{{ python_version }}/site-packages
src: getsite.py.j2 # template:
dest: "{{ jupyterhub_venv }}/bin/getsite.py" # src: getsite.py.j2
mode: 0755 # dest: "{{ jupyterhub_venv }}/bin/getsite.py"
# mode: 0755
- name: Install patch_FUA.sh from template -- to (1) fix async password-changing page, and (2) force usernames to lowercase -- patching $SITE_PACKAGES/firstuseauthenticator/firstuseauthenticator.py #
template: # - name: Install patch_FUA.sh from template -- to (1) fix async password-changing page, and (2) force usernames to lowercase -- patching $SITE_PACKAGES/firstuseauthenticator/firstuseauthenticator.py
src: patch_FUA.sh.j2 # template:
dest: "{{ jupyterhub_venv }}/bin/patch_FUA.sh" # src: patch_FUA.sh.j2
mode: 0755 # dest: "{{ jupyterhub_venv }}/bin/patch_FUA.sh"
# mode: 0755
- name: "Run the above two, via: {{ jupyterhub_venv }}/bin/patch_FUA.sh" #
command: "{{ jupyterhub_venv }}/bin/patch_FUA.sh" # - name: "Run the above two, via: {{ jupyterhub_venv }}/bin/patch_FUA.sh"
# command: "{{ jupyterhub_venv }}/bin/patch_FUA.sh"
- name: Install patch_http-warning.sh from template, to turn off the warning about http insecurity, in {{ jupyterhub_venv }}/share/jupyterhub/templates/login.html - name: Install patch_http-warning.sh from template, to turn off the warning about http insecurity, in {{ jupyterhub_venv }}/share/jupyterhub/templates/login.html
template: template:
@ -86,6 +109,17 @@
# RECORD JupyterHub AS INSTALLED # RECORD JupyterHub AS INSTALLED
- name: Record (final) disk space used
shell: df -B1 --output=used / | tail -1
register: df2
- name: Add 'jupyterhub_disk_usage = {{ df2.stdout|int - df1.stdout|int }}' to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: jupyterhub
option: jupyterhub_disk_usage
value: "{{ df2.stdout|int - df1.stdout|int }}"
- name: "Set 'jupyterhub_installed: True'" - name: "Set 'jupyterhub_installed: True'"
set_fact: set_fact:
jupyterhub_installed: True jupyterhub_installed: True


@ -19,26 +19,33 @@
quiet: yes quiet: yes
- name: Install Jupyter if jupyterhub_installed not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml - block:
include_tasks: install.yml
when: jupyterhub_installed is undefined
- name: Install Jupyter if jupyterhub_installed not defined, e.g. in {{ iiab_state_file }} # /etc/iiab/iiab_state.yml
include_tasks: install.yml
when: jupyterhub_installed is undefined
- include_tasks: enable-or-disable.yml - include_tasks: enable-or-disable.yml
- name: Add 'jupyterhub' variable values to {{ iiab_ini_file }}
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini
section: jupyterhub
option: "{{ item.option }}"
value: "{{ item.value | string }}"
with_items:
- option: name
value: JupyterHub
- option: description
value: '"High Schools may want to consider JupyterHub to integrate coding with dynamic interactive graphing — A New Way to Think About Programming — allowing students to integrate science experiment results and program output within their notebook/document/blog."'
- option: jupyterhub_install
value: "{{ jupyterhub_install }}"
- option: jupyterhub_enabled
value: "{{ jupyterhub_enabled }}"
- name: Add 'jupyterhub' variable values to {{ iiab_ini_file }} rescue:
ini_file:
path: "{{ iiab_ini_file }}" # /etc/iiab/iiab.ini - name: 'SEE ERROR ABOVE (skip_role_on_error: {{ skip_role_on_error }})'
section: jupyterhub fail:
option: "{{ item.option }}" msg: ""
value: "{{ item.value | string }}" when: not skip_role_on_error
with_items:
- option: name
value: JupyterHub
- option: description
value: '"High Schools may want to consider JupyterHub to integrate coding with dynamic interactive graphing — A New Way to Think About Programming — allowing students to integrate science experiment results and program output within their notebook/document/blog."'
- option: jupyterhub_install
value: "{{ jupyterhub_install }}"
- option: jupyterhub_enabled
value: "{{ jupyterhub_enabled }}"
