Browse Source

adding compose

merge-requests/1/head
Steven Foerster 6 years ago
parent
commit
f6587314aa
  1. 6
      compose/production/postgres/Dockerfile
  2. 5
      compose/production/postgres/maintenance/_sourced/constants.sh
  3. 12
      compose/production/postgres/maintenance/_sourced/countdown.sh
  4. 41
      compose/production/postgres/maintenance/_sourced/messages.sh
  5. 16
      compose/production/postgres/maintenance/_sourced/yes_no.sh
  6. 38
      compose/production/postgres/maintenance/backup
  7. 22
      compose/production/postgres/maintenance/backups
  8. 55
      compose/production/postgres/maintenance/restore
  9. 13
      compose/production/tor/Dockerfile
  10. 25
      compose/production/tor/torrc
  11. 5
      compose/production/traefik/Dockerfile
  12. 176
      compose/production/traefik/traefik.toml
  13. 24
      extra/bitwarden.yml
  14. 20
      extra/homeassistant.yml
  15. 24
      extra/jellyfin.yml
  16. 29
      extra/nextcloud.yml
  17. 21
      extra/onlyoffice.yml
  18. 25
      extra/raspap.yml
  19. 65
      extra/rocketchat.yml
  20. 28
      extra/syncthing.yml
  21. 16
      extra/tor.yml

6
compose/production/postgres/Dockerfile

@ -0,0 +1,6 @@ @@ -0,0 +1,6 @@
# Production PostgreSQL image with the maintenance scripts
# (backup / backups / restore) installed onto the PATH.
FROM postgres:11.3

COPY ./compose/production/postgres/maintenance /usr/local/bin/maintenance
# Install the scripts in a single layer: a separate `RUN chmod` would copy
# every file into an extra layer and bloat the image. Mark them executable,
# move them onto the PATH, then drop the now-empty staging directory.
RUN chmod +x /usr/local/bin/maintenance/* \
    && mv /usr/local/bin/maintenance/* /usr/local/bin \
    && rmdir /usr/local/bin/maintenance

5
compose/production/postgres/maintenance/_sourced/constants.sh

@ -0,0 +1,5 @@ @@ -0,0 +1,5 @@
#!/usr/bin/env bash

# Shared constants sourced by the postgres maintenance scripts
# (backup, backups, restore).

# Directory (inside the container) where backup archives are written/read.
BACKUP_DIR_PATH='/backups'
# Filename prefix for generated backup archives.
BACKUP_FILE_PREFIX='backup'

12
compose/production/postgres/maintenance/_sourced/countdown.sh

@ -0,0 +1,12 @@ @@ -0,0 +1,12 @@
#!/usr/bin/env bash

# Display a live HH:MM:SS countdown for the given number of seconds.
# ${1}: duration of the countdown in seconds.
countdown() {
    declare desc="A simple countdown. Source: https://superuser.com/a/611582"
    local seconds="${1}"
    # Absolute epoch time at which the countdown expires.
    # Use $(...) instead of deprecated backticks (SC2006) and quote command
    # substitutions used as test operands (SC2086).
    local d=$(($(date +%s) + seconds))
    while [ "$d" -ge "$(date +%s)" ]; do
        # Re-render the remaining time on the same line (\r, no newline).
        echo -ne "$(date -u --date @$((d - $(date +%s))) +%H:%M:%S)\r"
        sleep 0.1
    done
}

41
compose/production/postgres/maintenance/_sourced/messages.sh

@ -0,0 +1,41 @@ @@ -0,0 +1,41 @@
#!/usr/bin/env bash

# Colorized logging helpers sourced by the postgres maintenance scripts.
# Each function echoes its arguments after an ANSI-colored severity tag
# (\e[3Xm selects a color, \e[0m resets it).
# NOTE(review): "${@}" inside a double-quoted string expands to multiple
# words; output matches "$*" here only because echo joins its arguments with
# single spaces — "$*" would be the conventional form.

# Print a blank line.
message_newline() {
echo
}
# Plain DEBUG-prefixed message.
message_debug()
{
echo -e "DEBUG: ${@}"
}
# Bold banner message (no severity tag).
message_welcome()
{
echo -e "\e[1m${@}\e[0m"
}
# Yellow WARNING prefix.
message_warning()
{
echo -e "\e[33mWARNING\e[0m: ${@}"
}
# Red ERROR prefix.
message_error()
{
echo -e "\e[31mERROR\e[0m: ${@}"
}
# White INFO prefix.
message_info()
{
echo -e "\e[37mINFO\e[0m: ${@}"
}
# Yellow SUGGESTION prefix.
message_suggestion()
{
echo -e "\e[33mSUGGESTION\e[0m: ${@}"
}
# Green SUCCESS prefix.
message_success()
{
echo -e "\e[32mSUCCESS\e[0m: ${@}"
}

16
compose/production/postgres/maintenance/_sourced/yes_no.sh

@ -0,0 +1,16 @@ @@ -0,0 +1,16 @@
#!/usr/bin/env bash

# Prompt for a yes/no confirmation; anything other than a single y/Y
# (including pressing Enter) counts as "no".
# ${1}: confirmation message shown to the user.
# NOTE(review): this uses `exit`, not `return`, so invoking yes_no from a
# script that sourced this file terminates the whole calling script with
# status 0 (yes) or 1 (no) — confirm that abort-on-"no" is the intent.
yes_no() {
declare desc="Prompt for confirmation. \$\"\{1\}\": confirmation message."
local arg1="${1}"
local response=
read -r -p "${arg1} (y/[n])? " response
if [[ "${response}" =~ ^[Yy]$ ]]
then
exit 0
else
exit 1
fi
}

38
compose/production/postgres/maintenance/backup

@ -0,0 +1,38 @@ @@ -0,0 +1,38 @@
#!/usr/bin/env bash

### Create a database backup.
###
### Usage:
###     $ docker-compose -f <environment>.yml (exec |run --rm) postgres backup

# Fail fast: abort on command errors, failed pipeline stages, unset variables.
set -o errexit
set -o pipefail
set -o nounset

# Quote ${0} inside the substitution so a path containing spaces does not
# word-split (SC2086).
working_dir="$(dirname "${0}")"
source "${working_dir}/_sourced/constants.sh"
source "${working_dir}/_sourced/messages.sh"

message_welcome "Backing up the '${POSTGRES_DB}' database..."

# Refuse to run as the 'postgres' superuser account.
if [[ "${POSTGRES_USER}" == "postgres" ]]; then
    message_error "Backing up as 'postgres' user is not supported. Assign 'POSTGRES_USER' env with another one and try again."
    exit 1
fi

# Export the standard libpq connection variables so pg_dump picks them up
# without explicit flags.
export PGHOST="${POSTGRES_HOST}"
export PGPORT="${POSTGRES_PORT}"
export PGUSER="${POSTGRES_USER}"
export PGPASSWORD="${POSTGRES_PASSWORD}"
export PGDATABASE="${POSTGRES_DB}"

# Timestamped, gzip-compressed SQL dump, e.g. backup_2020_01_01T00_00_00.sql.gz
backup_filename="${BACKUP_FILE_PREFIX}_$(date +'%Y_%m_%dT%H_%M_%S').sql.gz"
pg_dump | gzip > "${BACKUP_DIR_PATH}/${backup_filename}"

message_success "'${POSTGRES_DB}' database backup '${backup_filename}' has been created and placed in '${BACKUP_DIR_PATH}'."

22
compose/production/postgres/maintenance/backups

@ -0,0 +1,22 @@ @@ -0,0 +1,22 @@
#!/usr/bin/env bash

### View backups.
###
### Usage:
###     $ docker-compose -f <environment>.yml (exec |run --rm) postgres backups

# Fail fast: abort on command errors, failed pipeline stages, unset variables.
set -o errexit
set -o pipefail
set -o nounset

# Quote ${0} inside the substitution so a path containing spaces does not
# word-split (SC2086).
working_dir="$(dirname "${0}")"
source "${working_dir}/_sourced/constants.sh"
source "${working_dir}/_sourced/messages.sh"

message_welcome "These are the backups you have got:"

# Long listing, human-readable sizes, newest first.
ls -lht "${BACKUP_DIR_PATH}"

55
compose/production/postgres/maintenance/restore

@ -0,0 +1,55 @@ @@ -0,0 +1,55 @@
#!/usr/bin/env bash

### Restore database from a backup.
###
### Parameters:
###     <1> filename of an existing backup.
###
### Usage:
###     $ docker-compose -f <environment>.yml (exec |run --rm) postgres restore <1>

# Fail fast: abort on command errors, failed pipeline stages, unset variables.
set -o errexit
set -o pipefail
set -o nounset

# Quote ${0} inside the substitution so a path containing spaces does not
# word-split (SC2086).
working_dir="$(dirname "${0}")"
source "${working_dir}/_sourced/constants.sh"
source "${working_dir}/_sourced/messages.sh"

# ${1+x} expands only when $1 is set, so this check is safe under 'nounset'.
if [[ -z ${1+x} ]]; then
    message_error "Backup filename is not specified yet it is a required parameter. Make sure you provide one and try again."
    exit 1
fi
backup_filename="${BACKUP_DIR_PATH}/${1}"
if [[ ! -f "${backup_filename}" ]]; then
    message_error "No backup with the specified filename found. Check out the 'backups' maintenance script output to see if there is one and try again."
    exit 1
fi

message_welcome "Restoring the '${POSTGRES_DB}' database from the '${backup_filename}' backup..."

# Refuse to run as the 'postgres' superuser account.
if [[ "${POSTGRES_USER}" == "postgres" ]]; then
    message_error "Restoring as 'postgres' user is not supported. Assign 'POSTGRES_USER' env with another one and try again."
    exit 1
fi

# Export the standard libpq connection variables so dropdb/createdb/psql
# pick them up without explicit flags.
export PGHOST="${POSTGRES_HOST}"
export PGPORT="${POSTGRES_PORT}"
export PGUSER="${POSTGRES_USER}"
export PGPASSWORD="${POSTGRES_PASSWORD}"
export PGDATABASE="${POSTGRES_DB}"

message_info "Dropping the database..."
dropdb "${PGDATABASE}"

message_info "Creating a new database..."
# Name the database explicitly instead of relying on createdb's implicit
# fallback to the PGDATABASE environment variable (same result, but the
# intent is no longer hidden).
createdb --owner="${POSTGRES_USER}" "${PGDATABASE}"

message_info "Applying the backup to the new database..."
gunzip -c "${backup_filename}" | psql "${POSTGRES_DB}"

message_success "The '${POSTGRES_DB}' database has been restored from the '${backup_filename}' backup."

13
compose/production/tor/Dockerfile

@ -0,0 +1,13 @@ @@ -0,0 +1,13 @@
# Tor SOCKS-proxy image for production.
# Pin the base tag instead of ':latest' for reproducible builds (DL3007).
FROM alpine:3.10

# '--no-cache' fetches the index transiently, so no 'apk update' or manual
# /var/cache/apk cleanup is needed; the blanket 'apk upgrade' is dropped in
# favor of bumping the base-image tag (DL3005).
RUN apk add --no-cache tor

# SOCKS proxy port (documentation only; published via the compose file).
EXPOSE 9150

# COPY is preferred over ADD for plain local files (DL3020).
COPY ./compose/production/tor/torrc /etc/tor/torrc

# Drop root; the 'tor' user is created by the tor package.
USER tor

# Exec form so tor runs as PID 1 and receives SIGTERM from 'docker stop'.
CMD ["/usr/bin/tor", "-f", "/etc/tor/torrc"]

25
compose/production/tor/torrc

@ -0,0 +1,25 @@ @@ -0,0 +1,25 @@
## The directory for keeping all the keys/etc
DataDirectory /var/lib/tor

## Tor opens a socks proxy on port 9150.
## Bound on 0.0.0.0 so other containers on the docker network can reach it.
SocksPort 0.0.0.0:9150

## Entry policies to allow/deny SOCKS requests based on IP address.
## SocksPolicy accept 192.168.1.0/24
## Accept the default docker bridge range and 10/8; reject everything else.
SocksPolicy accept 172.17.0.0/16
SocksPolicy accept 10.0.0.0/8
SocksPolicy reject *

## Logs go to stdout at level "notice"
Log notice stdout

## Control protocol listener.
## NOTE(review): no CookieAuthentication or HashedControlPassword is
## configured in this file -- confirm port 9051 is not reachable from
## untrusted networks before enabling in production.
ControlPort 9051

# Try for at most NUM seconds when building circuits. If the circuit isn't open in that time, give up on it. (Default: 1 minute.)
CircuitBuildTimeout 5
# Send a padding cell every N seconds to keep firewalls from closing our connections while Tor is not in use.
KeepalivePeriod 60
# Force Tor to consider whether to build a new circuit every NUM seconds.
NewCircuitPeriod 15
# How many entry guards should we keep at a time?
NumEntryGuards 8

5
compose/production/traefik/Dockerfile

@ -0,0 +1,5 @@ @@ -0,0 +1,5 @@
# Traefik reverse-proxy image (v1.x -- the accompanying traefik.toml uses the
# v1 entryPoints/backends/frontends syntax).
# NOTE(review): 'traefik:alpine' is a floating tag; consider pinning a
# specific 1.x version for reproducible builds.
FROM traefik:alpine

# Prepare the ACME (Let's Encrypt) certificate store in a single layer;
# acme.json must be mode 600.
RUN mkdir -p /etc/traefik/acme \
    && touch /etc/traefik/acme/acme.json \
    && chmod 600 /etc/traefik/acme/acme.json

COPY ./compose/production/traefik/traefik.toml /etc/traefik

176
compose/production/traefik/traefik.toml

@ -0,0 +1,176 @@ @@ -0,0 +1,176 @@
#debug = true
logLevel = "ERROR" #DEBUG, INFO, WARN, ERROR, FATAL, PANIC
InsecureSkipVerify = true
#defaultEntryPoints = ["http", "https"]
defaultEntryPoints = ["http"]
# Entrypoints, http and https
[entryPoints]
# http should be redirected to https
[entryPoints.http]
address = ":80"
#[entryPoints.http.redirect]
#entryPoint = "https"
# https is the default
#[entryPoints.https]
#address = ":443"
# [entryPoints.https.tls]
## Enable ACME (Let's Encrypt): automatic SSL
#[acme]
## Email address used for registration
#email = "steven@cyber5k.com"
#storage = "/etc/traefik/acme/acme.json"
#entryPoint = "https"
#onDemand = false
#OnHostRule = true
# # Use a HTTP-01 acme challenge rather than TLS-SNI-01 challenge
# [acme.httpChallenge]
# entryPoint = "http"
[file]
[backends]
[backends.django]
[backends.django.servers.server1]
url = "http://django:5000"
[backends.pihole]
[backends.pihole.servers.server1]
url = "http://pihole:80"
[backends.homeassistant]
[backends.homeassistant.servers.server1]
url = "http://homeassistant:8123"
[backends.syncthing]
[backends.syncthing.servers.server1]
url = "http://syncthing:8384"
[backends.rocketchat]
[backends.rocketchat.servers.server1]
url = "http://rocketchat:3000"
[backends.nextcloud]
[backends.nextcloud.servers.server1]
url = "http://nextcloud:80"
[backends.onlyoffice]
[backends.onlyoffice.servers.server1]
url = "http://onlyoffice:80"
[backends.bitwarden]
[backends.bitwarden.servers.server1]
url = "http://bitwarden:80"
[backends.jellyfin]
[backends.jellyfin.servers.server1]
url = "http://jellyfin:8096"
[backends.raspap]
[backends.raspap.servers.server1]
url = "http://raspap:80"
[backends.cockpit]
[backends.cockpit.servers.server1]
url = "http://IPV4_PUBLIC:9090"
[frontends]
[frontends.django]
backend = "django"
passHostHeader = true
[frontends.django.headers]
HostsProxyHeaders = ['X-CSRFToken']
[frontends.django.routes.dr1]
#rule = "Host:mistborn.cyber5k.com"
rule = "Host:home.mistborn"
[frontends.pihole]
backend = "pihole"
passHostHeader = true
[frontends.pihole.headers]
HostsProxyHeaders = ['X-CSRFToken']
[frontends.pihole.routes.dr1]
rule = "Host:pihole.mistborn"
[frontends.homeassistant]
backend = "homeassistant"
passHostHeader = true
[frontends.homeassistant.headers]
HostsProxyHeaders = ['X-CSRFToken']
[frontends.homeassistant.routes.dr1]
rule = "Host:hass.mistborn"
[frontends.syncthing]
backend = "syncthing"
passHostHeader = true
[frontends.syncthing.headers]
HostsProxyHeaders = ['X-CSRFToken']
[frontends.syncthing.routes.dr1]
rule = "Host:syncthing.mistborn"
[frontends.rocketchat]
backend = "rocketchat"
passHostHeader = true
[frontends.rocketchat.headers]
HostsProxyHeaders = ['X-CSRFToken']
[frontends.rocketchat.routes.dr1]
rule = "Host:chat.mistborn"
[frontends.nextcloud]
backend = "nextcloud"
passHostHeader = true
[frontends.nextcloud.headers]
HostsProxyHeaders = ['X-CSRFToken']
[frontends.nextcloud.routes.dr1]
rule = "Host:nextcloud.mistborn"
[frontends.onlyoffice]
backend = "onlyoffice"
passHostHeader = true
[frontends.onlyoffice.headers]
HostsProxyHeaders = ['X-CSRFToken']
[frontends.onlyoffice.routes.dr1]
rule = "Host:onlyoffice.mistborn"
[frontends.bitwarden]
backend = "bitwarden"
passHostHeader = true
[frontends.bitwarden.headers]
HostsProxyHeaders = ['X-CSRFToken']
[frontends.bitwarden.routes.dr1]
rule = "Host:bitwarden.mistborn"
[frontends.jellyfin]
backend = "jellyfin"
passHostHeader = true
[frontends.jellyfin.headers]
HostsProxyHeaders = ['X-CSRFToken']
[frontends.jellyfin.routes.dr1]
rule = "Host:jellyfin.mistborn"
[frontends.raspap]
backend = "raspap"
passHostHeader = true
[frontends.raspap.headers]
HostsProxyHeaders = ['X-CSRFToken']
[frontends.raspap.routes.dr1]
rule = "Host:raspap.mistborn"
[frontends.cockpit]
backend = "cockpit"
passHostHeader = true
[frontends.cockpit.headers]
HostsProxyHeaders = ['X-CSRFToken']
[frontends.cockpit.routes.dr1]
rule = "Host:cockpit.mistborn"
## Connection to docker host system (docker.sock)
#[docker]
#endpoint = "unix:///var/run/docker.sock"
#domain = "localhost"
#watch = true
## This will hide all docker containers that don't explicitly
## set the "traefik.enable=true" label
#exposedbydefault = false

24
extra/bitwarden.yml

@ -0,0 +1,24 @@ @@ -0,0 +1,24 @@
version: '3'

#volumes:
#  production_bitwarden_data: {}

services:
  # Bitwarden-compatible password manager server (Rust implementation).
  bitwarden:
    image: bitwardenrs/server:latest
    container_name: mistborn_production_bitwarden
    env_file:
      - ../.envs/.production/.bitwarden
    volumes:
      # Persist server data on the host.
      - ../../mistborn_volumes/extra/bitwarden:/data
    labels:
      # Traefik routing: expose this container via the reverse proxy on port 80.
      - "traefik.enable=true"
      - "traefik.port=80"
    ports:
      # NOTE(review): 3012 is published directly on the host, bypassing traefik.
      - 3012:3012/tcp
    restart: unless-stopped

networks:
  default:
    external:
      name: mistborn_default

20
extra/homeassistant.yml

@ -0,0 +1,20 @@ @@ -0,0 +1,20 @@
version: '3'

services:
  # Home Assistant home-automation hub.
  homeassistant:
    container_name: mistborn_production_home_assistant
    image: homeassistant/home-assistant:stable
    volumes:
      # Persist Home Assistant configuration on the host.
      - ../../mistborn_volumes/extra/homeassistant/config:/config
    environment:
      - TZ=America/New_York
    labels:
      # Traefik routing: expose via the reverse proxy on the app port 8123.
      - "traefik.enable=true"
      - "traefik.port=8123"
    restart: unless-stopped
    #network_mode: host

networks:
  default:
    external:
      name: mistborn_default

24
extra/jellyfin.yml

@ -0,0 +1,24 @@ @@ -0,0 +1,24 @@
version: '3'

# Named volumes for Jellyfin's own state.
volumes:
  production_jellyfin_config: {}
  production_jellyfin_cache: {}
  #production_nextcloud: {}

services:
  # Jellyfin media server.
  jellyfin:
    image: jellyfin/jellyfin:latest
    container_name: mistborn_production_jellyfin
    volumes:
      - production_jellyfin_config:/config
      - production_jellyfin_cache:/cache
      # Serve the Nextcloud data directory as a read-only media library.
      - ../../mistborn_volumes/extra/nextcloud:/media:ro
    labels:
      # Traefik routing: expose via the reverse proxy on the app port 8096.
      - "traefik.enable=true"
      - "traefik.port=8096"
    restart: unless-stopped

networks:
  default:
    external:
      name: mistborn_default

29
extra/nextcloud.yml

@ -0,0 +1,29 @@ @@ -0,0 +1,29 @@
version: '3'

#volumes:
#  production_nextcloud: {}

services:
  # Nextcloud file-sync/collaboration server.
  # NOTE(review): image is untagged, so it floats with upstream 'latest'.
  nextcloud:
    image: nextcloud
    container_name: mistborn_production_nextcloud
    env_file:
      # Shares the postgres credentials with the database service.
      - ../.envs/.production/.postgres
      - ../.envs/.production/.nextcloud
    labels:
      # Traefik routing: expose via the reverse proxy on port 80.
      - "traefik.enable=true"
      - "traefik.port=80"
    volumes:
      # Persist the whole Nextcloud document root on the host.
      - ../../mistborn_volumes/extra/nextcloud:/var/www/html
      #- ./volumes/extra/nextcloud/config:/var/www/html/config
      #- ./volumes/extra/nextcloud/custom_apps:/var/www/html/custom_apps
      #- ./volumes/extra/nextcloud/data:/var/www/html/data
      #- ./volumes/extra/nextcloud/themes:/var/www/html/themes
    environment:
      - VIRTUAL_HOST=nextcloud.mistborn
    restart: unless-stopped

networks:
  default:
    external:
      name: mistborn_default

21
extra/onlyoffice.yml

@ -0,0 +1,21 @@ @@ -0,0 +1,21 @@
version: '3'

services:
  # OnlyOffice document server (collaborative office editing).
  onlyoffice:
    container_name: mistborn_production_onlyoffice
    image: onlyoffice/documentserver:latest
    volumes:
      # Persist logs and cache on the host.
      - ../../mistborn_volumes/extra/onlyoffice/logs:/var/log/onlyoffice
      - ../../mistborn_volumes/extra/onlyoffice/cache:/var/lib/onlyoffice
    env_file:
      - ../.envs/.production/.onlyoffice
    labels:
      # Traefik routing: expose via the reverse proxy on port 80.
      - "traefik.enable=true"
      - "traefik.port=80"
    restart: unless-stopped
    #network_mode: host

networks:
  default:
    external:
      name: mistborn_default

25
extra/raspap.yml

@ -0,0 +1,25 @@ @@ -0,0 +1,25 @@
version: '3'

services:
  # RaspAP wireless access point management UI, built from a local Dockerfile.
  raspap:
    build:
      # Build context is the repository root so the Dockerfile can COPY
      # files by their repo-relative paths.
      context: ..
      dockerfile: ./compose/production/raspap/Dockerfile
    #user: root
    image: mistborn_production_raspap
    container_name: mistborn_production_raspap
    labels:
      # Traefik routing: expose via the reverse proxy on port 80.
      - "traefik.enable=true"
      - "traefik.port=80"
    env_file:
      - ../.envs/.production/.pihole
    command: /start
    volumes:
      #- ~/.ssh:/ssh:ro
      # Persist RaspAP configuration on the host.
      - ../../mistborn_volumes/extra/raspap/etc-raspap:/etc/raspap
      #- ../volumes/extra/raspap/etc-lighttpd:/etc/lighttpd

networks:
  default:
    external:
      name: mistborn_default

65
extra/rocketchat.yml

@ -0,0 +1,65 @@ @@ -0,0 +1,65 @@
version: '3'

services:
  # Rocket.Chat application server.
  rocketchat:
    image: rocket.chat:latest
    container_name: mistborn_production_rocketchat
    # Retry 'node main.js' up to 30 times, 5s apart, while mongo comes up;
    # exit with the last attempt's status.
    command: bash -c 'for i in `seq 1 30`; do node main.js && s=$$? && break || s=$$?; echo "Tried $$i times. Waiting 5 secs..."; sleep 5; done; (exit $$s)'
    restart: unless-stopped
    volumes:
      - ../../mistborn_volumes/extra/rocketchat/uploads:/app/uploads
    environment:
      - PORT=3000
      - ROOT_URL=http://chat.mistborn
      - MONGO_URL=mongodb://mongo:27017/rocketchat
      - MONGO_OPLOG_URL=mongodb://mongo:27017/local
      - Accounts_UseDNSDomainCheck=False
    labels:
      # Traefik routing: expose via the reverse proxy on the app port 3000.
      - "traefik.enable=true"
      - "traefik.port=3000"
    depends_on:
      - mongo
    #ports:
    #  - 3000:3000

  # MongoDB backing store for Rocket.Chat, run as a single-node replica set
  # (required for the oplog URL above).
  mongo:
    image: mongo:4.0
    container_name: mistborn_production_rocketchat_mongo
    restart: unless-stopped
    volumes:
      # NOTE(review): these use '../volumes/...' while the other services in
      # this commit use '../../mistborn_volumes/...' -- confirm intended.
      - ../volumes/extra/rocketchat/data/db:/data/db
      - ../volumes/extra/rocketchat/data/dump:/dump
    command: mongod --smallfiles --oplogSize 128 --replSet rs0 --storageEngine=mmapv1

  # this container's job is just to run the command to initialize the replica
  # set. it will run the command and remove itself (it will not stay running)
  mongo-init-replica:
    image: mongo
    command: 'bash -c "for i in `seq 1 30`; do mongo mongo/rocketchat --eval \"rs.initiate({ _id: ''rs0'', members: [ { _id: 0, host: ''localhost:27017'' } ]})\" && s=$$? && break || s=$$?; echo \"Tried $$i times. Waiting 5 secs...\"; sleep 5; done; (exit $$s)"'
    depends_on:
      - mongo

  # hubot, the popular chatbot (add the bot user first and change the password before starting this image)
  hubot:
    image: rocketchat/hubot-rocketchat:latest
    container_name: mistborn_production_rocketchat_hubot
    restart: unless-stopped
    environment:
      - ROCKETCHAT_URL=chat.mistborn #:3000
      # you can add more scripts as you'd like here, they need to be installable by npm
      - EXTERNAL_SCRIPTS=hubot-help,hubot-seen,hubot-links,hubot-diagnostics
    env_file:
      - ../.envs/.production/.rocketchat
    depends_on:
      - rocketchat
    volumes:
      - ../volumes/extra/rocketchat/hubot/scripts:/home/hubot/scripts
    # this is used to expose the hubot port for notifications on the host on port 3001, e.g. for hubot-jenkins-notifier
    ports:
      - 3001:8080/tcp

networks:
  default:
    external:
      name: mistborn_default

28
extra/syncthing.yml

@ -0,0 +1,28 @@ @@ -0,0 +1,28 @@
version: '3'

services:
  # Syncthing continuous file synchronization.
  syncthing:
    image: linuxserver/syncthing
    container_name: mistborn_production_syncthing
    environment:
      - PUID=1000
      - PGID=1000
      # Fixed typo: 'Amereica/New_York' is not a valid IANA timezone name.
      - TZ=America/New_York
      - UMASK_SET=022
    volumes:
      # Persist configuration and the two synced data roots on the host.
      - ../../mistborn_volumes/extra/syncthing/config:/config
      - ../../mistborn_volumes/extra/syncthing/data1:/data1
      - ../../mistborn_volumes/extra/syncthing/data2:/data2
    ports:
      #- 8384:8384
      - 22000:22000/tcp # listening port
      - 21027:21027/udp # protocol discovery
    labels:
      # Traefik routing: expose the web UI via the reverse proxy (port 8384).
      - "traefik.enable=true"
      - "traefik.port=8384"
    restart: unless-stopped

networks:
  default:
    external:
      name: mistborn_default

16
extra/tor.yml

@ -0,0 +1,16 @@ @@ -0,0 +1,16 @@
version: '3'

services:
  # Tor SOCKS proxy, built from the local tor Dockerfile.
  tor-client:
    build:
      # Build context is the repository root so the Dockerfile can COPY the
      # torrc by its repo-relative path.
      context: ..
      dockerfile: ./compose/production/tor/Dockerfile
    image: mistborn_production_tor
    container_name: mistborn_production_tor
    ports:
      # SOCKS proxy port exposed by the tor image.
      - 9150:9150/tcp

networks:
  default:
    external:
      name: mistborn_default
Loading…
Cancel
Save