Initial commit

Chay committed 2023-12-03 17:03:54 +02:00
commit f2040d809e
24 changed files with 734 additions and 0 deletions

README.md (new file)
@@ -0,0 +1,5 @@
# Altesq's Docker Configuration
The docker-compose files and/or Dockerfiles needed to run the Altesq services.
Each folder is named after the service, not the subdomain it runs on.
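
A minimal usage sketch, assuming Docker Compose is installed (the anki folder is only an example; every service follows the same pattern):

cd anki                  # each service lives in its own folder
$EDITOR .env             # replace the placeholder credentials first
docker compose up -d     # or `docker-compose up -d` on older setups
docker compose logs -f   # follow the service logs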

anki/.env (new file)
@@ -0,0 +1,2 @@
USER=user
PASSWORD=password

anki/Dockerfile (new file)
@@ -0,0 +1,3 @@
# Python base image with pip available
FROM python:3.9
# The anki package bundles the built-in sync server (anki >= 2.1.57)
RUN pip install anki
# Start the sync server; users and sync dir come from the SYNC_USER1 / SYNC_BASE env vars
CMD ["python", "-m", "anki.syncserver"]

anki/docker-compose.yml (new file)
@@ -0,0 +1,16 @@
version: '3'
services:
  anki-server:
    image: anki-server:latest
    environment:
      - SYNC_USER1=${USER}:${PASSWORD}
      - SYNC_BASE=/srv/anki
    ports:
      - "8888:8080"
    volumes:
      - anki-server:/srv/anki
    restart: always
volumes:
  anki-server:
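
The compose file references a locally built image (anki-server:latest) but has no build: section, so the image presumably has to be built from the Dockerfile above before the first start. A hedged sketch, run from the anki folder:

docker build -t anki-server:latest .   # tag must match the image: line above
docker compose up -d                   # the sync server is then reachable on host port 8888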

@@ -0,0 +1,144 @@
version: 2
global:
  server_name: altesq.net
  key_id: ed25519:auto
  private_key: matrix_key.pem
  old_private_keys: []
  key_validity_period: 168h0m0s
  database:
    connection_string: postgres://dendrite:${DB_PASS}@postgres/dendrite?sslmode=disable
    max_open_conns: 90
    max_idle_conns: 2
    conn_max_lifetime: -1
  well_known_server_name: ""
  well_known_client_name: ""
  well_known_sliding_sync_proxy: ""
  disable_federation: false
  presence:
    enable_inbound: false
    enable_outbound: true
  trusted_third_party_id_servers:
    - matrix.org
    - vector.im
  jetstream:
    storage_path: /var/dendrite/
    addresses: []
    topic_prefix: Dendrite
    in_memory: false
    disable_tls_validation: true
  metrics:
    enabled: false
    basic_auth:
      username: metrics
      password: metrics
  sentry:
    enabled: false
    dsn: ""
    environment: ""
  dns_cache:
    enabled: true
    cache_size: 4096
    cache_lifetime: 5m0s
  server_notices:
    enabled: true
    local_part: _server
    display_name: Server Alert
    avatar_url: ""
    room_name: Server Alert
  report_stats:
    enabled: false
    endpoint: https://panopticon.matrix.org/push
  cache:
    max_size_estimated: 1073741824
    max_age: 1h0m0s
app_service_api:
  disable_tls_validation: false
  config_files: []
client_api:
  registration_disabled: true
  registration_requires_token: false
  registration_shared_secret: "CHANGEME"
  guests_disabled: false
  enable_registration_captcha: false
  recaptcha_api_js_url: ""
  recaptcha_sitekey_class: ""
  recaptcha_form_field: ""
  recaptcha_public_key: ""
  recaptcha_private_key: ""
  recaptcha_bypass_secret: ""
  recaptcha_siteverify_api: ""
  turn:
    turn_user_lifetime: ""
    turn_uris: []
    turn_shared_secret: ""
    turn_username: ""
    turn_password: ""
  rate_limiting:
    enabled: true
    threshold: 5
    cooloff_ms: 500
    exempt_user_ids: []
federation_api:
  send_max_retries: 16
  p2p_retries_until_assumed_offline: 1
  disable_tls_validation: false
  disable_http_keepalives: false
  key_perspectives:
    - server_name: matrix.org
      keys:
        - key_id: ed25519:auto
          public_key: Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw
        - key_id: ed25519:a_RXGa
          public_key: l8Hft5qXKn1vfHrg3p4+W8gELQVo8N13JkluMfmn2sQ
  prefer_direct_fetch: false
key_server: {}
media_api:
  base_path: /var/dendrite/media
  max_file_size_bytes: 10485760
  dynamic_thumbnails: false
  max_thumbnail_generators: 10
  thumbnail_sizes:
    - width: 32
      height: 32
      method: crop
    - width: 96
      height: 96
      method: crop
    - width: 640
      height: 480
      method: scale
room_server:
  default_room_version: "10"
sync_api:
  real_ip_header: ""
  search:
    enabled: false
    index_path: /var/dendrite/searchindex
    in_memory: false
    language: en
user_api:
  bcrypt_cost: 10
  openid_token_lifetime_ms: 3600000
  push_gateway_disable_tls_validation: false
  auto_join_rooms: []
relay_api: {}
mscs:
  mscs: []
tracing:
  enabled: false
  jaeger:
    serviceName: ""
    disabled: false
    rpc_metrics: false
    traceid_128bit: false
    tags: []
    sampler: null
    reporter: null
    headers: null
    baggage_restrictions: null
    throttler: null
logging:
  - type: file
    level: info
    params:
      path: /var/dendrite/log

@@ -0,0 +1,53 @@
version: "3.4"
services:
  postgres:
    hostname: postgres
    image: postgres:15-alpine
    restart: always
    volumes:
      # This will create a docker volume to persist the database files in.
      # If you prefer those files to be outside of docker, you'll need to change this.
      - dendrite_postgres_data:/var/lib/postgresql/data
    environment:
      POSTGRES_PASSWORD: ${DB_PASS}
      POSTGRES_USER: dendrite
      POSTGRES_DATABASE: dendrite
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U dendrite"]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - internal

  monolith:
    hostname: monolith
    image: matrixdotorg/dendrite-monolith:latest
    ports:
      - 8008:8008
      - 8448:8448
    volumes:
      - ./config:/etc/dendrite
      # The following volumes use docker volumes, change this
      # if you prefer to have those files outside of docker.
      - dendrite_media:/var/dendrite/media
      - dendrite_jetstream:/var/dendrite/jetstream
      - dendrite_search_index:/var/dendrite/searchindex
    depends_on:
      postgres:
        condition: service_healthy
    networks:
      - internal
    restart: unless-stopped

networks:
  internal:
    attachable: true

volumes:
  dendrite_postgres_data:
  dendrite_media:
  dendrite_jetstream:
  dendrite_search_index:
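
The Dendrite config above expects matrix_key.pem to exist in ./config (mounted at /etc/dendrite). A sketch of generating it with the key-generation helper the upstream Dendrite docs describe for this image; verify the /usr/bin/generate-keys path against the tag you actually pull:

docker run --rm --entrypoint="/usr/bin/generate-keys" \
  -v $(pwd)/config:/mnt \
  matrixdotorg/dendrite-monolith:latest \
  -private-key /mnt/matrix_key.pem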

forgejo/.env (new file)
@@ -0,0 +1 @@
DB_PASS=password

@@ -0,0 +1,48 @@
version: "3"
networks:
  forgejo:
    external: false

volumes:
  postgres:
  forgejo:
    driver: local

services:
  server:
    image: codeberg.org/forgejo/forgejo:1.21.1-0
    container_name: forgejo
    environment:
      - USER_UID=1000
      - USER_GID=1000
      - FORGEJO__database__DB_TYPE=postgres
      - FORGEJO__database__HOST=db:5432
      - FORGEJO__database__NAME=forgejo
      - FORGEJO__database__USER=forgejo
      - FORGEJO__database__PASSWD=${DB_PASS}
    restart: always
    networks:
      - forgejo
    volumes:
      - forgejo:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "3000:3000"
      - "222:22"
    depends_on:
      - db

  db:
    image: postgres:14
    restart: always
    environment:
      - POSTGRES_USER=forgejo
      - POSTGRES_PASSWORD=${DB_PASS}
      - POSTGRES_DB=forgejo
    networks:
      - forgejo
    volumes:
      - postgres:/var/lib/postgresql/data
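
Because the container's SSH port 22 is published on host port 222, SSH clones need the non-standard port. An illustrative example (host name and repository path are placeholders):

git clone ssh://git@git.example.net:222/someuser/somerepo.git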

lemmy/.env (new file)
@@ -0,0 +1 @@
DB_PASS=password

@@ -0,0 +1,28 @@
# DB Version: 15
# OS Type: linux
# DB Type: web
# Total Memory (RAM): 8 GB
# CPUs num: 4
# Data Storage: ssd
max_connections = 200
shared_buffers = 2GB
effective_cache_size = 6GB
maintenance_work_mem = 512MB
checkpoint_completion_target = 0.9
wal_buffers = 16MB
default_statistics_target = 100
random_page_cost = 1.1
effective_io_concurrency = 200
work_mem = 5242kB
huge_pages = off
min_wal_size = 1GB
max_wal_size = 4GB
max_worker_processes = 4
max_parallel_workers_per_gather = 2
max_parallel_workers = 4
max_parallel_maintenance_workers = 2
synchronous_commit = off
listen_addresses = '*'

lemmy/docker-compose.yml (new file)
@@ -0,0 +1,84 @@
version: "3.7"

x-logging: &default-logging
  driver: "json-file"
  options:
    max-size: "50m"
    max-file: "4"

services:
  proxy:
    image: docker.io/library/nginx
    ports:
      - "1236:8536"
    volumes:
      - ./nginx_internal.conf:/etc/nginx/nginx.conf:ro,Z
      - ./proxy_params:/etc/nginx/proxy_params:ro,Z
    restart: always
    logging: *default-logging
    depends_on:
      - pictrs
      - lemmy-ui

  lemmy:
    image: ravermeister/lemmy
    hostname: lemmy
    restart: always
    logging: *default-logging
    environment:
      RUST_LOG: warn
    volumes:
      - ./lemmy.hjson:/config/config.hjson:Z
    depends_on:
      - postgres
      - pictrs

  lemmy-ui:
    image: ravermeister/lemmy-ui
    environment:
      LEMMY_UI_LEMMY_INTERNAL_HOST: lemmy:8536
      LEMMY_UI_LEMMY_EXTERNAL_HOST: "lemmy.altesq.net"
    volumes:
      - ./volumes/lemmy-ui/extra_themes:/app/extra_themes
    depends_on:
      - lemmy
    restart: always
    logging: *default-logging

  pictrs:
    image: docker.io/asonix/pictrs:0.4.3
    hostname: pictrs
    user: 991:991
    environment:
      PICTRS__SERVER__API_KEY: ${DB_PASS}
      PICTRS__MEDIA__VIDEO_CODEC: vp9
      PICTRS__MEDIA__GIF__MAX_WIDTH: 256
      PICTRS__MEDIA__GIF__MAX_HEIGHT: 256
      PICTRS__MEDIA__GIF__MAX_AREA: 65536
      PICTRS__MEDIA__GIF__MAX_FRAME_COUNT: 400
      PICTRS_OPENTELEMETRY_URL: http://otel:4137
      RUST_LOG: debug
      RUST_BACKTRACE: full
    volumes:
      - ./volumes/pictrs:/mnt:Z
    restart: always
    logging: *default-logging
    deploy:
      resources:
        limits:
          memory: 690m

  postgres:
    image: docker.io/postgres:15-alpine
    hostname: postgres
    volumes:
      - ./volumes/postgres:/var/lib/postgresql/data:Z
      - ./customPostgresql.conf:/etc/postgresql.conf
    restart: always
    environment:
      POSTGRES_PASSWORD: ${DB_PASS}
      POSTGRES_USER: lemmy
      POSTGRES_DB: lemmy
    command: postgres -c config_file=/etc/postgresql.conf
    logging: *default-logging
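
The bind mounts under ./volumes are not prepared by compose itself, and pictrs runs as user 991:991 (see the user: line above), so its mount must be writable by that UID. A preparation sketch, run from the lemmy folder:

mkdir -p volumes/postgres volumes/pictrs volumes/lemmy-ui/extra_themes
sudo chown -R 991:991 volumes/pictrs
docker compose up -d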

lemmy/lemmy.hjson (new file)
@@ -0,0 +1,20 @@
{
  # for more info about the config, check out the documentation
  # https://join-lemmy.org/docs/en/administration/configuration.html
  database: {
    host: postgres
    password: "${DB_PASS}"
  }
  hostname: "lemmy.altesq.net"
  pictrs: {
    url: "http://pictrs:8080/"
    api_key: "${DB_PASS}"
  }
  email: {
    smtp_server: "postfix:25"
    smtp_from_address: "noreply@altesq.net"
    tls_type: "none"
  }
}

lemmy/nginx_internal.conf (new file)
@@ -0,0 +1,73 @@
worker_processes auto;

events {
    worker_connections 1024;
}

http {
    # We construct a string consisting of the "request method" and "http accept header"
    # and then apply some simple regexp matches to that combination to decide on the
    # HTTP upstream we should proxy the request to.
    #
    # Example strings:
    #
    #   "GET:application/activity+json"
    #   "GET:text/html"
    #   "POST:application/activity+json"
    #
    # You can see some basic match tests in this regex101 matching this configuration
    # https://regex101.com/r/vwMJNc/1
    #
    # Learn more about nginx maps here http://nginx.org/en/docs/http/ngx_http_map_module.html
    map "$request_method:$http_accept" $proxpass {
        # If no explicit match exists below, send traffic to lemmy-ui
        default "http://lemmy-ui:1234";

        # GET/HEAD requests that accept ActivityPub or Linked Data JSON should go to lemmy.
        #
        # These requests are used by Mastodon and other fediverse instances to look up profile information,
        # discover site information and so on.
        "~^(?:GET|HEAD):.*?application\/(?:activity|ld)\+json" "http://lemmy:8536";

        # All non-GET/HEAD requests should go to lemmy.
        #
        # Rather than calling out POST, PUT, DELETE, PATCH, CONNECT and all the verbs manually,
        # we simply negate the GET|HEAD pattern from above and accept all possible $http_accept values.
        "~^(?!(GET|HEAD)).*:" "http://lemmy:8536";
    }

    server {
        # this is the port inside docker, not the public one yet
        listen 1236;
        listen 8536;
        # change if needed, this is facing the public web
        server_name localhost;
        server_tokens off;

        # Upload limit, relevant for pictrs
        client_max_body_size 20M;

        # Send actual client IP upstream
        include proxy_params;

        # frontend general requests
        location / {
            proxy_pass $proxpass;
            rewrite ^(.+)/+$ $1 permanent;
        }

        # security.txt
        location = /.well-known/security.txt {
            proxy_pass "http://lemmy-ui:1234";
        }

        # backend
        location ~ ^/(api|pictrs|feeds|nodeinfo|.well-known|version|sitemap.xml) {
            proxy_pass "http://lemmy:8536";
            # Send actual client IP upstream
            include proxy_params;
        }
    }
}
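
The map above routes purely on request method and Accept header, which is easy to check from the host once the proxy is up on port 1236 (per docker-compose.yml); the paths below are illustrative:

curl -i http://localhost:1236/ -H 'Accept: text/html'                          # default branch -> lemmy-ui
curl -i http://localhost:1236/u/admin -H 'Accept: application/activity+json'   # ActivityPub branch -> lemmy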

lemmy/proxy_params (new file)
@@ -0,0 +1,4 @@
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;

mlmym/docker-compose.yml (new file)
@@ -0,0 +1,10 @@
version: '3'
services:
  mlmym:
    environment:
      - LEMMY_DOMAIN=lemmy.altesq.net
      - DARK=true
    ports:
      - 8882:8080
    restart: always
    image: ghcr.io/rystaf/mlmym:latest

pleroma/.env (new file)
@@ -0,0 +1 @@
DB_PASS=password

pleroma/Dockerfile (new file)
@@ -0,0 +1,41 @@
FROM elixir:1.11.4-alpine

ARG PLEROMA_VER=develop
ARG UID=911
ARG GID=911

ENV MIX_ENV=prod

RUN echo "http://nl.alpinelinux.org/alpine/latest-stable/main" >> /etc/apk/repositories \
    && apk update \
    && apk add git gcc g++ musl-dev make cmake file-dev \
       exiftool imagemagick libmagic ncurses postgresql-client ffmpeg

RUN addgroup -g ${GID} pleroma \
    && adduser -h /pleroma -s /bin/false -D -G pleroma -u ${UID} pleroma

ARG DATA=/var/lib/pleroma
RUN mkdir -p /etc/pleroma \
    && chown -R pleroma /etc/pleroma \
    && mkdir -p ${DATA}/uploads \
    && mkdir -p ${DATA}/static \
    && chown -R pleroma ${DATA}

USER pleroma
WORKDIR /pleroma

RUN git clone -b develop https://git.pleroma.social/pleroma/pleroma.git /pleroma \
    && git checkout ${PLEROMA_VER}

RUN echo "import Mix.Config" > config/prod.secret.exs \
    && mix local.hex --force \
    && mix local.rebar --force \
    && mix deps.get --only prod \
    && mkdir release \
    && mix release --path /pleroma

COPY ./config.exs /etc/pleroma/config.exs

EXPOSE 4000

ENTRYPOINT ["/pleroma/docker-entrypoint.sh"]

pleroma/config.exs (new file)
@@ -0,0 +1,85 @@
import Config

config :pleroma, Pleroma.Web.Endpoint,
  url: [host: System.get_env("DOMAIN", "localhost"), scheme: "https", port: 443],
  http: [ip: {0, 0, 0, 0}, port: 4000]

config :pleroma, :frontend_configurations,
  pleroma_fe: %{
    theme: "charred-coal"
  }

config :pleroma, :instance,
  name: System.get_env("INSTANCE_NAME", "Pleroma"),
  email: System.get_env("ADMIN_EMAIL"),
  notify_email: System.get_env("NOTIFY_EMAIL"),
  limit: 5000,
  registrations_open: true,
  federating: true,
  healthcheck: true

config :pleroma, :media_proxy,
  enabled: true,
  proxy_opts: [
    redirect_on_failure: true
  ]
  #base_url: "https://cache.domain.tld"

config :pleroma, Pleroma.Repo,
  adapter: Ecto.Adapters.Postgres,
  username: System.get_env("DB_USER", "pleroma"),
  password: System.fetch_env!("DB_PASS"),
  database: System.get_env("DB_NAME", "pleroma"),
  hostname: System.get_env("DB_HOST", "db"),
  pool_size: 10,
  prepare: :named,
  parameters: [
    plan_cache_mode: "force_custom_plan"
  ]

# Configure web push notifications
config :web_push_encryption, :vapid_details, subject: "mailto:#{System.get_env("NOTIFY_EMAIL")}"

config :pleroma, configurable_from_database: true
config :pleroma, :database, rum_enabled: false
config :pleroma, :instance, static_dir: "/var/lib/pleroma/static"
config :pleroma, Pleroma.Uploaders.Local, uploads: "/var/lib/pleroma/uploads"

# We can't store the secrets in this file, since this is baked into the docker image
if not File.exists?("/var/lib/pleroma/secret.exs") do
  secret = :crypto.strong_rand_bytes(64) |> Base.encode64() |> binary_part(0, 64)
  signing_salt = :crypto.strong_rand_bytes(8) |> Base.encode64() |> binary_part(0, 8)
  {web_push_public_key, web_push_private_key} = :crypto.generate_key(:ecdh, :prime256v1)

  secret_file =
    EEx.eval_string(
      """
      import Config
      config :pleroma, Pleroma.Web.Endpoint,
        secret_key_base: "<%= secret %>",
        signing_salt: "<%= signing_salt %>"
      config :web_push_encryption, :vapid_details,
        public_key: "<%= web_push_public_key %>",
        private_key: "<%= web_push_private_key %>"
      """,
      secret: secret,
      signing_salt: signing_salt,
      web_push_public_key: Base.url_encode64(web_push_public_key, padding: false),
      web_push_private_key: Base.url_encode64(web_push_private_key, padding: false)
    )

  File.write("/var/lib/pleroma/secret.exs", secret_file)
end

import_config("/var/lib/pleroma/secret.exs")

# For additional user config
if File.exists?("/var/lib/pleroma/config.exs"),
  do: import_config("/var/lib/pleroma/config.exs"),
  else:
    File.write("/var/lib/pleroma/config.exs", """
    import Config
    # For additional configuration outside of environmental variables
    """)

@@ -0,0 +1,51 @@
version: '3.8'
services:
  db:
    image: postgres:12.1-alpine
    container_name: pleroma_db
    restart: always
    healthcheck:
      test: ["CMD", "pg_isready", "-U", "pleroma"]
    environment:
      POSTGRES_USER: pleroma
      POSTGRES_PASSWORD: ${DB_PASS}
      POSTGRES_DB: pleroma
    volumes:
      - ./postgres:/var/lib/postgresql/data

  web:
    image: pleroma
    container_name: pleroma_web
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "wget -q --spider --proxy=off localhost:4000 || exit 1",
        ]
    restart: always
    ports:
      - '4000:4000'
    build:
      context: .
      # Feel free to remove or override this section
      # See 'Build-time variables' in README.md
      args:
        - "UID=1000"
        - "GID=1000"
        - "PLEROMA_VER=v2.6.0"
    volumes:
      - ./uploads:/var/lib/pleroma/uploads
      - ./static:/var/lib/pleroma/static
      - ./config.exs:/etc/pleroma/config.exs:ro
    environment:
      DOMAIN: pleroma.altesq.net
      INSTANCE_NAME: Altesq Pleroma
      ADMIN_EMAIL: admin@altesq.net
      NOTIFY_EMAIL: notify@altesq.net
      DB_USER: pleroma
      DB_PASS: ${DB_PASS}
      DB_NAME: pleroma
    depends_on:
      - db
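
Once the stack is up, admin tasks go through the release's pleroma_ctl inside the web container. A hedged example of creating the first admin account (the /pleroma/bin/pleroma_ctl path assumes the release layout produced by the Dockerfile above; name and email are placeholders):

docker compose exec web /pleroma/bin/pleroma_ctl user new admin admin@altesq.net --admin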

sftpgo/docker-compose.yml (new file)
@@ -0,0 +1,19 @@
version: '3'
services:
  sftpgo:
    restart: always
    container_name: sftpgo
    ports:
      - 8881:8090
      - 2022:2022
    volumes:
      - type: bind
        source: ./data
        target: /srv/sftpgo
      - type: bind
        source: ./home
        target: /var/lib/sftpgo
    environment:
      - SFTPGO_HTTPD__BINDINGS__0__PORT=8090
      - SFTPGO_GRACE_TIME=32
    image: "drakkan/sftpgo:latest"
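
The ./data and ./home bind mounts have to exist and be writable by the UID the SFTPGo container runs as (1000:1000 in the upstream image's defaults, but verify for the tag you pull). A preparation sketch:

mkdir -p data home
sudo chown -R 1000:1000 data home
docker compose up -d   # web admin UI on host port 8881, SFTP on 2022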

unciv/Dockerfile (new file)
@@ -0,0 +1,14 @@
# Use a base image with JDK pre-installed
FROM openjdk:latest
# Set the working directory
WORKDIR /app
# Copy the UncivServer.jar file to the container
COPY UncivServer.jar .
# Expose the desired port(s)
EXPOSE 8080
# Run the command to start the server
CMD ["java", "-jar", "UncivServer.jar", "-p", "8080"]

unciv/docker-compose.yml (new file)
@@ -0,0 +1,7 @@
version: '3'
services:
  unciv:
    image: unciv:latest
    restart: always
    ports:
      - 8080:8080
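
As with the anki service, this compose file points at a locally built image (unciv:latest) with no build: section, so building from the Dockerfile above is presumably a manual step; UncivServer.jar has to be placed next to the Dockerfile first (its source is not part of this repo):

docker build -t unciv:latest .
docker compose up -d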

@@ -0,0 +1,15 @@
version: '3.8'
services:
  uptime-kuma:
    image: louislam/uptime-kuma:1
    container_name: uptime-kuma
    volumes:
      - uptime-kuma:/app/data
    ports:
      - "3001:3001"
    restart: always
volumes:
  uptime-kuma:

@@ -0,0 +1,9 @@
version: "3"
services:
  watchtower:
    image: containrrr/watchtower
    restart: always
    environment:
      - WATCHTOWER_CLEANUP=true
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock