Merge pull request #4844 from jippi/jippi-fork

Refactor Docker/Compose
This commit is contained in:
daniel 2024-03-05 06:03:14 -07:00 committed by GitHub
commit 0bd3e0ab80
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
53 changed files with 3679 additions and 769 deletions

View file

@ -4,4 +4,4 @@
## Usage: redis-cli [flags] [args] ## Usage: redis-cli [flags] [args]
## Example: "redis-cli KEYS *" or "ddev redis-cli INFO" or "ddev redis-cli --version" ## Example: "redis-cli KEYS *" or "ddev redis-cli INFO" or "ddev redis-cli --version"
redis-cli -p 6379 -h redis $@ exec redis-cli -p 6379 -h redis "$@"

View file

@ -1,8 +1,30 @@
data .DS_Store
Dockerfile /.bash_history
contrib/docker/Dockerfile.* /.bash_profile
docker-compose*.yml /.bashrc
.dockerignore /.composer
.git /.env
.gitignore /.env.dottie-backup
.env /.git
/.git-credentials
/.gitconfig
/.gitignore
/.idea
/.vagrant
/bootstrap/cache
/docker-compose-state/
/Homestead.json
/Homestead.yaml
/node_modules
/npm-debug.log
/public/hot
/public/storage
/public/vendor/horizon
/storage/*.key
/storage/docker
/vendor
/yarn-error.log
# Exceptions - these *MUST* be last
!/bootstrap/cache/.gitignore
!/public/vendor/horizon/.gitignore

View file

@ -7,3 +7,21 @@ end_of_line = lf
charset = utf-8 charset = utf-8
trim_trailing_whitespace = true trim_trailing_whitespace = true
insert_final_newline = true insert_final_newline = true
[*.{yml,yaml}]
indent_style = space
indent_size = 2
[*.{sh,envsh,env,env*}]
indent_style = space
indent_size = 4
# ShellCheck config
shell_variant = bash # like -ln=bash
binary_next_line = true # like -bn
switch_case_indent = true # like -ci
space_redirects = false # like -sr
keep_padding = false # like -kp
function_next_line = true # like -fn
never_split = true # like -ns
simplify = true

File diff suppressed because it is too large Load diff

View file

@ -1,78 +0,0 @@
APP_NAME="Pixelfed"
APP_ENV="production"
APP_KEY=
APP_DEBUG="false"
# Instance Configuration
OPEN_REGISTRATION="false"
ENFORCE_EMAIL_VERIFICATION="false"
PF_MAX_USERS="1000"
OAUTH_ENABLED="true"
# Media Configuration
PF_OPTIMIZE_IMAGES="true"
IMAGE_QUALITY="80"
MAX_PHOTO_SIZE="15000"
MAX_CAPTION_LENGTH="500"
MAX_ALBUM_LENGTH="4"
# Instance URL Configuration
APP_URL="http://localhost"
APP_DOMAIN="localhost"
ADMIN_DOMAIN="localhost"
SESSION_DOMAIN="localhost"
TRUST_PROXIES="*"
# Database Configuration
DB_CONNECTION="mysql"
DB_HOST="127.0.0.1"
DB_PORT="3306"
DB_DATABASE="pixelfed"
DB_USERNAME="pixelfed"
DB_PASSWORD="pixelfed"
# Redis Configuration
REDIS_CLIENT="predis"
REDIS_SCHEME="tcp"
REDIS_HOST="127.0.0.1"
REDIS_PASSWORD="null"
REDIS_PORT="6379"
# Laravel Configuration
SESSION_DRIVER="database"
CACHE_DRIVER="redis"
QUEUE_DRIVER="redis"
BROADCAST_DRIVER="log"
LOG_CHANNEL="stack"
HORIZON_PREFIX="horizon-"
# ActivityPub Configuration
ACTIVITY_PUB="false"
AP_REMOTE_FOLLOW="false"
AP_INBOX="false"
AP_OUTBOX="false"
AP_SHAREDINBOX="false"
# Experimental Configuration
EXP_EMC="true"
## Mail Configuration (Post-Installer)
MAIL_DRIVER=log
MAIL_HOST=smtp.mailtrap.io
MAIL_PORT=2525
MAIL_USERNAME=null
MAIL_PASSWORD=null
MAIL_ENCRYPTION=null
MAIL_FROM_ADDRESS="pixelfed@example.com"
MAIL_FROM_NAME="Pixelfed"
## S3 Configuration (Post-Installer)
PF_ENABLE_CLOUD=false
FILESYSTEM_CLOUD=s3
#AWS_ACCESS_KEY_ID=
#AWS_SECRET_ACCESS_KEY=
#AWS_DEFAULT_REGION=
#AWS_BUCKET=<BucketName>
#AWS_URL=
#AWS_ENDPOINT=
#AWS_USE_PATH_STYLE_ENDPOINT=false

View file

@ -1,3 +1,5 @@
# shellcheck disable=SC2034,SC2148
APP_NAME="Pixelfed Test" APP_NAME="Pixelfed Test"
APP_ENV=local APP_ENV=local
APP_KEY=base64:lwX95GbNWX3XsucdMe0XwtOKECta3h/B+p9NbH2jd0E= APP_KEY=base64:lwX95GbNWX3XsucdMe0XwtOKECta3h/B+p9NbH2jd0E=

View file

@ -1,125 +0,0 @@
---
name: Build Docker image
on:
workflow_dispatch:
push:
branches:
- dev
tags:
- '*'
pull_request:
paths:
- .github/workflows/build-docker.yml
- contrib/docker/Dockerfile.apache
- contrib/docker/Dockerfile.fpm
permissions:
contents: read
jobs:
build-docker-apache:
runs-on: ubuntu-latest
steps:
- name: Checkout Code
uses: actions/checkout@v3
- name: Docker Lint
uses: hadolint/hadolint-action@v3.0.0
with:
dockerfile: contrib/docker/Dockerfile.apache
failure-threshold: error
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v2
secrets: inherit
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_TOKEN }}
if: github.event_name != 'pull_request'
- name: Fetch tags
uses: docker/metadata-action@v4
secrets: inherit
id: meta
with:
images: ${{ secrets.DOCKER_HUB_ORGANISATION }}/pixelfed
flavor: |
latest=auto
suffix=-apache
tags: |
type=edge,branch=dev
type=pep440,pattern={{raw}}
type=pep440,pattern=v{{major}}.{{minor}}
type=ref,event=pr
- name: Build and push Docker image
uses: docker/build-push-action@v3
with:
context: .
file: contrib/docker/Dockerfile.apache
platforms: linux/amd64,linux/arm64
builder: ${{ steps.buildx.outputs.name }}
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
cache-from: type=gha
cache-to: type=gha,mode=max
build-docker-fpm:
runs-on: ubuntu-latest
steps:
- name: Checkout Code
uses: actions/checkout@v3
- name: Docker Lint
uses: hadolint/hadolint-action@v3.0.0
with:
dockerfile: contrib/docker/Dockerfile.fpm
failure-threshold: error
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
uses: docker/login-action@v2
secrets: inherit
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_TOKEN }}
if: github.event_name != 'pull_request'
- name: Fetch tags
uses: docker/metadata-action@v4
secrets: inherit
id: meta
with:
images: ${{ secrets.DOCKER_HUB_ORGANISATION }}/pixelfed
flavor: |
suffix=-fpm
tags: |
type=edge,branch=dev
type=pep440,pattern={{raw}}
type=pep440,pattern=v{{major}}.{{minor}}
type=ref,event=pr
- name: Build and push Docker image
uses: docker/build-push-action@v3
with:
context: .
file: contrib/docker/Dockerfile.fpm
platforms: linux/amd64,linux/arm64
builder: ${{ steps.buildx.outputs.name }}
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
cache-from: type=gha
cache-to: type=gha,mode=max

231
.github/workflows/docker.yml vendored Normal file
View file

@ -0,0 +1,231 @@
---
name: Docker
on:
# See: https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#workflow_dispatch
workflow_dispatch:
# See: https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#push
push:
branches:
- dev
- staging
- jippi-fork # TODO(jippi): remove me before merge
tags:
- "*"
# See: https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request
pull_request:
types:
- opened
- reopened
- synchronize
jobs:
lint:
name: hadolint
runs-on: ubuntu-latest
permissions:
contents: read
steps:
- name: Checkout Code
uses: actions/checkout@v4
- name: Docker Lint
uses: hadolint/hadolint-action@v3.1.0
with:
dockerfile: Dockerfile
failure-threshold: error
shellcheck:
name: ShellCheck
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Run ShellCheck
uses: ludeeus/action-shellcheck@master
env:
SHELLCHECK_OPTS: --shell=bash --external-sources
with:
version: v0.9.0
additional_files: "*.envsh .env .env.docker .env.example .env.testing"
bats:
name: Bats Testing
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Run bats
run: docker run -v "$PWD:/var/www" bats/bats:latest /var/www/tests/bats
build:
name: Build, Test, and Push
runs-on: ubuntu-latest
strategy:
fail-fast: false
# See: https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs
matrix:
php_version:
- 8.2
- 8.3
target_runtime:
- apache
- fpm
- nginx
php_base:
- apache
- fpm
# See: https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs#excluding-matrix-configurations
# See: https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstrategymatrixexclude
exclude:
# targeting [apache] runtime with [fpm] base type doesn't make sense
- target_runtime: apache
php_base: fpm
# targeting [fpm] runtime with [apache] base type doesn't make sense
- target_runtime: fpm
php_base: apache
# targeting [nginx] runtime with [apache] base type doesn't make sense
- target_runtime: nginx
php_base: apache
# See: https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#example-using-concurrency-and-the-default-behavior
concurrency:
group: docker-build-${{ github.ref }}-${{ matrix.php_base }}-${{ matrix.php_version }}-${{ matrix.target_runtime }}
cancel-in-progress: true
permissions:
contents: read
packages: write
env:
# Set the repo variable [DOCKER_HUB_USERNAME] to override the default
# at https://github.com/<user>/<project>/settings/variables/actions
DOCKER_HUB_USERNAME: ${{ vars.DOCKER_HUB_USERNAME || 'pixelfed' }}
# Set the repo variable [DOCKER_HUB_ORGANISATION] to override the default
# at https://github.com/<user>/<project>/settings/variables/actions
DOCKER_HUB_ORGANISATION: ${{ vars.DOCKER_HUB_ORGANISATION || 'pixelfed' }}
# Set the repo variable [DOCKER_HUB_REPO] to override the default
# at https://github.com/<user>/<project>/settings/variables/actions
DOCKER_HUB_REPO: ${{ vars.DOCKER_HUB_REPO || 'pixelfed' }}
# For Docker Hub pushing to work, you need the secret [DOCKER_HUB_TOKEN]
# set to your Personal Access Token at https://github.com/<user>/<project>/settings/secrets/actions
#
# ! NOTE: no [login] or [push] will happen to Docker Hub until this secret is set!
HAS_DOCKER_HUB_CONFIGURED: ${{ secrets.DOCKER_HUB_TOKEN != '' }}
steps:
- name: Checkout Code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
id: buildx
with:
version: v0.12.0 # *or* newer, needed for annotations to work
# See: https://github.com/docker/login-action?tab=readme-ov-file#github-container-registry
- name: Log in to the GitHub Container registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
# See: https://github.com/docker/login-action?tab=readme-ov-file#docker-hub
- name: Login to Docker Hub registry (conditionally)
if: ${{ env.HAS_DOCKER_HUB_CONFIGURED == true }}
uses: docker/login-action@v3
with:
username: ${{ env.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_TOKEN }}
- name: Docker meta
uses: docker/metadata-action@v5
id: meta
with:
images: |
name=ghcr.io/${{ github.repository }},enable=true
name=${{ env.DOCKER_HUB_ORGANISATION }}/${{ env.DOCKER_HUB_REPO }},enable=${{ env.HAS_DOCKER_HUB_CONFIGURED }}
flavor: |
latest=auto
suffix=-${{ matrix.target_runtime }}-${{ matrix.php_version }}
tags: |
type=raw,value=dev,enable=${{ github.ref == format('refs/heads/{0}', 'dev') }}
type=raw,value=staging,enable=${{ github.ref == format('refs/heads/{0}', 'staging') }}
type=pep440,pattern={{raw}}
type=pep440,pattern=v{{major}}.{{minor}}
type=ref,event=branch,prefix=branch-
type=ref,event=pr,prefix=pr-
type=ref,event=tag
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index
- name: Docker meta (Cache)
uses: docker/metadata-action@v5
id: cache
with:
images: |
name=ghcr.io/${{ github.repository }}-cache,enable=true
name=${{ env.DOCKER_HUB_ORGANISATION }}/${{ env.DOCKER_HUB_REPO }}-cache,enable=${{ env.HAS_DOCKER_HUB_CONFIGURED }}
flavor: |
latest=auto
suffix=-${{ matrix.target_runtime }}-${{ matrix.php_version }}
tags: |
type=raw,value=dev,enable=${{ github.ref == format('refs/heads/{0}', 'dev') }}
type=raw,value=staging,enable=${{ github.ref == format('refs/heads/{0}', 'staging') }}
type=pep440,pattern={{raw}}
type=pep440,pattern=v{{major}}.{{minor}}
type=ref,event=branch,prefix=branch-
type=ref,event=pr,prefix=pr-
type=ref,event=tag
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index
- name: Build and push Docker image
uses: docker/build-push-action@v5
with:
context: .
file: Dockerfile
target: ${{ matrix.target_runtime }}-runtime
platforms: linux/amd64,linux/arm64
builder: ${{ steps.buildx.outputs.name }}
tags: ${{ steps.meta.outputs.tags }}
annotations: ${{ steps.meta.outputs.annotations }}
push: true
sbom: true
provenance: true
build-args: |
PHP_VERSION=${{ matrix.php_version }}
PHP_BASE_TYPE=${{ matrix.php_base }}
cache-from: |
type=gha,scope=${{ matrix.target_runtime }}-${{ matrix.php_base }}-${{ matrix.php_version }}
cache-to: |
type=gha,mode=max,scope=${{ matrix.target_runtime }}-${{ matrix.php_base }}-${{ matrix.php_version }}
${{ steps.cache.outputs.tags }}
# goss validate the image
#
# See: https://github.com/goss-org/goss
- uses: e1himself/goss-installation-action@v1
with:
version: "v0.4.4"
- name: Execute Goss tests
run: |
dgoss run \
-v "./.env.testing:/var/www/.env" \
-e "EXPECTED_PHP_VERSION=${{ matrix.php_version }}" \
-e "PHP_BASE_TYPE=${{ matrix.php_base }}" \
${{ steps.meta.outputs.tags }}

42
.gitignore vendored
View file

@ -1,22 +1,30 @@
.DS_Store
/.bash_history
/.bash_profile
/.bashrc
/.composer
/.env
/.env.dottie-backup
#/.git
/.git-credentials
/.gitconfig
#/.gitignore
/.idea
/.vagrant
/bootstrap/cache
/docker-compose-state/
/Homestead.json
/Homestead.yaml
/node_modules /node_modules
/npm-debug.log
/public/hot /public/hot
/public/storage /public/storage
/public/vendor/horizon
/storage/*.key /storage/*.key
/storage/docker
/vendor /vendor
/.idea /yarn-error.log
/.vscode
/.vagrant # Exceptions - these *MUST* be last
/docker-volumes !/bootstrap/cache/.gitignore
Homestead.json !/public/vendor/horizon/.gitignore
Homestead.yaml
npm-debug.log
yarn-error.log
.env
.DS_Store
.bash_profile
.bash_history
.bashrc
.gitconfig
.git-credentials
/.composer/
/nginx.conf

5
.hadolint.yaml Normal file
View file

@ -0,0 +1,5 @@
ignored:
- DL3002 # warning: Last USER should not be root
- DL3008 # warning: Pin versions in apt get install. Instead of `apt-get install <package>` use `apt-get install <package>=<version>`
- SC2046 # warning: Quote this to prevent word splitting.
- SC2086 # info: Double quote to prevent globbing and word splitting.

4
.markdownlint.json Normal file
View file

@ -0,0 +1,4 @@
{
"MD013": false,
"MD014": false
}

12
.shellcheckrc Normal file
View file

@ -0,0 +1,12 @@
# See: https://github.com/koalaman/shellcheck/blob/master/shellcheck.1.md#rc-files
source-path=SCRIPTDIR
# Allow opening any 'source'd file, even if not specified as input
external-sources=true
# Turn on warnings for unquoted variables with safe values
enable=quote-safe-variables
# Turn on warnings for unassigned uppercase variables
enable=check-unassigned-uppercase

14
.vscode/extensions.json vendored Normal file
View file

@ -0,0 +1,14 @@
{
"recommendations": [
"foxundermoon.shell-format",
"timonwong.shellcheck",
"jetmartin.bats",
"aaron-bond.better-comments",
"streetsidesoftware.code-spell-checker",
"editorconfig.editorconfig",
"github.vscode-github-actions",
"bmewburn.vscode-intelephense-client",
"redhat.vscode-yaml",
"ms-azuretools.vscode-docker"
]
}

21
.vscode/settings.json vendored Normal file
View file

@ -0,0 +1,21 @@
{
"shellformat.useEditorConfig": true,
"[shellscript]": {
"files.eol": "\n",
"editor.defaultFormatter": "foxundermoon.shell-format"
},
"[yaml]": {
"editor.defaultFormatter": "redhat.vscode-yaml"
},
"[dockercompose]": {
"editor.defaultFormatter": "redhat.vscode-yaml",
"editor.autoIndent": "advanced",
},
"yaml.schemas": {
"https://json.schemastore.org/composer": "https://raw.githubusercontent.com/compose-spec/compose-spec/master/schema/compose-spec.json"
},
"files.associations": {
".env": "shellscript",
".env.*": "shellscript"
}
}

18
CODEOWNERS Normal file
View file

@ -0,0 +1,18 @@
# See: https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
# These owners will be the default owners for everything in
# the repo. Unless a later match takes precedence,
* @dansup
# Docker related files
.editorconfig @jippi @dansup
.env @jippi @dansup
.env.* @jippi @dansup
.hadolint.yaml @jippi @dansup
.shellcheckrc @jippi @dansup
/.github/ @jippi @dansup
/docker/ @jippi @dansup
/tests/ @jippi @dansup
docker-compose.migrate.yml @jippi @dansup
docker-compose.yml @jippi @dansup
goss.yaml @jippi @dansup

307
Dockerfile Normal file
View file

@ -0,0 +1,307 @@
# syntax=docker/dockerfile:1
# See https://hub.docker.com/r/docker/dockerfile
#######################################################
# Configuration
#######################################################
# See: https://github.com/mlocati/docker-php-extension-installer
ARG DOCKER_PHP_EXTENSION_INSTALLER_VERSION="2.1.80"
# See: https://github.com/composer/composer
ARG COMPOSER_VERSION="2.6"
# See: https://nginx.org/
ARG NGINX_VERSION="1.25.3"
# See: https://github.com/ddollar/forego
ARG FOREGO_VERSION="0.17.2"
# See: https://github.com/hairyhenderson/gomplate
ARG GOMPLATE_VERSION="v3.11.6"
# See: https://github.com/jippi/dottie
ARG DOTTIE_VERSION="v0.9.5"
###
# PHP base configuration
###
# See: https://hub.docker.com/_/php/tags
ARG PHP_VERSION="8.1"
# See: https://github.com/docker-library/docs/blob/master/php/README.md#image-variants
ARG PHP_BASE_TYPE="apache"
ARG PHP_DEBIAN_RELEASE="bullseye"
ARG RUNTIME_UID=33 # often called 'www-data'
ARG RUNTIME_GID=33 # often called 'www-data'
# APT extra packages
ARG APT_PACKAGES_EXTRA=
# Extensions installed via [pecl install]
# ! NOTE: imagick is installed from [master] branch on GitHub due to 8.3 bug on ARM that haven't
# ! been released yet (after +10 months)!
# ! See: https://github.com/Imagick/imagick/pull/641
ARG PHP_PECL_EXTENSIONS="redis https://codeload.github.com/Imagick/imagick/tar.gz/28f27044e435a2b203e32675e942eb8de620ee58"
ARG PHP_PECL_EXTENSIONS_EXTRA=
# Extensions installed via [docker-php-ext-install]
ARG PHP_EXTENSIONS="intl bcmath zip pcntl exif curl gd"
ARG PHP_EXTENSIONS_EXTRA=""
ARG PHP_EXTENSIONS_DATABASE="pdo_pgsql pdo_mysql pdo_sqlite"
# GPG key for nginx apt repository
ARG NGINX_GPGKEY="573BFD6B3D8FBC641079A6ABABF5BD827BD9BF62"
# GPP key path for nginx apt repository
ARG NGINX_GPGKEY_PATH="/usr/share/keyrings/nginx-archive-keyring.gpg"
#######################################################
# Docker "copy from" images
#######################################################
# Composer docker image from Docker Hub
#
# NOTE: Docker will *not* pull this image unless it's referenced (via build target)
FROM composer:${COMPOSER_VERSION} AS composer-image
# php-extension-installer image from Docker Hub
#
# NOTE: Docker will *not* pull this image unless it's referenced (via build target)
FROM mlocati/php-extension-installer:${DOCKER_PHP_EXTENSION_INSTALLER_VERSION} AS php-extension-installer
# nginx webserver from Docker Hub.
# Used to copy some docker-entrypoint files for [nginx-runtime]
#
# NOTE: Docker will *not* pull this image unless it's referenced (via build target)
FROM nginx:${NGINX_VERSION} AS nginx-image
# Forego is a Procfile "runner" that makes it trival to run multiple
# processes under a simple init / PID 1 process.
#
# NOTE: Docker will *not* pull this image unless it's referenced (via build target)
#
# See: https://github.com/nginx-proxy/forego
FROM nginxproxy/forego:${FOREGO_VERSION}-debian AS forego-image
# Dottie makes working with .env files easier and safer
#
# NOTE: Docker will *not* pull this image unless it's referenced (via build target)
#
# See: https://github.com/jippi/dottie
FROM ghcr.io/jippi/dottie:${DOTTIE_VERSION} AS dottie-image
# gomplate-image grabs the gomplate binary from GitHub releases
#
# It's in its own layer so it can be fetched in parallel with other build steps
FROM php:${PHP_VERSION}-${PHP_BASE_TYPE}-${PHP_DEBIAN_RELEASE} AS gomplate-image
ARG TARGETARCH
ARG TARGETOS
ARG GOMPLATE_VERSION
RUN set -ex \
&& curl \
--silent \
--show-error \
--location \
--output /usr/local/bin/gomplate \
https://github.com/hairyhenderson/gomplate/releases/download/${GOMPLATE_VERSION}/gomplate_${TARGETOS}-${TARGETARCH} \
&& chmod +x /usr/local/bin/gomplate \
&& /usr/local/bin/gomplate --version
#######################################################
# Base image
#######################################################
FROM php:${PHP_VERSION}-${PHP_BASE_TYPE}-${PHP_DEBIAN_RELEASE} AS base
ARG BUILDKIT_SBOM_SCAN_STAGE="true"
ARG APT_PACKAGES_EXTRA
ARG PHP_DEBIAN_RELEASE
ARG PHP_VERSION
ARG RUNTIME_GID
ARG RUNTIME_UID
ARG TARGETPLATFORM
ENV DEBIAN_FRONTEND="noninteractive"
# Ensure we run all scripts through 'bash' rather than 'sh'
SHELL ["/bin/bash", "-c"]
RUN set -ex \
&& mkdir -pv /var/www/ \
&& chown -R ${RUNTIME_UID}:${RUNTIME_GID} /var/www
WORKDIR /var/www/
ENV APT_PACKAGES_EXTRA=${APT_PACKAGES_EXTRA}
# Install and configure base layer
COPY docker/shared/root/docker/install/base.sh /docker/install/base.sh
RUN --mount=type=cache,id=pixelfed-apt-${PHP_VERSION}-${PHP_DEBIAN_RELEASE}-${TARGETPLATFORM},sharing=locked,target=/var/lib/apt \
--mount=type=cache,id=pixelfed-apt-cache-${PHP_VERSION}-${PHP_DEBIAN_RELEASE}-${TARGETPLATFORM},sharing=locked,target=/var/cache/apt \
/docker/install/base.sh
#######################################################
# PHP: extensions
#######################################################
FROM base AS php-extensions
ARG PHP_DEBIAN_RELEASE
ARG PHP_EXTENSIONS
ARG PHP_EXTENSIONS_DATABASE
ARG PHP_EXTENSIONS_EXTRA
ARG PHP_PECL_EXTENSIONS
ARG PHP_PECL_EXTENSIONS_EXTRA
ARG PHP_VERSION
ARG TARGETPLATFORM
COPY --from=php-extension-installer /usr/bin/install-php-extensions /usr/local/bin/
COPY docker/shared/root/docker/install/php-extensions.sh /docker/install/php-extensions.sh
RUN --mount=type=cache,id=pixelfed-pear-${PHP_VERSION}-${PHP_DEBIAN_RELEASE}-${TARGETPLATFORM},sharing=locked,target=/tmp/pear \
--mount=type=cache,id=pixelfed-apt-${PHP_VERSION}-${PHP_DEBIAN_RELEASE}-${TARGETPLATFORM},sharing=locked,target=/var/lib/apt \
--mount=type=cache,id=pixelfed-apt-cache-${PHP_VERSION}-${PHP_DEBIAN_RELEASE}-${TARGETPLATFORM},sharing=locked,target=/var/cache/apt \
PHP_EXTENSIONS=${PHP_EXTENSIONS} \
PHP_EXTENSIONS_DATABASE=${PHP_EXTENSIONS_DATABASE} \
PHP_EXTENSIONS_EXTRA=${PHP_EXTENSIONS_EXTRA} \
PHP_PECL_EXTENSIONS=${PHP_PECL_EXTENSIONS} \
PHP_PECL_EXTENSIONS_EXTRA=${PHP_PECL_EXTENSIONS_EXTRA} \
/docker/install/php-extensions.sh
#######################################################
# PHP: composer and source code
#######################################################
FROM php-extensions AS composer-and-src
ARG PHP_VERSION
ARG PHP_DEBIAN_RELEASE
ARG RUNTIME_UID
ARG RUNTIME_GID
ARG TARGETPLATFORM
# Make sure composer cache is targeting our cache mount later
ENV COMPOSER_CACHE_DIR="/cache/composer"
# Don't enforce any memory limits for composer
ENV COMPOSER_MEMORY_LIMIT=-1
# Disable interactvitity from composer
ENV COMPOSER_NO_INTERACTION=1
# Copy composer from https://hub.docker.com/_/composer
COPY --link --from=composer-image /usr/bin/composer /usr/bin/composer
#! Changing user to runtime user
USER ${RUNTIME_UID}:${RUNTIME_GID}
# Install composer dependencies
# NOTE: we skip the autoloader generation here since we don't have all files avaliable (yet)
RUN --mount=type=cache,id=pixelfed-composer-${PHP_VERSION},sharing=locked,target=/cache/composer \
--mount=type=bind,source=composer.json,target=/var/www/composer.json \
--mount=type=bind,source=composer.lock,target=/var/www/composer.lock \
set -ex \
&& composer install --prefer-dist --no-autoloader --ignore-platform-reqs
# Copy all other files over
COPY --chown=${RUNTIME_UID}:${RUNTIME_GID} . /var/www/
#######################################################
# Runtime: base
#######################################################
FROM php-extensions AS shared-runtime
ARG RUNTIME_GID
ARG RUNTIME_UID
ENV RUNTIME_UID=${RUNTIME_UID}
ENV RUNTIME_GID=${RUNTIME_GID}
COPY --link --from=forego-image /usr/local/bin/forego /usr/local/bin/forego
COPY --link --from=dottie-image /dottie /usr/local/bin/dottie
COPY --link --from=gomplate-image /usr/local/bin/gomplate /usr/local/bin/gomplate
COPY --link --from=composer-image /usr/bin/composer /usr/bin/composer
COPY --link --from=composer-and-src --chown=${RUNTIME_UID}:${RUNTIME_GID} /var/www /var/www
#! Changing user to runtime user
USER ${RUNTIME_UID}:${RUNTIME_GID}
# Generate optimized autoloader now that we have all files around
RUN set -ex \
&& composer dump-autoload --optimize
USER root
# for detail why storage is copied this way, pls refer to https://github.com/pixelfed/pixelfed/pull/2137#discussion_r434468862
RUN set -ex \
&& cp --recursive --link --preserve=all storage storage.skel \
&& rm -rf html && ln -s public html
COPY docker/shared/root /
ENTRYPOINT ["/docker/entrypoint.sh"]
#######################################################
# Runtime: apache
#######################################################
FROM shared-runtime AS apache-runtime
COPY docker/apache/root /
RUN set -ex \
&& a2enmod rewrite remoteip proxy proxy_http \
&& a2enconf remoteip
CMD ["apache2-foreground"]
#######################################################
# Runtime: fpm
#######################################################
FROM shared-runtime AS fpm-runtime
COPY docker/fpm/root /
CMD ["php-fpm"]
#######################################################
# Runtime: nginx
#######################################################
FROM shared-runtime AS nginx-runtime
ARG NGINX_GPGKEY
ARG NGINX_GPGKEY_PATH
ARG NGINX_VERSION
ARG PHP_DEBIAN_RELEASE
ARG PHP_VERSION
ARG TARGETPLATFORM
# Install nginx dependencies
RUN --mount=type=cache,id=pixelfed-apt-lists-${PHP_VERSION}-${PHP_DEBIAN_RELEASE}-${TARGETPLATFORM},sharing=locked,target=/var/lib/apt/lists \
--mount=type=cache,id=pixelfed-apt-cache-${PHP_VERSION}-${PHP_DEBIAN_RELEASE}-${TARGETPLATFORM},sharing=locked,target=/var/cache/apt \
set -ex \
&& gpg1 --keyserver "hkp://keyserver.ubuntu.com:80" --keyserver-options timeout=10 --recv-keys "${NGINX_GPGKEY}" \
&& gpg1 --export "$NGINX_GPGKEY" > "$NGINX_GPGKEY_PATH" \
&& echo "deb [signed-by=${NGINX_GPGKEY_PATH}] https://nginx.org/packages/mainline/debian/ ${PHP_DEBIAN_RELEASE} nginx" >> /etc/apt/sources.list.d/nginx.list \
&& apt-get update \
&& apt-get install -y --no-install-recommends nginx=${NGINX_VERSION}*
# copy docker entrypoints from the *real* nginx image directly
COPY --link --from=nginx-image /docker-entrypoint.d /docker/entrypoint.d/
COPY docker/nginx/root /
COPY docker/nginx/Procfile .
STOPSIGNAL SIGQUIT
CMD ["forego", "start", "-r"]

View file

@ -25,31 +25,32 @@ class Kernel extends ConsoleKernel
*/ */
protected function schedule(Schedule $schedule) protected function schedule(Schedule $schedule)
{ {
$schedule->command('media:optimize')->hourlyAt(40); $schedule->command('media:optimize')->hourlyAt(40)->onOneServer();
$schedule->command('media:gc')->hourlyAt(5); $schedule->command('media:gc')->hourlyAt(5)->onOneServer();
$schedule->command('horizon:snapshot')->everyFiveMinutes(); $schedule->command('horizon:snapshot')->everyFiveMinutes()->onOneServer();
$schedule->command('story:gc')->everyFiveMinutes(); $schedule->command('story:gc')->everyFiveMinutes()->onOneServer();
$schedule->command('gc:failedjobs')->dailyAt(3); $schedule->command('gc:failedjobs')->dailyAt(3)->onOneServer();
$schedule->command('gc:passwordreset')->dailyAt('09:41'); $schedule->command('gc:passwordreset')->dailyAt('09:41')->onOneServer();
$schedule->command('gc:sessions')->twiceDaily(13, 23); $schedule->command('gc:sessions')->twiceDaily(13, 23)->onOneServer();
if(in_array(config_cache('pixelfed.cloud_storage'), ['1', true, 'true']) && config('media.delete_local_after_cloud')) { if (in_array(config_cache('pixelfed.cloud_storage'), ['1', true, 'true']) && config('media.delete_local_after_cloud')) {
$schedule->command('media:s3gc')->hourlyAt(15); $schedule->command('media:s3gc')->hourlyAt(15);
} }
if(config('import.instagram.enabled')) { if (config('import.instagram.enabled')) {
$schedule->command('app:transform-imports')->everyTenMinutes(); $schedule->command('app:transform-imports')->everyTenMinutes()->onOneServer();
$schedule->command('app:import-upload-garbage-collection')->hourlyAt(51); $schedule->command('app:import-upload-garbage-collection')->hourlyAt(51)->onOneServer();
$schedule->command('app:import-remove-deleted-accounts')->hourlyAt(37); $schedule->command('app:import-remove-deleted-accounts')->hourlyAt(37)->onOneServer();
$schedule->command('app:import-upload-clean-storage')->twiceDailyAt(1, 13, 32); $schedule->command('app:import-upload-clean-storage')->twiceDailyAt(1, 13, 32)->onOneServer();
if(config('import.instagram.storage.cloud.enabled') && (bool) config_cache('pixelfed.cloud_storage')) { if (config('import.instagram.storage.cloud.enabled') && (bool) config_cache('pixelfed.cloud_storage')) {
$schedule->command('app:import-upload-media-to-cloud-storage')->hourlyAt(39); $schedule->command('app:import-upload-media-to-cloud-storage')->hourlyAt(39)->onOneServer();
} }
} }
$schedule->command('app:notification-epoch-update')->weeklyOn(1, '2:21');
$schedule->command('app:hashtag-cached-count-update')->hourlyAt(25); $schedule->command('app:notification-epoch-update')->weeklyOn(1, '2:21')->onOneServer();
$schedule->command('app:account-post-count-stat-update')->everySixHours(25); $schedule->command('app:hashtag-cached-count-update')->hourlyAt(25)->onOneServer();
$schedule->command('app:account-post-count-stat-update')->everySixHours(25)->onOneServer();
} }
/** /**
@ -59,7 +60,7 @@ class Kernel extends ConsoleKernel
*/ */
protected function commands() protected function commands()
{ {
$this->load(__DIR__.'/Commands'); $this->load(__DIR__ . '/Commands');
require base_path('routes/console.php'); require base_path('routes/console.php');
} }

View file

@ -72,7 +72,7 @@ return [
'secret' => env('AWS_SECRET_ACCESS_KEY'), 'secret' => env('AWS_SECRET_ACCESS_KEY'),
'region' => env('AWS_DEFAULT_REGION'), 'region' => env('AWS_DEFAULT_REGION'),
'bucket' => env('AWS_BUCKET'), 'bucket' => env('AWS_BUCKET'),
'visibility' => 'public', 'visibility' => env('AWS_VISIBILITY', 'public'),
'url' => env('AWS_URL'), 'url' => env('AWS_URL'),
'endpoint' => env('AWS_ENDPOINT'), 'endpoint' => env('AWS_ENDPOINT'),
'use_path_style_endpoint' => env('AWS_USE_PATH_STYLE_ENDPOINT', false), 'use_path_style_endpoint' => env('AWS_USE_PATH_STYLE_ENDPOINT', false),

View file

@ -1,35 +0,0 @@
upstream fe {
server 127.0.0.1:8080;
}
server {
server_name real.domain;
listen [::]:443 ssl ipv6only=on;
listen 443 ssl;
ssl_certificate /etc/letsencrypt/live/real.domain/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/real.domain/privkey.pem; # managed by Certbot
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
location / {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $http_x_forwarded_host;
proxy_set_header X-Forwarded-Port $http_x_forwarded_port;
proxy_redirect off;
proxy_pass http://fe/;
}
}
server {
if ($host = real.domain) {
return 301 https://$host$request_uri;
}
listen 80;
listen [::]:80;
server_name real.domain;
return 404;
}

View file

@ -1,100 +0,0 @@
FROM php:8.1-apache-bullseye
ENV COMPOSER_MEMORY_LIMIT=-1
ARG DEBIAN_FRONTEND=noninteractive
WORKDIR /var/www/
# Get Composer binary
COPY --from=composer:2.4.4 /usr/bin/composer /usr/bin/composer
# Install package dependencies
RUN apt-get update \
&& apt-get upgrade -y \
# && apt-get install -y --no-install-recommends apt-utils \
&& apt-get install -y --no-install-recommends \
## Standard
locales \
locales-all \
git \
gosu \
zip \
unzip \
libzip-dev \
libcurl4-openssl-dev \
## Image Optimization
optipng \
pngquant \
jpegoptim \
gifsicle \
## Image Processing
libjpeg62-turbo-dev \
libpng-dev \
libmagickwand-dev \
# Required for GD
libxpm4 \
libxpm-dev \
libwebp7 \
libwebp-dev \
## Video Processing
ffmpeg \
## Database
# libpq-dev \
# libsqlite3-dev \
mariadb-client \
# Locales Update
&& sed -i '/en_US/s/^#//g' /etc/locale.gen \
&& locale-gen \
&& update-locale \
# Install PHP extensions
&& docker-php-source extract \
#PHP Imagemagick extensions
&& pecl install imagick \
&& docker-php-ext-enable imagick \
# PHP GD extensions
&& docker-php-ext-configure gd \
--with-freetype \
--with-jpeg \
--with-webp \
--with-xpm \
&& docker-php-ext-install -j$(nproc) gd \
#PHP Redis extensions
&& pecl install redis \
&& docker-php-ext-enable redis \
#PHP Database extensions
&& docker-php-ext-install pdo_mysql \
#pdo_pgsql pdo_sqlite \
#PHP extensions (dependencies)
&& docker-php-ext-configure intl \
&& docker-php-ext-install -j$(nproc) intl bcmath zip pcntl exif curl \
#APACHE Bootstrap
&& a2enmod rewrite remoteip \
&& {\
echo RemoteIPHeader X-Real-IP ;\
echo RemoteIPTrustedProxy 10.0.0.0/8 ;\
echo RemoteIPTrustedProxy 172.16.0.0/12 ;\
echo RemoteIPTrustedProxy 192.168.0.0/16 ;\
echo SetEnvIf X-Forwarded-Proto "https" HTTPS=on ;\
} > /etc/apache2/conf-available/remoteip.conf \
&& a2enconf remoteip \
#Cleanup
&& docker-php-source delete \
&& apt-get autoremove --purge -y \
&& apt-get clean \
&& rm -rf /var/cache/apt \
&& rm -rf /var/lib/apt/lists/
# Use the default production configuration
COPY contrib/docker/php.production.ini "$PHP_INI_DIR/php.ini"
COPY . /var/www/
# for detail why storage is copied this way, pls refer to https://github.com/pixelfed/pixelfed/pull/2137#discussion_r434468862
RUN cp -r storage storage.skel \
&& composer install --prefer-dist --no-interaction --no-ansi --optimize-autoloader \
&& rm -rf html && ln -s public html \
&& chown -R www-data:www-data /var/www
RUN php artisan horizon:publish
VOLUME /var/www/storage /var/www/bootstrap
CMD ["/var/www/contrib/docker/start.apache.sh"]

View file

@ -1,90 +0,0 @@
FROM php:8.1-fpm-bullseye
ENV COMPOSER_MEMORY_LIMIT=-1
ARG DEBIAN_FRONTEND=noninteractive
WORKDIR /var/www/
# Get Composer binary
COPY --from=composer:2.4.4 /usr/bin/composer /usr/bin/composer
# Install package dependencies
RUN apt-get update \
&& apt-get upgrade -y \
# && apt-get install -y --no-install-recommends apt-utils \
&& apt-get install -y --no-install-recommends \
## Standard
locales \
locales-all \
git \
gosu \
zip \
unzip \
libzip-dev \
libcurl4-openssl-dev \
## Image Optimization
optipng \
pngquant \
jpegoptim \
gifsicle \
## Image Processing
libjpeg62-turbo-dev \
libpng-dev \
libmagickwand-dev \
# Required for GD
libxpm4 \
libxpm-dev \
libwebp7 \
libwebp-dev \
## Video Processing
ffmpeg \
## Database
# libpq-dev \
# libsqlite3-dev \
mariadb-client \
# Locales Update
&& sed -i '/en_US/s/^#//g' /etc/locale.gen \
&& locale-gen \
&& update-locale \
# Install PHP extensions
&& docker-php-source extract \
#PHP Imagemagick extensions
&& pecl install imagick \
&& docker-php-ext-enable imagick \
# PHP GD extensions
&& docker-php-ext-configure gd \
--with-freetype \
--with-jpeg \
--with-webp \
--with-xpm \
&& docker-php-ext-install -j$(nproc) gd \
#PHP Redis extensions
&& pecl install redis \
&& docker-php-ext-enable redis \
#PHP Database extensions
&& docker-php-ext-install pdo_mysql \
#pdo_pgsql pdo_sqlite \
#PHP extensions (dependencies)
&& docker-php-ext-configure intl \
&& docker-php-ext-install -j$(nproc) intl bcmath zip pcntl exif curl \
#Cleanup
&& docker-php-source delete \
&& apt-get autoremove --purge -y \
&& apt-get clean \
&& rm -rf /var/cache/apt \
&& rm -rf /var/lib/apt/lists/
# Use the default production configuration
COPY contrib/docker/php.production.ini "$PHP_INI_DIR/php.ini"
COPY . /var/www/
# for detail why storage is copied this way, pls refer to https://github.com/pixelfed/pixelfed/pull/2137#discussion_r434468862
RUN cp -r storage storage.skel \
&& composer install --prefer-dist --no-interaction --no-ansi --optimize-autoloader \
&& rm -rf html && ln -s public html \
&& chown -R www-data:www-data /var/www
RUN php artisan horizon:publish
VOLUME /var/www/storage /var/www/bootstrap
CMD ["/var/www/contrib/docker/start.fpm.sh"]

View file

@ -1,15 +0,0 @@
#!/bin/bash
# Create the storage tree if needed and fix permissions
cp -r storage.skel/* storage/
chown -R www-data:www-data storage/ bootstrap/
# Refresh the environment
php artisan config:cache
php artisan storage:link
php artisan horizon:publish
php artisan route:cache
php artisan view:cache
# Finally run Apache
apache2-foreground

View file

@ -1,15 +0,0 @@
#!/bin/bash
# Create the storage tree if needed and fix permissions
cp -r storage.skel/* storage/
chown -R www-data:www-data storage/ bootstrap/
# Refresh the environment
php artisan config:cache
php artisan storage:link
php artisan horizon:publish
php artisan route:cache
php artisan view:cache
# Finally run FPM
php-fpm

View file

@ -1,67 +0,0 @@
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name pixelfed.example; # change this to your fqdn
root /home/pixelfed/public; # path to repo/public
ssl_certificate /etc/nginx/ssl/server.crt; # generate your own
ssl_certificate_key /etc/nginx/ssl/server.key; # or use letsencrypt
ssl_protocols TLSv1.2;
ssl_ciphers EECDH+AESGCM:EECDH+CHACHA20:EECDH+AES;
ssl_prefer_server_ciphers on;
#add_header X-Frame-Options "SAMEORIGIN";
add_header X-XSS-Protection "1; mode=block";
add_header X-Content-Type-Options "nosniff";
index index.php;
charset utf-8;
client_max_body_size 15M;
location / {
try_files $uri $uri/ /index.php?$query_string;
}
location = /favicon.ico { access_log off; log_not_found off; }
location = /robots.txt { access_log off; log_not_found off; }
error_page 404 /index.php;
location ~ \.php$ {
fastcgi_split_path_info ^(.+\.php)(/.+)$;
fastcgi_pass unix:/var/run/php/php8.1-fpm.sock;
fastcgi_param SCRIPT_FILENAME $realpath_root$fastcgi_script_name;
fastcgi_param QUERY_STRING $query_string;
fastcgi_param REQUEST_METHOD $request_method;
fastcgi_param CONTENT_TYPE $content_type;
fastcgi_param CONTENT_LENGTH $content_length;
fastcgi_param SCRIPT_NAME $fastcgi_script_name;
fastcgi_param REQUEST_URI $request_uri;
fastcgi_param DOCUMENT_URI $document_uri;
fastcgi_param DOCUMENT_ROOT $document_root;
fastcgi_param SERVER_PROTOCOL $server_protocol;
fastcgi_param GATEWAY_INTERFACE CGI/1.1;
fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
fastcgi_param REMOTE_ADDR $remote_addr;
fastcgi_param REMOTE_PORT $remote_port;
fastcgi_param SERVER_ADDR $server_addr;
fastcgi_param SERVER_PORT $server_port;
fastcgi_param SERVER_NAME $server_name;
fastcgi_param HTTPS $https if_not_empty;
fastcgi_param REDIRECT_STATUS 200;
fastcgi_param HTTP_PROXY "";
}
location ~ /\.(?!well-known).* {
deny all;
}
}
server { # Redirect http to https
server_name pixelfed.example; # change this to your fqdn
listen 80;
listen [::]:80;
return 301 https://$host$request_uri;
}

View file

@ -0,0 +1,42 @@
---
version: "3"
services:
migrate:
image: "secoresearch/rsync"
entrypoint: ""
working_dir: /migrate
command: 'bash -c "exit 1"'
restart: "no"
volumes:
################################
# Storage volume
################################
# OLD
- "app-storage:/migrate/app-storage/old"
# NEW
- "${DOCKER_APP_HOST_STORAGE_PATH}:/migrate/app-storage/new"
################################
# MySQL/DB volume
################################
# OLD
- "db-data:/migrate/db-data/old"
# NEW
- "${DOCKER_DB_HOST_DATA_PATH}:/migrate/db-data/new"
################################
# Redis volume
################################
# OLD
- "redis-data:/migrate/redis-data/old"
# NEW
- "${DOCKER_REDIS_HOST_DATA_PATH}:/migrate/redis-data/new"
# Volumes from the old [docker-compose.yml] file
# https://github.com/pixelfed/pixelfed/blob/b1ff44ca2f75c088a11576fb03b5bad2fbed4d5c/docker-compose.yml#L72-L76
volumes:
db-data:
redis-data:
app-storage:
app-bootstrap:

View file

@ -1,82 +1,218 @@
--- ---
version: '3' # Require 3.8 to ensure people use a recent version of Docker + Compose
version: "3.8"
# In order to set configuration, please use a .env file in ###############################################################
# your compose project directory (the same directory as your # Please see docker/README.md for usage information
# docker-compose.yml), and set database options, application ###############################################################
# name, key, and other settings there.
# A list of available settings is available in .env.example
#
# The services should scale properly across a swarm cluster
# if the volumes are properly shared between cluster members.
services: services:
## App and Worker # HTTP/HTTPS proxy
app: #
# Comment to use dockerhub image # Sits in front of the *real* webserver and manages SSL and (optionally)
image: pixelfed/pixelfed:latest # load-balancing between multiple web servers
#
# You can disable this service by setting [DOCKER_PROXY_PROFILE="disabled"]
# in your [.env] file - the setting is near the bottom of the file.
#
# This also disables the [proxy-acme] service, if this is not desired, change the
# [DOCKER_PROXY_ACME_PROFILE] setting to an empty string [""]
#
# See: https://github.com/nginx-proxy/nginx-proxy/tree/main/docs
proxy:
image: nginxproxy/nginx-proxy:1.4
container_name: "${DOCKER_ALL_CONTAINER_NAME_PREFIX}-proxy"
restart: unless-stopped restart: unless-stopped
env_file: profiles:
- .env.docker - ${DOCKER_PROXY_PROFILE:-}
environment:
DOCKER_SERVICE_NAME: "proxy"
volumes: volumes:
- app-storage:/var/www/storage - "${DOCKER_PROXY_HOST_DOCKER_SOCKET_PATH}:/tmp/docker.sock:ro"
- app-bootstrap:/var/www/bootstrap - "${DOCKER_ALL_HOST_CONFIG_ROOT_PATH}/proxy/conf.d:/etc/nginx/conf.d"
- "./.env.docker:/var/www/.env" - "${DOCKER_ALL_HOST_CONFIG_ROOT_PATH}/proxy/vhost.d:/etc/nginx/vhost.d"
networks: - "${DOCKER_ALL_HOST_CONFIG_ROOT_PATH}/proxy/certs:/etc/nginx/certs"
- external - "${DOCKER_ALL_HOST_DATA_ROOT_PATH}/proxy/html:/usr/share/nginx/html"
- internal
ports: ports:
- "8080:80" - "${DOCKER_PROXY_HOST_PORT_HTTP}:80"
- "${DOCKER_PROXY_HOST_PORT_HTTPS}:443"
healthcheck:
test: "curl --fail https://${APP_DOMAIN}/api/service/health-check"
interval: "${DOCKER_PROXY_HEALTHCHECK_INTERVAL}"
retries: 2
timeout: 5s
# Proxy companion for managing letsencrypt SSL certificates
#
# You can disable this service by setting [DOCKER_PROXY_ACME_PROFILE="disabled"]
# in your [.env] file - the setting is near the bottom of the file.
#
# See: https://github.com/nginx-proxy/acme-companion/tree/main/docs
proxy-acme:
image: nginxproxy/acme-companion
container_name: "${DOCKER_ALL_CONTAINER_NAME_PREFIX}-proxy-acme"
restart: unless-stopped
profiles:
- ${DOCKER_PROXY_ACME_PROFILE:-}
environment:
DEBUG: 0
DEFAULT_EMAIL: "${DOCKER_PROXY_LETSENCRYPT_EMAIL:?error}"
NGINX_PROXY_CONTAINER: "${DOCKER_ALL_CONTAINER_NAME_PREFIX}-proxy"
depends_on:
- proxy
volumes:
- "${DOCKER_ALL_HOST_CONFIG_ROOT_PATH}/proxy-acme:/etc/acme.sh"
- "${DOCKER_ALL_HOST_CONFIG_ROOT_PATH}/proxy/certs:/etc/nginx/certs"
- "${DOCKER_ALL_HOST_CONFIG_ROOT_PATH}/proxy/conf.d:/etc/nginx/conf.d"
- "${DOCKER_ALL_HOST_CONFIG_ROOT_PATH}/proxy/vhost.d:/etc/nginx/vhost.d"
- "${DOCKER_ALL_HOST_DATA_ROOT_PATH}/proxy/html:/usr/share/nginx/html"
- "${DOCKER_PROXY_HOST_DOCKER_SOCKET_PATH}:/var/run/docker.sock:ro"
web:
image: "${DOCKER_APP_IMAGE}:${DOCKER_APP_TAG}"
container_name: "${DOCKER_ALL_CONTAINER_NAME_PREFIX}-web"
restart: unless-stopped
profiles:
- ${DOCKER_WEB_PROFILE:-}
build:
target: ${DOCKER_APP_RUNTIME}-runtime
cache_from:
- "type=registry,ref=${DOCKER_APP_IMAGE}-cache:${DOCKER_APP_TAG}"
args:
APT_PACKAGES_EXTRA: "${DOCKER_APP_APT_PACKAGES_EXTRA:-}"
PHP_BASE_TYPE: "${DOCKER_APP_BASE_TYPE}"
PHP_DEBIAN_RELEASE: "${DOCKER_APP_DEBIAN_RELEASE}"
PHP_EXTENSIONS_EXTRA: "${DOCKER_APP_PHP_EXTENSIONS_EXTRA:-}"
PHP_PECL_EXTENSIONS_EXTRA: "${DOCKER_APP_PHP_PECL_EXTENSIONS_EXTRA:-}"
PHP_VERSION: "${DOCKER_APP_PHP_VERSION:?error}"
environment:
# Used by Pixelfed Docker init script
DOCKER_SERVICE_NAME: "web"
DOCKER_APP_ENTRYPOINT_DEBUG: ${DOCKER_APP_ENTRYPOINT_DEBUG:-0}
ENTRYPOINT_SKIP_SCRIPTS: ${ENTRYPOINT_SKIP_SCRIPTS:-}
# Used by [proxy] service
LETSENCRYPT_HOST: "${DOCKER_PROXY_LETSENCRYPT_HOST:?error}"
LETSENCRYPT_EMAIL: "${DOCKER_PROXY_LETSENCRYPT_EMAIL:?error}"
LETSENCRYPT_TEST: "${DOCKER_PROXY_LETSENCRYPT_TEST:-}"
VIRTUAL_HOST: "${APP_DOMAIN}"
VIRTUAL_PORT: "80"
volumes:
- "./.env:/var/www/.env"
- "${DOCKER_ALL_HOST_CONFIG_ROOT_PATH}/proxy/conf.d:/shared/proxy/conf.d"
- "${DOCKER_APP_HOST_CACHE_PATH}:/var/www/bootstrap/cache"
- "${DOCKER_APP_HOST_OVERRIDES_PATH}:/docker/overrides:ro"
- "${DOCKER_APP_HOST_STORAGE_PATH}:/var/www/storage"
labels:
com.github.nginx-proxy.nginx-proxy.keepalive: "30"
com.github.nginx-proxy.nginx-proxy.http2.enable: "true"
com.github.nginx-proxy.nginx-proxy.http3.enable: "true"
ports:
- "${DOCKER_WEB_PORT_EXTERNAL_HTTP}:80"
depends_on: depends_on:
- db - db
- redis - redis
healthcheck:
test: 'curl --header "Host: ${APP_DOMAIN}" --fail http://localhost/api/service/health-check'
interval: "${DOCKER_WEB_HEALTHCHECK_INTERVAL}"
retries: 2
timeout: 5s
worker: worker:
image: pixelfed/pixelfed:latest image: "${DOCKER_APP_IMAGE}:${DOCKER_APP_TAG}"
restart: unless-stopped container_name: "${DOCKER_ALL_CONTAINER_NAME_PREFIX}-worker"
env_file:
- .env.docker
volumes:
- app-storage:/var/www/storage
- app-bootstrap:/var/www/bootstrap
networks:
- external
- internal
command: gosu www-data php artisan horizon command: gosu www-data php artisan horizon
restart: unless-stopped
stop_signal: SIGTERM
profiles:
- ${DOCKER_WORKER_PROFILE:-}
build:
target: ${DOCKER_APP_RUNTIME}-runtime
cache_from:
- "type=registry,ref=${DOCKER_APP_IMAGE}-cache:${DOCKER_APP_TAG}"
args:
APT_PACKAGES_EXTRA: "${DOCKER_APP_APT_PACKAGES_EXTRA:-}"
PHP_BASE_TYPE: "${DOCKER_APP_BASE_TYPE}"
PHP_DEBIAN_RELEASE: "${DOCKER_APP_DEBIAN_RELEASE}"
PHP_EXTENSIONS_EXTRA: "${DOCKER_APP_PHP_EXTENSIONS_EXTRA:-}"
PHP_PECL_EXTENSIONS_EXTRA: "${DOCKER_APP_PHP_PECL_EXTENSIONS_EXTRA:-}"
PHP_VERSION: "${DOCKER_APP_PHP_VERSION:?error}"
environment:
# Used by Pixelfed Docker init script
DOCKER_SERVICE_NAME: "worker"
DOCKER_APP_ENTRYPOINT_DEBUG: ${DOCKER_APP_ENTRYPOINT_DEBUG:-0}
ENTRYPOINT_SKIP_SCRIPTS: ${ENTRYPOINT_SKIP_SCRIPTS:-}
volumes:
- "./.env:/var/www/.env"
- "${DOCKER_ALL_HOST_CONFIG_ROOT_PATH}/proxy/conf.d:/shared/proxy/conf.d"
- "${DOCKER_APP_HOST_CACHE_PATH}:/var/www/bootstrap/cache"
- "${DOCKER_APP_HOST_OVERRIDES_PATH}:/docker/overrides:ro"
- "${DOCKER_APP_HOST_STORAGE_PATH}:/var/www/storage"
depends_on: depends_on:
- db - db
- redis - redis
healthcheck:
test: gosu www-data php artisan horizon:status | grep running
interval: "${DOCKER_WORKER_HEALTHCHECK_INTERVAL:?error}"
timeout: 5s
retries: 2
## DB and Cache
db: db:
image: mariadb:jammy image: ${DOCKER_DB_IMAGE:?error}
container_name: "${DOCKER_ALL_CONTAINER_NAME_PREFIX}-db"
command: ${DOCKER_DB_COMMAND:-}
restart: unless-stopped restart: unless-stopped
networks: profiles:
- internal - ${DOCKER_DB_PROFILE:-}
command: --default-authentication-plugin=mysql_native_password environment:
env_file: TZ: "${TZ:?error}"
- .env.docker # MySQL (Oracle) - "Environment Variables" at https://hub.docker.com/_/mysql
MYSQL_ROOT_PASSWORD: "${DB_PASSWORD:?error}"
MYSQL_USER: "${DB_USERNAME:?error}"
MYSQL_PASSWORD: "${DB_PASSWORD:?error}"
MYSQL_DATABASE: "${DB_DATABASE:?error}"
# MySQL (MariaDB) - "Start a mariadb server instance with user, password and database" at https://hub.docker.com/_/mariadb
MARIADB_ROOT_PASSWORD: "${DB_PASSWORD:?error}"
MARIADB_USER: "${DB_USERNAME:?error}"
MARIADB_PASSWORD: "${DB_PASSWORD:?error}"
MARIADB_DATABASE: "${DB_DATABASE:?error}"
# PostgreSQL - "Environment Variables" at https://hub.docker.com/_/postgres
POSTGRES_USER: "${DB_USERNAME:?error}"
POSTGRES_PASSWORD: "${DB_PASSWORD:?error}"
POSTGRES_DB: "${DB_DATABASE:?error}"
volumes: volumes:
- "db-data:/var/lib/mysql" - "${DOCKER_DB_HOST_DATA_PATH:?error}:${DOCKER_DB_CONTAINER_DATA_PATH:?error}"
ports:
- "${DOCKER_DB_HOST_PORT:?error}:${DOCKER_DB_CONTAINER_PORT:?error}"
healthcheck:
test:
[
"CMD",
"healthcheck.sh",
"--su-mysql",
"--connect",
"--innodb_initialized",
]
interval: "${DOCKER_DB_HEALTHCHECK_INTERVAL:?error}"
retries: 2
timeout: 5s
redis: redis:
image: redis:5-alpine image: redis:${DOCKER_REDIS_VERSION}
container_name: "${DOCKER_ALL_CONTAINER_NAME_PREFIX}-redis"
restart: unless-stopped restart: unless-stopped
env_file: command: "${DOCKER_REDIS_CONFIG_FILE:-} --requirepass '${REDIS_PASSWORD:-}'"
- .env.docker profiles:
- ${DOCKER_REDIS_PROFILE:-}
environment:
TZ: "${TZ:?error}"
REDISCLI_AUTH: ${REDIS_PASSWORD:-}
volumes: volumes:
- "redis-data:/data" - "${DOCKER_ALL_HOST_CONFIG_ROOT_PATH}/redis:/etc/redis"
networks: - "${DOCKER_REDIS_HOST_DATA_PATH}:/data"
- internal ports:
- "${DOCKER_REDIS_HOST_PORT}:6379"
volumes: healthcheck:
db-data: test: ["CMD", "redis-cli", "-p", "6379", "ping"]
redis-data: interval: "${DOCKER_REDIS_HEALTHCHECK_INTERVAL:?error}"
app-storage: retries: 2
app-bootstrap: timeout: 5s
networks:
internal:
internal: true
external:
driver: bridge

5
docker/README.md Normal file
View file

@ -0,0 +1,5 @@
# Pixelfed + Docker + Docker Compose
Please see the [Pixelfed Docs (Next)](https://jippi.github.io/pixelfed-docs-next/pr-preview/pr-1/running-pixelfed/) for current documentation on Docker usage.
The docs can be [reviewed in the pixelfed/docs-next](https://github.com/pixelfed/docs-next/pull/1) repository.

View file

@ -0,0 +1,8 @@
RemoteIPHeader X-Real-IP
# All private IPs as outlined in rfc1918
#
# See: https://datatracker.ietf.org/doc/html/rfc1918
RemoteIPTrustedProxy 10.0.0.0/8
RemoteIPTrustedProxy 172.16.0.0/12
RemoteIPTrustedProxy 192.168.0.0/16

11
docker/artisan Executable file
View file

@ -0,0 +1,11 @@
#!/bin/bash
#
# Run [php artisan] inside a running docker compose service.
#
#   PF_SERVICE  compose service to exec into (default: worker)
#   PF_USER     in-container user to run as  (default: www-data)
#
# TERM/COLORTERM are forwarded so artisan keeps colored output.

service="${PF_SERVICE:=worker}"
user="${PF_USER:=www-data}"

exec docker compose exec --user "${user}" --env TERM --env COLORTERM "${service}" php artisan "${@}"

45
docker/dottie Executable file
View file

@ -0,0 +1,45 @@
#!/bin/bash
set -e -o errexit -o nounset -o pipefail

# Run the "dottie" dot-env tool in a throw-away Docker container, with the
# project checkout mounted at [/var/www] so it can inspect the [.env] files.

declare project_root="${PWD}"
declare user="${PF_USER:=www-data}"

# Prefer the top-level directory of the git checkout over $PWD when possible
if command -v git &>/dev/null; then
    project_root=$(git rev-parse --show-toplevel)
fi

declare -r release="${DOTTIE_VERSION:-latest}"

# Marker file whose mtime records when we last polled for a newer dottie image
declare -r update_check_file="/tmp/.dottie-update-check"

# Poll for a new image at most once every 8 hours
declare -i max_age_seconds=$((8 * 60 * 60))

# Assume the marker is stale until proven otherwise, so a missing marker
# file forces an update check on first run
declare -i age_seconds=$((max_age_seconds + 1))

# Base [docker run] flags
declare -a run_flags=(
    --rm
    --interactive
    --tty
    --user "${user}"
    --env TERM
    --env COLORTERM
    --volume "${project_root}:/var/www"
    --workdir /var/www
)

# When the marker exists, compute its age in seconds since last modification
if [[ -f "${update_check_file}" ]]; then
    now=$(date +%s)
    changed=$(date -r "${update_check_file}" +%s)
    age_seconds=$((now - changed))
fi

# Marker stale (or missing): force-pull the image and refresh the marker
if ((age_seconds > max_age_seconds)); then
    run_flags+=(--pull always)
    touch "${update_check_file}"
fi

# run dottie
exec docker run "${run_flags[@]}" "ghcr.io/jippi/dottie:${release}" "$@"

0
docker/fpm/root/.gitkeep Normal file
View file

2
docker/nginx/Procfile Normal file
View file

@ -0,0 +1,2 @@
fpm: php-fpm
nginx: nginx -g "daemon off;"

View file

@ -0,0 +1,49 @@
server {
listen 80 default_server;
server_name {{ getenv "APP_DOMAIN" }};
root /var/www/public;
add_header X-Frame-Options "SAMEORIGIN";
add_header X-XSS-Protection "1; mode=block";
add_header X-Content-Type-Options "nosniff";
access_log /dev/stdout;
error_log /dev/stderr warn;
index index.html index.htm index.php;
charset utf-8;
client_max_body_size {{ getenv "POST_MAX_SIZE" "61M" }};
location / {
try_files $uri $uri/ /index.php?$query_string;
}
location = /favicon.ico {
access_log off;
log_not_found off;
}
location = /robots.txt {
access_log off;
log_not_found off;
}
error_page 404 /index.php;
location ~ \.php$ {
fastcgi_split_path_info ^(.+\.php)(/.+)$;
fastcgi_pass 127.0.0.1:9000;
fastcgi_index index.php;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
}
location ~ /\.(?!well-known).* {
deny all;
}
}

View file

@ -0,0 +1,41 @@
# This is changed from the original "nginx" in upstream to work properly
# with permissions within pixelfed when serving static files.
user www-data;
worker_processes auto;
# Ensure the PID is writable
# Lifted from: https://hub.docker.com/r/nginxinc/nginx-unprivileged
pid /tmp/nginx.pid;
# Write error log to stderr (/proc/self/fd/2 -> /dev/stderr)
error_log /proc/self/fd/2 notice;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" "$http_x_forwarded_for"';
# Write error log to stdout (/proc/self/fd/1 -> /dev/stdout)
access_log /proc/self/fd/1 main;
sendfile on;
tcp_nopush on;
keepalive_timeout 65;
gzip on;
# Ensure all temp paths are in a writable by "www-data" user.
# Lifted from: https://hub.docker.com/r/nginxinc/nginx-unprivileged
client_body_temp_path /tmp/client_temp;
proxy_temp_path /tmp/proxy_temp_path;
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
include /etc/nginx/conf.d/*.conf;
}

View file

@ -0,0 +1,31 @@
#!/bin/bash
#
# Entrypoint step: make sure files the later steps write to are owned by the
# runtime user, and optionally fix ownership of operator-configured paths.
: "${ENTRYPOINT_ROOT:="/docker"}"

# shellcheck source=SCRIPTDIR/../helpers.sh
source "${ENTRYPOINT_ROOT}/helpers.sh"

entrypoint-set-script-name "$0"

# Ensure the Docker volumes and required files are owned by the runtime user as other scripts
# will be writing to these
run-as-current-user chown --verbose "${RUNTIME_UID}:${RUNTIME_GID}" "./.env"
run-as-current-user chown --verbose "${RUNTIME_UID}:${RUNTIME_GID}" "./bootstrap/cache"
run-as-current-user chown --verbose "${RUNTIME_UID}:${RUNTIME_GID}" "./storage"
run-as-current-user chown --verbose --recursive "${RUNTIME_UID}:${RUNTIME_GID}" "./storage/docker"

# Optionally fix ownership of configured paths (space-separated list)
: "${DOCKER_APP_ENSURE_OWNERSHIP_PATHS:=""}"

declare -a ensure_ownership_paths=()

# BUGFIX: the original used [read -ar]; bash treats "r" as the *argument* to
# [-a], reading the words into an array named "r" and leaving
# [ensure_ownership_paths] empty. [-ra] correctly combines [-r] (raw) with
# [-a ensure_ownership_paths].
IFS=' ' read -ra ensure_ownership_paths <<<"${DOCKER_APP_ENSURE_OWNERSHIP_PATHS}"

if [[ ${#ensure_ownership_paths[@]} == 0 ]]; then
    log-info "No paths has been configured for ownership fixes via [\$DOCKER_APP_ENSURE_OWNERSHIP_PATHS]."
    exit 0
fi

for path in "${ensure_ownership_paths[@]}"; do
    log-info "Ensure ownership of [${path}] is correct"

    stream-prefix-command-output run-as-current-user chown --recursive "${RUNTIME_UID}:${RUNTIME_GID}" "${path}"
done

View file

@ -0,0 +1,21 @@
#!/bin/bash
: "${ENTRYPOINT_ROOT:="/docker"}"

# shellcheck source=SCRIPTDIR/../helpers.sh
source "${ENTRYPOINT_ROOT}/helpers.sh"

entrypoint-set-script-name "$0"

# Run [dottie validate] against every known dot-env file, warning (instead of
# failing) when a file is missing.
for file in "${dot_env_files[@]}"; do
    if file-exists "${file}"; then
        # The 'dir' and 'file' rules validate *host* paths which do not (and
        # should not) exist inside the container, so they are ignored here.
        # The fixer is disabled since this run is not interactive anyway.
        run-as-current-user dottie validate --file "${file}" --ignore-rule dir,file --exclude-prefix APP_KEY --no-fix
    else
        log-warning "Could not source file [${file}]: does not exists"
    fi
done

View file

@ -0,0 +1,33 @@
#!/bin/bash
# Derives POST_MAX_SIZE (used by the php.ini / nginx templates) from the
# configured photo size and album length, plus a safety buffer.
#
# NOTE:
#
# This file is *sourced* not run by the entrypoint runner
# so any environment values set here will be accessible to all sub-processes
# and future entrypoint.d scripts
#
# We also don't need to source `helpers.sh` since it's already available
entrypoint-set-script-name "${BASH_SOURCE[0]}"
load-config-files
# Defaults; the arithmetic below treats MAX_PHOTO_SIZE as kilobytes
# (assumption based on the KB-unit numfmt calls -- TODO confirm against Pixelfed config)
: "${MAX_PHOTO_SIZE:=15000}"
: "${MAX_ALBUM_LENGTH:=4}"
# We assign a 1MB buffer to the just-in-time calculated max post size to allow for fields and overhead
: "${POST_MAX_SIZE_BUFFER:=1M}"
log-info "POST_MAX_SIZE_BUFFER is set to [${POST_MAX_SIZE_BUFFER}]"
# Normalize the human-readable buffer (e.g. "1M") into a plain KB integer so it
# can participate in shell arithmetic; --invalid=fail aborts on malformed input
buffer=$(numfmt --invalid=fail --from=auto --to=none --to-unit=K "${POST_MAX_SIZE_BUFFER}")
log-info "POST_MAX_SIZE_BUFFER converted to KB is [${buffer}]"
# Automatically calculate the [post_max_size] value for [php.ini] and [nginx]
log-info "POST_MAX_SIZE will be calculated by [({MAX_PHOTO_SIZE} * {MAX_ALBUM_LENGTH}) + {POST_MAX_SIZE_BUFFER}]"
log-info " MAX_PHOTO_SIZE=${MAX_PHOTO_SIZE}"
log-info " MAX_ALBUM_LENGTH=${MAX_ALBUM_LENGTH}"
log-info " POST_MAX_SIZE_BUFFER=${buffer}"
# Rendered with an SI suffix (e.g. "61M"); a POST_MAX_SIZE already present in
# the environment takes precedence over the calculated value (:= semantics)
: "${POST_MAX_SIZE:=$(numfmt --invalid=fail --from=auto --from-unit=K --to=si $(((MAX_PHOTO_SIZE * MAX_ALBUM_LENGTH) + buffer)))}"
log-info "POST_MAX_SIZE was calculated to [${POST_MAX_SIZE}]"
# NOTE: must export the value so it's available in other scripts!
export POST_MAX_SIZE

View file

@ -0,0 +1,60 @@
#!/bin/bash
# Entrypoint step: render every file under [ENTRYPOINT_TEMPLATE_DIR] with
# [gomplate], writing each result to the same relative path on the real
# filesystem (optionally under ENTRYPOINT_TEMPLATE_OUTPUT_PREFIX).
: "${ENTRYPOINT_ROOT:="/docker"}"
# shellcheck source=SCRIPTDIR/../helpers.sh
source "${ENTRYPOINT_ROOT}/helpers.sh"
entrypoint-set-script-name "$0"
# Show [git diff] of templates being rendered (will help verify output)
: "${ENTRYPOINT_SHOW_TEMPLATE_DIFF:=1}"
# Directory where templates can be found
: "${ENTRYPOINT_TEMPLATE_DIR:=/docker/templates/}"
# Root path to write rendered template files to (default is '', meaning they are written to /<path>)
: "${ENTRYPOINT_TEMPLATE_OUTPUT_PREFIX:=}"
declare template_file relative_template_file_path output_file_dir
# load all dot-env config files
load-config-files
# export all dot-env variables so they are available in templating
#
# NOTE: word-splitting is intentional -- each array element is a variable *name*
# shellcheck disable=SC2068
export ${seen_dot_env_variables[@]}
find "${ENTRYPOINT_TEMPLATE_DIR}" -follow -type f -print | while read -r template_file; do
    # Example: template_file=/docker/templates/usr/local/etc/php/php.ini
    # The file path without the template dir prefix ($ENTRYPOINT_TEMPLATE_DIR)
    #
    # Example: /usr/local/etc/php/php.ini
    relative_template_file_path="${template_file#"${ENTRYPOINT_TEMPLATE_DIR}"}"
    # Adds optional prefix to the output file path
    #
    # Example: /usr/local/etc/php/php.ini
    output_file_path="${ENTRYPOINT_TEMPLATE_OUTPUT_PREFIX}/${relative_template_file_path}"
    # Remove the file from the path
    #
    # Example: /usr/local/etc/php
    output_file_dir=$(dirname "${output_file_path}")
    # Ensure the output directory is writable
    if ! is-writable "${output_file_dir}"; then
        log-error-and-exit "${output_file_dir} is not writable"
    fi
    # Create the output directory if it doesn't exists
    ensure-directory-exists "${output_file_dir}"
    # Render the template
    log-info "Running [gomplate] on [${template_file}] --> [${output_file_path}]"
    gomplate <"${template_file}" >"${output_file_path}"
    # Show a diff between the template and its rendered output
    if is-true "${ENTRYPOINT_SHOW_TEMPLATE_DIFF}"; then
        git --no-pager diff --color=always "${template_file}" "${output_file_path}" || : # ignore diff exit code
    fi
done

View file

@ -0,0 +1,13 @@
#!/bin/bash
# Entrypoint step: refresh the [storage/] directory from the baked-in skeleton
# and (re)create the public storage symlink.
: "${ENTRYPOINT_ROOT:="/docker"}"
# shellcheck source=SCRIPTDIR/../helpers.sh
source "${ENTRYPOINT_ROOT}/helpers.sh"
entrypoint-set-script-name "$0"
# Copy the [storage/] skeleton files over the "real" [storage/] directory so assets are updated between versions
run-as-runtime-user cp --force --recursive storage.skel/. ./storage/
# Ensure storage links are correctly configured
run-as-runtime-user php artisan storage:link

View file

@ -0,0 +1,38 @@
#!/bin/bash
# Entrypoint step: run Pixelfed's "one-time setup tasks" (key generation,
# initial migration, city import, ...) exactly once per installation; the
# [only-once] helper records completed tasks so restarts skip them.
: "${ENTRYPOINT_ROOT:="/docker"}"
# shellcheck source=SCRIPTDIR/../helpers.sh
source "${ENTRYPOINT_ROOT}/helpers.sh"
entrypoint-set-script-name "$0"
load-config-files
# Allow automatic applying of outstanding/new migrations on startup
: "${DOCKER_APP_RUN_ONE_TIME_SETUP_TASKS:=1}"
if is-false "${DOCKER_APP_RUN_ONE_TIME_SETUP_TASKS}"; then
    log-warning "Automatic run of the 'One-time setup tasks' is disabled."
    log-warning "Please set [DOCKER_APP_RUN_ONE_TIME_SETUP_TASKS=1] in your [.env] file to enable this."
    exit 0
fi
# Block until the database accepts connections; migrations below need it
await-database-ready
# Following https://docs.pixelfed.org/running-pixelfed/installation/#one-time-setup-tasks
#
# NOTE: Caches happens in [30-cache.sh]
# NOTE: the task order below mirrors the install docs; do not reorder
only-once "key:generate" run-as-runtime-user php artisan key:generate
only-once "storage:link" run-as-runtime-user php artisan storage:link
only-once "initial:migrate" run-as-runtime-user php artisan migrate --force
only-once "import:cities" run-as-runtime-user php artisan import:cities
if is-true "${ACTIVITY_PUB:-false}"; then
    only-once "instance:actor" run-as-runtime-user php artisan instance:actor
fi
if is-true "${OAUTH_ENABLED:-false}"; then
    only-once "passport:keys" run-as-runtime-user php artisan passport:keys
fi

View file

@ -0,0 +1,42 @@
#!/bin/bash
#
# Entrypoint step: detect outstanding database migrations and (optionally)
# apply them automatically on startup.
: "${ENTRYPOINT_ROOT:="/docker"}"

# shellcheck source=SCRIPTDIR/../helpers.sh
source "${ENTRYPOINT_ROOT}/helpers.sh"

entrypoint-set-script-name "$0"

# Allow automatic applying of outstanding/new migrations on startup
: "${DB_APPLY_NEW_MIGRATIONS_AUTOMATICALLY:=0}"

# Wait for the database to be ready
await-database-ready

# Run the migrate:status command and capture output ([|| :] guards errexit)
output=$(run-as-runtime-user php artisan migrate:status || :)

# By default we have no new migrations
declare -i new_migrations=0

# Detect if any new migrations are available by checking for "No" in the output.
#
# BUGFIX: this was [grep No && new_migrations=1]; with [errexit]+[pipefail]
# inherited from helpers.sh, a non-matching grep failed the && list and
# aborted the whole script before it could log "No new migrations detected".
# Wrapping the pipeline in [if] exempts it from errexit, and [-q] keeps the
# matched lines out of the log.
if echo "$output" | grep -q No; then
    new_migrations=1
fi

if is-false "${new_migrations}"; then
    log-info "No new migrations detected"

    exit 0
fi

log-warning "New migrations available"

# Print the output
echo "$output"

if is-false "${DB_APPLY_NEW_MIGRATIONS_AUTOMATICALLY}"; then
    log-info "Automatic applying of new database migrations is disabled"
    log-info "Please set [DB_APPLY_NEW_MIGRATIONS_AUTOMATICALLY=1] in your [.env] file to enable this."

    exit 0
fi

run-as-runtime-user php artisan migrate --force

View file

@ -0,0 +1,9 @@
#!/bin/bash
# Entrypoint step: publish the Laravel Horizon dashboard assets so they match
# the Horizon version shipped in this image.
: "${ENTRYPOINT_ROOT:="/docker"}"
# shellcheck source=SCRIPTDIR/../helpers.sh
source "${ENTRYPOINT_ROOT}/helpers.sh"
entrypoint-set-script-name "$0"
# Run as the runtime user so published files end up with the right ownership
run-as-runtime-user php artisan horizon:publish

View file

@ -0,0 +1,11 @@
#!/bin/bash
: "${ENTRYPOINT_ROOT:="/docker"}"
# shellcheck source=SCRIPTDIR/../helpers.sh
source "${ENTRYPOINT_ROOT}/helpers.sh"
entrypoint-set-script-name "$0"
run-as-runtime-user php artisan config:cache
run-as-runtime-user php artisan route:cache
run-as-runtime-user php artisan view:cache

View file

@ -0,0 +1,105 @@
#!/bin/bash
#
# Container entrypoint: runs (or sources) every script in [entrypoint.d/] in
# version-sorted order, then execs the container command ("$@").

# Short-circuit the entrypoint if $ENTRYPOINT_SKIP isn't set to 0
if [[ ${ENTRYPOINT_SKIP:=0} != 0 ]]; then
    exec "$@"
fi

: "${ENTRYPOINT_ROOT:="/docker"}"
export ENTRYPOINT_ROOT

# Directory where entrypoint scripts lives
: "${ENTRYPOINT_D_ROOT:="${ENTRYPOINT_ROOT}/entrypoint.d/"}"
export ENTRYPOINT_D_ROOT

: "${DOCKER_APP_HOST_OVERRIDES_PATH:="${ENTRYPOINT_ROOT}/overrides"}"
export DOCKER_APP_HOST_OVERRIDES_PATH

# Space separated list of scripts the entrypoint runner should skip
: "${ENTRYPOINT_SKIP_SCRIPTS:=""}"

# Load helper scripts (also enables errexit/nounset/pipefail for this script)
#
# shellcheck source=SCRIPTDIR/helpers.sh
source "${ENTRYPOINT_ROOT}/helpers.sh"

# Set the entrypoint name for logging
entrypoint-set-script-name "entrypoint.sh"

# Convert ENTRYPOINT_SKIP_SCRIPTS into a native bash array for easier lookup
#
# BUGFIX: this was [read -ar]; bash treats "r" as the *argument* to [-a],
# reading the words into an array named "r" and leaving [skip_scripts] empty,
# which silently disabled the skip feature. [-ra] combines [-r] (raw) with
# [-a skip_scripts] as intended.
declare -a skip_scripts
# shellcheck disable=SC2034
IFS=' ' read -ra skip_scripts <<< "$ENTRYPOINT_SKIP_SCRIPTS"

# Ensure the entrypoint root folder exists
mkdir -p "${ENTRYPOINT_D_ROOT}"

# If ENTRYPOINT_D_ROOT directory is empty, warn and run the regular command
if directory-is-empty "${ENTRYPOINT_D_ROOT}"; then
    log-warning "No files found in ${ENTRYPOINT_D_ROOT}, skipping configuration"

    exec "$@"
fi

# If the overrides directory is non-empty, copy its files into the container
if ! directory-is-empty "${DOCKER_APP_HOST_OVERRIDES_PATH}"; then
    log-info "Overrides directory is not empty, copying files"

    run-as-current-user cp --verbose --recursive "${DOCKER_APP_HOST_OVERRIDES_PATH}/." /
fi

acquire-lock "entrypoint.sh"

# Start scanning for entrypoint.d files to source or run
log-info "looking for shell scripts in [${ENTRYPOINT_D_ROOT}]"

find "${ENTRYPOINT_D_ROOT}" -follow -type f -print | sort -V | while read -r file; do
    # Skip the script if it's in the skip-script list
    if in-array "$(get-entrypoint-script-name "${file}")" skip_scripts; then
        log-warning "Skipping script [${file}] since it's in the skip list (\$ENTRYPOINT_SKIP_SCRIPTS)"

        continue
    fi

    # Inspect the file extension of the file we're processing
    case "${file}" in
        *.envsh)
            if ! is-executable "${file}"; then
                # abort on shell scripts without exec bit
                log-error-and-exit "File [${file}] is not executable (please 'chmod +x' it)"
            fi

            log-info "${section_message_color}============================================================${color_clear}"
            log-info "${section_message_color}Sourcing [${file}]${color_clear}"
            log-info "${section_message_color}============================================================${color_clear}"

            # shellcheck disable=SC1090
            source "${file}"

            # the sourced file will (should) have changed the log prefix, so this
            # restores our own "global" log prefix once the file is done being sourced
            entrypoint-restore-script-name
            ;;

        *.sh)
            if ! is-executable "${file}"; then
                # abort on shell scripts without exec bit
                log-error-and-exit "File [${file}] is not executable (please 'chmod +x' it)"
            fi

            log-info "${section_message_color}============================================================${color_clear}"
            log-info "${section_message_color}Executing [${file}]${color_clear}"
            log-info "${section_message_color}============================================================${color_clear}"

            "${file}"
            ;;

        *)
            log-warning "Ignoring unrecognized file [${file}]"
            ;;
    esac
done

release-lock "entrypoint.sh"

log-info "Configuration complete; ready for start up"

exec "$@"

View file

@ -0,0 +1,593 @@
#!/bin/bash
set -e -o errexit -o nounset -o pipefail

# Enable shell tracing when DOCKER_APP_ENTRYPOINT_DEBUG=1 (defaults to 0)
[[ ${DOCKER_APP_ENTRYPOINT_DEBUG:=0} == 1 ]] && set -x

# UID/GID the application runs as (33 = www-data on Debian-based images)
: "${RUNTIME_UID:="33"}"
: "${RUNTIME_GID:="33"}"

# Some splash of color for important messages
declare -g error_message_color="\033[1;31m"
declare -g warn_message_color="\033[1;33m"
declare -g notice_message_color="\033[1;34m"
declare -g success_message_color="\033[1;32m"
# shellcheck disable=SC2034
declare -g section_message_color="\033[1;35m"
declare -g color_clear="\033[1;0m"

# Current and previous log prefix
declare -g script_name=
declare -g script_name_previous=
declare -g log_prefix=

# Map of lock name => open file descriptor held by [acquire-lock]
declare -Ag lock_fds=()

# dot-env files to source when reading config
declare -a dot_env_files=(
    /var/www/.env
)

# environment keys seen when source dot files (so we can [export] them)
declare -ga seen_dot_env_variables=()

# State directory used for locks and run-once markers.
# NOTE(review): resolved relative to the *current* directory, which is
# assumed to already be /var/www (see the [cd] below) -- confirm callers
declare -g docker_state_path
docker_state_path="$(readlink -f ./storage/docker)"

declare -g docker_locks_path="${docker_state_path}/lock"
declare -g docker_once_path="${docker_state_path}/once"

# Resolve the user name matching RUNTIME_UID; [id] fails (and the script
# aborts via errexit) when the UID is unknown to the system
declare -g runtime_username
runtime_username=$(id -un "${RUNTIME_UID}")

# We should already be in /var/www, but just to be explicit
#
# BUGFIX: this previously called [log-error-and-exit], which is only
# defined *later* in this file, so a failing [cd] would die with
# "command not found" instead of a readable error message
cd /var/www || {
    echo "ERROR: could not change to /var/www" >&2
    exit 1
}
# @description Set the script name used in the log prefix, remembering the
#              previous value so [entrypoint-restore-script-name] can restore it
# @arg $1 string The name (or path) of the entrypoint script being run
function entrypoint-set-script-name()
{
script_name_previous="${script_name}"
script_name="${1}"
log_prefix="[entrypoint / $(get-entrypoint-script-name "$1")] - "
}
# @description Restore the log prefix to the previous value that was captured in [entrypoint-set-script-name]
function entrypoint-restore-script-name()
{
entrypoint-set-script-name "${script_name_previous}"
}
# @description Run a command as the [runtime user] (resolved from $RUNTIME_UID)
# @arg $@ string The command to run
# @exitcode 0 if the command succeeds
# @exitcode 1 if the command fails
function run-as-runtime-user()
{
run-command-as "${runtime_username}" "${@}"
}
# @description Run a command as the *current* user (whoever is executing this script)
# @arg $@ string The command to run
# @exitcode 0 if the command succeeds
# @exitcode 1 if the command fails
function run-as-current-user()
{
run-command-as "$(id -un)" "${@}"
}
# @description Run a command as a named user, streaming its output with log prefixes
# @arg $1 string The user to run the command as
# @arg $@ string The command to run
# @exitcode 0 If the command succeeds
# @exitcode 1 If the command fails
function run-command-as()
{
local -i exit_code
local target_user
target_user=${1}
shift
log-info-stderr "${notice_message_color}👷 Running [${*}] as [${target_user}]${color_clear}"
# disable error on exit behavior temporarily while we run the command,
# so a failing command is reported instead of killing this script
set +e
if [[ ${target_user} != "root" ]]; then
# non-root: re-run under [su] with environment preserved
stream-prefix-command-output su --preserve-environment "${target_user}" --shell /bin/bash --command "${*}"
else
stream-prefix-command-output "${@}"
fi
# capture exit code
exit_code=$?
# re-enable exit code handling
set -e
if [[ $exit_code != 0 ]]; then
log-error "${error_message_color}❌ Error!${color_clear}"
return "$exit_code"
fi
log-info-stderr "${success_message_color}✅ OK!${color_clear}"
return "$exit_code"
}
# @description Reads lines from stdin (the wrapped command's stdout) and
# re-emits each one through [log-info] with a "(stdout)" marker.
# @see stream-prefix-command-output
function stream-stdout-handler()
{
    local captured_line
    while read -r captured_line; do
        log-info "(stdout) ${captured_line}"
    done
}
# @description Reads lines from stdin (the wrapped command's stderr) and
# re-emits each one through [log-info-stderr] with a colored "(stderr)" marker.
# @see stream-prefix-command-output
function stream-stderr-handler()
{
    local captured_line
    while read -r captured_line; do
        log-info-stderr "(${error_message_color}stderr${color_clear}) ${captured_line}"
    done
}
# @description Stream stdout and stderr from a command with log prefix
# and stdout/stderr prefix. If stdout or stderr is being piped/redirected
# it will automatically fall back to non-prefixed output.
# @arg $@ string The command to run
function stream-prefix-command-output()
{
local stdout=stream-stdout-handler
local stderr=stream-stderr-handler
# if stdout is being piped, print it like normal with echo
# NOTE(review): [stdout= echo >&1 -ne] is a *temporary* per-command
# assignment, so it does not appear to change [$stdout] for the process
# substitution below (ShellCheck SC1007 was deliberately silenced here)
# -- confirm the intended fallback behavior when not attached to a TTY
if [ ! -t 1 ]; then
# shellcheck disable=SC1007
stdout= echo >&1 -ne
fi
# if stderr is being piped, print it like normal with echo
if [ ! -t 2 ]; then
# shellcheck disable=SC1007
stderr= echo >&2 -ne
fi
# Run the command, teeing stdout/stderr into the prefix handlers
"$@" > >($stdout) 2> >($stderr)
}
# @description Print an error message (from arguments, or stdin when piped)
# to stderr with the log prefix.
# @arg $message string A error message.
# @stderr The error message provided with log prefix
function log-error()
{
    local message
    if (($# > 0)); then
        message="$*"
    elif [[ ! -t 0 ]]; then
        read -r message || log-error-and-exit "[${FUNCNAME[0]}] could not read from stdin"
    else
        log-error-and-exit "[${FUNCNAME[0]}] did not receive any input arguments and STDIN is empty"
    fi
    echo -e "${error_message_color}${log_prefix}ERROR -${color_clear} ${message}" >/dev/stderr
}
# @description Print the given error message to stderr, dump the call stack,
#              and terminate the whole script
# @arg $@ string A error message.
# @stderr The error message provided with log prefix
# @exitcode 1
function log-error-and-exit()
{
log-error "$@"
show-call-stack
# hard exit: callers never regain control after this function
exit 1
}
# @description Print a warning message (from arguments, or stdin when piped)
# to stderr with the log prefix.
# @arg $@ string A warning message.
# @stderr The warning message provided with log prefix
function log-warning()
{
    local message
    if (($# > 0)); then
        message="$*"
    elif [[ ! -t 0 ]]; then
        read -r message || log-error-and-exit "[${FUNCNAME[0]}] could not read from stdin"
    else
        log-error-and-exit "[${FUNCNAME[0]}] did not receive any input arguments and STDIN is empty"
    fi
    echo -e "${warn_message_color}${log_prefix}WARNING -${color_clear} ${message}" >/dev/stderr
}
# @description Print an info message (from arguments, or stdin when piped)
# to stdout, unless [ENTRYPOINT_QUIET_LOGS] is set.
# @arg $@ string A info message.
# @stdout The info message provided with log prefix unless $ENTRYPOINT_QUIET_LOGS
function log-info()
{
    local message
    if (($# > 0)); then
        message="$*"
    elif [[ ! -t 0 ]]; then
        read -r message || log-error-and-exit "[${FUNCNAME[0]}] could not read from stdin"
    else
        log-error-and-exit "[${FUNCNAME[0]}] did not receive any input arguments and STDIN is empty"
    fi
    # honor the quiet flag by simply not printing anything
    if [[ -n "${ENTRYPOINT_QUIET_LOGS:-}" ]]; then
        return 0
    fi
    echo -e "${notice_message_color}${log_prefix}${color_clear}${message}"
}
# @description Print an info message (from arguments, or stdin when piped)
# to *stderr*, unless [ENTRYPOINT_QUIET_LOGS] is set.
# @arg $@ string A info message.
# @stderr The info message provided with log prefix unless $ENTRYPOINT_QUIET_LOGS
function log-info-stderr()
{
    local message
    if (($# > 0)); then
        message="$*"
    elif [[ ! -t 0 ]]; then
        read -r message || log-error-and-exit "[${FUNCNAME[0]}] could not read from stdin"
    else
        log-error-and-exit "[${FUNCNAME[0]}] did not receive any input arguments and STDIN is empty"
    fi
    # honor the quiet flag by simply not printing anything
    if [[ -n "${ENTRYPOINT_QUIET_LOGS:-}" ]]; then
        return 0
    fi
    echo -e "${notice_message_color}${log_prefix}${color_clear}${message}" >/dev/stderr
}
# @description Loads the dot-env files used by Docker and track the keys present in the configuration.
# NOTE(review): files are [source]d as shell, so values containing spaces or
# shell metacharacters must be quoted in the .env file -- confirm upstream format
# @sets seen_dot_env_variables array List of config keys discovered during loading
function load-config-files()
{
# Associative array (aka map/dictionary) holding the unique keys found in dot-env files
local -A _tmp_dot_env_keys
for file in "${dot_env_files[@]}"; do
if ! file-exists "${file}"; then
log-warning "Could not source file [${file}]: does not exists"
continue
fi
log-info "Sourcing ${file}"
# shellcheck disable=SC1090
source "${file}"
# find all keys in the dot-env file and store them in our temp associative array
# (comment lines are dropped; everything before the first '=' is the key)
for k in $(grep -v '^#' "${file}" | cut -d"=" -f1 | xargs); do
_tmp_dot_env_keys[$k]=1
done
done
# Used in other scripts (like templating) for [export]-ing the values
#
# shellcheck disable=SC2034
seen_dot_env_variables=("${!_tmp_dot_env_keys[@]}")
}
# @description Checks if $needle exists as an *exact* element of $haystack
#
# BUGFIX: the previous implementation matched the regex [\<needle\>] against
# the space-joined array, which false-positived whenever the needle appeared
# at a word boundary inside an element (e.g. needle [01-permissions] matched
# element [01-permissions.sh]). We now compare each element for equality.
# @arg $1 string The needle (value) to search for
# @arg $2 array The haystack (array name) to search in
# @exitcode 0 If $needle was found in $haystack
# @exitcode 1 If $needle was *NOT* found in $haystack
function in-array()
{
    local -r needle="${1}"
    # nameref to the caller's array (read-only)
    # shellcheck disable=SC2178
    local -nr haystack="${2}"
    local candidate
    for candidate in "${haystack[@]}"; do
        if [[ "${candidate}" == "${needle}" ]]; then
            return 0
        fi
    done
    return 1
}
# @description Checks if $1 has the executable bit set
# @arg $1 string The path to check
# @exitcode 0 If $1 has executable bit
# @exitcode 1 If $1 does *NOT* have executable bit
function is-executable()
{
    test -x "${1}"
}
# @description Checks if $1 is writable by the current user
# @arg $1 string The path to check
# @exitcode 0 If $1 is writable
# @exitcode 1 If $1 is *NOT* writable
function is-writable()
{
    test -w "${1}"
}
# @description Checks if $1 exists (directory or file)
# @arg $1 string The path to check
# @exitcode 0 If $1 exists
# @exitcode 1 If $1 does *NOT* exist
function path-exists()
{
    test -e "${1}"
}
# @description Checks if $1 exists and is a regular file
# @arg $1 string The path to check
# @exitcode 0 If $1 exists
# @exitcode 1 If $1 does *NOT* exist
function file-exists()
{
    test -f "${1}"
}
# @description Checks if $1 contains any files or not.
# A path that does not exist at all also counts as "empty".
# @arg $1 string The path to check
# @exitcode 0 If $1 contains no files
# @exitcode 1 If $1 contains files
function directory-is-empty()
{
    # a missing path is by definition empty
    if [[ ! -e "${1}" ]]; then
        return 0
    fi
    # [ls -A] lists everything except . and .., so any output means non-empty
    [[ -z "$(ls -A "${1}")" ]]
}
# @description Ensures a directory exists (via mkdir -p), logging each created path
# @arg $1 string The path to create
# @exitcode 0 If $1 If the path exists *or* was created
# @exitcode 1 If $1 If the path does *NOT* exists and could *NOT* be created
function ensure-directory-exists()
{
stream-prefix-command-output mkdir -pv "$@"
}
# @description Find the relative path for a entrypoint script by removing the ENTRYPOINT_D_ROOT prefix
# @arg $1 string The path to manipulate
# @stdout The relative path to the entrypoint script
function get-entrypoint-script-name()
{
    # strip the root prefix; paths outside the root pass through unchanged
    printf '%s\n' "${1#"${ENTRYPOINT_D_ROOT}"}"
}
# @description Ensure a command is only run once (via a 'lock' file) in the storage directory.
# The 'lock' is only written if the passed in command ($2) successfully ran.
# NOTE(review): the default [${1:-$script_name}] suggests the name may be
# omitted, but [shift] below unconditionally consumes $1 -- calling this
# without an explicit name would eat the first word of the command (and
# [shift] errors when no arguments are given at all); confirm callers
# always pass a name
# @arg $1 string The name of the lock file
# @arg $@ string The command to run
function only-once()
{
local name="${1:-$script_name}"
local file="${docker_once_path}/${name}"
shift
if [[ -e "${file}" ]]; then
log-info "Command [${*}] has already run once before (remove file [${file}] to run it again)"
return 0
fi
ensure-directory-exists "$(dirname "${file}")"
# only write the marker file when the command succeeded
if ! "$@"; then
return 1
fi
stream-prefix-command-output touch "${file}"
return 0
}
# @description Best effort file lock to ensure *something* is not running in multiple containers.
# The script uses "trap" to clean up after itself if the script crashes
# @arg $1 string The lock identifier
function acquire-lock()
{
local name="${1:-$script_name}"
local file="${docker_locks_path}/${name}"
local lock_fd
ensure-directory-exists "$(dirname "${file}")"
# open a dedicated file descriptor for the lock file (fd number assigned by bash)
exec {lock_fd}>"$file"
log-info "🔑 Trying to acquire lock: ${file}: "
# spin (with jitter) until the flock is obtained; if this process already
# holds the lock for [name], skip re-locking (re-entrant behavior)
while ! ([[ -v lock_fds[$name] ]] || flock -n -x "$lock_fd"); do
log-info "🔒 Waiting on lock ${file}"
staggered-sleep
done
# remember the fd so [release-lock] can unlock it later
[[ -v lock_fds[$name] ]] || lock_fds[$name]=$lock_fd
log-info "🔐 Lock acquired [${file}]"
# make sure the lock is released even on crash/signal
on-trap "release-lock ${name}" EXIT INT QUIT TERM
}
# @description Release a lock acquired by [acquire-lock]
# @arg $1 string The lock identifier
function release-lock()
{
local name="${1:-$script_name}"
local file="${docker_locks_path}/${name}"
log-info "🔓 Releasing lock [${file}]"
# nothing to do if we never acquired this lock
[[ -v lock_fds[$name] ]] || return
# shellcheck disable=SC1083,SC2086
flock --unlock ${lock_fds[$name]}
unset 'lock_fds[$name]'
}
# @description Helper function to append multiple actions onto
# the bash [trap] logic, preserving any previously-registered command
# @arg $1 string The command to run
# @arg $@ string The list of trap signals to register
function on-trap()
{
local trap_add_cmd=$1
shift || log-error-and-exit "${FUNCNAME[0]} usage error"
for trap_add_name in "$@"; do
trap -- "$(
# helper fn to get existing trap command from output
# of trap -p
#
# shellcheck disable=SC2317
extract_trap_cmd()
{
printf '%s\n' "${3:-}"
}
# print existing trap command with newline
eval "extract_trap_cmd $(trap -p "${trap_add_name}")"
# print the new trap command
printf '%s\n' "${trap_add_cmd}"
)" "${trap_add_name}" \
|| log-error-and-exit "unable to add to trap ${trap_add_name}"
done
}
# Set the trace attribute for the above function.
#
# This is required to modify DEBUG or RETURN traps because functions don't
# inherit them unless the trace attribute is set
declare -f -t on-trap
# @description Waits for the database to be healthy and responsive,
# retrying (with jitter) until the configured backend accepts a query
function await-database-ready()
{
log-info "❓ Waiting for database to be ready"
# refresh DB_* settings from the dot-env files before connecting
load-config-files
case "${DB_CONNECTION:-}" in
mysql)
# shellcheck disable=SC2154
while ! echo "SELECT 1" | mysql --user="${DB_USERNAME}" --password="${DB_PASSWORD}" --host="${DB_HOST}" "${DB_DATABASE}" --silent >/dev/null; do
staggered-sleep
done
;;
pgsql)
# shellcheck disable=SC2154
while ! echo "SELECT 1" | PGPASSWORD="${DB_PASSWORD}" psql --user="${DB_USERNAME}" --host="${DB_HOST}" "${DB_DATABASE}" >/dev/null; do
staggered-sleep
done
;;
sqlsrv)
log-warning "Don't know how to check if SQLServer is *truely* ready or not - so will just check if we're able to connect to it"
# TCP-connect probe via bash's /dev/tcp pseudo-device
# shellcheck disable=SC2154
while ! timeout 1 bash -c "cat < /dev/null > /dev/tcp/${DB_HOST}/${DB_PORT}"; do
staggered-sleep
done
;;
sqlite)
# file-based database, nothing to wait for
log-info "${success_message_color}sqlite is always ready${color_clear}"
;;
*)
log-error-and-exit "Unknown database type: [${DB_CONNECTION:-}]"
;;
esac
log-info "${success_message_color}✅ Successfully connected to database${color_clear}"
}
# @description sleeps between 1 and 3 seconds to ensure a bit of randomness
# in multiple scripts/containers doing work almost at the same time.
function staggered-sleep()
{
    # shuf picks one uniformly random integer from the inclusive range 1-3
    sleep "$(shuf -i 1-3 -n 1)"
}
# @description Helper function to get a random number between $1 and $2
# @arg $1 int Minimum number in the range (inclusive, defaults to 1)
# @arg $2 int Maximum number in the range (inclusive, defaults to 10)
function get-random-number-between()
{
    local -i lower="${1:-1}"
    local -i upper="${2:-10}"
    shuf --input-range "${lower}-${upper}" --head-count 1
}
# @description Helper function to show the bask call stack when something
# goes wrong. Is super useful when needing to debug an issue
function show-call-stack()
{
local stack_size=${#FUNCNAME[@]}
local func
local lineno
local src
# to avoid noise we start with 1 to skip the get_stack function
for ((i = 1; i < stack_size; i++)); do
func="${FUNCNAME[$i]}"
[ -z "$func" ] && func="MAIN"
lineno="${BASH_LINENO[$((i - 1))]}"
src="${BASH_SOURCE[$i]}"
[ -z "$src" ] && src="non_file_source"
log-error " at: ${func} ${src}:${lineno}"
done
}
# @description Helper function see if $1 could be considered truthy
# returns [0] if input is truthy, otherwise [1]
# @arg $1 string The string to evaluate
# @see as-boolean
function is-true()
{
    as-boolean "${1:-}"
}
# @description Helper function see if $1 could be considered falsey
# returns [0] if input is falsey, otherwise [1]
# @arg $1 string The string to evaluate
# @see as-boolean
function is-false()
{
    ! as-boolean "${1:-}"
}
# @description Decide whether $1 is truthy or falsey in *bash exit-code*
# terms: returning 0 means "true" and 1 means "false", so the function
# composes directly with [if]/[&&]/[||].
#
# This is a bit confusing, *especially* in a PHP world where [1] would be
# truthy and [0] would be falsely as return values.
# Unrecognized values are treated as false (with a warning).
# @arg $1 string The string to evaluate
function as-boolean()
{
    local value="${1:-}"
    # normalize to lower-case so the comparison is case-insensitive
    value="${value,,}"
    case "${value}" in
        1 | true)
            return 0
            ;;
        0 | false)
            return 1
            ;;
    esac
    log-warning "[as-boolean] variable [${value}] could not be detected as true or false, returning [1] (false) as default"
    return 1
}

View file

@ -0,0 +1,61 @@
#!/bin/bash
# Installs the base OS packages for the Pixelfed image (build-time script)
set -ex -o errexit -o nounset -o pipefail
# Ensure we keep apt cache around in a Docker environment
rm -f /etc/apt/apt.conf.d/docker-clean
echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
# Don't install recommended packages by default
echo 'APT::Install-Recommends "false";' >> /etc/apt/apt.conf
# Don't install suggested packages by default
echo 'APT::Install-Suggests "false";' >> /etc/apt/apt.conf
declare -a packages=()
# Standard packages
packages+=(
apt-utils
ca-certificates
curl
git
gnupg1
gosu
locales
locales-all
moreutils
nano
procps
software-properties-common
unzip
wget
zip
)
# Image Optimization
packages+=(
gifsicle
jpegoptim
optipng
pngquant
)
# Video Processing
packages+=(
ffmpeg
)
# Database
packages+=(
mariadb-client
postgresql-client
)
# Append any extra packages requested at build time.
# NOTE(review): [-d ' '] splits on single spaces only, so the variable is
# expected to be a single-space-separated list -- confirm build args
readarray -d ' ' -t -O "${#packages[@]}" packages < <(echo -n "${APT_PACKAGES_EXTRA:-}")
apt-get update
apt-get upgrade -y
apt-get install -y "${packages[@]}"
# generate the locales enabled in /etc/locale.gen and refresh defaults
locale-gen
update-locale

View file

@ -0,0 +1,27 @@
#!/bin/bash
# Installs PECL + PHP extensions for the Pixelfed image (build-time script)
set -ex -o errexit -o nounset -o pipefail
# Collect PECL extensions from the base list plus any build-time extras
# (single-space-separated lists)
declare -a pecl_extensions=()
readarray -d ' ' -t pecl_extensions < <(echo -n "${PHP_PECL_EXTENSIONS:-}")
readarray -d ' ' -t -O "${#pecl_extensions[@]}" pecl_extensions < <(echo -n "${PHP_PECL_EXTENSIONS_EXTRA:-}")
# Collect PHP extensions the same way, plus the database driver set
declare -a php_extensions=()
readarray -d ' ' -t php_extensions < <(echo -n "${PHP_EXTENSIONS:-}")
readarray -d ' ' -t -O "${#php_extensions[@]}" php_extensions < <(echo -n "${PHP_EXTENSIONS_EXTRA:-}")
readarray -d ' ' -t -O "${#php_extensions[@]}" php_extensions < <(echo -n "${PHP_EXTENSIONS_DATABASE:-}")
# Optional script folks can copy into their image to do any [docker-php-ext-configure] work before the [docker-php-ext-install]
# this can also overwrite the [gd] configure above by simply running it again
# NOTE(review): the path is an empty string, so the [-e] check below can
# never succeed and this hook is effectively disabled -- presumably it
# should point at a well-known path; confirm the intended value
declare -r custom_pre_configure_script=""
if [[ -e "${custom_pre_configure_script}" ]]; then
if [ ! -x "${custom_pre_configure_script}" ]; then
echo >&2 "ERROR: found ${custom_pre_configure_script} but its not executable - please [chmod +x] the file!"
exit 1
fi
"${custom_pre_configure_script}"
fi
# PECL + PHP extensions
IPE_KEEP_SYSPKG_CACHE=1 install-php-extensions "${pecl_extensions[@]}" "${php_extensions[@]}"

View file

@ -0,0 +1,16 @@
###########################################################
# DO NOT CHANGE
###########################################################
# This file is generated by the Pixelfed Docker setup, and
# will be rewritten on every container start
#
# You can put any [.conf] file in this directory
# (docker-compose-state/config/proxy/conf.d) and it will
# be loaded by nginx on startup.
#
# Run [docker compose exec proxy bash -c 'nginx -t && nginx -s reload']
# to test your config and reload the proxy
#
# See: https://github.com/nginx-proxy/nginx-proxy/blob/main/docs/README.md#custom-nginx-configuration
client_max_body_size {{ getenv "POST_MAX_SIZE" "61M" }};

View file

@ -376,7 +376,7 @@ zend.exception_ignore_args = On
; threat in any way, but it makes it possible to determine whether you use PHP ; threat in any way, but it makes it possible to determine whether you use PHP
; on your server or not. ; on your server or not.
; http://php.net/expose-php ; http://php.net/expose-php
expose_php = On expose_php = Off
;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;
; Resource Limits ; ; Resource Limits ;
@ -406,7 +406,7 @@ max_input_time = 60
; Maximum amount of memory a script may consume (128MB) ; Maximum amount of memory a script may consume (128MB)
; http://php.net/memory-limit ; http://php.net/memory-limit
memory_limit = 128M memory_limit = {{ getenv "DOCKER_APP_PHP_MEMORY_LIMIT" "128M" }}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Error handling and logging ; ; Error handling and logging ;
@ -462,7 +462,7 @@ memory_limit = 128M
; Development Value: E_ALL ; Development Value: E_ALL
; Production Value: E_ALL & ~E_DEPRECATED & ~E_STRICT ; Production Value: E_ALL & ~E_DEPRECATED & ~E_STRICT
; http://php.net/error-reporting ; http://php.net/error-reporting
error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT error_reporting = {{ getenv "DOCKER_APP_PHP_ERROR_REPORTING" "E_ALL & ~E_DEPRECATED & ~E_STRICT" }}
; This directive controls whether or not and where PHP will output errors, ; This directive controls whether or not and where PHP will output errors,
; notices and warnings too. Error output is very useful during development, but ; notices and warnings too. Error output is very useful during development, but
@ -479,7 +479,7 @@ error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT
; Development Value: On ; Development Value: On
; Production Value: Off ; Production Value: Off
; http://php.net/display-errors ; http://php.net/display-errors
display_errors = Off display_errors = {{ getenv "DOCKER_APP_PHP_DISPLAY_ERRORS" "off" }}
; The display of errors which occur during PHP's startup sequence are handled ; The display of errors which occur during PHP's startup sequence are handled
; separately from display_errors. We strongly recommend you set this to 'off' ; separately from display_errors. We strongly recommend you set this to 'off'
@ -488,7 +488,7 @@ display_errors = Off
; Development Value: On ; Development Value: On
; Production Value: Off ; Production Value: Off
; http://php.net/display-startup-errors ; http://php.net/display-startup-errors
display_startup_errors = Off display_startup_errors = {{ getenv "DOCKER_APP_PHP_DISPLAY_ERRORS" "off" }}
; Besides displaying errors, PHP can also log errors to locations such as a ; Besides displaying errors, PHP can also log errors to locations such as a
; server-specific log, STDERR, or a location specified by the error_log ; server-specific log, STDERR, or a location specified by the error_log
@ -570,8 +570,9 @@ report_memleaks = On
; Log errors to specified file. PHP's default behavior is to leave this value ; Log errors to specified file. PHP's default behavior is to leave this value
; empty. ; empty.
; http://php.net/error-log ; http://php.net/error-log
; Example: ;
;error_log = php_errors.log ; NOTE: Write error log to stderr (/proc/self/fd/2 -> /dev/stderr)
error_log = /proc/self/fd/2
; Log errors to syslog (Event Log on Windows). ; Log errors to syslog (Event Log on Windows).
;error_log = syslog ;error_log = syslog
@ -679,7 +680,7 @@ auto_globals_jit = On
; Its value may be 0 to disable the limit. It is ignored if POST data reading ; Its value may be 0 to disable the limit. It is ignored if POST data reading
; is disabled through enable_post_data_reading. ; is disabled through enable_post_data_reading.
; http://php.net/post-max-size ; http://php.net/post-max-size
post_max_size = 64M post_max_size = {{ getenv "POST_MAX_SIZE" "61M" }}
; Automatically add files before PHP document. ; Automatically add files before PHP document.
; http://php.net/auto-prepend-file ; http://php.net/auto-prepend-file
@ -831,10 +832,10 @@ file_uploads = On
; Maximum allowed size for uploaded files. ; Maximum allowed size for uploaded files.
; http://php.net/upload-max-filesize ; http://php.net/upload-max-filesize
upload_max_filesize = 64M upload_max_filesize = {{ getenv "POST_MAX_SIZE" "61M" }}
; Maximum number of files that can be uploaded via a single request ; Maximum number of files that can be uploaded via a single request
max_file_uploads = 20 max_file_uploads = {{ getenv "MAX_ALBUM_LENGTH" "4" }}
;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;
; Fopen wrappers ; ; Fopen wrappers ;
@ -947,7 +948,7 @@ cli_server.color = On
[Date] [Date]
; Defines the default timezone used by the date functions ; Defines the default timezone used by the date functions
; http://php.net/date.timezone ; http://php.net/date.timezone
;date.timezone = date.timezone = {{ getenv "TZ" "UTC" }}
; http://php.net/date.default-latitude ; http://php.net/date.default-latitude
;date.default_latitude = 31.7667 ;date.default_latitude = 31.7667
@ -1735,7 +1736,7 @@ ldap.max_links = -1
[opcache] [opcache]
; Determines if Zend OPCache is enabled ; Determines if Zend OPCache is enabled
;opcache.enable=1 opcache.enable={{ getenv "DOCKER_APP_PHP_OPCACHE_ENABLE" "1" }}
; Determines if Zend OPCache is enabled for the CLI version of PHP ; Determines if Zend OPCache is enabled for the CLI version of PHP
;opcache.enable_cli=0 ;opcache.enable_cli=0
@ -1761,12 +1762,12 @@ ldap.max_links = -1
; When disabled, you must reset the OPcache manually or restart the ; When disabled, you must reset the OPcache manually or restart the
; webserver for changes to the filesystem to take effect. ; webserver for changes to the filesystem to take effect.
;opcache.validate_timestamps=1 opcache.validate_timestamps={{ getenv "DOCKER_APP_PHP_OPCACHE_VALIDATE_TIMESTAMPS" "0" }}
; How often (in seconds) to check file timestamps for changes to the shared ; How often (in seconds) to check file timestamps for changes to the shared
; memory storage allocation. ("1" means validate once per second, but only ; memory storage allocation. ("1" means validate once per second, but only
; once per request. "0" means always validate) ; once per request. "0" means always validate)
;opcache.revalidate_freq=2 opcache.revalidate_freq={{ getenv "DOCKER_APP_PHP_OPCACHE_REVALIDATE_FREQ" "2" }}
; Enables or disables file search in include_path optimization ; Enables or disables file search in include_path optimization
;opcache.revalidate_path=0 ;opcache.revalidate_path=0

View file

17
docker/shell Executable file
View file

@ -0,0 +1,17 @@
#!/bin/bash
# Convenience wrapper: open a shell (or run a command) inside a running
# docker compose service container.
#
# Environment:
#   PF_SERVICE - compose service to exec into (default: worker)
#   PF_USER    - user to run as inside the container (default: www-data)
declare service="${PF_SERVICE:=worker}"
declare user="${PF_USER:=www-data}"
# default to an interactive bash unless a command was given
declare -a command=("bash")
if [[ $# -ge 1 ]]; then
command=("$@")
fi
# pass TERM/COLORTERM through so the in-container shell renders correctly
exec docker compose exec \
--user "${user}" \
--env TERM \
--env COLORTERM \
"${service}" \
"${command[@]}"

123
goss.yaml Normal file
View file

@ -0,0 +1,123 @@
# See: https://github.com/goss-org/goss/blob/master/docs/manual.md#goss-manual
package:
curl: { installed: true }
ffmpeg: { installed: true }
gifsicle: { installed: true }
gosu: { installed: true }
jpegoptim: { installed: true }
locales-all: { installed: true }
locales: { installed: true }
mariadb-client: { installed: true }
nano: { installed: true }
optipng: { installed: true }
pngquant: { installed: true }
postgresql-client: { installed: true }
unzip: { installed: true }
wget: { installed: true }
zip: { installed: true }
user:
www-data:
exists: true
uid: 33
gid: 33
groups:
- www-data
home: /var/www
shell: /usr/sbin/nologin
command:
php-version:
exit-status: 0
exec: 'php -v'
stdout:
- PHP {{ .Env.EXPECTED_PHP_VERSION }}
stderr: []
php-extensions:
exit-status: 0
exec: 'php -m'
stdout:
- bcmath
- Core
- ctype
- curl
- date
- dom
- exif
- fileinfo
- filter
- gd
- hash
- iconv
- imagick
- intl
- json
- libxml
- mbstring
- mysqlnd
- openssl
- pcntl
- pcre
- PDO
- pdo_mysql
- pdo_pgsql
- pdo_sqlite
- Phar
- posix
- readline
- redis
- Reflection
- session
- SimpleXML
- sodium
- SPL
- sqlite3
- standard
- tokenizer
- xml
- xmlreader
- xmlwriter
- zip
- zlib
stderr: []
forego-version:
exit-status: 0
exec: 'forego version'
stdout:
- dev
stderr: []
gomplate-version:
exit-status: 0
exec: 'gomplate -v'
stdout:
- gomplate version
stderr: []
gosu-version:
exit-status: 0
exec: 'gosu -v'
stdout:
- '1.12'
stderr: []
{{ if eq .Env.PHP_BASE_TYPE "nginx" }}
nginx-version:
exit-status: 0
exec: 'nginx -v'
stdout: []
stderr:
- 'nginx version: nginx'
{{ end }}
{{ if eq .Env.PHP_BASE_TYPE "apache" }}
apache-version:
exit-status: 0
exec: 'apachectl -v'
stdout:
- 'Server version: Apache/'
stderr: []
{{ end }}

103
tests/bats/helpers.bats Normal file
View file

@ -0,0 +1,103 @@
# Bats bootstrap: resolve the repository root relative to this test file
# and load the helper library under test into the test shell
setup() {
DIR="$(cd "$(dirname "${BATS_TEST_FILENAME:-}")" >/dev/null 2>&1 && pwd)"
ROOT="$(dirname "$(dirname "$DIR")")"
load "$ROOT/docker/shared/root/docker/helpers.sh"
}
# Clean up the scratch directory some tests create in the working directory
teardown() {
if [[ -e test_dir ]]; then
rm -rf test_dir
fi
}
# --- is-true / is-false accept case-insensitive boolean strings ---
@test "test [is-true]" {
is-true "1"
is-true "true"
is-true "TrUe"
}
@test "test [is-false]" {
is-false "0"
is-false "false"
is-false "FaLsE"
}
# --- the helpers also compose correctly inside [if] expressions ---
@test "test [is-false-expressions-0]" {
if is-false "0"; then
return 0
fi
return 1
}
@test "test [is-false-expressions-false]" {
if is-false "false"; then
return 0
fi
return 1
}
@test "test [is-false-expressions-FaLse]" {
if is-false "FaLse"; then
return 0
fi
return 1
}
# unrecognized values default to false, so [is-false] is expected to succeed
@test "test [is-false-expressions-invalid]" {
if is-false "invalid"; then
return 0
fi
return 1
}
@test "test [is-true-expressions-1]" {
if is-true "1"; then
return 0
fi
return 1
}
@test "test [is-true-expressions-true]" {
if is-true "true"; then
return 0
fi
return 1
}
@test "test [is-true-expressions-TrUE]" {
if is-true "TrUE"; then
return 0
fi
return 1
}
# --- directory-is-empty: missing and empty paths count as empty ---
@test "test [directory-is-empty] - non existing" {
directory-is-empty test_dir
}
@test "test [directory-is-empty] - actually empty" {
mkdir -p test_dir
directory-is-empty test_dir
}
@test "test [directory-is-empty] - not empty (directory)" {
mkdir -p test_dir/sub-dir
! directory-is-empty test_dir
}
@test "test [directory-is-empty] - not empty (file)" {
mkdir -p test_dir/
touch test_dir/hello-world.txt
! directory-is-empty test_dir
}