diff --git a/Dockerfile b/Dockerfile
index e99e863..b0397b9 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,47 +1,40 @@
-FROM postgres:11
-
-ARG DEBIAN_FRONTEND=noninteractive
-ARG BUILD_DEPS='gcc git libffi-dev libssl-dev python3-dev python3-pip python3-wheel'
-
-RUN apt-get update && \
-    apt-get install -y --no-install-recommends \
-        $BUILD_DEPS \
-        gosu \
-        lzop \
-        libpq-dev \
-        pv \
-        python3 \
-        util-linux \
-        # swift package needs pkg_resources and setuptools
-        python3-pkg-resources \
-        python3-setuptools \
-        python3-pip && \
-    ln -sf /usr/bin/python3 /usr/bin/python && \
-    ln -sf /usr/bin/pip3 /usr/bin/pip
-
-# setuptools from ubuntu archives is too old for googleapis-common-protos
-RUN pip install --upgrade setuptools && \
-    pip install --disable-pip-version-check --no-cache-dir \
-        envdir==1.0.1 \
-        wal-e[aws,azure,google,swift]==1.1.0 \
-        gcloud==0.18.3 \
-        oauth2client==4.1.3 \
-        azure-storage==0.20.0
-
-# cleanup
-RUN apt-get purge -y --auto-remove $BUILD_DEPS && \
-    apt-get autoremove -y && \
-    apt-get clean -y
+FROM postgres:11-alpine
+
+ENV WALE_LOG_DESTINATION stderr
+ENV WALE_ENVDIR /etc/wal-e.d/env
+
+RUN mkdir -p $WALE_ENVDIR \
+  && echo 'http://dl-cdn.alpinelinux.org/alpine/v3.5/main' >> /etc/apk/repositories \
+  && apk add --update --virtual .build-deps \
+    git \
+    build-base \
+    libffi-dev \
+    openssl-dev \
+    python3-dev=3.5.6-r0 \
+    linux-headers \
+  && apk add \
+    lzo \
+    pv \
+    util-linux \
+    ca-certificates \
+    python3=3.5.6-r0 \
+  && pip3 install --upgrade pip setuptools \
+  && pip install --disable-pip-version-check --no-cache-dir \
+    psycopg2-binary==2.7.6.1 \
+    envdir==1.0.1 \
+    wal-e[aws,azure,google,swift]==1.1.0 \
+    gcloud==0.18.3 \
+    oauth2client==4.1.3 \
+    azure-storage==0.20.0 \
+  && apk del .build-deps \
+  && rm -rf /var/cache/apk/*
 
 COPY rootfs /
-ENV WALE_ENVDIR=/etc/wal-e.d/env
-RUN mkdir -p $WALE_ENVDIR
 
 ARG PATCH_CMD="python3 /patcher-script.py"
 RUN $PATCH_CMD file /bin/create_bucket /patcher-script.d/patch_boto_s3.py
-RUN $PATCH_CMD file /usr/local/bin/wal-e /patcher-script.d/patch_boto_s3.py
+RUN $PATCH_CMD module wal_e.cmd /patcher-script.d/patch_boto_s3.py
 RUN $PATCH_CMD module wal_e.worker.worker_util /patcher-script.d/patch_wal_e_s3.py
-
 CMD ["/docker-entrypoint.sh", "postgres"]
 
 EXPOSE 5432
diff --git a/charts/database/templates/database-deployment.yaml b/charts/database/templates/database-deployment.yaml
index e588c09..41f145a 100644
--- a/charts/database/templates/database-deployment.yaml
+++ b/charts/database/templates/database-deployment.yaml
@@ -41,11 +41,15 @@ spec:
             value: "{{.Values.global.storage}}"
           - name: PGCTLTIMEOUT
             value: "{{.Values.postgres.timeout}}"
+{{- if eq .Values.global.storage "s3" }}
+          - name: S3_SSE
+            value: "{{.Values.s3.use_sse}}"
+{{- end}}
         lifecycle:
           preStop:
             exec:
               command:
-                - gosu
+                - su-exec
                 - postgres
                 - do_backup
         readinessProbe:
diff --git a/rootfs/bin/create_bucket b/rootfs/bin/create_bucket
index e9caec9..c03dfe9 100755
--- a/rootfs/bin/create_bucket
+++ b/rootfs/bin/create_bucket
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 import os
 
diff --git a/rootfs/bin/do_backup b/rootfs/bin/do_backup
index 10dcc6d..cd5b476 100755
--- a/rootfs/bin/do_backup
+++ b/rootfs/bin/do_backup
@@ -5,6 +5,7 @@ export BACKUPS_TO_RETAIN=${BACKUPS_TO_RETAIN:-5}
 echo "Performing a base backup..."
 if [[ -f "$PGDATA/recovery.conf" ]] ; then
   echo "Database is currently recovering from a backup. Aborting"
+  sleep 9
 else
   # perform a backup
   envdir "$WALE_ENVDIR" wal-e backup-push "$PGDATA"
diff --git a/rootfs/bin/is_running b/rootfs/bin/is_running
index 4f66c3a..dd7aed5 100755
--- a/rootfs/bin/is_running
+++ b/rootfs/bin/is_running
@@ -8,4 +8,4 @@ if [[ -f "$PGDATA/recovery.conf" ]]; then
   exit 1
 fi
 
-gosu postgres pg_ctl status
+su-exec postgres pg_ctl status
diff --git a/rootfs/docker-entrypoint-initdb.d/001_setup_envdir.sh b/rootfs/docker-entrypoint-initdb.d/001_setup_envdir.sh
index e2c00af..d18904f 100755
--- a/rootfs/docker-entrypoint-initdb.d/001_setup_envdir.sh
+++ b/rootfs/docker-entrypoint-initdb.d/001_setup_envdir.sh
@@ -17,6 +17,11 @@ if [[ "$DATABASE_STORAGE" == "s3" || "$DATABASE_STORAGE" == "minio" ]]; then
   else
     echo "https+path://s3-${AWS_REGION}.amazonaws.com:443" > WALE_S3_ENDPOINT
   fi
+  if [[ $S3_SSE ]]; then
+    echo $S3_SSE > WALE_S3_SSE
+  else
+    echo "false" > WALE_S3_SSE
+  fi
 else
   AWS_REGION="us-east-1"
   BUCKET_NAME="dbwal"
diff --git a/rootfs/docker-entrypoint-initdb.d/003_restore_from_backup.sh b/rootfs/docker-entrypoint-initdb.d/003_restore_from_backup.sh
index 5846881..a46c63d 100755
--- a/rootfs/docker-entrypoint-initdb.d/003_restore_from_backup.sh
+++ b/rootfs/docker-entrypoint-initdb.d/003_restore_from_backup.sh
@@ -13,13 +13,13 @@ chmod 0700 "$PGDATA"
 
 # reboot the server for wal_level to be set before backing up
 echo "Rebooting postgres to enable archive mode"
-gosu postgres pg_ctl -D "$PGDATA" -w restart
+su-exec postgres pg_ctl -D "$PGDATA" -w restart
 
 # check if there are any backups -- if so, let's restore
 # we could probably do better than just testing number of lines -- one line is just a heading, meaning no backups
 if [[ $(envdir "$WALE_ENVDIR" wal-e --terse backup-list | wc -l) -gt "1" ]]; then
   echo "Found backups. Restoring from backup..."
-  gosu postgres pg_ctl -D "$PGDATA" -w stop
+  su-exec postgres pg_ctl -D "$PGDATA" -w stop
   rm -rf "$PGDATA/*"
   envdir "$WALE_ENVDIR" wal-e backup-fetch "$PGDATA" LATEST
   cat << EOF > "$PGDATA/postgresql.conf"
@@ -50,20 +50,11 @@ EOF
   echo "restore_command = 'envdir /etc/wal-e.d/env wal-e wal-fetch \"%f\" \"%p\"'" >> "$PGDATA/recovery.conf"
   chown -R postgres:postgres "$PGDATA"
   chmod 0700 "$PGDATA"
-  gosu postgres pg_ctl -D "$PGDATA" \
+  su-exec postgres pg_ctl -D "$PGDATA" \
     -o "-c listen_addresses=''" \
     -w start
-
-  echo "Waiting for recovery completion..."
-  while [ ! -f "$PGDATA/recovery.done" ]
-  do
-    sleep 2
-  done
 fi
 
-echo "Performing an initial backup..."
-gosu postgres envdir "$WALE_ENVDIR" wal-e backup-push "$PGDATA"
-
 # ensure $PGDATA has the right permissions
 chown -R postgres:postgres "$PGDATA"
 chmod 0700 "$PGDATA"
diff --git a/rootfs/docker-entrypoint-initdb.d/004_run_backups.sh b/rootfs/docker-entrypoint-initdb.d/004_run_backups.sh
index d311bd4..54f7592 100755
--- a/rootfs/docker-entrypoint-initdb.d/004_run_backups.sh
+++ b/rootfs/docker-entrypoint-initdb.d/004_run_backups.sh
@@ -1,4 +1,4 @@
 #!/usr/bin/env bash
 
 # Run periodic backups in the background
-gosu postgres backup &
+su-exec postgres backup &
diff --git a/rootfs/docker-entrypoint.sh b/rootfs/docker-entrypoint.sh
index 0913006..4ca4784 100755
--- a/rootfs/docker-entrypoint.sh
+++ b/rootfs/docker-entrypoint.sh
@@ -23,7 +23,7 @@ if [ "$1" = 'postgres' ]; then
 
 	# look specifically for PG_VERSION, as it is expected in the DB dir
 	if [ ! -s "$PGDATA/PG_VERSION" ]; then
-		gosu postgres initdb
+		su-exec postgres initdb
 
 		# check password first so we can output the warning before postgres
 		# messes it up
@@ -54,7 +54,7 @@ if [ "$1" = 'postgres' ]; then
 
 	# internal start of server in order to allow set-up using psql-client
 	# does not listen on TCP/IP and waits until start finishes
-	gosu postgres pg_ctl -D "$PGDATA" \
+	su-exec postgres pg_ctl -D "$PGDATA" \
 		-o "-c listen_addresses=''" \
 		-w start
 
@@ -94,7 +94,7 @@ if [ "$1" = 'postgres' ]; then
 		echo
 	done
 
-	gosu postgres pg_ctl -D "$PGDATA" -m fast -w stop
+	su-exec postgres pg_ctl -D "$PGDATA" -m fast -w stop
 	set_listen_addresses '*'
 
 	echo
@@ -102,7 +102,7 @@ if [ "$1" = 'postgres' ]; then
 		echo
 	fi
 
-	exec gosu postgres "$@"
+	exec su-exec postgres "$@"
 fi
 
 exec "$@"
diff --git a/rootfs/patcher-script.d/patch_wal_e_s3.py b/rootfs/patcher-script.d/patch_wal_e_s3.py
index 1b2d4ea..fd248b3 100644
--- a/rootfs/patcher-script.d/patch_wal_e_s3.py
+++ b/rootfs/patcher-script.d/patch_wal_e_s3.py
@@ -7,10 +7,10 @@ def wrap_uri_put_file(creds, uri, fp, content_type=None, conn=None):
         k = s3_util._uri_to_key(creds, uri, conn=conn)
         if content_type is not None:
             k.content_type = content_type
+        encrypt_key = False
         if os.getenv('DATABASE_STORAGE') == 's3':
-            encrypt_key=True
-        else:
-            encrypt_key=False
+            if os.getenv('WALE_S3_SSE', 'false') == 'true':
+                encrypt_key = True
         k.set_contents_from_file(fp, encrypt_key=encrypt_key)
         return k
     s3.uri_put_file = wrap_uri_put_file