From 23f709b6018cc10ee6581c45cd11d04b1a5eb69e Mon Sep 17 00:00:00 2001 From: Petr Kubat Date: Mar 24 2020 10:16:16 +0000 Subject: sync with latest upstream source --- diff --git a/11 b/11 new file mode 120000 index 0000000..945c9b4 --- /dev/null +++ b/11 @@ -0,0 +1 @@ +. \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index cce3929..9533173 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM registry.fedoraproject.org/fedora:26 +FROM registry.fedoraproject.org/f31/s2i-core:latest # PostgreSQL image for OpenShift. # Volumes: @@ -10,45 +10,72 @@ FROM registry.fedoraproject.org/fedora:26 # * $POSTGRESQL_ADMIN_PASSWORD (Optional) - Password for the 'postgres' # PostgreSQL administrative account -ENV POSTGRESQL_VERSION=9.6 \ +ENV NAME=postgresql \ + VERSION=0 \ + ARCH=x86_64 \ + \ + POSTGRESQL_VERSION=11 \ + POSTGRESQL_PREV_VERSION=10 \ HOME=/var/lib/pgsql \ - PGUSER=postgres + PGUSER=postgres \ + APP_DATA=/opt/app-root -LABEL io.k8s.description="PostgreSQL is an advanced Object-Relational database management system" \ - io.k8s.display-name="PostgreSQL 9.6" \ +ENV SUMMARY="PostgreSQL is an advanced Object-Relational database management system" \ + DESCRIPTION="PostgreSQL is an advanced Object-Relational database management system (DBMS). \ +The image contains the client and server programs that you'll need to \ +create, run, maintain and access a PostgreSQL DBMS server." + +LABEL summary="$SUMMARY" \ + description="$DESCRIPTION" \ + io.k8s.description="$DESCRIPTION" \ + io.k8s.display-name="PostgreSQL 11" \ io.openshift.expose-services="5432:postgresql" \ - io.openshift.tags="database,postgresql,postgresql96" \ - com.redhat.component="postgresql" \ - maintainer="Pavel Raiskup " \ - name="$FCG/postgresql" \ + io.openshift.tags="database,postgresql,postgresql11" \ + com.redhat.component="$NAME" \ + maintainer="SoftwareCollections.org " \ + name="$FGC/$NAME" \ version="0" \ - release="1.$DISTTAG" \ - architecture="x86_64" \ - usage="Run without arguments to get usage info." \ - help="/help.1" + usage="docker run -d --name postgresql_database -e POSTGRESQL_USER=user -e POSTGRESQL_PASSWORD=pass -e POSTGRESQL_DATABASE=db -p 5432:5432 $FGC/$NAME" EXPOSE 5432 -ADD root / +COPY root/usr/libexec/fix-permissions /usr/libexec/fix-permissions # This image must forever use UID 26 for postgres user so our volumes are # safe in the future. This should *never* change, the last test is there # to make sure of that. RUN INSTALL_PKGS="rsync tar gettext bind-utils postgresql-server postgresql-contrib nss_wrapper " && \ - INSTALL_PKGS+="findutils python2 " && \ + INSTALL_PKGS+="findutils xz" && \ + dnf -y module enable postgresql:11 && \ dnf -y --setopt=tsflags=nodocs install $INSTALL_PKGS && \ rpm -V $INSTALL_PKGS && \ dnf clean all && \ test "$(id postgres)" = "uid=26(postgres) gid=26(postgres) groups=26(postgres)" && \ mkdir -p /var/lib/pgsql/data && \ - /usr/libexec/fix-permissions /var/lib/pgsql && \ - /usr/libexec/fix-permissions /var/run/postgresql + /usr/libexec/fix-permissions /var/lib/pgsql /var/run/postgresql # Get prefix path and path to scripts rather than hard-code them in scripts ENV CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/postgresql +COPY root / +COPY ./s2i/bin/ $STI_SCRIPTS_PATH + VOLUME ["/var/lib/pgsql/data"] +# S2I permission fixes +# -------------------- +# 1. unless specified otherwise (or - equivalently - we are in OpenShift), s2i +# build process would be executed as 'uid=26(postgres) gid=26(postgres)'. 
+# Such process wouldn't be able to execute the default 'assemble' script +# correctly (it transitively executes 'fix-permissions' script). So let's +# add the 'postgres' user into 'root' group here +# +# 2. we call fix-permissions on $APP_DATA here directly (UID=0 during build +# anyways) to assure that s2i process is actually able to _read_ the +# user-specified scripting. +RUN usermod -a -G root postgres && \ + /usr/libexec/fix-permissions --read-only "$APP_DATA" + USER 26 ENTRYPOINT ["container-entrypoint"] diff --git a/Dockerfile.fedora b/Dockerfile.fedora new file mode 120000 index 0000000..1d1fe94 --- /dev/null +++ b/Dockerfile.fedora @@ -0,0 +1 @@ +Dockerfile \ No newline at end of file diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 7a4a3ea..0000000 --- a/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/README.md b/README.md index 92f6d17..d359f03 120000 --- a/README.md +++ b/README.md @@ -1 +1 @@ -./root/usr/share/container-scripts/postgresql/README.md \ No newline at end of file +root/usr/share/container-scripts/postgresql/README.md \ No newline at end of file diff --git a/help.md b/help.md new file mode 120000 index 0000000..42061c0 --- /dev/null +++ b/help.md @@ -0,0 +1 @@ +README.md \ No newline at end of file diff --git a/root/help.1 b/root/help.1 deleted file mode 100644 index 89c84d9..0000000 --- a/root/help.1 +++ /dev/null @@ -1,190 +0,0 @@ -.\"t -.\" WARNING: Do not edit this file manually, it is generated from README.md automatically. -.\" -.\"t -.\" Automatically generated by Pandoc 1.16.0.2 -.\" -.TH "POSTGRESQL\-95\-RHEL7" "1" "February 22, 2017" "Container Image Pages" "" -.hy -.SH PostgreSQL Docker image -.PP -This repository contains Dockerfiles for PostgreSQL images for general -usage and OpenShift. -Users can choose between RHEL and CentOS based images. -.SS Environment variables and volumes -.PP -The image recognizes the following environment variables that you can -set during initialization by passing \f[C]\-e\ VAR=VALUE\f[] to the -Docker run command. -.PP -.TS -tab(@); -lw(26.7n) lw(43.3n). -T{ -Variable name -T}@T{ -Description -T} -_ -T{ -\f[C]POSTGRESQL_USER\f[] -T}@T{ -User name for PostgreSQL account to be created -T} -T{ -\f[C]POSTGRESQL_PASSWORD\f[] -T}@T{ -Password for the user account -T} -T{ -\f[C]POSTGRESQL_DATABASE\f[] -T}@T{ -Database name -T} -T{ -\f[C]POSTGRESQL_ADMIN_PASSWORD\f[] -T}@T{ -Password for the \f[C]postgres\f[] admin account (optional) -T} -.TE -.PP -The following environment variables influence the PostgreSQL -configuration file. -They are all optional. -.PP -.TS -tab(@); -lw(15.7n) lw(37.6n) lw(16.7n). -T{ -Variable name -T}@T{ -Description -T}@T{ -Default -T} -_ -T{ -\f[C]POSTGRESQL_MAX_CONNECTIONS\f[] -T}@T{ -The maximum number of client connections allowed -T}@T{ -100 -T} -T{ -\f[C]POSTGRESQL_MAX_PREPARED_TRANSACTIONS\f[] -T}@T{ -Sets the maximum number of transactions that can be in the "prepared" -state. 
-If you are using prepared transactions, you will probably want this to -be at least as large as max_connections -T}@T{ -0 -T} -T{ -\f[C]POSTGRESQL_SHARED_BUFFERS\f[] -T}@T{ -Sets how much memory is dedicated to PostgreSQL to use for caching data -T}@T{ -32M -T} -T{ -\f[C]POSTGRESQL_EFFECTIVE_CACHE_SIZE\f[] -T}@T{ -Set to an estimate of how much memory is available for disk caching by -the operating system and within the database itself -T}@T{ -128M -T} -.TE -.PP -You can also set the following mount points by passing the -\f[C]\-v\ /host:/container\f[] flag to Docker. -.PP -.TS -tab(@); -l l. -T{ -Volume mount point -T}@T{ -Description -T} -_ -T{ -\f[C]/var/lib/pgsql/data\f[] -T}@T{ -PostgreSQL database cluster directory -T} -.TE -.PP -\f[B]Notice: When mouting a directory from the host into the container, -ensure that the mounted directory has the appropriate permissions and -that the owner and group of the directory matches the user UID or name -which is running inside the container.\f[] -.SS Usage -.PP -For this, we will assume that you are using the -\f[C]openshift/postgresql\-92\-centos7\f[] image. -If you want to set only the mandatory environment variables and not -store the database in a host directory, execute the following command: -.IP -.nf -\f[C] -$\ docker\ run\ \-d\ \-\-name\ postgresql_database\ \-e\ POSTGRESQL_USER=user\ \-e\ POSTGRESQL_PASSWORD=pass\ \-e\ POSTGRESQL_DATABASE=db\ \-p\ 5432:5432\ openshift/postgresql\-92\-centos7 -\f[] -.fi -.PP -This will create a container named \f[C]postgresql_database\f[] running -PostgreSQL with database \f[C]db\f[] and user with credentials -\f[C]user:pass\f[]. -Port 5432 will be exposed and mapped to the host. -If you want your database to be persistent across container executions, -also add a \f[C]\-v\ /host/db/path:/var/lib/pgsql/data\f[] argument. -This will be the PostgreSQL database cluster directory. -.PP -If the database cluster directory is not initialized, the entrypoint -script will first run -\f[C]initdb\f[] (http://www.postgresql.org/docs/9.2/static/app-initdb.html) -and setup necessary database users and passwords. -After the database is initialized, or if it was already present, -\f[C]postgres\f[] (http://www.postgresql.org/docs/9.2/static/app-postgres.html) -is executed and will run as PID 1. -You can stop the detached container by running -\f[C]docker\ stop\ postgresql_database\f[]. -.SS PostgreSQL auto\-tuning -.PP -When the PostgreSQL image is run with the \f[C]\-\-memory\f[] parameter -set and if there are no values provided for -\f[C]POSTGRESQL_SHARED_BUFFERS\f[] and -\f[C]POSTGRESQL_EFFECTIVE_CACHE_SIZE\f[] those values are automatically -calculated based on the value provided in the \f[C]\-\-memory\f[] -parameter. -.PP -The values are calculated based on the -upstream (https://wiki.postgresql.org/wiki/Tuning_Your_PostgreSQL_Server) -formulas. -For the \f[C]shared_buffers\f[] we use 1/4 of given memory and for the -\f[C]effective_cache_size\f[] we set the value to 1/2 of the given -memory. -.SS PostgreSQL admin account -.PP -The admin account \f[C]postgres\f[] has no password set by default, only -allowing local connections. -You can set it by setting the \f[C]POSTGRESQL_ADMIN_PASSWORD\f[] -environment variable when initializing your container. -This will allow you to login to the \f[C]postgres\f[] account remotely. -Local connections will still not require a password. 
-.SS Changing passwords -.PP -Since passwords are part of the image configuration, the only supported -method to change passwords for the database user -(\f[C]POSTGRESQL_USER\f[]) and \f[C]postgres\f[] admin user is by -changing the environment variables \f[C]POSTGRESQL_PASSWORD\f[] and -\f[C]POSTGRESQL_ADMIN_PASSWORD\f[], respectively. -.PP -Changing database passwords through SQL statements or any way other than -through the environment variables aforementioned will cause a mismatch -between the values stored in the variables and the actual passwords. -Whenever a database container starts it will reset the passwords to the -values stored in the environment variables. -.SH AUTHORS -Red Hat. diff --git a/root/usr/bin/cgroup-limits b/root/usr/bin/cgroup-limits deleted file mode 100755 index b9d4edc..0000000 --- a/root/usr/bin/cgroup-limits +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/python - -""" -Script for parsing cgroup information - -This script will read some limits from the cgroup system and parse -them, printing out "VARIABLE=VALUE" on each line for every limit that is -successfully read. Output of this script can be directly fed into -bash's export command. Recommended usage from a bash script: - - set -o errexit - export_vars=$(cgroup-limits) ; export $export_vars - -Variables currently supported: - MAX_MEMORY_LIMIT_IN_BYTES - Maximum possible limit MEMORY_LIMIT_IN_BYTES can have. This is - currently constant value of 9223372036854775807. - MEMORY_LIMIT_IN_BYTES - Maximum amount of user memory in bytes. If this value is set - to the same value as MAX_MEMORY_LIMIT_IN_BYTES, it means that - there is no limit set. The value is taken from - /sys/fs/cgroup/memory/memory.limit_in_bytes - NUMBER_OF_CORES - Number of detected CPU cores that can be used. This value is - calculated from /sys/fs/cgroup/cpuset/cpuset.cpus - NO_MEMORY_LIMIT - Set to "true" if MEMORY_LIMIT_IN_BYTES is so high that the caller - can act as if no memory limit was set. Undefined otherwise. -""" - -from __future__ import print_function -import sys - - -def _read_file(path): - try: - with open(path, 'r') as f: - return f.read().strip() - except IOError: - return None - - -def get_memory_limit(): - """ - Read memory limit, in bytes. - """ - - limit = _read_file('/sys/fs/cgroup/memory/memory.limit_in_bytes') - if limit is None or not limit.isdigit(): - print("Warning: Can't detect memory limit from cgroups", - file=sys.stderr) - return None - return int(limit) - - -def get_number_of_cores(): - """ - Read number of CPU cores. 
- """ - - core_count = 0 - - line = _read_file('/sys/fs/cgroup/cpuset/cpuset.cpus') - if line is None: - print("Warning: Can't detect number of CPU cores from cgroups", - file=sys.stderr) - return None - - for group in line.split(','): - core_ids = list(map(int, group.split('-'))) - if len(core_ids) == 2: - core_count += core_ids[1] - core_ids[0] + 1 - else: - core_count += 1 - - return core_count - - -if __name__ == "__main__": - env_vars = { - "MAX_MEMORY_LIMIT_IN_BYTES": 9223372036854775807, - "MEMORY_LIMIT_IN_BYTES": get_memory_limit(), - "NUMBER_OF_CORES": get_number_of_cores() - } - - env_vars = {k: v for k, v in env_vars.items() if v is not None} - - if env_vars.get("MEMORY_LIMIT_IN_BYTES", 0) >= 92233720368547: - env_vars["NO_MEMORY_LIMIT"] = "true" - - for key, value in env_vars.items(): - print("{0}={1}".format(key, value)) diff --git a/root/usr/bin/run-postgresql b/root/usr/bin/run-postgresql index 1c8de78..2367e57 100755 --- a/root/usr/bin/run-postgresql +++ b/root/usr/bin/run-postgresql @@ -8,21 +8,51 @@ export_vars=$(cgroup-limits) ; export $export_vars source "${CONTAINER_SCRIPTS_PATH}/common.sh" set_pgdata + +process_extending_files \ + "${APP_DATA}/src/postgresql-pre-start" \ + "${CONTAINER_SCRIPTS_PATH}/pre-start" + check_env_vars generate_passwd_file generate_postgresql_config +# Is this brand new data volume? +PG_INITIALIZED=false + if [ ! -f "$PGDATA/postgresql.conf" ]; then initialize_database - NEED_TO_CREATE_USERS=yes + PG_INITIALIZED=: +else + try_pgupgrade fi -pg_ctl -w start -o "-h ''" -if [ "${NEED_TO_CREATE_USERS:-}" == "yes" ]; then - create_users +# Use insanely large timeout (24h) to ensure that the potential recovery has +# enough time here to happen (unless liveness probe kills us). Note that in +# case of server failure this command still exists immediately. +pg_ctl start -w --timeout 86400 -o "-h ''" + +# This is just a pedantic safety measure (the timeout above is unlikely to +# happen), but `pt_ctl -w` is not reliable prior to PostgreSQL v10 where it +# returns exit_status=0 even if the server is still starting. For more info +# see the issue#297 and +# https://www.postgresql.org/message-id/CAB7nPqSJs85wK9aknm%3D_jmS6GnH3SQBhpzKcqs8Qo2LhEg2etw%40mail.gmail.com +pg_isready + +if $PG_INITIALIZED ; then + process_extending_files \ + "${APP_DATA}/src/postgresql-init" \ + "${CONTAINER_SCRIPTS_PATH}/init" + migrate_db + create_users fi -set_passwords + +process_extending_files \ + "${APP_DATA}/src/postgresql-start" \ + "${CONTAINER_SCRIPTS_PATH}/start" + pg_ctl stop unset_env_vars +echo "Starting server..." exec postgres "$@" diff --git a/root/usr/bin/run-postgresql-slave b/root/usr/bin/run-postgresql-slave index 5d42d0d..82113b7 100755 --- a/root/usr/bin/run-postgresql-slave +++ b/root/usr/bin/run-postgresql-slave @@ -13,7 +13,7 @@ function initialize_replica() { echo "Initializing PostgreSQL slave ..." # TODO: Validate and reuse existing data? rm -rf $PGDATA - PGPASSWORD="${POSTGRESQL_MASTER_PASSWORD}" pg_basebackup -x --no-password --pgdata ${PGDATA} --host=${MASTER_FQDN} --port=5432 -U "${POSTGRESQL_MASTER_USER}" + PGPASSWORD="${POSTGRESQL_MASTER_PASSWORD}" pg_basebackup -X fetch --no-password --pgdata ${PGDATA} --host=${MASTER_FQDN} --port=5432 -U "${POSTGRESQL_MASTER_USER}" # PostgreSQL recovery configuration. generate_postgresql_recovery_config @@ -33,4 +33,5 @@ export MASTER_FQDN=$(postgresql_master_addr) initialize_replica unset_env_vars +echo "Starting server..." 
exec postgres "$@" diff --git a/root/usr/bin/usage b/root/usr/bin/usage new file mode 100755 index 0000000..fa9228b --- /dev/null +++ b/root/usr/bin/usage @@ -0,0 +1,4 @@ +#!/bin/bash + +cat /usr/share/container-scripts/postgresql/README.md + diff --git a/root/usr/libexec/check-container b/root/usr/libexec/check-container new file mode 100755 index 0000000..04a5b62 --- /dev/null +++ b/root/usr/libexec/check-container @@ -0,0 +1,27 @@ +#! /bin/sh + +# Try whether the PostgreSQL in container accepts connections. +# +# With --live, be tolerant to starting PG server. If the /bin/postgres binary +# has not been executed yet (the shell script is initializing the container), +# wait for it (this script might run forever, we expect that the timeout is +# maintained externally). + +test -z "$ENABLED_COLLECTIONS" || . scl_source enable $ENABLED_COLLECTIONS + +if test x"$1" = "x--live"; then + # Since livenessProbe is about to detect container deadlocks, and we + # so far don't know about real deadlocks to be detected -- we keep + # liveness probe to report that container is always ready (as long as + # we are able to execute shell, enable collections, etc., which is + # good for container sanity testing anyways). + exit 0 +fi + +# Readiness check follows, the --timeout is set to "infinite" because it +# is handled externally (readinessProbe.timeoutSeconds). +pg_isready -q \ + -h 127.0.0.1 \ + ${POSTGRESQL_USER+-U "$POSTGRESQL_USER"} \ + ${POSTGRESQL_DATABASE+-d "$POSTGRESQL_DATABASE"} \ + --timeout 0 diff --git a/root/usr/libexec/fix-permissions b/root/usr/libexec/fix-permissions index ebcdd3d..f4b2f86 100755 --- a/root/usr/libexec/fix-permissions +++ b/root/usr/libexec/fix-permissions @@ -1,7 +1,39 @@ #!/bin/sh -# Fix permissions on the given directory to allow group read/write of -# regular files and execute of directories. -find "$1" -exec chown postgres {} \; -find "$1" -exec chgrp 0 {} \; -find "$1" -exec chmod g+rw {} \; -find "$1" -type d -exec chmod g+x {} + + +documentation="\ +Recursively fix permissions on the given directories to allow GID=0 +read/write regular files and read/write/execute directories. + +To run this command, you have to be in the group root=0!" + +uid=26 +write=w + +usage () +{ + cat >&2 <&2 "fixing permissions on '$dir' directory" + find "$dir" -exec chown "$uid:0" {} \; + find "$dir" -exec chmod "g+r$write" {} \; + find "$dir" -type d -exec chmod g+x {} + +done diff --git a/root/usr/share/container-scripts/postgresql/README.md b/root/usr/share/container-scripts/postgresql/README.md index bdf5bf3..6153cae 100644 --- a/root/usr/share/container-scripts/postgresql/README.md +++ b/root/usr/share/container-scripts/postgresql/README.md @@ -1,65 +1,173 @@ -PostgreSQL Docker image -======================= +PostgreSQL 11 SQL Database Server container image +=============================================== + +This container image includes PostgreSQL 11 SQL database server for OpenShift and general usage. +Users can choose between RHEL, CentOS and Fedora based images. +The RHEL images are available in the [Red Hat Container Catalog](https://access.redhat.com/containers/), +the CentOS images are available on [Docker Hub](https://hub.docker.com/r/centos/), +and the Fedora images are available in [Fedora Registry](https://registry.fedoraproject.org/). +The resulting image can be run using [podman](https://github.com/containers/libpod). 
+ +Note: while the examples in this README are calling `podman`, you can replace any such calls by `docker` with the same arguments + + +Description +----------- + +This container image provides a containerized packaging of the PostgreSQL postgres daemon +and client application. The postgres server daemon accepts connections from clients +and provides access to content from PostgreSQL databases on behalf of the clients. +You can find more information on the PostgreSQL project from the project Web site +(https://www.postgresql.org/). + + +Usage +----- + +For this, we will assume that you are using the `` image, available via `postgresql:11` imagestream tag in Openshift. +If you want to set only the mandatory environment variables and not store the database +in a host directory, execute the following command: + +``` +$ podman run -d --name postgresql_database -e POSTGRESQL_USER=user -e POSTGRESQL_PASSWORD=pass -e POSTGRESQL_DATABASE=db -p 5432:5432 +``` + +This will create a container named `postgresql_database` running PostgreSQL with +database `db` and user with credentials `user:pass`. Port 5432 will be exposed +and mapped to the host. If you want your database to be persistent across container +executions, also add a `-v /host/db/path:/var/lib/pgsql/data` argument (see +below). This will be the PostgreSQL database cluster directory. + +The same can be achieved in an Openshift instance using templates provided by Openshift or available in [examples](https://github.com/sclorg/postgresql-container/tree/master/examples): + +``` +$ oc process -f examples/postgresql-ephemeral-template.json -p POSTGRESQL_VERSION=11 -p POSTGRESQL_USER=user -p POSTGRESQL_PASSWORD=pass -p POSTGRESQL_DATABASE=db | oc create -f - +``` + +If the database cluster directory is not initialized, the entrypoint script will +first run [`initdb`](http://www.postgresql.org/docs/11/static/app-initdb.html) +and setup necessary database users and passwords. After the database is initialized, +or if it was already present, [`postgres`](http://www.postgresql.org/docs/11/static/app-postgres.html) +is executed and will run as PID 1. You can stop the detached container by running +`podman stop postgresql_database`. -This repository contains Dockerfiles for PostgreSQL images for general usage and OpenShift. -Users can choose between RHEL and CentOS based images. Environment variables and volumes ----------------------------------- +--------------------------------- The image recognizes the following environment variables that you can set during initialization by passing `-e VAR=VALUE` to the Docker run command. 
-| Variable name | Description | -| :--------------------------- | ---------------------------------------------- | -| `POSTGRESQL_USER` | User name for PostgreSQL account to be created | -| `POSTGRESQL_PASSWORD` | Password for the user account | -| `POSTGRESQL_DATABASE` | Database name | -| `POSTGRESQL_ADMIN_PASSWORD` | Password for the `postgres` admin account (optional) | +**`POSTGRESQL_USER`** + User name for PostgreSQL account to be created + +**`POSTGRESQL_PASSWORD`** + Password for the user account + +**`POSTGRESQL_DATABASE`** + Database name + +**`POSTGRESQL_ADMIN_PASSWORD`** + Password for the `postgres` admin account (optional) + + +Alternatively, the following options are related to migration scenario: + +**`POSTGRESQL_MIGRATION_REMOTE_HOST`** + Hostname/IP to migrate from + +**`POSTGRESQL_MIGRATION_ADMIN_PASSWORD`** + Password for the remote 'postgres' admin user + +**`POSTGRESQL_MIGRATION_IGNORE_ERRORS (optional, default 'no')`** + Set to 'yes' to ignore sql import errors + The following environment variables influence the PostgreSQL configuration file. They are all optional. -| Variable name | Description | Default -| :---------------------------- | ----------------------------------------------------------------------- | ------------------------------- -| `POSTGRESQL_MAX_CONNECTIONS` | The maximum number of client connections allowed | 100 -| `POSTGRESQL_MAX_PREPARED_TRANSACTIONS` | Sets the maximum number of transactions that can be in the "prepared" state. If you are using prepared transactions, you will probably want this to be at least as large as max_connections | 0 -| `POSTGRESQL_SHARED_BUFFERS` | Sets how much memory is dedicated to PostgreSQL to use for caching data | 32M -| `POSTGRESQL_EFFECTIVE_CACHE_SIZE` | Set to an estimate of how much memory is available for disk caching by the operating system and within the database itself | 128M +**`POSTGRESQL_MAX_CONNECTIONS (default: 100)`** + The maximum number of client connections allowed + +**`POSTGRESQL_MAX_PREPARED_TRANSACTIONS (default: 0)`** + Sets the maximum number of transactions that can be in the "prepared" state. If you are using prepared transactions, you will probably want this to be at least as large as max_connections + +**`POSTGRESQL_SHARED_BUFFERS (default: 32M)`** + Sets how much memory is dedicated to PostgreSQL to use for caching data + +**`POSTGRESQL_EFFECTIVE_CACHE_SIZE (default: 128M)`** + Set to an estimate of how much memory is available for disk caching by the operating system and within the database itself -You can also set the following mount points by passing the `-v /host:/container` flag to Docker. -| Volume mount point | Description | -| :----------------------- | ------------------------------------- | -| `/var/lib/pgsql/data` | PostgreSQL database cluster directory | +You can also set the following mount points by passing the `-v /host/dir:/container/dir:Z` flag to Docker. 
+ +**`/var/lib/pgsql/data`** + PostgreSQL database cluster directory + +**Notice: When mounting a directory from the host into the container, ensure that the mounted directory has the appropriate permissions and that the owner and group of the directory matches the user UID or name which is running inside the container.** -Usage +Typically (unless you use the `podman run -u` option) processes in the container +run under UID 26, so -- on GNU/Linux -- you can fix the datadir permissions +for example by: + +``` +$ setfacl -m u:26:-wx /your/data/dir +$ podman run <...> -v /your/data/dir:/var/lib/pgsql/data:Z <...> +``` + + +Data migration ---------------------- -For this, we will assume that you are using the `openshift/postgresql-92-centos7` image. -If you want to set only the mandatory environment variables and not store the database -in a host directory, execute the following command: +The PostgreSQL container supports migration of data from a remote PostgreSQL server. +You can run it like this: ``` -$ docker run -d --name postgresql_database -e POSTGRESQL_USER=user -e POSTGRESQL_PASSWORD=pass -e POSTGRESQL_DATABASE=db -p 5432:5432 openshift/postgresql-92-centos7 +$ podman run -d --name postgresql_database \ + -e POSTGRESQL_MIGRATION_REMOTE_HOST=172.17.0.2 \ + -e POSTGRESQL_MIGRATION_ADMIN_PASSWORD=remoteAdminP@ssword \ + [ OPTIONAL_CONFIGURATION_VARIABLES ] + openshift/postgresql-92-centos7 ``` -This will create a container named `postgresql_database` running PostgreSQL with -database `db` and user with credentials `user:pass`. Port 5432 will be exposed -and mapped to the host. If you want your database to be persistent across container -executions, also add a `-v /host/db/path:/var/lib/pgsql/data` argument. This will be -the PostgreSQL database cluster directory. +The migration is done the **dump and restore** way (running `pg_dumpall` against +the remote cluster and importing the dump locally by `psql`). Because the process +is streamed (a unix pipeline), no intermediate dump files are created during +this process, so no additional storage space is wasted. -If the database cluster directory is not initialized, the entrypoint script will -first run [`initdb`](http://www.postgresql.org/docs/9.2/static/app-initdb.html) -and setup necessary database users and passwords. After the database is initialized, -or if it was already present, [`postgres`](http://www.postgresql.org/docs/9.2/static/app-postgres.html) -is executed and will run as PID 1. You can stop the detached container by running -`docker stop postgresql_database`. +If some SQL commands fail during applying, the default behavior +of the migration script is to fail as well to ensure the **all** or **nothing** +result of scripted, unattended migration. In most common cases, successful +migration is expected (but not guaranteed!), given you migrate from +a previous version of a PostgreSQL server container that is created using +the same principles as this one (e.g. migration from +`openshift/postgresql-92-centos7` to `centos/postgresql-95-centos7`). +Migration from a different kind of PostgreSQL container is likely to fail. + +If this **all** or **nothing** principle is inadequate for you, and you know +what you are doing, there's an optional `POSTGRESQL_MIGRATION_IGNORE_ERRORS` option +which does a **best effort** migration (some data might be lost; it is up to the user +to review the standard error output and fix the issues manually in +post-migration time).
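For example, a best-effort migration run might look like this (a sketch only; it reuses the placeholder host, password, and image name from the example above and merely adds `POSTGRESQL_MIGRATION_IGNORE_ERRORS`):

```
$ podman run -d --name postgresql_database \
    -e POSTGRESQL_MIGRATION_REMOTE_HOST=172.17.0.2 \
    -e POSTGRESQL_MIGRATION_ADMIN_PASSWORD=remoteAdminP@ssword \
    -e POSTGRESQL_MIGRATION_IGNORE_ERRORS=yes \
    openshift/postgresql-92-centos7
```

Afterwards, review the container's standard error output (`podman logs postgresql_database`) for any statements that were skipped.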
+ +Please keep in mind that the container image provides help for users' +convenience, but fully automatic migration is not guaranteed. Thus, before you +start proceeding with the database migration, be prepared to perform manual +steps in order to get all your data migrated. + +Note that you might not use variables like `POSTGRESQL_USER` in the migration +scenario; all the data (including info about databases, roles, or passwords) is +copied from the old cluster. Ensure that you use the same +`OPTIONAL_CONFIGURATION_VARIABLES` as you used for initialization of the old +PostgreSQL container. If some non-default configuration is done on the remote +cluster, you might need to copy the configuration files manually, too. + +Security warning: Note that the IP communication between the old and new PostgreSQL +clusters is not encrypted by default; it is up to the user to configure SSL on +the remote cluster or ensure security via different means. PostgreSQL auto-tuning -------------------- @@ -96,3 +204,124 @@ values stored in the variables and the actual passwords. Whenever a database container starts it will reset the passwords to the values stored in the environment variables. + +Upgrading database (by switching to newer PostgreSQL image version) ------------------------------------------------------------------- + +**Warning! Please, before you decide to do the data directory upgrade, always +ensure that you've carefully backed up all your data and that you are OK with a +potential manual rollback!** + +This image supports automatic upgrade of a data directory created by +the PostgreSQL server version 10 (and _only_ this version) - provided by the sclorg +image. The upgrade process is designed so that you should be able to just +switch from *image A* to *image B*, and set the `$POSTGRESQL_UPGRADE` variable +appropriately to explicitly request the database data transformation. + +The upgrade process is internally implemented via the `pg_upgrade` binary, and for +that purpose the container needs to contain two versions of the PostgreSQL server +(have a look at `man pg_upgrade` for more info). + +For the `pg_upgrade` process - and the new server version - we need to initialize +a brand new data directory. That data directory is created automatically by the +container tooling under /var/lib/pgsql/data, which is usually an external +bind-mountpoint. The `pg_upgrade` execution is then similar to the dump&restore +approach -- it starts both the old and new PostgreSQL servers (within the container) and +"dumps" the old datadir while at the same time it "restores" it into the new +datadir. This operation requires a lot of data file copying, so you can decide +what type of upgrade you'll do by setting `$POSTGRESQL_UPGRADE` appropriately: + +**`copy`** + The data files are copied from the old datadir to the new datadir. This option has a low risk of data loss in case of some upgrade failure. + +**`hardlink`** + Data files are hard-linked from the old to the new data directory, which brings a performance optimization - but the old directory becomes unusable, even in case of failure. + + +Note that because we copy the data directory, you need to make sure that you have +enough space for the copy; upgrade failure because of not enough space might +lead to data loss. + + +Extending image ---------------- + +This image can be extended in Openshift using the `Source` build strategy or via the standalone +[source-to-image](https://github.com/openshift/source-to-image) application (where available).
+For this, we will assume that you are using the `` image, +available via the `postgresql:11` imagestream tag in Openshift. +For example, to build a customized image `new-postgresql` +with configuration from `https://github.com/sclorg/postgresql-container/tree/master/examples/extending-image`, run: + +``` +$ oc new-app postgresql:11~https://github.com/sclorg/postgresql-container.git \ + --name new-postgresql \ + --context-dir examples/extending-image/ \ + -e POSTGRESQL_USER=user \ + -e POSTGRESQL_DATABASE=db \ + -e POSTGRESQL_PASSWORD=password +``` + +or via `s2i`: + +``` +$ s2i build --context-dir examples/extending-image/ https://github.com/sclorg/postgresql-container.git new-postgresql +``` + +The directory passed to Openshift should contain one or more of the +following directories: + + +##### `postgresql-pre-start/` + +Source all `*.sh` files from this directory during the early start of the +container. There's no PostgreSQL daemon running in the background. + + +##### `postgresql-cfg/` + +Contained configuration files (`*.conf`) will be included at the end of the image's +postgresql.conf file. + + +##### `postgresql-init/` + +Contained shell scripts (`*.sh`) are sourced when the database is freshly +initialized (after a successful initdb run which made the data directory +non-empty). At the time of sourcing these scripts, the local PostgreSQL +server is running. For re-deployment scenarios with a persistent data +directory, the scripts are not sourced (no-op). + + +##### `postgresql-start/` + +Same semantics as `postgresql-init/`, except that these scripts are +always sourced (after `postgresql-init/` scripts, if they exist). + + +---------------------------------------------- + +During the s2i build all provided files are copied into the `/opt/app-root/src` +directory in the new image. Only one +file with the same name can be used for customization, and user-provided files +are preferred over the default files in `/usr/share/container-scripts/`, +so it is possible to overwrite them. + + +Troubleshooting +--------------- +At first, the postgres daemon writes its logs to the standard output, so these are available in the container log. The log can be examined by running: + + podman logs <container_name> + +Then the log output is redirected to the logging collector process and will appear in the "pg_log" directory. + + +See also +-------- +Dockerfile and other sources for this container image are available at +https://github.com/sclorg/postgresql-container. +In that repository, the Dockerfile for CentOS is called Dockerfile, the Dockerfile +for RHEL7 is called Dockerfile.rhel7, the Dockerfile for RHEL8 is called Dockerfile.rhel8, +and the Dockerfile for Fedora is called Dockerfile.fedora. diff --git a/root/usr/share/container-scripts/postgresql/common.sh b/root/usr/share/container-scripts/postgresql/common.sh index 76e7dac..e0e51ef 100644 --- a/root/usr/share/container-scripts/postgresql/common.sh +++ b/root/usr/share/container-scripts/postgresql/common.sh @@ -6,7 +6,7 @@ export POSTGRESQL_MAX_PREPARED_TRANSACTIONS=${POSTGRESQL_MAX_PREPARED_TRANSACTIO # limits are set). # Users can still override this by setting the POSTGRESQL_SHARED_BUFFERS # and POSTGRESQL_EFFECTIVE_CACHE_SIZE variables.
-if [[ "${NO_MEMORY_LIMIT:-}" == "true" || -z "${MEMORY_LIMIT_IN_BYTES}" ]]; then +if [[ "${NO_MEMORY_LIMIT:-}" == "true" || -z "${MEMORY_LIMIT_IN_BYTES:-}" ]]; then export POSTGRESQL_SHARED_BUFFERS=${POSTGRESQL_SHARED_BUFFERS:-32MB} export POSTGRESQL_EFFECTIVE_CACHE_SIZE=${POSTGRESQL_EFFECTIVE_CACHE_SIZE:-128MB} else @@ -23,9 +23,6 @@ export POSTGRESQL_CONFIG_FILE=$HOME/openshift-custom-postgresql.conf postinitdb_actions= -psql_identifier_regex='^[a-zA-Z_][a-zA-Z0-9_]*$' -psql_password_regex='^[a-zA-Z0-9_~!@#$%^&*()-=<>,.?;:|]+$' - # match . files when moving userdata below shopt -s dotglob # extglob enables the !(userdata) glob pattern below. @@ -37,20 +34,26 @@ function usage() { fi cat >&2 <> "${POSTGRESQL_CONFIG_FILE}" fi + + if [ "$POSTGRESQL_VERSION" -ge 12 ] && [ "$(uname -p)" != 'x86_64' ] && [[ "$(. /etc/os-release ; echo $VERSION_ID)" =~ 7.* ]] ; then + # On non-intel arches, data_sync_retry = off does not work + # Upstream discussion: https://www.postgresql.org/message-id/CA+mCpegfOUph2U4ZADtQT16dfbkjjYNJL1bSTWErsazaFjQW9A@mail.gmail.com + # Upstream changes that caused this issue: + # https://github.com/postgres/postgres/commit/483520eca426fb1b428e8416d1d014ac5ad80ef4 + # https://github.com/postgres/postgres/commit/9ccdd7f66e3324d2b6d3dec282cfa9ff084083f1 + # RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=1779150 + echo "data_sync_retry = on" >>"${POSTGRESQL_CONFIG_FILE}" + fi + + ( + shopt -s nullglob + for conf in "${APP_DATA}"/src/postgresql-cfg/*.conf; do + echo include \'${conf}\' >> "${POSTGRESQL_CONFIG_FILE}" + done + ) } function generate_postgresql_recovery_config() { @@ -124,18 +146,23 @@ function generate_postgresql_recovery_config() { function generate_passwd_file() { export USER_ID=$(id -u) export GROUP_ID=$(id -g) - grep -v ^postgres /etc/passwd > "$HOME/passwd" + grep -v -e ^postgres -e ^$USER_ID /etc/passwd > "$HOME/passwd" echo "postgres:x:${USER_ID}:${GROUP_ID}:PostgreSQL Server:${HOME}:/bin/bash" >> "$HOME/passwd" export LD_PRELOAD=libnss_wrapper.so export NSS_WRAPPER_PASSWD=${HOME}/passwd export NSS_WRAPPER_GROUP=/etc/group } -function initialize_database() { +initdb_wrapper () +{ # Initialize the database cluster with utf8 support enabled by default. # This might affect performance, see: - # http://www.postgresql.org/docs/9.6/static/locale.html - LANG=${LANG:-en_US.utf8} initdb + # http://www.postgresql.org/docs/11/static/locale.html + LANG=${LANG:-en_US.utf8} "$@" +} + +function initialize_database() { + initdb_wrapper initdb # PostgreSQL configuration. cat >> "$PGDATA/postgresql.conf" <&2 "\n========== \$PGDATA upgrade: %s -> %s ==========\n\n" \ + "$POSTGRESQL_PREV_VERSION" \ + "$POSTGRESQL_VERSION" + + info_msg () { printf >&2 "\n===> $*\n\n" ;} + + # pg_upgrade writes logs to cwd, so go to the persistent storage first + cd "$HOME"/data + + # disable this because of scl_source, 'set +u' just makes the code ugly + # anyways + set +u + + # we need to have the old SCL enabled, otherwise the $old_pgengine is not + # working. The scl_source script doesn't pay attention to non-zero exit + # statuses, so use 'set +e'. + set +e + source scl_source enable $old_collection + set -e + + case $POSTGRESQL_UPGRADE in + copy) # we accept this + ;; + hardlink) + optimized=: + ;; + *) + echo >&2 "Unsupported value: \$POSTGRESQL_UPGRADE=$POSTGRESQL_UPGRADE" + false + ;; + esac + + # Ensure $PGDATA_new doesn't exist yet, so we can immediately remove it if + # there's some problem. + test ! 
-e "$PGDATA_new" + + # initialize the database + info_msg "Initialize new data directory; we will migrate to that." + initdb_cmd=( initdb_wrapper "$new_pgengine"/initdb "$PGDATA_new" ) + eval "\${initdb_cmd[@]} ${POSTGRESQL_UPGRADE_INITDB_OPTIONS-}" || \ + { rm -rf "$PGDATA_new" ; false ; } + + upgrade_cmd=( + "$new_pgengine"/pg_upgrade + "--old-bindir=$old_pgengine" + "--new-bindir=$new_pgengine" + "--old-datadir=$PGDATA" + "--new-datadir=$PGDATA_new" + ) + + # Dangerous --link option, we loose $DATADIR if something goes wrong. + ! $optimized || upgrade_cmd+=(--link) + + # User-specififed options for pg_upgrade. + eval "upgrade_cmd+=(${POSTGRESQL_UPGRADE_PGUPGRADE_OPTIONS-})" + + # On non-intel arches the data_sync_retry set to on + sed -i -e 's/data_sync_retry/#data_sync_retry/' "${POSTGRESQL_CONFIG_FILE}" + + # the upgrade + info_msg "Starting the pg_upgrade process." + + # Once we stop support for PostgreSQL 9.4, we don't need + # REDHAT_PGUPGRADE_FROM_RHEL hack as we don't upgrade from 9.2 -- that means + # that we don't need to fiddle with unix_socket_director{y,ies} option. + REDHAT_PGUPGRADE_FROM_RHEL=1 \ + "${upgrade_cmd[@]}" || { cat $(find "$PGDATA_new"/.. -name pg_upgrade_server.log) ; rm -rf "$PGDATA_new" && false ; } + + # Move the important configuration and remove old data. This is highly + # careless, but we can't do more for this over-automatized process. + info_msg "Swap the old and new PGDATA and cleanup." + mv "$PGDATA"/*.conf "$PGDATA_new" + rm -rf "$PGDATA" + mv "$PGDATA_new" "$PGDATA" + + # Get back the option we changed above + sed -i -e 's/#data_sync_retry/data_sync_retry/' "${POSTGRESQL_CONFIG_FILE}" + + info_msg "Upgrade DONE." +) + + +# Run right after container startup, when the data volume is already initialized +# (not initialized by this container run) and thus there exists a chance that +# the data was generated by incompatible PostgreSQL major version. +try_pgupgrade () +{ + local versionfile="$PGDATA"/PG_VERSION version upgrade_available + + # This file always exists. + test -f "$versionfile" + version=$(cat "$versionfile") + + # If we don't support pg_upgrade, skip. + test -z "${POSTGRESQL_PREV_VERSION-}" && return 0 + + if test "$POSTGRESQL_VERSION" = "$version"; then + # No need to call pg_upgrade. + + # Mistakenly requests upgrade? If not, just start the DB. + test -z "${POSTGRESQL_UPGRADE-}" && return 0 + + # Make _sure_ we have this safety-belt here, otherwise our users would + # just specify '-e POSTGRESQL_UPGRADE=hardlink' permanently, even for + # re-deployment cases when upgrade is not needed. Setting such + # unfortunate default could mean that pg_upgrade might (after some user + # mistake) migrate (or even destruct, especially with --link) the old data + # directory with limited rollback options, if any. + echo >&2 + echo >&2 "== WARNING!! ==" + echo >&2 "PostgreSQL server version matches the datadir PG_VERSION." + echo >&2 "The \$POSTGRESQL_UPGRADE makes no sense and you probably" + echo >&2 "made some mistake, keeping the variable set you might" + echo >&2 "risk a data loss in future!" + echo >&2 "===============" + echo >&2 + + # Exit here, but allow _really explicit_ foot-shot. + ${POSTGRESQL_UPGRADE_FORCE-false} + return 0 + fi + + # At this point in code we know that PG_VERSION doesn't match the PostgreSQL + # server major version; this might mean that user either (a) mistakenly + # deploys from a bad image, or (b) user wants to perform upgrade. For the + # upgrade we require explicit request -- just to avoid disasters in (a)-cases. 
+ + if test -z "${POSTGRESQL_UPGRADE-}"; then + echo >&2 "Incompatible data directory. This container image provides" + echo >&2 "PostgreSQL '$POSTGRESQL_VERSION', but data directory is of" + echo >&2 "version '$version'." + echo >&2 + echo >&2 "This image supports automatic data directory upgrade from" + echo >&2 "'$POSTGRESQL_PREV_VERSION', please _carefully_ consult image documentation" + echo >&2 "about how to use the '\$POSTGRESQL_UPGRADE' startup option." + # We could wait for postgresql startup failure (there's no risk of data dir + # corruption), but rather fail early. + false + fi + + # We support the pg_upgrade process only from the previous version of this container + # (upgrade from N to N+1 is possible, so e.g. 9.4 to 9.5). + if test "$POSTGRESQL_PREV_VERSION" != "$version"; then + echo >&2 "With this container image you can only upgrade from data directory" + echo >&2 "of version '$POSTGRESQL_PREV_VERSION', not '$version'." + false + fi + + run_pgupgrade +} + +# get_matched_files PATTERN DIR [DIR ...] +# --------------------------------------- +# Print all basenames for files matching PATTERN in DIRs. +get_matched_files () +{ + local pattern=$1 dir + shift + for dir; do + test -d "$dir" || continue + find -L "$dir" -maxdepth 1 -type f -name "$pattern" -printf "%f\n" + done +} + +# process_extending_files DIR [DIR ...] +# ------------------------------------- +# Source all *.sh files in DIRs in alphabetical order, but if the file exists in +# more than one DIR, source only the first occurrence (first found wins). +process_extending_files() +{ + local filename dir + while read filename ; do + for dir in "$@"; do + local file="$dir/$filename" + if test -f "$file"; then + echo "=> sourcing $file ..." + source "$file" + set -e # ensure that users don't mistakenly change this + break + fi + done + done <<<"$(get_matched_files '*.sh' "$@" | sort -u)" +} diff --git a/root/usr/share/container-scripts/postgresql/scl_enable b/root/usr/share/container-scripts/postgresql/scl_enable new file mode 100644 index 0000000..1d967f9 --- /dev/null +++ b/root/usr/share/container-scripts/postgresql/scl_enable @@ -0,0 +1,3 @@ +# This will make scl collection binaries work out of the box.
+unset BASH_ENV PROMPT_COMMAND ENV +source scl_source enable $ENABLED_COLLECTIONS diff --git a/root/usr/share/container-scripts/postgresql/start/set_passwords.sh b/root/usr/share/container-scripts/postgresql/start/set_passwords.sh new file mode 100644 index 0000000..60d70e3 --- /dev/null +++ b/root/usr/share/container-scripts/postgresql/start/set_passwords.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +_psql () { psql --set ON_ERROR_STOP=1 "$@" ; } + +if [[ ",$postinitdb_actions," = *,simple_db,* ]]; then +_psql --set=username="$POSTGRESQL_USER" \ + --set=password="$POSTGRESQL_PASSWORD" \ +<<< "ALTER USER :\"username\" WITH ENCRYPTED PASSWORD :'password';" +fi + +if [ -v POSTGRESQL_MASTER_USER ]; then +_psql --set=masteruser="$POSTGRESQL_MASTER_USER" \ + --set=masterpass="$POSTGRESQL_MASTER_PASSWORD" \ +<<'EOF' +ALTER USER :"masteruser" WITH REPLICATION; +ALTER USER :"masteruser" WITH ENCRYPTED PASSWORD :'masterpass'; +EOF +fi + +if [ -v POSTGRESQL_ADMIN_PASSWORD ]; then +_psql --set=adminpass="$POSTGRESQL_ADMIN_PASSWORD" \ +<<<"ALTER USER \"postgres\" WITH ENCRYPTED PASSWORD :'adminpass';" +fi diff --git a/s2i/bin/assemble b/s2i/bin/assemble new file mode 100755 index 0000000..6ed8f7a --- /dev/null +++ b/s2i/bin/assemble @@ -0,0 +1,14 @@ +#!/bin/bash + +set -o errexit +set -o nounset +set -o pipefail + +shopt -s dotglob +echo "---> Installing application source ..." + + +mv /tmp/src/* ./ + +# Fix source directory permissions +/usr/libexec/fix-permissions --read-only ./ diff --git a/s2i/bin/run b/s2i/bin/run new file mode 120000 index 0000000..a7f4076 --- /dev/null +++ b/s2i/bin/run @@ -0,0 +1 @@ +/usr/bin/run-postgresql \ No newline at end of file diff --git a/s2i/bin/usage b/s2i/bin/usage new file mode 100755 index 0000000..9f41312 --- /dev/null +++ b/s2i/bin/usage @@ -0,0 +1 @@ +groff -t -man -ETascii /help.1 diff --git a/sources b/sources deleted file mode 100644 index e69de29..0000000 --- a/sources +++ /dev/null diff --git a/test/examples/custom-config/README b/test/examples/custom-config/README new file mode 100644 index 0000000..0c78521 --- /dev/null +++ b/test/examples/custom-config/README @@ -0,0 +1,2 @@ +Bind-mount this directory under /opt/app-root/src in the container, and all the +*.conf files from postgresql-cfg/ will be included in postgresql.conf.
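For example, to try this custom-config directory from a local checkout of the repository, one could run (a sketch following the README's own conventions; `<image>` is a placeholder for the PostgreSQL image name and `<...>` for the usual mandatory environment variables):

```
$ podman run <...> -v "$PWD"/test/examples/custom-config:/opt/app-root/src:Z <image>
```

With the `10_shared_buffers.conf` file below in place, the server should come up with `shared_buffers = 111MB`.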
diff --git a/test/examples/custom-config/postgresql-cfg/10_shared_buffers.conf b/test/examples/custom-config/postgresql-cfg/10_shared_buffers.conf new file mode 100644 index 0000000..5c23ff7 --- /dev/null +++ b/test/examples/custom-config/postgresql-cfg/10_shared_buffers.conf @@ -0,0 +1 @@ +shared_buffers = 111MB diff --git a/test/examples/enable-ssl/postgresql-cfg/ssl.conf b/test/examples/enable-ssl/postgresql-cfg/ssl.conf new file mode 100644 index 0000000..7237ac0 --- /dev/null +++ b/test/examples/enable-ssl/postgresql-cfg/ssl.conf @@ -0,0 +1,5 @@ +ssl = on +ssl_cert_file = '/opt/app-root/src/server.crt' # server certificate +ssl_key_file = '/opt/app-root/src/server.key' # server private key +#ssl_ca_file # trusted certificate authorities +#ssl_crl_file # certificates revoked by certificate authorities diff --git a/test/examples/enable-ssl/postgresql-pre-start/enable_ssl.sh b/test/examples/enable-ssl/postgresql-pre-start/enable_ssl.sh new file mode 100644 index 0000000..6836912 --- /dev/null +++ b/test/examples/enable-ssl/postgresql-pre-start/enable_ssl.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +# Postgresql server will reject key files with liberal permissions +chmod og-rwx server.key diff --git a/test/examples/enable-ssl/server.crt b/test/examples/enable-ssl/server.crt new file mode 100644 index 0000000..aabcc98 --- /dev/null +++ b/test/examples/enable-ssl/server.crt @@ -0,0 +1,77 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 2c:86:f9:22:0f:0c:ed:2b:e8:a3:f1:cf:9e:2b:09:82:22:76:ec:2b + Signature Algorithm: sha256WithRSAEncryption + Issuer: CN = testing + Validity + Not Before: Sep 16 11:39:01 2019 GMT + Not After : Sep 13 11:39:01 2029 GMT + Subject: CN = testing + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + RSA Public-Key: (2048 bit) + Modulus: + 00:d9:0f:25:ca:d9:32:4d:db:95:f5:5f:09:5a:2b: + e2:f7:ae:6a:b3:43:ce:1c:35:60:bd:cc:01:3f:f2: + 0f:eb:20:da:55:8b:42:95:da:a5:0a:c8:c5:43:54: + 64:85:e7:5b:2c:77:6a:1f:db:9f:56:39:35:e4:0f: + b0:1c:2b:a6:73:46:e8:27:2b:9f:62:5c:bf:7f:48: + 5a:99:e1:8d:73:fe:d6:3a:ec:25:35:07:ad:69:f3: + 95:81:ea:8a:20:50:fd:fc:e9:c0:b5:ac:f7:21:af: + 37:2c:8e:23:51:74:fa:75:b2:48:c4:6e:95:f1:2d: + bc:af:ff:f4:eb:da:a3:78:fe:e9:c9:c0:ef:21:b5: + 46:f5:e9:8c:9a:f9:94:84:a7:63:be:d6:fe:eb:31: + fb:ca:87:2e:e6:43:53:bd:3c:09:7f:cc:7b:9d:e2: + b9:0a:49:a5:5c:61:6e:94:f9:75:85:e3:41:e7:92: + 24:84:9f:61:c7:d4:cc:b5:26:8a:c1:db:bf:a5:ce: + 43:72:61:04:2f:bf:21:c4:d1:73:dd:b4:f8:37:bf: + 85:0d:0e:92:8d:22:33:4b:ed:6d:55:2d:0b:42:c4: + 23:e8:30:f3:86:2b:99:ba:e5:ba:ef:54:b4:40:29: + 2d:53:7c:d4:59:72:20:65:88:9d:68:5a:fc:25:a8: + 13:0d + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 17:77:65:09:AD:ED:EE:02:01:AB:09:FE:1E:FD:AB:4E:F2:4D:0A:23 + X509v3 Authority Key Identifier: + keyid:17:77:65:09:AD:ED:EE:02:01:AB:09:FE:1E:FD:AB:4E:F2:4D:0A:23 + + X509v3 Basic Constraints: critical + CA:TRUE + Signature Algorithm: sha256WithRSAEncryption + 14:67:98:15:fa:57:88:75:89:9a:0b:f0:e1:94:dd:dc:12:ab: + a0:2a:20:6d:38:64:39:39:58:4f:4d:2d:16:1d:e2:e2:d3:56: + 35:2e:3c:f5:be:7e:16:fb:87:a1:b9:27:e6:d4:52:e8:1e:c5: + c7:b7:74:b5:15:53:6d:b0:90:34:8c:ce:20:82:62:60:1e:f2: + 21:f9:22:a5:cb:17:a7:a9:55:71:cb:66:f5:dd:c2:85:6a:e1: + a7:35:d0:b9:09:6a:ae:4d:a5:32:34:fa:2a:cc:10:85:6c:95: + 50:50:2c:e9:59:d1:40:78:16:d3:87:c3:31:cb:33:7b:0f:3a: + ef:51:c1:2e:0c:eb:38:61:de:01:42:0e:1d:cc:7d:b1:24:4b: + ef:ce:9d:c6:b0:97:51:c9:cc:23:d6:5d:4e:cf:68:06:c2:47: + 94:c5:80:df:07:bc:72:cc:79:3d:94:be:6d:c8:b3:17:e6:5e: + 
52:38:c4:6b:a9:ee:ad:94:f9:74:bf:8a:95:12:06:b4:4d:17: + ca:72:a5:61:90:b7:c0:0f:d0:04:e1:39:3c:75:d5:8a:5c:11: + 96:f7:fe:82:5a:e6:30:2c:2f:94:4e:bb:1e:8e:d8:0b:6e:1e: + e4:5f:f6:c9:a3:4d:2f:58:ee:ad:b7:cd:53:3f:f1:dc:1e:d2: + 06:a0:03:58 +-----BEGIN CERTIFICATE----- +MIIDBTCCAe2gAwIBAgIULIb5Ig8M7Svoo/HPnisJgiJ27CswDQYJKoZIhvcNAQEL +BQAwEjEQMA4GA1UEAwwHdGVzdGluZzAeFw0xOTA5MTYxMTM5MDFaFw0yOTA5MTMx +MTM5MDFaMBIxEDAOBgNVBAMMB3Rlc3RpbmcwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDZDyXK2TJN25X1XwlaK+L3rmqzQ84cNWC9zAE/8g/rINpVi0KV +2qUKyMVDVGSF51ssd2of259WOTXkD7AcK6ZzRugnK59iXL9/SFqZ4Y1z/tY67CU1 +B61p85WB6oogUP386cC1rPchrzcsjiNRdPp1skjEbpXxLbyv//Tr2qN4/unJwO8h +tUb16Yya+ZSEp2O+1v7rMfvKhy7mQ1O9PAl/zHud4rkKSaVcYW6U+XWF40HnkiSE +n2HH1My1JorB27+lzkNyYQQvvyHE0XPdtPg3v4UNDpKNIjNL7W1VLQtCxCPoMPOG +K5m65brvVLRAKS1TfNRZciBliJ1oWvwlqBMNAgMBAAGjUzBRMB0GA1UdDgQWBBQX +d2UJre3uAgGrCf4e/atO8k0KIzAfBgNVHSMEGDAWgBQXd2UJre3uAgGrCf4e/atO +8k0KIzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAUZ5gV+leI +dYmaC/DhlN3cEqugKiBtOGQ5OVhPTS0WHeLi01Y1Ljz1vn4W+4ehuSfm1FLoHsXH +t3S1FVNtsJA0jM4ggmJgHvIh+SKlyxenqVVxy2b13cKFauGnNdC5CWquTaUyNPoq +zBCFbJVQUCzpWdFAeBbTh8MxyzN7DzrvUcEuDOs4Yd4BQg4dzH2xJEvvzp3GsJdR +ycwj1l1Oz2gGwkeUxYDfB7xyzHk9lL5tyLMX5l5SOMRrqe6tlPl0v4qVEga0TRfK +cqVhkLfAD9AE4Tk8ddWKXBGW9/6CWuYwLC+UTrsejtgLbh7kX/bJo00vWO6tt81T +P/HcHtIGoANY +-----END CERTIFICATE----- diff --git a/test/examples/enable-ssl/server.key b/test/examples/enable-ssl/server.key new file mode 100644 index 0000000..5a93b48 --- /dev/null +++ b/test/examples/enable-ssl/server.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDZDyXK2TJN25X1 +XwlaK+L3rmqzQ84cNWC9zAE/8g/rINpVi0KV2qUKyMVDVGSF51ssd2of259WOTXk +D7AcK6ZzRugnK59iXL9/SFqZ4Y1z/tY67CU1B61p85WB6oogUP386cC1rPchrzcs +jiNRdPp1skjEbpXxLbyv//Tr2qN4/unJwO8htUb16Yya+ZSEp2O+1v7rMfvKhy7m +Q1O9PAl/zHud4rkKSaVcYW6U+XWF40HnkiSEn2HH1My1JorB27+lzkNyYQQvvyHE +0XPdtPg3v4UNDpKNIjNL7W1VLQtCxCPoMPOGK5m65brvVLRAKS1TfNRZciBliJ1o +WvwlqBMNAgMBAAECggEAKvM8Xy8rLQzOV4c+qoEUoD37Dw3TsvE8+1FqzeRwEe6m +RVcRDeX90mx33CLO4VAuUlYuwa8LkFwxtbcE+g4JGbZmKZoQJ76ChgUjKF/hRZqf +eXlQw3WJcvWoF9T5D/v2xhza7RgUrq2lFUPq6Stkg/WLQJNBSD/snkbfh+vzfPVW +slg2zo1o9dMe53AOzjMkQ8RljbOfd+KZE340ZzftPxcTyrE9VaQqGLNtRiehhXPJ +dB7Kmc+/Pm1OkmpblnSAIJudsMNelUYsadYFgjtEgYYXFcuqNrCWeNRmMl5I/vEp +xnVf+gQfldJ+zAbkB4+nxMCOn1tqS2nOJRGW6xu4YQKBgQDt97CAAceXnRYoraqy +1ff5K8WxxCmcD0TcX/EfYXj1Qaex979x2SpftsrBTclkOxoTPv+OfrKZ1/Eq4a8z +0onP/lRGRxQ95gPvwFzQldKzmQVsoW6odZqMPO6hYJ0SsiZTwzc82IEXPA1QJJ6E +n3OggTLs0iCW3uLyXFL8npaL4wKBgQDpgdvSU82ipiEntMpqUuQdhw1TvnYwcSaC +GTUl4Uhwfdxmb424cqHiDoBKitd6DjHo20MshA/6WhWYq/dMz7ueEBu9cCKe85En +RzX0InCV26K1zBBbwMXGJGquIaQeha1GmOkpgORHjNwdwKZkZ/DJOHYOsjwGyQ22 +H8cC6MX4TwKBgQCpqQ+ApEQuN0QmKnNqX50VXHztmeLkrgo1aH3cFr2LdozeGLm4 +rNFGPmfeW9w7Btw3XpILgQ9LGieKoC8urmutDDH/jQvEeerSk35ZBIidnXq9kXb4 +yigu1f54tg4m1zb2P1dxnRakfx8qxYDzI0/n3lV1fPbZOf3qN6K/Ez5YawKBgHQi +rQTvvz+c5rKL3XyCG4iACeXTvY6cSC2+gcuEP3YLcxnTc6YABXmcAryQT1kaREJv +AvrZ9+Ro94LGTKn8S3DyzAktA1sRAumJJlF064/s/AD1LFGmD/dbV1+hxbGUhLiv +BpAo1eCsMzHtBhS8CWra1QS8KtSpHFOvfFh7EzNLAoGBANyGrLj6qzywdSV7ccnd +sWg5U7Jgzuonb3K8LAc9NTnGn9C093RGY5HhsS1kTew01QRfxR7IdtSI9Q0vO/6I +stLGSa9fpn4leu2P6iF8r640xx02UFhKyf8wpM2RF38hHjAWRW/BaUEfnECeDi5A +yWuW6DFMAuJ80LdwyMPGkkt2 +-----END PRIVATE KEY----- diff --git a/test/examples/extending-image/postgresql-cfg/s2i-extending.conf b/test/examples/extending-image/postgresql-cfg/s2i-extending.conf new file mode 100644 index 0000000..668142b --- /dev/null +++ 
b/test/examples/extending-image/postgresql-cfg/s2i-extending.conf
@@ -0,0 +1,4 @@
+log_destination = 'stderr'
+logging_collector = on
+log_directory = 'pg_log'
+log_filename = 'postgresql.log'
diff --git a/test/examples/extending-image/postgresql-start/set_passwords.sh b/test/examples/extending-image/postgresql-start/set_passwords.sh
new file mode 100644
index 0000000..9388e5b
--- /dev/null
+++ b/test/examples/extending-image/postgresql-start/set_passwords.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+# The postgresql image encrypts user passwords at service start; this
+# functionality can be disabled by providing this file (postgresql-start/set_passwords.sh) in the s2i build.
diff --git a/test/examples/pgaudit/README b/test/examples/pgaudit/README
new file mode 100644
index 0000000..5ae67dc
--- /dev/null
+++ b/test/examples/pgaudit/README
@@ -0,0 +1,10 @@
+The PostgreSQL Audit Extension (or pgaudit) provides detailed session and/or object
+audit logging via the standard logging facility provided by PostgreSQL.
+
+Bind-mount this directory under /opt/app-root/src in the container, and all the
+*.conf files from postgresql-cfg/ will be included in postgresql.conf.
+
+This config file enables the pgaudit extension, which is available in the
+container image but needs to be enabled explicitly.
+
+More about the pgaudit extension at https://www.pgaudit.org.
diff --git a/test/examples/pgaudit/postgresql-cfg/10_pgaudit.conf b/test/examples/pgaudit/postgresql-cfg/10_pgaudit.conf
new file mode 100644
index 0000000..47329c4
--- /dev/null
+++ b/test/examples/pgaudit/postgresql-cfg/10_pgaudit.conf
@@ -0,0 +1 @@
+shared_preload_libraries = 'pgaudit'
diff --git a/test/examples/postgresql-ephemeral-template.json b/test/examples/postgresql-ephemeral-template.json
new file mode 100644
index 0000000..0f55830
--- /dev/null
+++ b/test/examples/postgresql-ephemeral-template.json
@@ -0,0 +1,253 @@
+{
+  "kind": "Template",
+  "apiVersion": "v1",
+  "metadata": {
+    "name": "postgresql-ephemeral",
+    "annotations": {
+      "openshift.io/display-name": "PostgreSQL (Ephemeral)",
+      "description": "PostgreSQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+      "iconClass": "icon-postgresql",
+      "tags": "database,postgresql",
+      "openshift.io/long-description": "This template provides a standalone PostgreSQL server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost.
The database name, username, and password are chosen via parameters when provisioning this service.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://docs.okd.io/latest/using_images/db_images/postgresql.html", + "openshift.io/support-url": "https://access.redhat.com" + } + }, + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/.", + "labels": { + "template": "postgresql-ephemeral-template" + }, + "objects": [ + { + "kind": "Secret", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-username": "{.data['database-user']}", + "template.openshift.io/expose-password": "{.data['database-password']}", + "template.openshift.io/expose-database_name": "{.data['database-name']}" + } + }, + "stringData" : { + "database-user" : "${POSTGRESQL_USER}", + "database-password" : "${POSTGRESQL_PASSWORD}", + "database-name" : "${POSTGRESQL_DATABASE}" + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-uri": "postgres://{.spec.clusterIP}:{.spec.ports[?(.name==\"postgresql\")].port}" + } + }, + "spec": { + "ports": [ + { + "name": "postgresql", + "protocol": "TCP", + "port": 5432, + "targetPort": 5432, + "nodePort": 0 + } + ], + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.alpha.openshift.io/wait-for-ready": "true" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "postgresql" + ], + "from": { + "kind": "ImageStreamTag", + "name": "postgresql:${POSTGRESQL_VERSION}", + "namespace": "${NAMESPACE}" + }, + "lastTriggeredImage": "" + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "template": { + "metadata": { + "labels": { + "name": "${DATABASE_SERVICE_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "postgresql", + "image": " ", + "ports": [ + { + "containerPort": 5432, + "protocol": "TCP" + } + ], + "readinessProbe": { + "timeoutSeconds": 1, + "initialDelaySeconds": 5, + "exec": { + "command": [ "/usr/libexec/check-container" ] + } + }, + "livenessProbe": { + "timeoutSeconds": 10, + "initialDelaySeconds": 120, + "exec": { + "command": [ "/usr/libexec/check-container", "--live" ] + } + }, + "env": [ + { + "name": "POSTGRESQL_USER", + "valueFrom": { + "secretKeyRef" : { + "name" : "${DATABASE_SERVICE_NAME}", + "key" : "database-user" + } + } + }, + { + "name": "POSTGRESQL_PASSWORD", + "valueFrom": { + "secretKeyRef" : { + "name" : "${DATABASE_SERVICE_NAME}", + "key" : "database-password" + } + } + }, + { + "name": "POSTGRESQL_DATABASE", + "valueFrom": { + "secretKeyRef" : { + "name" : "${DATABASE_SERVICE_NAME}", + "key" : "database-name" + } + } + } + ], + "resources": { + 
"limits": { + "memory": "${MEMORY_LIMIT}" + } + }, + "volumeMounts": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "mountPath": "/var/lib/pgsql/data" + } + ], + "terminationMessagePath": "/dev/termination-log", + "imagePullPolicy": "IfNotPresent", + "capabilities": {}, + "securityContext": { + "capabilities": {}, + "privileged": false + } + } + ], + "volumes": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "emptyDir": { + "medium": "" + } + } + ], + "restartPolicy": "Always", + "dnsPolicy": "ClusterFirst" + } + } + }, + "status": {} + } + ], + "parameters": [ + { + "name": "MEMORY_LIMIT", + "displayName": "Memory Limit", + "description": "Maximum amount of memory the container can use.", + "value": "512Mi", + "required": true + }, + { + "name": "NAMESPACE", + "displayName": "Namespace", + "description": "The OpenShift Namespace where the ImageStream resides.", + "value": "openshift" + }, + { + "name": "DATABASE_SERVICE_NAME", + "displayName": "Database Service Name", + "description": "The name of the OpenShift Service exposed for the database.", + "value": "postgresql", + "required": true + }, + { + "name": "POSTGRESQL_USER", + "displayName": "PostgreSQL Connection Username", + "description": "Username for PostgreSQL user that will be used for accessing the database.", + "generate": "expression", + "from": "user[A-Z0-9]{3}", + "required": true + }, + { + "name": "POSTGRESQL_PASSWORD", + "displayName": "PostgreSQL Connection Password", + "description": "Password for the PostgreSQL connection user.", + "generate": "expression", + "from": "[a-zA-Z0-9]{16}", + "required": true + }, + { + "name": "POSTGRESQL_DATABASE", + "displayName": "PostgreSQL Database Name", + "description": "Name of the PostgreSQL database accessed.", + "value": "sampledb", + "required": true + }, + { + "name": "POSTGRESQL_VERSION", + "displayName": "Version of PostgreSQL Image", + "description": "Version of PostgreSQL image to be used (10 or latest).", + "value": "10", + "required": true + } + ] +} diff --git a/test/examples/postgresql-persistent-template.json b/test/examples/postgresql-persistent-template.json new file mode 100644 index 0000000..b6b0455 --- /dev/null +++ b/test/examples/postgresql-persistent-template.json @@ -0,0 +1,277 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "name": "postgresql-persistent", + "annotations": { + "openshift.io/display-name": "PostgreSQL", + "description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", + "iconClass": "icon-postgresql", + "tags": "database,postgresql", + "openshift.io/long-description": "This template provides a standalone PostgreSQL server with a database created. The database is stored on persistent storage. 
The database name, username, and password are chosen via parameters when provisioning this service.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://docs.okd.io/latest/using_images/db_images/postgresql.html", + "openshift.io/support-url": "https://access.redhat.com" + } + }, + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/.", + "labels": { + "template": "postgresql-persistent-template" + }, + "objects": [ + { + "kind": "Secret", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-username": "{.data['database-user']}", + "template.openshift.io/expose-password": "{.data['database-password']}", + "template.openshift.io/expose-database_name": "{.data['database-name']}" + } + }, + "stringData" : { + "database-user" : "${POSTGRESQL_USER}", + "database-password" : "${POSTGRESQL_PASSWORD}", + "database-name" : "${POSTGRESQL_DATABASE}" + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-uri": "postgres://{.spec.clusterIP}:{.spec.ports[?(.name==\"postgresql\")].port}" + } + }, + "spec": { + "ports": [ + { + "name": "postgresql", + "protocol": "TCP", + "port": 5432, + "targetPort": 5432, + "nodePort": 0 + } + ], + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + { + "kind": "PersistentVolumeClaim", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.alpha.openshift.io/wait-for-ready": "true" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "postgresql" + ], + "from": { + "kind": "ImageStreamTag", + "name": "postgresql:${POSTGRESQL_VERSION}", + "namespace": "${NAMESPACE}" + }, + "lastTriggeredImage": "" + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "template": { + "metadata": { + "labels": { + "name": "${DATABASE_SERVICE_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "postgresql", + "image": " ", + "ports": [ + { + "containerPort": 5432, + "protocol": "TCP" + } + ], + "readinessProbe": { + "timeoutSeconds": 1, + "initialDelaySeconds": 5, + "exec": { + "command": [ "/usr/libexec/check-container" ] + } + }, + "livenessProbe": { + "timeoutSeconds": 10, + "initialDelaySeconds": 120, + "exec": { + "command": [ "/usr/libexec/check-container", "--live" ] + } + }, + "env": [ + { + "name": "POSTGRESQL_USER", + "valueFrom": { + "secretKeyRef" : { + "name" : "${DATABASE_SERVICE_NAME}", + "key" : "database-user" + } + } + }, + { + "name": "POSTGRESQL_PASSWORD", + "valueFrom": { + "secretKeyRef" : { + 
"name" : "${DATABASE_SERVICE_NAME}", + "key" : "database-password" + } + } + }, + { + "name": "POSTGRESQL_DATABASE", + "valueFrom": { + "secretKeyRef" : { + "name" : "${DATABASE_SERVICE_NAME}", + "key" : "database-name" + } + } + } + ], + "resources": { + "limits": { + "memory": "${MEMORY_LIMIT}" + } + }, + "volumeMounts": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "mountPath": "/var/lib/pgsql/data" + } + ], + "terminationMessagePath": "/dev/termination-log", + "imagePullPolicy": "IfNotPresent", + "capabilities": {}, + "securityContext": { + "capabilities": {}, + "privileged": false + } + } + ], + "volumes": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "persistentVolumeClaim": { + "claimName": "${DATABASE_SERVICE_NAME}" + } + } + ], + "restartPolicy": "Always", + "dnsPolicy": "ClusterFirst" + } + } + }, + "status": {} + } + ], + "parameters": [ + { + "name": "MEMORY_LIMIT", + "displayName": "Memory Limit", + "description": "Maximum amount of memory the container can use.", + "value": "512Mi", + "required": true + }, + { + "name": "NAMESPACE", + "displayName": "Namespace", + "description": "The OpenShift Namespace where the ImageStream resides.", + "value": "openshift" + }, + { + "name": "DATABASE_SERVICE_NAME", + "displayName": "Database Service Name", + "description": "The name of the OpenShift Service exposed for the database.", + "value": "postgresql", + "required": true + }, + { + "name": "POSTGRESQL_USER", + "displayName": "PostgreSQL Connection Username", + "description": "Username for PostgreSQL user that will be used for accessing the database.", + "generate": "expression", + "from": "user[A-Z0-9]{3}", + "required": true + }, + { + "name": "POSTGRESQL_PASSWORD", + "displayName": "PostgreSQL Connection Password", + "description": "Password for the PostgreSQL connection user.", + "generate": "expression", + "from": "[a-zA-Z0-9]{16}", + "required": true + }, + { + "name": "POSTGRESQL_DATABASE", + "displayName": "PostgreSQL Database Name", + "description": "Name of the PostgreSQL database accessed.", + "value": "sampledb", + "required": true + }, + { + "name": "VOLUME_CAPACITY", + "displayName": "Volume Capacity", + "description": "Volume space available for data, e.g. 512Mi, 2Gi.", + "value": "1Gi", + "required": true + }, + { + "name": "POSTGRESQL_VERSION", + "displayName": "Version of PostgreSQL Image", + "description": "Version of PostgreSQL image to be used (10 or latest).", + "value": "10", + "required": true + } + ] +} diff --git a/test/examples/replica/README.md b/test/examples/replica/README.md new file mode 100644 index 0000000..b7c546d --- /dev/null +++ b/test/examples/replica/README.md @@ -0,0 +1,240 @@ +# PostgreSQL Replication Example + +**WARNING: This is only a Proof-Of-Concept example and it is not meant to be used in +production. Use at your own risk.** + +## What is PostgreSQL replication? + +Replication enables data from one database server (master, or primary) to be +replicated to one or more servers (slaves, or standby servers). + +PostgreSQL has [different replication solutions](http://www.postgresql.org/docs/9.2/static/different-replication-solutions.html), +each with its own pros and cons. +This example uses PostgreSQL's native support for [streaming replication](http://www.postgresql.org/docs/9.2/static/warm-standby.html). 
+In this configuration, the primary server operates in continuous archiving
+mode, while each standby server operates in continuous recovery mode, streaming
+the write-ahead log (WAL) records from the primary over the network as they're
+generated.
+
+This setup can be used to create a high-availability (HA) cluster and has a
+relatively low performance impact on the primary server.
+
+A standby server can also be used for read-only queries.
+
+## Deployment
+
+This example uses a [PersistentVolumeClaim](https://docs.okd.io/latest/architecture/additional_concepts/storage.html#persistent-volume-claims)
+to request persistent storage for the primary PostgreSQL server.
+
+You need to have persistent volumes configured and available in your project in
+order to continue. For trying out this example in a single node testing
+environment, you can create a temporary volume with:
+
+```
+$ oc create -f - <<EOF
+{
+  "apiVersion": "v1",
+  "kind": "PersistentVolume",
+  "metadata": {
+    "name": "postgresql-master-volume"
+  },
+  "spec": {
+    "capacity": {
+      "storage": "512Mi"
+    },
+    "hostPath": {
+      "path": "`mktemp -d /tmp/pv-XXXXXXX | tee >(xargs chmod a+rwx)`"
+    },
+    "accessModes": [
+      "ReadWriteOnce"
+    ]
+  }
+}
+EOF
+```
+
+It is recommended, however, that you use another [type of PersistentVolume](https://docs.okd.io/latest/architecture/additional_concepts/storage.html#types-of-persistent-volumes)
+such as NFS.
+
+Now you can create a new database deployment:
+
+```
+$ oc new-app examples/replica/postgresql_replica.json
+```
+
+## How does this example work?
+
+### Services 'postgresql-master' and 'postgresql-slave'
+
+These services are the entry point for connecting to, respectively, the primary
+database server and any of the standby servers.
+
+In your application, connect to the `postgresql-master` service for write
+operations, and to `postgresql-master` or `postgresql-slave` for reads.
+Keep in mind that reading from a slave might return slightly outdated data.
+
+To get a list of endpoints for the read-only standby servers, you can do a DNS
+query. From a container in the same OpenShift project:
+
+```
+$ dig postgresql-slave A +search +short
+```
+
+### DeploymentConfig 'postgresql-master'
+
+This resource defines a [deployment configuration](https://docs.okd.io/latest/architecture/core_concepts/deployments.html#deployments-and-deployment-configurations)
+to spawn the PostgreSQL primary database server, or master.
+
+Once the master is started, it works as a standalone database server, fully
+independent of the slaves.
+
+### DeploymentConfig 'postgresql-slave'
+
+This resource defines a [deployment configuration](https://docs.okd.io/latest/architecture/core_concepts/deployments.html#deployments-and-deployment-configurations)
+to spawn PostgreSQL standby servers, the slaves.
+
+Upon startup, each slave waits for the master server to become available (via
+DNS lookup). Once that happens, the slave connects to the master and starts
+streaming the WAL.
+
+To check that the slave is connected and streaming changes from the master,
+you can issue the following commands:
+
+```
+$ master_name=`oc get pods -l name=postgresql-master -t '{{ (index .items 0).metadata.name }}'`
+$ oc exec $master_name -- bash -c 'psql -c "select client_addr, state from pg_stat_replication;"'
+```
+
+After a successful deployment, you should get an output similar to:
+
+```
+ client_addr  |   state
+--------------+-----------
+ 172.17.0.227 | streaming
+(1 row)
+```
+
+## Scaling
+
+By default, the provided template creates one primary and one standby server.
+Scaling in this setup means increasing the number of standby servers,
+consequently increasing data redundancy and concurrent read throughput (if
+reading from slaves).
+ +You can add more slaves using `oc scale`: + +``` +$ oc scale dc postgresql-slave --replicas=2 +``` + +Using `oc scale` with `postgresql-master` is not supported. + +After scaling, you can verify that all slaves are streaming changes from the +master with: + +``` +$ oc exec $master_name -- bash -c 'psql -c "select client_addr, state from pg_stat_replication;"' + client_addr | state +--------------+----------- + 172.17.0.227 | streaming + 172.17.0.229 | streaming +(2 rows) +``` + +There should be one row per slave (number of replicas defined via `oc scale`). + +## Changing passwords + +You can change the passwords for the database user and admin, as well as the +password used for replication, by changing the appropriate environment variables +in the deployment configurations described earlier. +No other method is supported. + +On every deploy, passwords are reset to match the values in the environment +variables of the DeploymentConfig 'postgresql-master'. + +### POSTGRESQL_PASSWORD and POSTGRESQL_ADMIN_PASSWORD + +These are, respectively, the passwords for the regular database user defined +by `POSTGRESQL_USER` and the admin user 'postgres'. + +You can change these passwords with: + +``` +$ oc env dc postgresql-master POSTGRESQL_PASSWORD=NewPassword POSTGRESQL_ADMIN_PASSWORD=NewAdminPassword +deploymentconfigs/postgresql-master +``` + +This will trigger the redeployment of the primary server. +Note that you can change one password but not the other by simply omitting one +of the arguments to `oc env` above. + +You can verify that the new password is in effect with: + +``` +$ oc exec $master_name -- bash -c 'PGPASSWORD=NewPassword psql -h postgresql-master $POSTGRESQL_DATABASE $POSTGRESQL_USER -c "select * from (select inet_server_addr()) ra cross join (select current_database()) cdb cross join (select current_user) cu"' + inet_server_addr | current_database | current_user +------------------+------------------+-------------- + 172.17.1.38 | userdb | user +(1 row) +``` + +You should also be able to connect to a slave using the new password: + +``` +$ oc exec $master_name -- bash -c 'PGPASSWORD=NewPassword psql -h postgresql-slave $POSTGRESQL_DATABASE $POSTGRESQL_USER -c "select * from (select inet_server_addr()) ra cross join (select current_database()) cdb cross join (select current_user) cu"' + inet_server_addr | current_database | current_user +------------------+------------------+-------------- + 172.17.1.35 | userdb | user +(1 row) +``` + +For completeness, here's how to verify the new admin password: + +``` +$ oc exec $master_name -- bash -c 'PGPASSWORD=NewAdminPassword psql -h postgresql-master $POSTGRESQL_DATABASE -c "select * from (select inet_server_addr()) ra cross join (select current_database()) cdb cross join (select current_user) cu"' + inet_server_addr | current_database | current_user +------------------+------------------+-------------- + 172.17.1.38 | userdb | postgres +(1 row) +``` + +``` +$ oc exec $master_name -- bash -c 'PGPASSWORD=NewAdminPassword psql -h postgresql-slave $POSTGRESQL_DATABASE -c "select * from (select inet_server_addr()) ra cross join (select current_database()) cdb cross join (select current_user) cu"' + inet_server_addr | current_database | current_user +------------------+------------------+-------------- + 172.17.1.35 | userdb | postgres +(1 row) +``` + +### POSTGRESQL_MASTER_PASSWORD + +This password is used by standby servers to connect to the primary. 
Both
+deployment configurations in this example setup need to agree on the value of
+this password for replication to work correctly.
+
+You can change the environment variable with the password on both deployment
+configurations at once:
+
+```
+$ oc env dc postgresql-master postgresql-slave POSTGRESQL_MASTER_PASSWORD=NewReplicationPassword
+deploymentconfigs/postgresql-master
+deploymentconfigs/postgresql-slave
+```
+
+This will trigger the redeployment of both primary and standby servers.
+
+Note that, as a current limitation in this example, the standby servers store
+replicated data in an ephemeral [emptyDir](https://docs.okd.io/latest/dev_guide/volumes.html).
+This means that redeploying a standby server will cause it to start replicating
+again from scratch.
+
+After the primary and standby servers are ready, you can verify that the standby
+servers are successfully connected to the primary:
+
+```
+$ oc exec $master_name -- bash -c 'psql -c "select client_addr, state from pg_stat_replication;"'
+ client_addr |   state
+-------------+-----------
+ 172.17.1.35 | streaming
+(1 row)
+```
diff --git a/test/examples/replica/postgresql_replica.json b/test/examples/replica/postgresql_replica.json
new file mode 100644
index 0000000..775a2dc
--- /dev/null
+++ b/test/examples/replica/postgresql_replica.json
@@ -0,0 +1,370 @@
+{
+  "kind": "Template",
+  "apiVersion": "v1",
+  "metadata": {
+    "name": "pg-replica-example",
+    "annotations": {
+      "description": "PostgreSQL Replication Example",
+      "iconClass": "icon-database",
+      "tags": "database,postgresql,replication"
+    }
+  },
+  "parameters": [
+    {
+      "name": "POSTGRESQL_MASTER_USER",
+      "description": "The username used for master-slave replication",
+      "value": "master",
+      "required": true
+    },
+    {
+      "name": "POSTGRESQL_MASTER_PASSWORD",
+      "description": "The password for the PostgreSQL replication user",
+      "generate": "expression",
+      "from": "[a-zA-Z0-9]{12}",
+      "required": true
+    },
+    {
+      "name": "POSTGRESQL_USER",
+      "description": "The username that clients will use to connect to the PostgreSQL server",
+      "value": "user",
+      "required": true
+    },
+    {
+      "name": "POSTGRESQL_PASSWORD",
+      "description": "The password for the PostgreSQL connection user",
+      "generate": "expression",
+      "from": "[a-zA-Z0-9]{12}",
+      "required": true
+    },
+    {
+      "name": "POSTGRESQL_DATABASE",
+      "description": "The name of the database that will be created",
+      "value": "userdb",
+      "required": true
+    },
+    {
+      "name": "POSTGRESQL_ADMIN_PASSWORD",
+      "description": "The password for the PostgreSQL administrator",
+      "generate": "expression",
+      "from": "[a-zA-Z0-9]{12}",
+      "required": false
+    },
+    {
+      "name": "POSTGRESQL_MASTER_SERVICE_NAME",
+      "description": "The name of the PostgreSQL Service (used for DNS lookup, default: 'postgresql-master')",
+      "value": "postgresql-master",
+      "required": true
+    },
+    {
+      "name": "POSTGRESQL_SLAVE_SERVICE_NAME",
+      "description": "The name of the PostgreSQL Service (used for DNS lookup, default: 'postgresql-slave')",
+      "value": "postgresql-slave",
+      "required": true
+    },
+    {
+      "name": "VOLUME_CAPACITY",
+      "description": "Volume space available for data, e.g.
512Mi, 2Gi", + "value": "512Mi", + "required": true + }, + { + "name": "IMAGESTREAMTAG", + "displayName": "ImageStreamTag", + "description": "The OpenShift ImageStreamTag to use for PostgreSQL.", + "value": "postgresql:9.6" + }, + { + "name": "NAMESPACE", + "displayName": "Namespace", + "description": "The OpenShift Namespace where the ImageStream resides.", + "value": "openshift" + } + ], + "objects": [ + { + "kind": "PersistentVolumeClaim", + "apiVersion": "v1", + "metadata": { + "name": "postgresql-data-claim" + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${POSTGRESQL_MASTER_SERVICE_NAME}", + "labels": { + "name": "${POSTGRESQL_MASTER_SERVICE_NAME}" + } + }, + "spec": { + "ports": [ + { + "port": 5432, + "targetPort": 5432 + } + ], + "selector": { + "name": "${POSTGRESQL_MASTER_SERVICE_NAME}" + }, + "clusterIP": "None" + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${POSTGRESQL_SLAVE_SERVICE_NAME}", + "labels": { + "name": "${POSTGRESQL_SLAVE_SERVICE_NAME}" + } + }, + "spec": { + "ports": [ + { + "port": 5432, + "targetPort": 5432 + } + ], + "selector": { + "name": "${POSTGRESQL_SLAVE_SERVICE_NAME}" + }, + "clusterIP": "None" + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${POSTGRESQL_MASTER_SERVICE_NAME}" + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "postgresql-master" + ], + "from": { + "kind": "ImageStreamTag", + "name": "${IMAGESTREAMTAG}", + "namespace": "${NAMESPACE}" + }, + "lastTriggeredImage": "" + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${POSTGRESQL_MASTER_SERVICE_NAME}" + }, + "template": { + "metadata": { + "labels": { + "name": "${POSTGRESQL_MASTER_SERVICE_NAME}" + } + }, + "spec": { + "volumes": [ + { + "name": "postgresql-data", + "persistentVolumeClaim": { + "claimName": "postgresql-data-claim" + } + } + ], + "containers": [ + { + "name": "postgresql-master", + "image": " ", + "args": [ + "run-postgresql-master" + ], + "ports": [ + { + "containerPort": 5432 + } + ], + "readinessProbe": { + "timeoutSeconds": 1, + "initialDelaySeconds": 5, + "exec": { + "command": [ "/usr/libexec/check-container" ] + } + }, + "livenessProbe": { + "timeoutSeconds": 10, + "initialDelaySeconds": 120, + "exec": { + "command": [ "/usr/libexec/check-container", "--live" ] + } + }, + "env": [ + { + "name": "POSTGRESQL_MASTER_USER", + "value": "${POSTGRESQL_MASTER_USER}" + }, + { + "name": "POSTGRESQL_MASTER_PASSWORD", + "value": "${POSTGRESQL_MASTER_PASSWORD}" + }, + { + "name": "POSTGRESQL_USER", + "value": "${POSTGRESQL_USER}" + }, + { + "name": "POSTGRESQL_PASSWORD", + "value": "${POSTGRESQL_PASSWORD}" + }, + { + "name": "POSTGRESQL_DATABASE", + "value": "${POSTGRESQL_DATABASE}" + }, + { + "name": "POSTGRESQL_ADMIN_PASSWORD", + "value": "${POSTGRESQL_ADMIN_PASSWORD}" + } + ], + "volumeMounts": [ + { + "name": "postgresql-data", + "mountPath": "/var/lib/pgsql/data" + } + ] + } + ] + } + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${POSTGRESQL_SLAVE_SERVICE_NAME}" + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + 
"containerNames": [ + "postgresql-slave" + ], + "from": { + "kind": "ImageStreamTag", + "name": "${IMAGESTREAMTAG}", + "namespace": "${NAMESPACE}" + }, + "lastTriggeredImage": "" + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${POSTGRESQL_SLAVE_SERVICE_NAME}" + }, + "template": { + "metadata": { + "labels": { + "name": "${POSTGRESQL_SLAVE_SERVICE_NAME}" + } + }, + "spec": { + "volumes": [ + { + "name": "postgresql-data", + "emptyDir": {} + } + ], + "containers": [ + { + "name": "postgresql-slave", + "image": " ", + "args": [ + "run-postgresql-slave" + ], + "ports": [ + { + "containerPort": 5432 + } + ], + "readinessProbe": { + "timeoutSeconds": 1, + "initialDelaySeconds": 5, + "exec": { + "command": [ "/usr/libexec/check-container" ] + } + }, + "livenessProbe": { + "timeoutSeconds": 10, + "initialDelaySeconds": 120, + "exec": { + "command": [ "/usr/libexec/check-container", "--live" ] + } + }, + "env": [ + { + "name": "POSTGRESQL_MASTER_SERVICE_NAME", + "value": "${POSTGRESQL_MASTER_SERVICE_NAME}" + }, + { + "name": "POSTGRESQL_MASTER_USER", + "value": "${POSTGRESQL_MASTER_USER}" + }, + { + "name": "POSTGRESQL_MASTER_PASSWORD", + "value": "${POSTGRESQL_MASTER_PASSWORD}" + }, + { + "name": "POSTGRESQL_USER", + "value": "${POSTGRESQL_USER}" + }, + { + "name": "POSTGRESQL_PASSWORD", + "value": "${POSTGRESQL_PASSWORD}" + }, + { + "name": "POSTGRESQL_DATABASE", + "value": "${POSTGRESQL_DATABASE}" + } + ], + "volumeMounts": [ + { + "name": "postgresql-data", + "mountPath": "/var/lib/pgsql/data" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/test/examples/s2i-dump-data/.s2i/bin/assemble b/test/examples/s2i-dump-data/.s2i/bin/assemble new file mode 100755 index 0000000..48e9e0c --- /dev/null +++ b/test/examples/s2i-dump-data/.s2i/bin/assemble @@ -0,0 +1,36 @@ +#! /usr/bin/bash -x + +# fail early +set -e + +# source the convenience tooling +source "${CONTAINER_SCRIPTS_PATH}/common.sh" + +# set $PGDATA variable +set_pgdata + +# assert uninitialized data +test ! -f "$PGDATA/postgresql.conf" + +# empty config file is needed after 'initialize_database' call +touch "$POSTGRESQL_CONFIG_FILE" + +initialize_database + +# start local PostgreSQL server (wait with '-w') +pg_ctl -w start -o "-h ''" + +# load all sql files +shopt -s nullglob +for file in /tmp/src/init/*.sql; do + psql -f "$file" +done + +pg_ctl stop + +# dump the data into $PWD (in-image storage) +tar caf data.tar.xz -C "$PGDATA" . +rm -rf "$PGDATA" + +# install pre-start hook +cp -r /tmp/src/postgresql-pre-start . diff --git a/test/examples/s2i-dump-data/init/init.sql b/test/examples/s2i-dump-data/init/init.sql new file mode 100644 index 0000000..6cda639 --- /dev/null +++ b/test/examples/s2i-dump-data/init/init.sql @@ -0,0 +1,2 @@ +CREATE TABLE test (sth TEXT); +INSERT INTO test VALUES ('hello world'); diff --git a/test/examples/s2i-dump-data/postgresql-pre-start/10_boot.sh b/test/examples/s2i-dump-data/postgresql-pre-start/10_boot.sh new file mode 100644 index 0000000..ad85bcb --- /dev/null +++ b/test/examples/s2i-dump-data/postgresql-pre-start/10_boot.sh @@ -0,0 +1,3 @@ +if test ! -f "$PGDATA/postgresql.conf"; then + tar xf "$APP_DATA"/src/data.tar.xz -C "$PGDATA" +fi diff --git a/test/pagila.sh b/test/pagila.sh new file mode 100755 index 0000000..123c6bb --- /dev/null +++ b/test/pagila.sh @@ -0,0 +1,37 @@ +#! 
/bin/sh
+
+set -e
+
+die() { echo "$*" >&2 ; exit 1; }
+
+test -z "$CID" && die "Please specify \$CID variable"
+# test -d common || die "Please run me from git root directory"
+
+pagila_mirror=https://dl.fedoraproject.org/pub/epel/7/x86_64/Packages/p/
+pagila_base="pagila-0.10.1-3.el7.noarch.rpm"
+pagila=$pagila_mirror$pagila_base
+pagila_file="$PWD/postgresql-container-pagila.sql"
+pagila_sha256sum=b968d9498d866bff8f47d9e50edf49feeff108d4164bff2aa167dc3eae802701
+
+(
+    flock --timeout 180 9
+
+    # Already downloaded?
+    test ! -f "$pagila_file" || exit 0
+
+    set -o pipefail
+    curl -s "$pagila" > "$pagila_base"
+    for file in ./usr/share/pagila/pagila-schema.sql \
+                ./usr/share/pagila/pagila-data.sql \
+                ./usr/share/pagila/pagila-insert-data.sql ; \
+    do
+        rpm2cpio "$pagila_base" | cpio --extract --to-stdout "$file"
+    done >"$pagila_file"
+) 9<"$0"
+
+case $(sha256sum "$pagila_file") in
+"$pagila_sha256sum"*) ;;
+*) false ;;
+esac
+
+docker exec -i "$CID" container-entrypoint psql -tA < "$pagila_file" &>/dev/null
diff --git a/test/pg-test-lib.sh b/test/pg-test-lib.sh
new file mode 100644
index 0000000..71e8d5c
--- /dev/null
+++ b/test/pg-test-lib.sh
@@ -0,0 +1,141 @@
+DEBUG=false
+
+info () { echo >&2 " * $*" ; }
+debug () { ! ${DEBUG} || echo >&2 " ~ $*" ; }
+error () { echo >&2 "ERROR: $*" ; false ; }
+
+get_image_id ()
+{
+    local old_IFS=$IFS
+    local result
+
+    # split "$1" into "$1 $2 .." on colons
+    IFS=:
+    set -- $1
+    IFS=$old_IFS
+    case $2 in
+    local)
+        # Default to $IMAGE_NAME if it is set since .image-id might not exist
+        echo "${IMAGE_NAME-$(cat "$1"/.image-id)}"
+        ;;
+    remote)
+        local version=${1//\./}
+        case $OS in
+        rhel7)
+            ns=rhscl
+            if test "$version" -eq 92; then
+                ns=openshift3
+            fi
+            image=registry.redhat.io/$ns/postgresql-${version}-rhel7
+            ;;
+        centos7)
+            ns=centos
+            if test "$version" -eq 92; then
+                ns=openshift
+            fi
+            local image=docker.io/$ns/postgresql-${version}-centos7
+            ;;
+        rhel8)
+            ns=rhel8
+            local image=registry.redhat.io/$ns/postgresql-${version}
+            ;;
+        esac
+        docker pull "$image" >/dev/null
+        echo "$image"
+        ;;
+    esac
+}
+
+data_pagila_create ()
+{
+    debug "initializing pagila database"
+    CID="$CID" ./test/pagila.sh
+}
+
+data_pagila_check ()
+{
+    debug "doing pagila check"
+    local exp_output='28
+16
+2'
+    # Count the objects created by the pagila dump and compare them with the
+    # expected counts above.
+    local output=$(docker exec -i "$CID" container-entrypoint psql -tA <<EOF
+select count(*) from information_schema.tables where table_schema = 'public';
+select count(*) from information_schema.triggers;
+select count(*) from information_schema.routines where routine_name like 'film%';
+EOF
+)
+    test "$exp_output" = "$output"
+}
+
+# wait_for_postgres CID [TIMEOUT]
+# -------------------------------
+# Wait (up to TIMEOUT seconds) until the server in the CID container starts
+# accepting connections.
+wait_for_postgres ()
+{
+    local cid=$1 counter=0 timeout=${2-30}
+    while test "$counter" -lt "$timeout"; do
+        local output=$(docker exec -i "$cid" bash -c \
+            "psql -tA -c 'select 1;' 2>/dev/null </dev/null || :")
+        case $output in
+        1*) return ;;
+        "") ;;
+        *) echo "$output" ; false ;;
+        esac
+        sleep 1
+        counter=$(( counter + 1 ))
+    done
+}
+
+
+# version2number VERSION [DEPTH] [WIDTH]
+# --------------------------------------
+version2number ()
+{
+    local old_IFS=$IFS
+    local to_print= depth=${2-3} width=${3-2} sum=0 one_part
+    IFS='.'
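+    # With IFS set to '.', the unquoted expansion in 'set -- $1' splits the
+    # version string into positional parameters, one per component.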
+ set -- $1 + while test $depth -ge 1; do + depth=$(( depth - 1 )) + part=${1-0} ; shift || : + printf "%0${width}d" "$part" + done + IFS=$old_IFS +} + +# container_ip CONTAINER_ID +# ------------------------- +container_ip() +{ + docker inspect --format='{{.NetworkSettings.IPAddress}}' "$1" +} + +# vi: set ft=sh diff --git a/test/run b/test/run new file mode 120000 index 0000000..653222b --- /dev/null +++ b/test/run @@ -0,0 +1 @@ +run_test \ No newline at end of file diff --git a/test/run-openshift b/test/run-openshift new file mode 120000 index 0000000..d84575f --- /dev/null +++ b/test/run-openshift @@ -0,0 +1 @@ +run-openshift-local-cluster \ No newline at end of file diff --git a/test/run-openshift-local-cluster b/test/run-openshift-local-cluster new file mode 100755 index 0000000..79c592f --- /dev/null +++ b/test/run-openshift-local-cluster @@ -0,0 +1,454 @@ +#!/bin/bash +# +# Test the Postgresql image in OpenShift. +# +# IMAGE_NAME specifies a name of the candidate image used for testing. +# The image has to be available before this script is executed. +# + +THISDIR=$(dirname ${BASH_SOURCE[0]}) +TEMPLATES="$THISDIR/examples" +REMOTE_TEMPLATES="https://raw.githubusercontent.com/openshift/origin/master/examples/db-templates" + +source "$THISDIR"/pg-test-lib.sh +source "$THISDIR"/test-lib-openshift.sh +source "$THISDIR"/test-lib-postgresql.sh + +set -exo nounset + +test -n "${IMAGE_NAME-}" || false 'make sure $IMAGE_NAME is defined' +test -n "${VERSION-}" || false 'make sure $VERSION is defined' +test -n "${OS-}" || false 'make sure $OS is defined' + +# Populate template variables if not set already +if [ -z "${EPHEMERAL_TEMPLATES:-}" ]; then + EPHEMERAL_TEMPLATES=" +$REMOTE_TEMPLATES/postgresql-ephemeral-template.json +$TEMPLATES/postgresql-ephemeral-template.json" +fi + +if [ -z "${PERSISTENT_TEMPLATES:-}" ]; then + PERSISTENT_TEMPLATES=" +$REMOTE_TEMPLATES/postgresql-persistent-template.json +$TEMPLATES/postgresql-persistent-template.json" +fi + +function assert_cmd_fails() { + if eval "$@" &>/dev/null; then + echo " FAIL" + return 1 + fi + echo " PASS" + return 0 +} + +function insert_postgresql_data() { + local image_name=$1 ; shift + local user=$1 ; shift + local pass=$1 ; shift + local database=$1 ; shift + local pod_ip=$1; shift + + : "Inserting data into the database" + local cmd="PGPASSWORD=$pass psql -c \"CREATE TABLE testing (a integer); INSERT INTO testing VALUES (42);\"" + local cmd_args="-h $pod_ip -U $user -d $database" + docker run --rm "$image_name" bash -c "$cmd $cmd_args" +} + +function check_postgresql_data() { + local image_name=$1 ; shift + local user=$1 ; shift + local pass=$1 ; shift + local database=$1 ; shift + local pod_ip=$1; shift + local timeout=${1:-60} + SECONDS=0 + + : "Checking whether the data can be accessed" + local cmd="PGPASSWORD=$pass psql -c \"select * from testing;\"" + local cmd_args="-h $pod_ip -U $user -d $database" + while true ; do + result=$(docker run --rm "$image_name" bash -c "$cmd -At $cmd_args") + if [ "$result" = "42" ]; then + echo " PASS" + return 0 + fi + echo -n "." + [ $SECONDS -gt $timeout ] && break + sleep 3 + done + echo " FAIL" + return 1 +} + + +function check_postgresql_os_service_connection() { + local util_image_name=$1 ; shift + local service_name=$1 ; shift + local user=$1 ; shift + local pass=$1 ; shift + local database=$1 ; shift + local timeout=${1:-60} ; shift || : + local pod_ip=$(ct_os_get_service_ip ${service_name}) + + : " Service ${service_name} check ..." 
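+    # Poll the service with pg_isready from a throwaway client container until
+    # it reports 'accepting connections' or the timeout runs out.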
+ + local cmd="PGPASSWORD=${pass} pg_isready -t 15 -h ${pod_ip} -U ${user} -d ${database}" + local expected_value='accepting connections' + local output + local ret + SECONDS=0 + + echo -n "Waiting for ${service_name} service becoming ready ..." + while true ; do + output=$(docker run --rm ${util_image_name} bash -c "${cmd}" || :) + echo "${output}" | grep -qe "${expected_value}" && ret=0 || ret=1 + if [ ${ret} -eq 0 ] ; then + echo " PASS" + return 0 + fi + echo -n "." + [ ${SECONDS} -gt ${timeout} ] && break + sleep 3 + done + echo " FAIL" + return 1 +} + +function test_postgresql_pure_image() { + local image_name=$1 + local image_name_no_namespace=${image_name##*/} + local service_name=${image_name_no_namespace} + + ct_os_new_project + ct_os_upload_image "${image_name}" + # Create a specific imagestream tag for the image so that oc cannot use anything else + ct_os_upload_image "${image_name}" "$image_name_no_namespace:testing" + + ct_os_deploy_pure_image "$image_name_no_namespace:testing" \ + --name "${service_name}" \ + --env POSTGRESQL_ADMIN_PASSWORD=test + + ct_os_wait_pod_ready "${service_name}" 60 + check_postgresql_os_service_connection "${image_name}" "${service_name}" postgres test postgres + + ct_os_delete_project +} + +function test_postgresql_template() { + local image_name=$1; shift + local template=$1 + local image_name_no_namespace=${image_name##*/} + local service_name=${image_name_no_namespace} + + ct_os_new_project + ct_os_upload_image "${image_name}" "postgresql:$VERSION" + + ct_os_deploy_template_image "$template" \ + NAMESPACE="$(oc project -q)" \ + POSTGRESQL_VERSION="$VERSION" \ + DATABASE_SERVICE_NAME="${service_name}" \ + POSTGRESQL_USER=testu \ + POSTGRESQL_PASSWORD=testp \ + POSTGRESQL_DATABASE=testdb + + ct_os_wait_pod_ready "${service_name}" 60 + check_postgresql_os_service_connection "${image_name}" "${service_name}" testu testp testdb + + ct_os_delete_project +} + +function test_postgresql_update() { + local image_name=$1; shift + local template=$1 + local image_name_no_registry=${image_name#*/} + local service_name=${image_name_no_registry#*/} + local user="testu" pass="testp" db="testdb" + local registry="" old_image="" pod_ip="" + local version released=: + + old_image=$(get_image_id "$VERSION:remote") + + for version in $NOT_RELEASED_VERSIONS; do + case $image_name in + *$version*) + released=false + break + ;; + esac + done + + if docker pull "$old_image" 2>/dev/null; then + # Check if we do not have a stale unreleased versions list + # Fail only on rhel, on centos the image is likely already released + $released || [ "$OS" = "centos7" ] + elif $released; then + false "image '$old_image' should already be available" + else + return # not yet released image, skip + fi + + + ct_os_new_project + ct_os_upload_image "$old_image" "postgresql:$VERSION" + ct_os_deploy_template_image "$template" \ + NAMESPACE="$(oc project -q)" \ + POSTGRESQL_VERSION="$VERSION" \ + DATABASE_SERVICE_NAME="$service_name" \ + POSTGRESQL_USER="$user" \ + POSTGRESQL_PASSWORD="$pass" \ + POSTGRESQL_DATABASE="$db" + + ct_os_wait_pod_ready "${service_name}" 60 + check_postgresql_os_service_connection "$image_name" "$service_name" "$user" "$pass" "$db" + + pod_ip=$(ct_os_get_service_ip "$service_name") + insert_postgresql_data "$image_name" "$user" "$pass" "$db" "$pod_ip" + + ct_os_upload_image "$image_name" "postgresql:$VERSION" + : "Waiting for a few seconds while the pods get restarted" + sleep 5 + + ct_os_wait_pod_ready "$service_name" 60 + check_postgresql_os_service_connection 
"$image_name" "$service_name" "$user" "$pass" "$db" + + check_postgresql_data "$image_name" "$user" "$pass" "$db" "$pod_ip" + ct_os_delete_project +} + +function test_postgresql_replication() { + local image_name=$1 + local image_name_no_namespace=${image_name##*/} + local master_service_name=${image_name_no_namespace}-master + local slave_service_name=${image_name_no_namespace}-slave + local istag="postgresql:$VERSION" + local user="testu" pass="testp" db="testdb" + local master_name="" master_ip="" slave_name="" slave_ip="" + + ct_os_new_project + ct_os_upload_image "${image_name}" "$istag" + + ct_os_deploy_template_image "$TEMPLATES/replica/postgresql_replica.json" \ + NAMESPACE="$(oc project -q)" \ + IMAGESTREAMTAG="$istag" \ + POSTGRESQL_MASTER_SERVICE_NAME="$master_service_name" \ + POSTGRESQL_SLAVE_SERVICE_NAME="$slave_service_name" \ + POSTGRESQL_USER=testu \ + POSTGRESQL_PASSWORD=testp \ + POSTGRESQL_DATABASE=testdb + + ct_os_wait_pod_ready "$master_service_name" 60 + ct_os_wait_pod_ready "$slave_service_name" 60 + + # Force unused rc removal as we do not need rollbacks during testing + oc patch "dc/$master_service_name" -p '{"spec":{"revisionHistoryLimit":0}}' + oc patch "dc/$slave_service_name" -p '{"spec":{"revisionHistoryLimit":0}}' + + master_name=$(ct_os_get_pod_name "$master_service_name") + slave_name=$(ct_os_get_pod_name "$slave_service_name") + master_ip=$(ct_os_get_pod_ip "$master_name") + slave_ip=$(ct_os_get_pod_ip "$slave_name") + insert_postgresql_data "$image_name" "$user" "$pass" "$db" "$master_ip" + check_postgresql_data "$image_name" "$user" "$pass" "$db" "$slave_ip" + + : "Changing POSTGRESQL_PASSWORD for master and slave" + pass=redhat + oc set env "dc/$master_service_name" -e POSTGRESQL_PASSWORD="$pass" + oc set env "dc/$slave_service_name" -e POSTGRESQL_PASSWORD="$pass" + ct_os_wait_pod_ready "$master_service_name-2" 60 + ct_os_wait_pod_ready "$slave_service_name-2" 60 + # We need to get new pod names and IPs + master_name=$(ct_os_get_pod_name "$master_service_name") + slave_name=$(ct_os_get_pod_name "$slave_service_name") + master_ip=$(ct_os_get_pod_ip "$master_name") + slave_ip=$(ct_os_get_pod_ip "$slave_name") + check_postgresql_data "$image_name" "$user" "$pass" "$db" "$master_ip" + check_postgresql_data "$image_name" "$user" "$pass" "$db" "$slave_ip" + + : "Redeploying slave node" + oc rollout latest "$slave_service_name" + ct_os_wait_pod_ready "$slave_service_name-3" 60 + slave_name=$(ct_os_get_pod_name "$slave_service_name") + slave_ip=$(ct_os_get_pod_ip "$slave_name") + check_postgresql_data "$image_name" "$user" "$pass" "$db" "$slave_ip" + + : "Scaling slaves to 2" + oc scale --replicas 2 "dc/$slave_service_name" + ct_os_wait_rc_ready "$slave_service_name" 60 + slave_name=$(ct_os_get_pod_name "$slave_service_name") + for slave in $slave_name; do + ct_os_wait_pod_ready "$slave" 60 + slave_ip=$(ct_os_get_pod_ip "$slave") + check_postgresql_data "$image_name" "$user" "$pass" "$db" "$slave_ip" + done + + ct_os_delete_project +} + +function test_postgresql_persistent_redeploy() { + local image_name=$1; shift + local template=$1 + local image_name_no_namespace=${image_name##*/} + local service_name=$image_name_no_namespace + local user="testu" pass="testp" db="testdb" + local registry="" old_image="" pod_ip="" + + ct_os_new_project + ct_os_upload_image "$image_name" "postgresql:$VERSION" + + ct_os_deploy_template_image "$template" \ + NAMESPACE="$(oc project -q)" \ + POSTGRESQL_VERSION="$VERSION" \ + DATABASE_SERVICE_NAME="$service_name" \ + 
POSTGRESQL_USER="$user" \ + POSTGRESQL_PASSWORD="$pass" \ + POSTGRESQL_DATABASE="$db" + + ct_os_wait_pod_ready "$service_name" 60 + check_postgresql_os_service_connection "$image_name" "$service_name" testu testp testdb + + pod_ip=$(ct_os_get_service_ip "$service_name") + insert_postgresql_data "$image_name" "$user" "$pass" "$db" "$pod_ip" + + : "Redeploying pod" + oc rollout latest "$service_name" + : "Waiting for a few seconds while the pod gets restarted" + sleep 5 + + ct_os_wait_pod_ready "$service_name" 60 + check_postgresql_os_service_connection "$image_name" "$service_name" "$user" "$pass" "$db" + + : "This should succeed" + check_postgresql_data "$image_name" "$user" "$pass" "$db" "$pod_ip" 0 + ct_os_delete_project +} + +function test_postgresql_ephemeral_redeploy() { + local image_name=$1; shift + local template=$1 + local image_name_no_namespace=${image_name##*/} + local service_name=$image_name_no_namespace + local user="testu" pass="testp" db="testdb" + local registry="" old_image="" pod_ip="" + + ct_os_new_project + ct_os_upload_image "$image_name" "postgresql:$VERSION" + + ct_os_deploy_template_image "$template" \ + NAMESPACE="$(oc project -q)" \ + POSTGRESQL_VERSION="$VERSION" \ + DATABASE_SERVICE_NAME="$service_name" \ + POSTGRESQL_USER="$user" \ + POSTGRESQL_PASSWORD="$pass" \ + POSTGRESQL_DATABASE="$db" + + ct_os_wait_pod_ready "$service_name" 60 + check_postgresql_os_service_connection "$image_name" "$service_name" testu testp testdb + + pod_ip=$(ct_os_get_service_ip "$service_name") + insert_postgresql_data "$image_name" "$user" "$pass" "$db" "$pod_ip" + + : "Redeploying pod" + oc rollout latest "$service_name" + : "Waiting for a few seconds while the pod gets restarted" + sleep 5 + + ct_os_wait_pod_ready "$service_name" 60 + check_postgresql_os_service_connection "$image_name" "$service_name" "$user" "$pass" "$db" + + : "This should fail" + assert_cmd_fails check_postgresql_data "$image_name" "$user" "$pass" "$db" "$pod_ip" 0 + ct_os_delete_project +} + +function test_postgresql_configmap_start() { + local image_name=$1; shift + local template="$TEMPLATES/postgresql-ephemeral-template.json" + local image_name_no_namespace=${image_name##*/} + local service_name=$image_name_no_namespace + local user="testu" pass="testp" db="testdb" + local registry="" old_image="" pod_ip="" tmpdir="" + local test_string="" + + ct_os_new_project + ct_os_upload_image "$image_name" "postgresql:$VERSION" + + ct_os_deploy_template_image "$template" \ + NAMESPACE="$(oc project -q)" \ + POSTGRESQL_VERSION="$VERSION" \ + DATABASE_SERVICE_NAME="$service_name" \ + POSTGRESQL_USER="$user" \ + POSTGRESQL_PASSWORD="$pass" \ + POSTGRESQL_DATABASE="$db" + + ct_os_wait_pod_ready "$service_name" 60 + check_postgresql_os_service_connection "$image_name" "$service_name" testu testp testdb + + # Create a simple configMap with a start.sh script + tmpdir=$(mktemp -d) + test_string="Start is working" + echo "echo $test_string" >> "$tmpdir/start.sh" + oc create configmap postgresql-start --from-file="$tmpdir/start.sh" + oc set volume "dc/$service_name" --add -t configmap --configmap-name=postgresql-start -m /opt/app-root/src/postgresql-start --name postgresql-start + + # Wait for redeploy + ct_os_wait_pod_ready "$service_name-2" 60 + check_postgresql_os_service_connection "$image_name" "$service_name" testu testp testdb + + # Check logs for the test string + oc logs "$(ct_os_get_pod_name "$service_name")" | grep "$test_string" + + ct_os_delete_project +} + +function run_ephemeral_tests() { + local image_name=$1 
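+    # For every ephemeral template: data must NOT survive a pod redeploy (the
+    # storage is an emptyDir), and a plain template deployment must work.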
+ for template in $EPHEMERAL_TEMPLATES; do
+ test_postgresql_ephemeral_redeploy "$image_name" "$template"
+ test_postgresql_template "$image_name" "$template"
+ done
+}
+
+function run_persistent_tests() {
+ local image_name=$1
+ for template in $PERSISTENT_TEMPLATES; do
+ test_postgresql_persistent_redeploy "$image_name" "$template"
+ test_postgresql_update "$image_name" "$template"
+ done
+}
+
+ct_os_cluster_up
+# Print oc logs on failure
+ct_os_enable_print_logs
+
+test_postgresql_pure_image "$IMAGE_NAME"
+test_postgresql_replication "$IMAGE_NAME"
+run_ephemeral_tests "$IMAGE_NAME"
+run_persistent_tests "$IMAGE_NAME"
+test_postgresql_configmap_start "$IMAGE_NAME"
+
+# test with the just built image and an integrated template
+test_postgresql_integration "${IMAGE_NAME}" "${VERSION}" postgresql
+
+# test with a released image and an integrated template
+# ignore possible failure of this test for centos images
+fail_not_released=true
+if [ "${OS}" == "rhel7" ] ; then
+ PUBLIC_IMAGE_NAME=${PUBLIC_IMAGE_NAME:-${REGISTRY:-registry.redhat.io/}rhscl/${BASE_IMAGE_NAME}-${VERSION//./}-rhel7}
+else
+ PUBLIC_IMAGE_NAME=${PUBLIC_IMAGE_NAME:-${REGISTRY:-}centos/${BASE_IMAGE_NAME}-${VERSION//./}-centos7}
+ fail_not_released=false
+fi
+
+export CT_SKIP_UPLOAD_IMAGE=true
+# Try pulling the image first to see if it is accessible
+if docker pull "${PUBLIC_IMAGE_NAME}"; then
+ test_postgresql_integration postgresql "${VERSION}" "${PUBLIC_IMAGE_NAME}"
+else
+ echo "Warning: ${PUBLIC_IMAGE_NAME} could not be downloaded via 'docker'"
+ ! $fail_not_released || false "ERROR: Failed to pull image"
+fi
+
+OS_TESTSUITE_RESULT=0
+
+ct_os_cluster_down
diff --git a/test/run-openshift-remote-cluster b/test/run-openshift-remote-cluster
new file mode 100755
index 0000000..447d4bf
--- /dev/null
+++ b/test/run-openshift-remote-cluster
@@ -0,0 +1,30 @@
+#!/bin/bash
+#
+# Test the PostgreSQL image in OpenShift (remote cluster)
+#
+# IMAGE_NAME specifies the name of the candidate image used for testing.
+# The image has to be available before this script is executed.
+# VERSION specifies the PostgreSQL major version in the X.Y format
+# OS specifies RHEL version (e.g. OS=rhel7)
+#
+
+THISDIR=$(dirname ${BASH_SOURCE[0]})
+
+source ${THISDIR}/test-lib-postgresql.sh
+
+set -eo nounset
+
+trap ct_os_cleanup EXIT SIGINT
+
+ct_os_check_compulsory_vars
+
+oc status || false "It looks like oc is not properly logged in."
+
+export CT_SKIP_NEW_PROJECT=true
+export CT_SKIP_UPLOAD_IMAGE=true
+export CT_NAMESPACE=openshift
+
+test_postgresql_integration postgresql ${VERSION} "${IMAGE_NAME}"
+
+OS_TESTSUITE_RESULT=0
+
diff --git a/test/run_migration_test b/test/run_migration_test
new file mode 100755
index 0000000..654e56e
--- /dev/null
+++ b/test/run_migration_test
@@ -0,0 +1,56 @@
+#! /bin/bash
+
+set -e
+. 
test/pg-test-lib.sh
+ADMIN_PASSWORD=redhat
+
+test $# -eq 2 || error "two args expected: $0 FROM TO"
+
+cleanup()
+{
+ set +e
+ set -- $container_from $container_to
+ if test $# -gt 0; then
+ docker stop "$@" >/dev/null
+ docker rm -f "$@"
+ fi
+}
+trap cleanup EXIT
+
+from=$1
+to=$2
+image_from=$(get_image_id "$from")
+image_to=$(get_image_id "$to")
+
+assert_migration_succeeds ()
+{
+ info "starting PostgreSQL server v$from"
+ container_from=$(docker run -e POSTGRESQL_ADMIN_PASSWORD="$ADMIN_PASSWORD" -d "$image_from")
+ wait_for_postgres "$container_from"
+
+ eval "CID=\$container_from data_${1}_create"
+ eval "CID=\$container_from data_${1}_check"
+
+ ip=$(container_ip "$container_from")
+
+ info "starting new PostgreSQL server v$to with migration options"
+ container_to=$(docker run \
+ -e POSTGRESQL_MIGRATION_REMOTE_HOST="$ip" \
+ -e POSTGRESQL_MIGRATION_ADMIN_PASSWORD="$ADMIN_PASSWORD" \
+ -d "$image_to")
+
+ # Prolong the waiting time here a bit, since both dump and restore are done
+ # in an uncertain environment (usually both on the same hardware).
+ wait_for_postgres "$container_to" 100
+
+ info "check that the migration passed"
+ eval "CID=\$container_to data_${1}_check"
+
+ docker stop "$container_from"
+ docker rm -f "$container_from"
+ docker stop "$container_to"
+ docker rm -f "$container_to"
+ container_from= container_to=
+}
+
+assert_migration_succeeds pagila
diff --git a/test/run_test b/test/run_test
new file mode 100755
index 0000000..2c68730
--- /dev/null
+++ b/test/run_test
@@ -0,0 +1,912 @@
+#!/bin/bash
+#
+# Test the PostgreSQL image.
+#
+# IMAGE_NAME specifies the name of the candidate image used for testing.
+# The image has to be available before this script is executed.
+#
+
+set -exo nounset
+shopt -s nullglob
+
+# library from container-common-scripts
+. test/test-lib.sh
+
+# local library
+. test/pg-test-lib.sh
+
+TEST_LIST="\
+run_container_creation_tests
+run_general_tests
+run_change_password_test
+run_replication_test
+run_master_restart_test
+run_doc_test
+run_s2i_test
+run_test_cfg_hook
+run_s2i_bake_data_test
+run_s2i_enable_ssl_test
+run_upgrade_test
+run_migration_test
+run_pgaudit_test
+"
+
+test $# -eq 1 -a "${1-}" == --list && echo "$TEST_LIST" && exit 0
+test -n "${IMAGE_NAME-}" || false 'make sure $IMAGE_NAME is defined'
+test -n "${VERSION-}" || false 'make sure $VERSION is defined'
+test -n "${OS-}" || false 'make sure $OS is defined'
+
+CIDFILE_DIR=$(mktemp --suffix=postgresql_test_cidfiles -d)
+
+volumes_to_clean=
+images_to_clean=()
+files_to_clean=
+test_dir="$(readlink -f "$(dirname "$0")")"
+test_short_summary=''
+TESTSUITE_RESULT=1
+
+_cleanup_commands_space=
+_cleanup_commands=
+
+add_cleanup_command ()
+{
+ local cmd= space=
+ for arg; do
+ cmd+="$space$(printf "%q" "$arg")"
+ space=' '
+ done
+ _cleanup_commands+="$_cleanup_commands_space$cmd"
+ _cleanup_commands_space='
+'
+}
+function cleanup() {
+ for cidfile in $CIDFILE_DIR/* ; do
+ CONTAINER=$(cat $cidfile)
+
+ echo "Stopping and removing container $CONTAINER..."
+ docker stop $CONTAINER
+ exit_status=$(docker inspect -f '{{.State.ExitCode}}' $CONTAINER)
+ if [ "$exit_status" != "0" ]; then
+ echo "Dumping logs for $CONTAINER"
+ docker logs $CONTAINER
+ fi
+ docker rm $CONTAINER
+ rm $cidfile
+ echo "Done." 
+ done
+ rmdir $CIDFILE_DIR
+
+ ct_path_foreach "$volumes_to_clean" cleanup_volume_dir
+
+ if test -n "${images_to_clean-}"; then
+ # Workaround for RHEL 7 bash bug:
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1636393
+ for image in "${images_to_clean[@]}"; do
+ docker rmi -f "$image"
+ done
+ fi
+
+ ct_path_foreach "$files_to_clean" rm
+
+ echo "$_cleanup_commands" | while read -r line; do
+ eval "$line"
+ done
+
+ echo "$test_short_summary"
+
+ if [ $TESTSUITE_RESULT -eq 0 ] ; then
+ echo "Tests for ${IMAGE_NAME} succeeded."
+ else
+ echo "Tests for ${IMAGE_NAME} failed."
+ fi
+}
+trap cleanup EXIT
+
+cleanup_volume_dir ()
+{
+ test ! -d "$1" && : "WARN: $1 was already cleaned for some reason" && return 0
+ # When we run this test script as non-root (which we probably should), the
+ # PostgreSQL server within the container still runs under the 'postgres'
+ # user. Taking into account the 0077 umask of the PostgreSQL server, we are
+ # then unable to remove files created by the server. That's why we need to
+ # let docker escalate the privileges again.
+ local datadir=/var/lib/pgsql/data
+ docker run -v "$1:$datadir:Z" --rm "$IMAGE_NAME" /bin/sh -c "/bin/rm -rf $datadir/userdata"
+ rmdir "$1"
+}
+
+function get_cid() {
+ local id="$1" ; shift || return 1
+ echo $(cat "$CIDFILE_DIR/$id")
+}
+
+function get_container_ip() {
+ local id="$1" ; shift
+ docker inspect --format='{{.NetworkSettings.IPAddress}}' $(get_cid "$id")
+}
+
+function get_ip_from_cid() {
+ local cid="$1"; shift
+ docker inspect --format='{{.NetworkSettings.IPAddress}}' $cid
+}
+
+function postgresql_cmd() {
+ docker run --rm -e PGPASSWORD="$PASS" "$IMAGE_NAME" psql "postgresql://$PGUSER@$CONTAINER_IP:5432/${DB-db}" "$@"
+}
+
+function test_connection() {
+ local name=$1 ; shift
+ ip=$(get_container_ip $name)
+ echo " Testing PostgreSQL connection to $ip..."
+ local max_attempts=20
+ local sleep_time=2
+ for i in $(seq $max_attempts); do
+ echo " Trying to connect..."
+ set +e
+ # Don't let the code come here if neither user nor admin is able to
+ # connect.
+ if [ -v PGUSER ] && [ -v PASS ]; then
+ CONTAINER_IP=$ip postgresql_cmd <<< "SELECT 1;"
+ else
+ PGUSER=postgres PASS=$ADMIN_PASS CONTAINER_IP=$ip DB=postgres postgresql_cmd <<< "SELECT 1;"
+ fi
+ status=$?
+ set -e
+ if [ $status -eq 0 ]; then
+ echo " Success!"
+ return 0
+ fi
+ sleep $sleep_time
+ done
+ return 1
+}
+
+function test_postgresql() {
+ echo " Testing PostgreSQL"
+ postgresql_cmd <<< "CREATE EXTENSION \"uuid-ossp\";" # to test contrib package; extension names are identifiers, so double quotes
+ postgresql_cmd <<< "CREATE TABLE tbl (col1 VARCHAR(20), col2 VARCHAR(20));"
+ postgresql_cmd <<< "INSERT INTO tbl VALUES ('foo1', 'bar1');"
+ postgresql_cmd <<< "INSERT INTO tbl VALUES ('foo2', 'bar2');"
+ postgresql_cmd <<< "INSERT INTO tbl VALUES ('foo3', 'bar3');"
+ postgresql_cmd <<< "SELECT * FROM tbl;"
+ #postgresql_cmd <<< "DROP TABLE tbl;"
+ echo " Success!"
+}
+
+function create_container() {
+ local name=$1 ; shift
+ local cargs=${DOCKER_ARGS:-}
+ # TODO: fix all create_container() invocations so that we don't need this,
+ # e.g. 
a multiline DOCKER_ARGS variable should end with trailing backslashes
+ cargs=$(echo "$cargs" | tr '\n' ' ')
+ cidfile="$CIDFILE_DIR/$name"
+ # create container with a cidfile in a directory for cleanup
+ eval "docker run $cargs --cidfile \$cidfile -d \$IMAGE_NAME \"\$@\""
+ echo "Created container $(cat $cidfile)"
+}
+
+
+create_volume_dir ()
+{
+ volume_dir=`mktemp -d --tmpdir pg-testdata.XXXXX`
+ setfacl -m u:26:-wx "$volume_dir"
+ ct_path_append volumes_to_clean "$volume_dir"
+}
+
+
+create_temp_file ()
+{
+ temp_file=`mktemp --tmpdir pg-testfile.XXXXX`
+ setfacl -m u:26:rw- "$temp_file"
+ ct_path_append files_to_clean "$temp_file"
+}
+
+
+function assert_login_access() {
+ local PGUSER=$1 ; shift
+ local PASS=$1 ; shift
+ local success=$1 ; shift
+
+ echo "testing login as $PGUSER:$PASS; should_success=$success"
+
+ if postgresql_cmd <<<'SELECT 1;' ; then
+ if $success ; then
+ echo " $PGUSER($PASS) access granted as expected"
+ return
+ fi
+ else
+ if ! $success ; then
+ echo " $PGUSER($PASS) access denied as expected"
+ return
+ fi
+ fi
+ echo " $PGUSER($PASS) login assertion failed"
+ exit 1
+}
+
+function assert_local_access() {
+ local id="$1" ; shift
+ docker exec -i $(get_cid "$id") bash -c psql <<< "SELECT 1;"
+}
+
+
+# Make sure the invocation of docker run fails.
+function assert_container_creation_fails() {
+
+ # Time the docker run command. It should fail. If it doesn't fail,
+ # postgresql will keep running so we kill it with SIGKILL to make sure
+ # timeout returns a non-zero value.
+ set +e
+ timeout -s 9 --preserve-status 60s docker run --rm "$@" $IMAGE_NAME
+ ret=$?
+ set -e
+
+ # Timeout will exit with a high number.
+ if [ $ret -gt 30 ]; then
+ return 1
+ fi
+}
+
+
+# assert_container_creation_succeeds NAME [ARGS]
+# ----------------------------------------------
+# Check that 'docker run' with IMAGE_NAME succeeds with the docker arguments
+# specified as ARGS. 
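+# Example usage (illustrative; mirrors the calls in run_container_creation_tests
+# below):
+#   assert_container_creation_succeeds -e POSTGRESQL_USER=user -e POSTGRESQL_PASSWORD=pass -e POSTGRESQL_DATABASE=db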
+assert_container_creation_succeeds () +{ + local check_env=false + local name=pg-success-"$(ct_random_string)" + local PGUSER='' PGPASS='' DB='' ADMIN_PASS= + local docker_args= + + for arg; do + docker_args+=" $(printf "%q" "$arg")" + if $check_env; then + local env=${arg//=*/} + local val=${arg//$env=/} + case $env in + POSTGRESQL_ADMIN_PASSWORD) ADMIN_PASS=$val ;; + POSTGRESQL_USER) PGUSER=$val ;; + POSTGRESQL_PASSWORD) PGPASS=$val ;; + POSTGRESQL_DATABASE) DB=$val ;; + esac + check_env=false + elif test "$arg" = -e; then + check_env=: + fi + done + + DOCKER_ARGS=$docker_args create_container "$name" + + if test -n "$PGUSER" && test -n "$PGPASS"; then + PGUSER=$PGUSER PASS=$PGPASS DB=$DB test_connection "$name" + fi + + if test -n "$ADMIN_PASS"; then + PGUSER=postgres PASS=$ADMIN_PASS DB=$DB test_connection "$name" + fi + + docker stop "$(get_cid "$name")" +} + + +function try_image_invalid_combinations() { + assert_container_creation_fails -e POSTGRESQL_USER=user -e POSTGRESQL_PASSWORD=pass "$@" + assert_container_creation_fails -e POSTGRESQL_USER=user -e POSTGRESQL_DATABASE=db "$@" + assert_container_creation_fails -e POSTGRESQL_PASSWORD=pass -e POSTGRESQL_DATABASE=db "$@" +} + +function run_container_creation_tests() { + echo " Testing image entrypoint usage" + try_image_invalid_combinations + try_image_invalid_combinations -e POSTGRESQL_ADMIN_PASSWORD=admin_pass + + VERY_LONG_IDENTIFIER="very_long_identifier_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + assert_container_creation_fails -e POSTGRESQL_USER= -e POSTGRESQL_PASSWORD=pass -e POSTGRESQL_DATABASE=db -e POSTGRESQL_ADMIN_PASSWORD=admin_pass + assert_container_creation_fails -e POSTGRESQL_USER=$VERY_LONG_IDENTIFIER -e POSTGRESQL_PASSWORD=pass -e POSTGRESQL_DATABASE=db -e POSTGRESQL_ADMIN_PASSWORD=admin_pass + assert_container_creation_succeeds -e POSTGRESQL_USER=user -e POSTGRESQL_PASSWORD="\"" -e POSTGRESQL_DATABASE=db -e POSTGRESQL_ADMIN_PASSWORD=admin_pass + assert_container_creation_succeeds -e POSTGRESQL_USER=user -e POSTGRESQL_PASSWORD=pass -e POSTGRESQL_DATABASE=9invalid -e POSTGRESQL_ADMIN_PASSWORD=admin_pass + assert_container_creation_fails -e POSTGRESQL_USER=user -e POSTGRESQL_PASSWORD=pass -e POSTGRESQL_DATABASE=$VERY_LONG_IDENTIFIER -e POSTGRESQL_ADMIN_PASSWORD=admin_pass + assert_container_creation_succeeds -e POSTGRESQL_USER=user -e POSTGRESQL_PASSWORD=pass -e POSTGRESQL_DATABASE=db -e POSTGRESQL_ADMIN_PASSWORD="\"" + echo " Success!" + + assert_container_creation_succeeds -e POSTGRESQL_ADMIN_PASSWORD="the @password" + assert_container_creation_succeeds -e POSTGRESQL_PASSWORD="the pass" -e POSTGRESQL_USER="the user" -e POSTGRESQL_DATABASE="the db" +} + +function test_config_option() { + local name=$1 ; shift + local setting=$1 ; shift + local value=$1 ; shift + + docker exec $(get_cid ${name}) grep -q "${setting} = ${value}" /var/lib/pgsql/openshift-custom-postgresql.conf +} + + +# wait_ready +# ---------- +# Wait until the PG container becomes ready +wait_ready () +{ + while ! 
docker exec "$(get_cid "$1")" /usr/libexec/check-container ; do + sleep 1 + done +} + + +# assert_runtime_option option value +# ---------------------------------- +assert_runtime_option () +{ + local name=$1 option=$2 value=$3 + wait_ready "$name" + set -- $(docker exec "$(get_cid "$name")" bash -c "psql -tA -c 'SHOW $option;'") + test "$value" = "$1" +} + + +function run_configuration_tests() { + local name=$1 ; shift + echo " Testing image configuration settings" + test_config_option ${name} max_connections ${POSTGRESQL_MAX_CONNECTIONS} + test_config_option ${name} max_prepared_transactions ${POSTGRESQL_MAX_PREPARED_TRANSACTIONS} + test_config_option ${name} shared_buffers ${POSTGRESQL_SHARED_BUFFERS} + echo " Success!" +} + +test_scl_usage() { + local name="$1" + local run_cmd="$2" + local expected="$3" + + echo " Testing the image SCL enable" + out=$(docker run --rm ${IMAGE_NAME} /bin/bash -c "${run_cmd}") + if ! echo "${out}" | grep -q "${expected}"; then + echo "ERROR[/bin/bash -c "${run_cmd}"] Expected '${expected}', got '${out}'" + return 1 + fi + out=$(docker exec $(get_cid $name) /bin/bash -c "${run_cmd}" 2>&1) + if ! echo "${out}" | grep -q "${expected}"; then + echo "ERROR[exec /bin/bash -c "${run_cmd}"] Expected '${expected}', got '${out}'" + return 1 + fi + out=$(docker exec $(get_cid $name) /bin/sh -ic "${run_cmd}" 2>&1) + if ! echo "${out}" | grep -q "${expected}"; then + echo "ERROR[exec /bin/sh -ic "${run_cmd}"] Expected '${expected}', got '${out}'" + return 1 + fi +} + + +function run_tests() { + echo " Testing general usage (run_tests) with '$1' as argument" + local name=$1 ; shift + + user_login=false + admin_login=false + envs= + # NOTE: We work wrongly with variables so please don't try to pass spaces + # within PGUSER/PASS/ADMIN_PASS variables. + [ -v PGUSER ] && envs+=" -e POSTGRESQL_USER=$PGUSER" + [ -v PASS ] && envs+=" -e POSTGRESQL_PASSWORD=$PASS" + if [ -v PGUSER ] && [ -v PASS ]; then + envs+=" -e POSTGRESQL_DATABASE=db" + user_login=: + fi + + if [ -v ADMIN_PASS ]; then + envs="$envs -e POSTGRESQL_ADMIN_PASSWORD=$ADMIN_PASS" + admin_login=: + fi + if [ -v POSTGRESQL_MAX_CONNECTIONS ]; then + envs="$envs -e POSTGRESQL_MAX_CONNECTIONS=$POSTGRESQL_MAX_CONNECTIONS" + fi + if [ -v POSTGRESQL_MAX_PREPARED_TRANSACTIONS ]; then + envs="$envs -e POSTGRESQL_MAX_PREPARED_TRANSACTIONS=$POSTGRESQL_MAX_PREPARED_TRANSACTIONS" + fi + if [ -v POSTGRESQL_SHARED_BUFFERS ]; then + envs="$envs -e POSTGRESQL_SHARED_BUFFERS=$POSTGRESQL_SHARED_BUFFERS" + fi + DOCKER_ARGS="${DOCKER_ARGS:-} $envs" create_container $name + CONTAINER_IP=$(get_container_ip $name) + test_connection $name + echo " Testing scl usage" + test_scl_usage $name 'psql --version' "$VERSION" + + echo " Testing login accesses" + assert_login_access "${PGUSER:-}" "${PASS-}" "$user_login" + assert_login_access "${PGUSER:-}" "${PASS-}_foo" false + + assert_login_access postgres "${ADMIN_PASS-}" "$admin_login" + assert_login_access postgres "${ADMIN_PASS-}_foo" false + + assert_local_access $name + run_configuration_tests $name + echo " Success!" 
+ + if $user_login; then + test_postgresql $name + fi + + if $admin_login; then + DB=postgres PGUSER=postgres PASS=$ADMIN_PASS test_postgresql $name + fi +} + +function run_slave() { + local suffix="$1"; shift + docker run $cluster_args -e POSTGRESQL_MASTER_IP=${master_hostname} \ + -d --cidfile ${CIDFILE_DIR}/slave-${suffix}.cid $IMAGE_NAME run-postgresql-slave +} + +function run_master() { + local suffix="$1"; shift + master_args=${master_args-} + docker run $cluster_args $master_args \ + -d --cidfile ${CIDFILE_DIR}/master-${suffix}.cid $IMAGE_NAME run-postgresql-master >/dev/null +} + +function test_slave_visibility() { + local max_attempts=30 + + for slave in $slave_cids; do + slave_ip=$(get_ip_from_cid $slave) + if [ -z "$slave_ip" ]; then + echo "Failed to get IP for slave $slave." + echo "Dumping logs for $slave" + docker logs "$slave" + return 1 + fi + for i in $(seq $max_attempts); do + result="$(postgresql_cmd -c "select client_addr from pg_stat_replication;" | grep "$slave_ip" || true)" + if [[ -n "${result}" ]]; then + echo "${slave_ip} successfully registered as SLAVE for ${master_ip}" + break + fi + if [[ "${i}" == "${max_attempts}" ]]; then + echo "The ${slave_ip} failed to register in MASTER" + echo "Dumping logs for $slave" + docker logs $slave + return 1 + fi + sleep 1 + done + done +} + +function test_value_replication() { + local max_attempts=30 + + # Setup the replication data + local value + value=24 + postgresql_cmd -c "CREATE TABLE $table_name (a integer); INSERT INTO $table_name VALUES ($value);" + + # Read value from slaves and check whether it is expected + for slave in $slave_cids; do + slave_ip=$(get_ip_from_cid $slave) + CONTAINER_IP=$slave_ip + for i in $(seq $max_attempts); do + result="$(postgresql_cmd -At -c "select * from $table_name" || :)" + if [[ "$result" == "$value" ]]; then + echo "${slave_ip} successfully got value from MASTER ${master_ip}" + break + fi + if [[ "${i}" == "${max_attempts}" ]]; then + echo "The ${slave_ip} failed to see value added on MASTER" + echo "Dumping logs for $slave" + docker logs $slave + return 1 + fi + sleep 1 + done + done +} + +function setup_replication_cluster() { + # Run the PostgreSQL master + run_master "$cid_suffix" + + # Run the PostgreSQL slaves + local i + master_ip=$(get_container_ip "master-$cid_suffix.cid") + local cluster_args="$cluster_args --add-host postgresql-master:$master_ip" + local master_hostname="postgresql-master" + for i in $(seq ${slave_num:-1}); do + slave_cids="$slave_cids $(run_slave $cid_suffix-$i)" + done + +} + +function run_master_restart_test() { + local DB=postgres + local PGUSER=master + local PASS=master + + echo "Testing failed master restart" + local cluster_args="-e POSTGRESQL_ADMIN_PASSWORD=pass -e POSTGRESQL_MASTER_USER=$PGUSER -e POSTGRESQL_MASTER_PASSWORD=$PASS" + local cid_suffix="mrestart" + local table_name="t1" + local master_ip= + local slave_cids= + + create_volume_dir + local master_args="-v ${volume_dir}:/var/lib/pgsql/data:Z" + + # Setup the cluster + slave_num=2 setup_replication_cluster + + # Check if the master knows about the slaves + CONTAINER_IP=$master_ip + test_slave_visibility + + echo "Kill the master and create a new one" + local cidfile=$CIDFILE_DIR/master-$cid_suffix.cid + docker kill $(cat $cidfile) + # Don't forget to remove its .cid file + rm $cidfile + + run_master $cid_suffix + CONTAINER_IP=$(get_container_ip master-$cid_suffix.cid) + + # Update master_ip in slaves + for slave in $slave_cids; do + docker exec -u 0 $slave bash -c "sed 
\"s/$master_ip/$CONTAINER_IP/g\" /etc/hosts >/tmp/hosts && cp /tmp/hosts /etc/hosts" + done + master_ip=$CONTAINER_IP + # Check if the new master sees existing slaves + test_slave_visibility + + # Check if the replication works + table_name="t1" test_value_replication +} + +function run_replication_test() { + local DB=postgres + local PGUSER=master + local PASS=master + + echo "Testing master-slave replication" + local cluster_args="-e POSTGRESQL_ADMIN_PASSWORD=pass -e POSTGRESQL_MASTER_USER=$PGUSER -e POSTGRESQL_MASTER_PASSWORD=$PASS" + local cid_suffix="basic" + local master_ip= + local slave_cids= + + # Setup the cluster + setup_replication_cluster + + # Check if the master knows about the slaves + CONTAINER_IP=$master_ip + test_slave_visibility + + # Do some real work to test replication in practice + table_name="t1" test_value_replication +} + +function run_change_password_test() { + echo " Testing password change" + local name="change_password" + + local database='db' + local user='user' + local password='password' + local admin_password='adminPassword' + + create_volume_dir + local volume_options="-v ${volume_dir}:/var/lib/pgsql/data:Z" + + DOCKER_ARGS=" +-e POSTGRESQL_DATABASE=${database} +-e POSTGRESQL_USER=${user} +-e POSTGRESQL_PASSWORD=${password} +-e POSTGRESQL_ADMIN_PASSWORD=${admin_password} +$volume_options +" create_container ${name} + + # need to set these because `postgresql_cmd` relies on global variables + PGUSER=${user} + PASS=${password} + + # need this to wait for the container to start up + CONTAINER_IP=$(get_container_ip ${name}) + test_connection ${name} + + echo " Testing login" + + assert_login_access ${user} ${password} true + assert_login_access 'postgres' ${admin_password} true + + echo " Changing passwords" + + docker stop $(get_cid ${name}) + DOCKER_ARGS=" +-e POSTGRESQL_DATABASE=${database} +-e POSTGRESQL_USER=${user} +-e POSTGRESQL_PASSWORD=NEW_${password} +-e POSTGRESQL_ADMIN_PASSWORD=NEW_${admin_password} +$volume_options +" create_container "${name}_NEW" + + # need to set this because `postgresql_cmd` relies on global variables + PASS="NEW_${password}" + + # need this to wait for the container to start up + CONTAINER_IP=$(get_container_ip "${name}_NEW") + test_connection "${name}_NEW" + + echo " Testing login with new passwords" + + assert_login_access ${user} "NEW_${password}" true + assert_login_access ${user} ${password} false + + assert_login_access 'postgres' "NEW_${admin_password}" true + assert_login_access 'postgres' ${admin_password} false + + echo " Success!" +} + +run_upgrade_test () +{ + # Do not run on Fedora or RHEL8 until the upgrade script + # is fixed for non-SCL use cases + { [ "${OS}" == "fedora" ] || [ "${OS}" == "rhel8" ]; } && return 0 + + local upgrade_path="none 9.2 9.4 9.5 9.6 10 12 none" prev= act= + for act in $upgrade_path; do + if test "$act" = $VERSION; then + break + fi + prev=$act + done + test "$prev" != none + # Check if the previous image is available in the registry + docker pull "$(get_image_id "$prev:remote")" || return 0 + + # TODO: We run this script from $VERSION directory, through test/run symlink. 
+ test/run_upgrade_test "$prev:remote" "$VERSION:local"
+}
+
+run_migration_test ()
+{
+ [ "${OS}" == "fedora" ] && return 0
+
+ local from_version
+ # Only test a subset of the migration path on non-intel hosts
+ if [ "$(uname -i)" == "x86_64" ]; then
+ local upgrade_path="9.2 9.4 9.5 9.6 10 12"
+ else
+ local upgrade_path="10 12"
+ fi
+
+ for from_version in $upgrade_path; do
+ # Do not test migration from $VERSION:remote to $VERSION:local
+ test $(version2number $from_version) -lt $(version2number "$VERSION") \
+ || break
+ # Skip if the previous image is not available in the registry
+ docker pull "$(get_image_id "$from_version:remote")" || continue
+ test/run_migration_test $from_version:remote $VERSION:local
+ done
+}
+
+run_doc_test() {
+ local tmpdir=$(mktemp -d)
+ local f
+ echo " Testing documentation in the container image"
+ # Extract the help files from the container
+ for f in help.1 ; do
+ docker run --rm ${IMAGE_NAME} /bin/bash -c "cat /${f}" >${tmpdir}/$(basename ${f})
+ # Check whether the files include some important information
+ for term in "POSTGRESQL\_ADMIN\_PASSWORD" volume 5432 ; do
+ if ! cat ${tmpdir}/$(basename ${f}) | grep -F -q -e "${term}" ; then
+ echo "ERROR: File /${f} does not include '${term}'."
+ return 1
+ fi
+ done
+ done
+ # Check whether the files use the correct format
+ if ! file ${tmpdir}/help.1 | grep -q roff ; then
+ echo "ERROR: /help.1 is not in troff or groff format"
+ return 1
+ fi
+ echo " Success!"
+ echo
+}
+
+test_the_app_image () {
+ local container_name=$1
+ local mount_opts=$2
+ echo " Testing s2i app image with invalid configuration"
+ assert_container_creation_fails -e POSTGRESQL_PASSWORD=pass -e POSTGRESQL_DATABASE=db
+ echo " Testing s2i app image with correct configuration"
+
+ DOCKER_ARGS="
+-e POSTGRESQL_DATABASE=db
+-e POSTGRESQL_USER=user
+-e POSTGRESQL_PASSWORD=password
+-e POSTGRESQL_ADMIN_PASSWORD=password
+-e POSTGRESQL_BACKUP_USER=backuser
+-e POSTGRESQL_BACKUP_PASSWORD=pass
+${mount_opts}
+" create_container "$container_name"
+
+ # need this to wait for the container to start up
+ PGUSER=user PASS=password test_connection "$container_name"
+ PGUSER=backuser PASS=pass DB=backup test_connection "$container_name"
+
+ docker stop "$(get_cid $container_name)" >/dev/null
+}
+
+run_s2i_test() {
+ local temp_file
+ echo " Testing s2i usage"
+ ct_s2i_usage "${IMAGE_NAME}" --pull-policy=never 1>/dev/null
+
+ echo " Testing s2i build"
+
+ local s2i_image_name=$IMAGE_NAME-testapp_$(ct_random_string)
+ images_to_clean+=( "$s2i_image_name" )
+
+ ct_s2i_build_as_df "file://${test_dir}/test-app" "${IMAGE_NAME}" "$s2i_image_name" 1>/dev/null
+ IMAGE_NAME=$s2i_image_name test_the_app_image s2i_config_build ""
+
+ echo " Testing s2i mount"
+ create_temp_file
+ cat "$test_dir"/test-app/postgresql-init/backup_user.sh >> "$temp_file"
+
+ # Test against the original image, not the s2i one. Even so, we expect the
+ # user to mount the directory under the "s2i" directory $APP_DATA/src.
+ local mount_point=/opt/app-root/src/postgresql-init/add_backup_user.sh
+ test_the_app_image _s2i_test_mount "-v ${temp_file}:$mount_point:z,ro"
+ echo " Success!" 
+} + +function run_general_tests() { + PGUSER=user PASS=pass POSTGRESQL_MAX_CONNECTIONS=42 POSTGRESQL_MAX_PREPARED_TRANSACTIONS=42 POSTGRESQL_SHARED_BUFFERS=64MB run_tests no_admin + PGUSER=user1 PASS=pass1 ADMIN_PASS=r00t run_tests admin + DB=postgres ADMIN_PASS=r00t run_tests only_admin + # Test with arbitrary uid for the container + DOCKER_ARGS="-u 12345" PGUSER=user2 PASS=pass run_tests no_admin_altuid + DOCKER_ARGS="-u 12345" PGUSER=user3 PASS=pass1 ADMIN_PASS=r00t run_tests admin_altuid + DB=postgres DOCKER_ARGS="-u 12345" ADMIN_PASS=rOOt run_tests only_admin_altuid +} + +run_test_cfg_hook() +{ + local volume_dir name=pg-test-cfg-dir + volume_dir=$(mktemp -d --tmpdir pg-hook-volume.XXXXX) + add_cleanup_command /bin/rm -rf "$volume_dir" + setfacl -R -m u:26:rwx "$volume_dir" + cp -r "$test_dir"/examples/custom-config/* "$volume_dir" + setfacl -R -m u:26:rwx "$volume_dir" + + DOCKER_ARGS=" +-e POSTGRESQL_ADMIN_PASSWORD=password +-v $volume_dir:/opt/app-root/src:Z + " create_container "$name" + assert_runtime_option "$name" shared_buffers 111MB + + # Check that POSTGRESQL_SHARED_BUFFERS has effect. + DOCKER_ARGS=" +-e POSTGRESQL_ADMIN_PASSWORD=password +-e POSTGRESQL_SHARED_BUFFERS=113MB + " create_container "$name-2" + assert_runtime_option "$name-2" shared_buffers 113MB + + # Check that volume has priority over POSTGRESQL_SHARED_BUFFERS. + DOCKER_ARGS=" +-e POSTGRESQL_ADMIN_PASSWORD=password +-e POSTGRESQL_SHARED_BUFFERS=113MB +-v $volume_dir:/opt/app-root/src:Z + " create_container "$name-3" + assert_runtime_option "$name-3" shared_buffers 111MB +} + +run_s2i_enable_ssl_test() +{ + local s2i_image_name="$IMAGE_NAME-ssl_$(ct_random_string)" + ct_s2i_build_as_df "file://$test_dir/examples/enable-ssl" "${IMAGE_NAME}" "$s2i_image_name" 1>/dev/null + images_to_clean+=( "$s2i_image_name" ) + + local container_name=enable-ssl-test + + DOCKER_ARGS="-e POSTGRESQL_ADMIN_PASSWORD=password" \ + IMAGE_NAME="$s2i_image_name" create_container "$container_name" + + wait_ready "$container_name" + CONTAINER_IP=$(get_container_ip $container_name) + + DB=postgres assert_login_access postgres password true + + docker run --rm -e PGPASSWORD="password" "$IMAGE_NAME" psql "postgresql://postgres@$CONTAINER_IP:5432/postgres?sslmode=require" || \ + false "FAIL: Did not manage to connect using SSL only." 
+
+ docker stop "$(get_cid "$container_name")"
+}
+
+run_s2i_bake_data_test ()
+{
+ local s2i_image_name="$IMAGE_NAME-bake_$(ct_random_string)"
+ ct_s2i_build_as_df "file://$test_dir/examples/s2i-dump-data" "${IMAGE_NAME}" "$s2i_image_name" 1>/dev/null
+ images_to_clean+=( "$s2i_image_name" )
+
+ local container_name=bake-data-test
+
+ DOCKER_ARGS="-e POSTGRESQL_ADMIN_PASSWORD=password" \
+ IMAGE_NAME="$s2i_image_name" create_container "$container_name"
+
+ wait_ready "$container_name"
+
+ test "hello world" == "$(docker exec "$(get_cid "$container_name")" \
+ bash -c "psql -tA -c 'SELECT * FROM test;'")"
+
+ docker stop "$(get_cid "$container_name")"
+}
+
+run_pgaudit_test()
+{
+ # extension pgaudit is not available for older versions
+ case ${VERSION} in
+ 9.6|10|11) echo "pgaudit not expected, test skipped."; return ;;
+ *) ;;
+ esac
+
+ local config_dir data_dir name=pg-test-pgaudit
+
+ # create a dir for config
+ config_dir=$(mktemp -d --tmpdir pg-hook-volume.XXXXX)
+ add_cleanup_command /bin/rm -rf "$config_dir"
+ cp -r "$test_dir"/examples/pgaudit/* "$config_dir"
+ setfacl -R -m u:26:rwx "$config_dir"
+
+ # create a dir for data
+ create_volume_dir
+ data_dir="${volume_dir}"
+
+ DOCKER_ARGS="
+-e POSTGRESQL_ADMIN_PASSWORD=password
+-v ${config_dir}:/opt/app-root/src:Z
+-v ${data_dir}:/var/lib/pgsql/data:Z
+ " create_container "$name"
+
+ assert_runtime_option "$name" shared_preload_libraries pgaudit
+ wait_ready "$name"
+
+ # enable the pgaudit extension
+ docker exec -i $(get_cid "$name") bash -c psql </dev/null
+ docker rm -f "$CID" >/dev/null
+}
+
+
+# assert_upgrade_succeeds {hardlink|copy} dataspec
+assert_upgrade_succeeds ()
+{
+ info "Initializing datadir with $VERSION_FROM PostgreSQL"
+ local INIT_IMAGE=$(get_image_id "$VERSION_FROM")
+ local dataspec=$2
+ init_datadir "$INIT_IMAGE" "$dataspec"
+
+ info "Running upgrade '$1/$dataspec'"
+
+ for upgrade_to in "${UPGRADE_PATH[@]}"; do
+ info "Upgrading to $upgrade_to"
+ UPGRADE_IMAGE=$(get_image_id "$upgrade_to")
+ run_server "$DATADIR" "$UPGRADE_IMAGE" "-e POSTGRESQL_UPGRADE=$1"
+ wait_for_postgres "$CID"
+ eval "data_${dataspec}_check"
+ debug "the upgrading container of version '$upgrade_to' responded"
+ docker stop "$CID" >/dev/null
+ docker rm -f "$CID" >/dev/null
+
+ run_server "$DATADIR" "$UPGRADE_IMAGE"
+ wait_for_postgres "$CID"
+ eval "data_${dataspec}_check"
+ debug "restarted server of version '$upgrade_to' responded"
+ docker stop "$CID" >/dev/null
+ docker rm -f "$CID" >/dev/null
+ done
+}
+
+VERSION_FROM=$1 ; shift
+UPGRADE_PATH=( "$@" )
+for data in empty pagila; do
+ assert_upgrade_succeeds hardlink "$data"
+ assert_upgrade_succeeds copy "$data"
+done
diff --git a/test/test-app/postgresql-init/backup_user.sh b/test/test-app/postgresql-init/backup_user.sh
new file mode 100644
index 0000000..762bef1
--- /dev/null
+++ b/test/test-app/postgresql-init/backup_user.sh
@@ -0,0 +1,12 @@
+# Check that the user credentials for backup are set
+
+[[ -v POSTGRESQL_BACKUP_USER && -v POSTGRESQL_BACKUP_PASSWORD ]] || usage "You have to set all variables for the backup user: POSTGRESQL_BACKUP_USER, POSTGRESQL_BACKUP_PASSWORD"
+
+# create backup user with 'backup' database
+psql --variable=user="$POSTGRESQL_BACKUP_USER" \
+ --variable=password="$POSTGRESQL_BACKUP_PASSWORD" \
+ <<<"
+CREATE USER :user SUPERUSER password :'password';
+CREATE DATABASE backup OWNER = :user;
+ALTER USER :user set default_transaction_read_only = on;
+"
diff --git a/test/test-lib-openshift.sh b/test/test-lib-openshift.sh
new file mode 100644
index 
0000000..f62eab6
--- /dev/null
+++ b/test/test-lib-openshift.sh
@@ -0,0 +1,1043 @@
+# shellcheck shell=bash
+# some functions are used from test-lib.sh, that is usually in the same dir
+# shellcheck source=/dev/null
+source "$(dirname "${BASH_SOURCE[0]}")"/test-lib.sh
+
+# Set of functions for testing docker images in OpenShift using 'oc' command
+
+# A variable containing the overall test result; must be changed to 0 at the end
+# of the testing script:
+# OS_TESTSUITE_RESULT=0
+# And the following trap must be set, in the beginning of the test script:
+# trap ct_os_cleanup EXIT SIGINT
+OS_TESTSUITE_RESULT=1
+OS_CLUSTER_STARTED_BY_TEST=0
+
+function ct_os_cleanup() {
+ if [ $OS_TESTSUITE_RESULT -eq 0 ] ; then
+ # shellcheck disable=SC2153
+ echo "OpenShift tests for ${IMAGE_NAME} succeeded."
+ else
+ # shellcheck disable=SC2153
+ echo "OpenShift tests for ${IMAGE_NAME} failed."
+ fi
+}
+
+# ct_os_check_compulsory_vars
+# ---------------------------
+# Check the compulsory variables:
+# * IMAGE_NAME specifies the name of the candidate image used for testing.
+# * VERSION specifies the PostgreSQL major version in the X.Y format
+# * OS specifies RHEL version (e.g. OS=rhel7)
+function ct_os_check_compulsory_vars() {
+ # shellcheck disable=SC2016
+ test -n "${IMAGE_NAME-}" || ( echo 'make sure $IMAGE_NAME is defined' >&2 ; exit 1)
+ # shellcheck disable=SC2016
+ test -n "${VERSION-}" || ( echo 'make sure $VERSION is defined' >&2 ; exit 1)
+ # shellcheck disable=SC2016
+ test -n "${OS-}" || ( echo 'make sure $OS is defined' >&2 ; exit 1)
+}
+
+# ct_os_get_status
+# --------------------
+# Returns status of all objects to make debugging easier.
+function ct_os_get_status() {
+ oc get all
+ oc status
+}
+
+# ct_os_print_logs
+# --------------------
+# Returns status of all objects and logs from all pods.
+function ct_os_print_logs() {
+ ct_os_get_status
+ while read -r pod_name; do
+ echo "INFO: printing logs for pod ${pod_name}"
+ oc logs "${pod_name}"
+ done < <(oc get pods --no-headers=true -o custom-columns=NAME:.metadata.name)
+}
+
+# ct_os_enable_print_logs
+# --------------------
+# Enables automatic printing of pod logs on ERR.
+function ct_os_enable_print_logs() {
+ set -E
+ trap ct_os_print_logs ERR
+}
+
+# ct_get_public_ip
+# --------------------
+# Returns the best guess for the IP through which the node is accessible from
+# other computers. This is a rough heuristic: it simply goes through all IPv4
+# addresses that hostname -I returns and de-prioritizes IP addresses commonly
+# used for local addressing. The remaining addresses are treated as more
+# likely to be public.
+function ct_get_public_ip() {
+ local hostnames
+ local public_ip=''
+ local found_ip
+ hostnames=$(hostname -I)
+ for guess_exp in '127\.0\.0\.1' '192\.168\.[0-9\.]*' '172\.[0-9\.]*' \
+ '10\.[0-9\.]*' '[0-9\.]*' ; do
+ found_ip=$(echo "${hostnames}" | grep -oe "${guess_exp}")
+ if [ -n "${found_ip}" ] ; then
+ # shellcheck disable=SC2001
+ hostnames=$(echo "${hostnames}" | sed -e "s/${found_ip}//")
+ public_ip="${found_ip}"
+ fi
+ done
+ if [ -z "${public_ip}" ] ; then
+ echo "ERROR: public IP could not be guessed." >&2
+ return 1
+ fi
+ echo "${public_ip}"
+}
+
+# ct_os_run_in_pod POD_NAME CMD
+# --------------------
+# Runs [cmd] in the pod specified by [pod_name]. 
+# Arguments: pod_name - full name of the pod
+# Arguments: cmd - command to be run in the pod
+function ct_os_run_in_pod() {
+ local pod_name="$1" ; shift
+
+ oc exec "$pod_name" -- "$@"
+}
+
+# ct_os_get_service_ip SERVICE_NAME
+# --------------------
+# Returns IP of the service specified by [service_name].
+# Arguments: service_name - name of the service
+function ct_os_get_service_ip() {
+ local service_name="${1}" ; shift
+ oc get "svc/${service_name}" -o yaml | grep clusterIP | \
+ cut -d':' -f2 | grep -oe '172\.30\.[0-9\.]*'
+}
+
+
+# ct_os_get_all_pods_status
+# --------------------
+# Returns status of all pods.
+function ct_os_get_all_pods_status() {
+ oc get pods -o custom-columns=Ready:status.containerStatuses[0].ready,NAME:.metadata.name
+}
+
+# ct_os_get_all_pods_name
+# --------------------
+# Returns the full name of all pods.
+function ct_os_get_all_pods_name() {
+ oc get pods --no-headers -o custom-columns=NAME:.metadata.name
+}
+
+# ct_os_get_pod_status POD_PREFIX
+# --------------------
+# Returns status of the pod specified by prefix [pod_prefix].
+# Note: Ignores -build and -deploy pods
+# Arguments: pod_prefix - prefix or whole ID of the pod
+function ct_os_get_pod_status() {
+ local pod_prefix="${1}" ; shift
+ ct_os_get_all_pods_status | grep -e "${pod_prefix}" | grep -Ev "(build|deploy)$" \
+ | awk '{print $1}' | head -n 1
+}
+
+# ct_os_get_pod_name POD_PREFIX
+# --------------------
+# Returns the full name of pods specified by prefix [pod_prefix].
+# Note: Ignores -build and -deploy pods
+# Arguments: pod_prefix - prefix or whole ID of the pod
+function ct_os_get_pod_name() {
+ local pod_prefix="${1}" ; shift
+ ct_os_get_all_pods_name | grep -e "^${pod_prefix}" | grep -Ev "(build|deploy)$"
+}
+
+# ct_os_get_pod_ip POD_NAME
+# --------------------
+# Returns the ip of the pod specified by [pod_name].
+# Arguments: pod_name - full name of the pod
+function ct_os_get_pod_ip() {
+ local pod_name="${1}"
+ oc get pod "$pod_name" --no-headers -o custom-columns=IP:status.podIP
+}
+
+# ct_os_check_pod_readiness POD_PREFIX STATUS
+# --------------------
+# Checks whether the pod is ready.
+# Arguments: pod_prefix - prefix or whole ID of the pod
+# Arguments: status - expected status (true, false)
+function ct_os_check_pod_readiness() {
+ local pod_prefix="${1}" ; shift
+ local status="${1}" ; shift
+ test "$(ct_os_get_pod_status "${pod_prefix}")" == "${status}"
+}
+
+# ct_os_wait_pod_ready POD_PREFIX TIMEOUT
+# --------------------
+# Wait a maximum of [timeout] seconds for the pod to become ready.
+# Arguments: pod_prefix - prefix or whole ID of the pod
+# Arguments: timeout - how many seconds to wait
+function ct_os_wait_pod_ready() {
+ local pod_prefix="${1}" ; shift
+ local timeout="${1}" ; shift
+ SECONDS=0
+ echo -n "Waiting for ${pod_prefix} pod to become ready ..."
+ while ! ct_os_check_pod_readiness "${pod_prefix}" "true" ; do
+ echo -n "."
+ [ "${SECONDS}" -gt "${timeout}" ] && echo " FAIL" && return 1
+ sleep 3
+ done
+ echo " DONE"
+}
+
+# ct_os_wait_rc_ready POD_PREFIX TIMEOUT
+# --------------------
+# Wait a maximum of [timeout] seconds for the rc to have the desired number of
+# replicas ready.
+# Arguments: pod_prefix - prefix of the replication controller
+# Arguments: timeout - how many seconds to wait
+function ct_os_wait_rc_ready() {
+ local pod_prefix="${1}" ; shift
+ local timeout="${1}" ; shift
+ SECONDS=0
+ echo -n "Waiting for ${pod_prefix} rc to become ready ..."
+ while ! 
test "$( (oc get --no-headers statefulsets; oc get --no-headers rc) 2>/dev/null \ + | grep "^${pod_prefix}" | awk '$2==$3 {print "ready"}')" == "ready" ; do + echo -n "." + [ "${SECONDS}" -gt "${timeout}" ] && echo " FAIL" && return 1 + sleep 3 + done + echo " DONE" +} + +# ct_os_deploy_pure_image IMAGE [ENV_PARAMS, ...] +# -------------------- +# Runs [image] in the openshift and optionally specifies env_params +# as environment variables to the image. +# Arguments: image - prefix or whole ID of the pod to run the cmd in +# Arguments: env_params - environment variables parameters for the images. +function ct_os_deploy_pure_image() { + local image="${1}" ; shift + # ignore error exit code, because oc new-app returns error when image exists + oc new-app "${image}" "$@" || : + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# ct_os_deploy_s2i_image IMAGE APP [ENV_PARAMS, ... ] +# -------------------- +# Runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. +# Arguments: image - prefix or whole ID of the pod to run the cmd in +# Arguments: app - url or local path to git repo with the application sources. +# Arguments: env_params - environment variables parameters for the images. +function ct_os_deploy_s2i_image() { + local image="${1}" ; shift + local app="${1}" ; shift + # ignore error exit code, because oc new-app returns error when image exists + oc new-app "${image}~${app}" "$@" || : + + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# ct_os_deploy_template_image TEMPLATE [ENV_PARAMS, ...] +# -------------------- +# Runs template in the openshift and optionally gives env_params to use +# specific values in the template. +# Arguments: template - prefix or whole ID of the pod to run the cmd in +# Arguments: env_params - environment variables parameters for the template. +# Example usage: ct_os_deploy_template_image mariadb-ephemeral-template.yaml \ +# DATABASE_SERVICE_NAME=mysql-57-centos7 \ +# DATABASE_IMAGE=mysql-57-centos7 \ +# MYSQL_USER=testu \ +# MYSQL_PASSWORD=testp \ +# MYSQL_DATABASE=testdb +function ct_os_deploy_template_image() { + local template="${1}" ; shift + oc process -f "${template}" "$@" | oc create -f - + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# _ct_os_get_uniq_project_name +# -------------------- +# Returns a uniq name of the OpenShift project. +function _ct_os_get_uniq_project_name() { + local r + while true ; do + r=${RANDOM} + mkdir /var/tmp/sclorg-test-${r} &>/dev/null && echo sclorg-test-${r} && break + done +} + +# ct_os_new_project [PROJECT] +# -------------------- +# Creates a new project in the openshfit using 'os' command. +# Arguments: project - project name, uses a new random name if omitted +# Expects 'os' command that is properly logged in to the OpenShift cluster. +# Not using mktemp, because we cannot use uppercase characters. +# The OPENSHIFT_CLUSTER_PULLSECRET_PATH environment variable can be set +# to contain a path to a k8s secret definition which will be used +# to authenticate to image registries. +# shellcheck disable=SC2120 +function ct_os_new_project() { + if [ "${CT_SKIP_NEW_PROJECT:-false}" == 'true' ] ; then + echo "Creating project skipped." 
+ return
+ fi
+ local project_name="${1:-$(_ct_os_get_uniq_project_name)}" ; shift || :
+ oc new-project "${project_name}"
+ # let the openshift cluster sync to avoid some race condition errors
+ sleep 3
+ if test -n "${OPENSHIFT_CLUSTER_PULLSECRET_PATH:-}" -a -e "${OPENSHIFT_CLUSTER_PULLSECRET_PATH:-}"; then
+ oc create -f "$OPENSHIFT_CLUSTER_PULLSECRET_PATH"
+ # add registry pullsecret to the serviceaccount if provided
+ secret_name=$(grep '^\s*name:' "$OPENSHIFT_CLUSTER_PULLSECRET_PATH" | awk '{ print $2 }')
+ secret_json='{"imagePullSecrets": [{"name": "'${secret_name}'"}]}'
+ oc patch serviceaccount default -p "$secret_json"
+ fi
+}
+
+# ct_os_delete_project [PROJECT]
+# --------------------
+# Deletes the specified project in OpenShift
+# Arguments: project - project name, uses the current project if omitted
+# shellcheck disable=SC2120
+function ct_os_delete_project() {
+ if [ "${CT_SKIP_NEW_PROJECT:-false}" == 'true' ] ; then
+ echo "Deleting project skipped, cleaning objects only."
+ # when we don't have enough privileges (remote cluster), this might fail;
+ # that is not a big problem, so ignore the failure in this case
+ ct_delete_all_objects || :
+ return
+ fi
+ local project_name="${1:-$(oc project -q)}" ; shift || :
+ oc delete project "${project_name}"
+}
+
+# ct_delete_all_objects
+# -----------------
+# Deletes all objects within the project.
+# Handy when we have one project and want to run more tests.
+function ct_delete_all_objects() {
+ for x in bc builds dc is isimage istag po pv pvc rc routes secrets svc ; do
+ oc delete "$x" --all
+ done
+ # some objects take longer to be really deleted, so add a dummy sleep
+ # to avoid races where another test could see not-yet-deleted objects and fail
+ sleep 10
+}
+
+# ct_os_docker_login
+# --------------------
+# Logs in to the docker daemon
+# Uses the global REGISTRY_ADDRESS environment variable for an arbitrary registry address.
+# Does not do anything if REGISTRY_ADDRESS is set.
+function ct_os_docker_login() {
+ [ -n "${REGISTRY_ADDRESS:-}" ] && echo "REGISTRY_ADDRESS set, not trying to docker login." && return 0
+ # docker login fails with "404 page not found" error sometimes, just try it more times
+ # shellcheck disable=SC2034
+ for i in $(seq 12) ; do
+ # shellcheck disable=SC2015
+ docker login -u developer -p "$(oc whoami -t)" "${REGISTRY_ADDRESS:-172.30.1.1:5000}" && return 0 || :
+ sleep 5
+ done
+ return 1
+}
+
+# ct_os_upload_image IMAGE [IMAGESTREAM]
+# --------------------
+# Uploads image from local registry to the OpenShift internal registry.
+# Arguments: image - image name to upload
+# Arguments: imagestream - name and tag to use for the internal registry.
+# In the name:tag format ($image_name:latest by default)
+# Uses the global REGISTRY_ADDRESS environment variable for an arbitrary registry address. 
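+# Example usage (illustrative): ct_os_upload_image "${IMAGE_NAME}" "postgresql:${VERSION}"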
+function ct_os_upload_image() {
+ local input_name="${1}" ; shift
+ local image_name=${input_name##*/}
+ local imagestream=${1:-$image_name:latest}
+ local output_name
+
+ output_name="${REGISTRY_ADDRESS:-172.30.1.1:5000}/$(oc project -q)/$imagestream"
+
+ ct_os_docker_login
+ docker tag "${input_name}" "${output_name}"
+ docker push "${output_name}"
+}
+
+# ct_os_is_tag_exists IS_NAME TAG
+# --------------------
+# Checks whether the specified tag exists for an image stream
+# Arguments: is_name - name of the image stream
+# Arguments: tag - name of the tag (usually version)
+function ct_os_is_tag_exists() {
+ local is_name=$1 ; shift
+ local tag=$1 ; shift
+ oc get is "${is_name}" -n openshift -o=jsonpath='{.spec.tags[*].name}' | grep -qw "${tag}"
+}
+
+# ct_os_template_exists T_NAME
+# --------------------
+# Checks whether the specified template exists
+# Arguments: t_name - name of the template
+function ct_os_template_exists() {
+ local t_name=$1 ; shift
+ oc get templates -n openshift | grep -q "^${t_name}\s"
+}
+
+# ct_os_install_in_centos
+# --------------------
+# Installs the OpenShift origin packages on CentOS
+function ct_os_install_in_centos() {
+ yum install -y centos-release-openshift-origin
+ yum install -y wget git net-tools bind-utils iptables-services bridge-utils\
+ bash-completion origin-clients docker
+}
+
+# ct_os_cluster_up [DIR, IS_PUBLIC, CLUSTER_VERSION]
+# --------------------
+# Runs the local OpenShift cluster using 'oc cluster up' and logs in as developer.
+# Arguments: dir - directory to keep configuration data in, random if omitted
+# Arguments: is_public - sets either private or public hostname for web-UI,
+# use "true" to allow remote access to the web-UI,
+# "false" is the default
+# Arguments: cluster_version - version of the OpenShift cluster to use, empty
+# means the default version of `oc`; example value: 3.7;
+# can also be specified externally via OC_CLUSTER_VERSION
+function ct_os_cluster_up() {
+ ct_os_cluster_running && echo "Cluster already running. Nothing is done." && return 0
+ ct_os_logged_in && echo "Already logged in to a cluster. Nothing is done." && return 0
+
+ mkdir -p /var/tmp/openshift
+ local dir="${1:-$(mktemp -d /var/tmp/openshift/os-data-XXXXXX)}" ; shift || :
+ local is_public="${1:-'false'}" ; shift || :
+ local default_cluster_version=${OC_CLUSTER_VERSION:-}
+ local cluster_version=${1:-${default_cluster_version}} ; shift || :
+ if ! grep -qe '--insecure-registry.*172\.30\.0\.0' /etc/sysconfig/docker ; then
+ sed -i "s|OPTIONS='|OPTIONS='--insecure-registry 172.30.0.0/16 |" /etc/sysconfig/docker
+ fi
+
+ systemctl stop firewalld || :
+ setenforce 0
+ iptables -F
+
+ systemctl restart docker
+ local cluster_ip="127.0.0.1"
+ [ "${is_public}" == "true" ] && cluster_ip=$(ct_get_public_ip)
+
+ if [ -n "${cluster_version}" ] ; then
+ # if $cluster_version is not set, we simply use the oc that is available
+ ct_os_set_path_oc "${cluster_version}"
+ fi
+
+ mkdir -p "${dir}"/{config,data,pv}
+ case $(oc version| head -n 1) in
+ "oc v3.1"?.*)
+ oc cluster up --base-dir="${dir}/data" --public-hostname="${cluster_ip}"
+ ;;
+ "oc v3."*)
+ oc cluster up --host-data-dir="${dir}/data" --host-config-dir="${dir}/config" \
+ --host-pv-dir="${dir}/pv" --use-existing-config --public-hostname="${cluster_ip}"
+ ;;
+ *)
+ echo "ERROR: Unexpected oc version." 
>&2 + return 1 + ;; + esac + oc version + oc login -u system:admin + oc project default + ct_os_wait_rc_ready docker-registry 180 + ct_os_wait_rc_ready router 30 + oc login -u developer -p developer + OS_CLUSTER_STARTED_BY_TEST=1 + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# ct_os_cluster_down +# -------------------- +# Shuts down the local OpenShift cluster using 'oc cluster down' +function ct_os_cluster_down() { + if [ ${OS_CLUSTER_STARTED_BY_TEST:-0} -eq 1 ] ; then + echo "Cluster started by the test, shutting down." + oc cluster down + else + echo "Cluster not started by the test, shutting down skipped." + fi +} + +# ct_os_cluster_running +# -------------------- +# Returns 0 if oc cluster is running +function ct_os_cluster_running() { + oc cluster status &>/dev/null +} + +# ct_os_logged_in +# --------------- +# Returns 0 if logged in to a cluster (remote or local) +function ct_os_logged_in() { + oc whoami >/dev/null +} + +# ct_os_set_path_oc OC_VERSION +# -------------------- +# This is a trick that helps using correct version of the `oc`: +# The input is version of the openshift in format v3.6.0 etc. +# If the currently available version of oc is not of this version, +# it first takes a look into /usr/local/oc-/bin directory, +# and if not found there it downloads the community release from github. +# In the end the PATH variable is changed, so the other tests can still use just 'oc'. +# Arguments: oc_version - X.Y part of the version of OSE (e.g. 3.9) +function ct_os_set_path_oc() { + local oc_version + local oc_path + + oc_version=$(ct_os_get_latest_ver "$1") + + if oc version | grep -q "oc ${oc_version%.*}." ; then + echo "Binary oc found already available in version ${oc_version}: $(command -v oc) Doing noting." + return 0 + fi + + # first check whether we already have oc available in /usr/local + local installed_oc_path="/usr/local/oc-${oc_version%.*}/bin" + + if [ -x "${installed_oc_path}/oc" ] ; then + oc_path="${installed_oc_path}" + echo "Binary oc found in ${installed_oc_path}" >&2 + else + # oc not available in /usr/local, try to download it from github (community release) + oc_path="/tmp/oc-${oc_version}-bin" + ct_os_download_upstream_oc "${oc_version}" "${oc_path}" + fi + if [ -z "${oc_path}" ] ; then + echo "ERROR: oc not found installed, nor downloaded" >&1 + return 1 + fi + export PATH="${oc_path}:${PATH}" + if ! oc version | grep -q "oc ${oc_version%.*}." ; then + echo "ERROR: something went wrong, oc located at ${oc_path}, but oc of version ${oc_version} not found in PATH ($PATH)" >&1 + return 1 + else + echo "PATH set correctly, binary oc found in version ${oc_version}: $(command -v oc)" + fi +} + +# ct_os_get_latest_ver VERSION_PART_X +# -------------------- +# Returns full version (vX.Y.Z) from part of the version (X.Y) +# Arguments: vxy - X.Y part of the version +# Returns vX.Y.Z variant of the version +function ct_os_get_latest_ver(){ + local vxy="v$1" + for vz in {3..0} ; do + curl -sif "https://github.com/openshift/origin/releases/tag/${vxy}.${vz}" >/dev/null && echo "${vxy}.${vz}" && return 0 + done + echo "ERROR: version ${vxy} not found in https://github.com/openshift/origin/tags" >&2 + return 1 +} + +# ct_os_download_upstream_oc OC_VERSION OUTPUT_DIR +# -------------------- +# Downloads a particular version of openshift-origin-client-tools from +# github into specified output directory +# Arguments: oc_version - version of OSE (e.g. 
v3.7.2)
+# Arguments: output_dir - output directory
+function ct_os_download_upstream_oc() {
+ local oc_version=$1
+ local output_dir=$2
+
+ # check whether we already have the binary in place
+ [ -x "${output_dir}/oc" ] && return 0
+
+ mkdir -p "${output_dir}"
+ # using html output instead of https://api.github.com/repos/openshift/origin/releases/tags/${oc_version},
+ # because the API limits the number of queries if not authenticated
+ tarball=$(curl -si "https://github.com/openshift/origin/releases/tag/${oc_version}" | grep -o -e "openshift-origin-client-tools-${oc_version}-[a-f0-9]*-linux-64bit.tar.gz" | head -n 1)
+
+ # download, unpack the binaries and then put them into output directory
+ echo "Downloading https://github.com/openshift/origin/releases/download/${oc_version}/${tarball} into ${output_dir}/" >&2
+ curl -sL https://github.com/openshift/origin/releases/download/"${oc_version}"/"${tarball}" | tar -C "${output_dir}" -xz
+ mv -f "${output_dir}"/"${tarball%.tar.gz}"/* "${output_dir}/"
+
+ rmdir "${output_dir}"/"${tarball%.tar.gz}"
+}
+
+
+# ct_os_test_s2i_app_func IMAGE APP CONTEXT_DIR CHECK_CMD [OC_ARGS]
+# --------------------
+# Runs [image] and [app] in OpenShift and optionally passes env_params
+# as environment variables to the image. Then checks the container by an
+# arbitrary function given as argument (such an argument may include the <IP>
+# string, which will be replaced with the actual IP).
+# Arguments: image - name of the image to test (compulsory)
+# Arguments: app - url or local path to git repo with the application sources (compulsory)
+# Arguments: context_dir - sub-directory inside the repository with the application sources (compulsory)
+# Arguments: check_command - CMD line that checks whether the container works (compulsory; '<IP>' will be replaced with the actual IP)
+# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app`
+# command, typically environment variables (optional)
+function ct_os_test_s2i_app_func() {
+ local image_name=${1}
+ local app=${2}
+ local context_dir=${3}
+ local check_command=${4}
+ local oc_args=${5:-}
+ local import_image=${6:-}
+ local image_name_no_namespace=${image_name##*/}
+ local service_name="${image_name_no_namespace}-testing"
+ local image_tagged="${image_name_no_namespace}:${VERSION}"
+
+ if [ $# -lt 4 ] || [ -z "${1}" ] || [ -z "${2}" ] || [ -z "${3}" ] || [ -z "${4}" ]; then
+ echo "ERROR: ct_os_test_s2i_app_func() requires at least 4 arguments that cannot be empty." >&2
+ return 1
+ fi
+
+ # shellcheck disable=SC2119
+ ct_os_new_project
+ # Create a specific imagestream tag for the image so that oc cannot use anything else
+ if [ "${CT_SKIP_UPLOAD_IMAGE:-false}" == 'true' ] ; then
+ if [ -n "${import_image}" ] ; then
+ echo "Importing image ${import_image} as ${image_name}:${VERSION}"
+ oc import-image "${image_name}":"${VERSION}" --from "${import_image}" --confirm
+ else
+ echo "Uploading and importing image skipped."
+ fi
+ else
+ if [ -n "${import_image}" ] ; then
+ echo "Warning: Import image ${import_image} requested, but uploading image ${image_name} instead." 
+ fi
+ ct_os_upload_image "${image_name}" "${image_tagged}"
+ fi
+
+ local app_param="${app}"
+ if [ -d "${app}" ] ; then
+ # for a local directory, we need to copy the content, otherwise the
+ # too-smart oc command pulls from the remote git repository instead
+ app_param=$(ct_obtain_input "${app}")
+ fi
+
+ # shellcheck disable=SC2086
+ ct_os_deploy_s2i_image "${image_tagged}" "${app_param}" \
+ --context-dir="${context_dir}" \
+ --name "${service_name}" \
+ ${oc_args}
+
+ if [ -d "${app}" ] ; then
+ # in order to avoid a weird race seen sometimes, let's wait shortly
+ # before starting the build explicitly
+ sleep 5
+ oc start-build "${service_name}" --from-dir="${app_param}"
+ fi
+
+ ct_os_wait_pod_ready "${service_name}" 300
+
+ local ip
+ local check_command_exp
+
+ ip=$(ct_os_get_service_ip "${service_name}")
+ # shellcheck disable=SC2001
+ check_command_exp=$(echo "$check_command" | sed -e "s/<IP>/$ip/g")
+
+ echo " Checking APP using $check_command_exp ..."
+ local result=0
+ eval "$check_command_exp" || result=1
+
+ if [ $result -eq 0 ] ; then
+ echo " Check passed."
+ else
+ echo " Check failed."
+ fi
+
+ # shellcheck disable=SC2119
+ ct_os_delete_project
+ return $result
+}
+
+# ct_os_test_s2i_app IMAGE APP CONTEXT_DIR EXPECTED_OUTPUT [PORT, PROTOCOL, RESPONSE_CODE, OC_ARGS, ... ]
+# --------------------
+# Runs [image] and [app] in OpenShift and optionally passes env_params
+# as environment variables to the image. Then checks the http response.
+# Arguments: image - name of the image to test (compulsory)
+# Arguments: app - url or local path to git repo with the application sources (compulsory)
+# Arguments: context_dir - sub-directory inside the repository with the application sources (compulsory)
+# Arguments: expected_output - PCRE regular expression that must match the response body (compulsory)
+# Arguments: port - which port to use (optional; default: 8080)
+# Arguments: protocol - which protocol to use (optional; default: http)
+# Arguments: response_code - what http response code to expect (optional; default: 200)
+# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app`
+# command, typically environment variables (optional)
+function ct_os_test_s2i_app() {
+ local image_name=${1}
+ local app=${2}
+ local context_dir=${3}
+ local expected_output=${4}
+ local port=${5:-8080}
+ local protocol=${6:-http}
+ local response_code=${7:-200}
+ local oc_args=${8:-}
+ local import_image=${9:-}
+
+ if [ $# -lt 4 ] || [ -z "${1}" ] || [ -z "${2}" ] || [ -z "${3}" ] || [ -z "${4}" ]; then
+ echo "ERROR: ct_os_test_s2i_app() requires at least 4 arguments that cannot be empty." >&2
+ return 1
+ fi
+
+ ct_os_test_s2i_app_func "${image_name}" \
+ "${app}" \
+ "${context_dir}" \
+ "ct_os_test_response_internal '${protocol}://<IP>:${port}' '${response_code}' '${expected_output}'" \
+ "${oc_args}" "${import_image}"
+}
+
+# ct_os_test_template_app_func IMAGE APP IMAGE_IN_TEMPLATE CHECK_CMD [OC_ARGS]
+# --------------------
+# Runs [image] and [app] in OpenShift and optionally passes env_params
+# as environment variables to the image. Then checks the container by an
+# arbitrary function given as argument (such an argument may include the <IP>
+# string, which will be replaced with the actual IP). 
+# Arguments: image_name - name of the candidate image to test (compulsory)
+# Arguments: template - url or local path to a template to use (compulsory)
+# Arguments: name_in_template - image name used in the template (compulsory)
+# Arguments: check_command - CMD line that checks whether the container works (compulsory; '<IP>' will be replaced with the actual IP)
+# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app`
+#            command, typically environment variables (optional)
+# Arguments: other_images - some templates need other images to be pushed into the OpenShift registry,
+#            specify them in this parameter as "<image>|<tag>", where "<image>" is a full image name
+#            (including registry if needed) and "<tag>" is a tag under which the image should be available
+#            in the OpenShift registry.
+function ct_os_test_template_app_func() {
+  local image_name=${1}
+  local template=${2}
+  local name_in_template=${3}
+  local check_command=${4}
+  local oc_args=${5:-}
+  local other_images=${6:-}
+  local import_image=${7:-}
+
+  if [ $# -lt 4 ] || [ -z "${1}" ] || [ -z "${2}" ] || [ -z "${3}" ] || [ -z "${4}" ]; then
+    echo "ERROR: ct_os_test_template_app_func() requires at least 4 arguments that cannot be empty." >&2
+    return 1
+  fi
+
+  local service_name="${name_in_template}-testing"
+  local image_tagged="${name_in_template}:${VERSION}"
+
+  # shellcheck disable=SC2119
+  ct_os_new_project
+
+  # Create a specific imagestream tag for the image so that oc cannot use anything else
+  if [ "${CT_SKIP_UPLOAD_IMAGE:-false}" == 'true' ] ; then
+    if [ -n "${import_image}" ] ; then
+      echo "Importing image ${import_image} as ${image_name}:${VERSION}"
+      oc import-image "${image_name}":"${VERSION}" --from "${import_image}" --confirm
+    else
+      echo "Uploading and importing image skipped."
+    fi
+  else
+    if [ -n "${import_image}" ] ; then
+      echo "Warning: Import image ${import_image} requested, but uploading image ${image_name} instead."
+    fi
+    ct_os_upload_image "${image_name}" "${image_tagged}"
+
+    # upload also other images that the template might need (list of pairs in the format <image>|<tag>)
+    local image_tag_a
+    local i_t
+    for i_t in ${other_images} ; do
+      echo "${i_t}"
+      IFS='|' read -ra image_tag_a <<< "${i_t}"
+      docker pull "${image_tag_a[0]}"
+      ct_os_upload_image "${image_tag_a[0]}" "${image_tag_a[1]}"
+    done
+  fi
+
+  # get the template file from a remote or local location; if not found, it is
+  # considered an internal template name, like 'mysql', so use the name
+  # explicitly
+  local local_template
+  local namespace
+
+  namespace=${CT_NAMESPACE:-$(oc project -q)}
+
+  local_template=$(ct_obtain_input "${template}" 2>/dev/null || echo "--template=${template}")
+  # shellcheck disable=SC2086
+  oc new-app "${local_template}" \
+             --name "${name_in_template}" \
+             -p NAMESPACE="${namespace}" \
+             ${oc_args}
+
+  ct_os_wait_pod_ready "${service_name}" 300
+
+  local ip
+  local check_command_exp
+
+  ip=$(ct_os_get_service_ip "${service_name}")
+  # shellcheck disable=SC2001
+  check_command_exp=$(echo "$check_command" | sed -e "s/<IP>/$ip/g")
+
+  echo "  Checking APP using $check_command_exp ..."
+  local result=0
+  eval "$check_command_exp" || result=1
+
+  if [ $result -eq 0 ] ; then
+    echo "  Check passed."
+  else
+    echo "  Check failed."
+  fi
+
+  # shellcheck disable=SC2119
+  ct_os_delete_project
+  return $result
+}
+
+# params:
+#  ct_os_test_template_app IMAGE APP IMAGE_IN_TEMPLATE EXPECTED_OUTPUT [PORT, PROTOCOL, RESPONSE_CODE, OC_ARGS, ... ]
+# --------------------
+# Runs [image] and [app] in OpenShift and optionally passes env_params
+# as environment variables to the image. Then checks the HTTP response.
+# Arguments: image_name - name of the candidate image to test (compulsory)
+# Arguments: template - url or local path to a template to use (compulsory)
+# Arguments: name_in_template - image name used in the template (compulsory)
+# Arguments: expected_output - PCRE regular expression that must match the response body (compulsory)
+# Arguments: port - which port to use (optional; default: 8080)
+# Arguments: protocol - which protocol to use (optional; default: http)
+# Arguments: response_code - what HTTP response code to expect (optional; default: 200)
+# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app`
+#            command, typically environment variables (optional)
+# Arguments: other_images - some templates need other images to be pushed into the OpenShift registry,
+#            specify them in this parameter as "<image>|<tag>", where "<image>" is a full image name
+#            (including registry if needed) and "<tag>" is a tag under which the image should be available
+#            in the OpenShift registry.
+function ct_os_test_template_app() {
+  local image_name=${1}
+  local template=${2}
+  local name_in_template=${3}
+  local expected_output=${4}
+  local port=${5:-8080}
+  local protocol=${6:-http}
+  local response_code=${7:-200}
+  local oc_args=${8:-}
+  local other_images=${9:-}
+  local import_image=${10:-}
+
+  if [ $# -lt 4 ] || [ -z "${1}" ] || [ -z "${2}" ] || [ -z "${3}" ] || [ -z "${4}" ]; then
+    echo "ERROR: ct_os_test_template_app() requires at least 4 arguments that cannot be empty." >&2
+    return 1
+  fi
+
+  ct_os_test_template_app_func "${image_name}" \
+                               "${template}" \
+                               "${name_in_template}" \
+                               "ct_os_test_response_internal '${protocol}://<IP>:${port}' '${response_code}' '${expected_output}'" \
+                               "${oc_args}" \
+                               "${other_images}" \
+                               "${import_image}"
+}
+
+# ct_os_test_image_update IMAGE_NAME OLD_IMAGE ISTAG CHECK_FUNCTION OC_ARGS
+# --------------------
+# Runs an image update test with [image] uploaded to the [istag] imagestream
+# and checks the services using an arbitrary function provided in [check_function].
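+# A sketch of an invocation (values are illustrative only; everything after
+# check_function is passed verbatim to 'oc new-app'):
+#   ct_os_test_image_update "${IMAGE_NAME}" centos/postgresql-10-centos7 \
+#     postgresql:testing "PGPASSWORD=pass pg_isready -h <IP> -U user" \
+#     postgresql:testing -e POSTGRESQL_ADMIN_PASSWORD=pass
+#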
+# Arguments: image_name - name of the candidate image to test (compulsory)
+# Arguments: old_image - valid name of the old image in the registry (compulsory)
+# Arguments: istag - imagestream tag to upload the images into (compulsory)
+# Arguments: check_function - command to be run to check functionality of created services (compulsory; '<IP>' will be replaced with the actual IP)
+# Arguments: oc_args - arguments to use during oc new-app (compulsory)
+ct_os_test_image_update() {
+  local image_name=$1; shift
+  local old_image=$1; shift
+  local istag=$1; shift
+  local check_function=$1; shift
+  local service_name=${image_name##*/}
+  local ip="" check_command_exp=""
+
+  echo "Running image update test for: $image_name"
+  # shellcheck disable=SC2119
+  ct_os_new_project
+
+  # Get the current image from the registry and create an imagestream from it
+  docker pull "$old_image:latest" 2>/dev/null
+  ct_os_upload_image "$old_image" "$istag"
+
+  # Set up the example application with the current image
+  oc new-app "$@" --name "$service_name"
+  ct_os_wait_pod_ready "$service_name" 60
+
+  # Check the application output
+  ip=$(ct_os_get_service_ip "$service_name")
+  check_command_exp=${check_function//<IP>/$ip}
+  ct_assert_cmd_success "$check_command_exp"
+
+  # Tag the built image into the imagestream and wait for the rebuild
+  ct_os_upload_image "$image_name" "$istag"
+  ct_os_wait_pod_ready "${service_name}-2" 60
+
+  # Check the application output again
+  ip=$(ct_os_get_service_ip "$service_name")
+  check_command_exp=${check_function//<IP>/$ip}
+  ct_assert_cmd_success "$check_command_exp"
+
+  # shellcheck disable=SC2119
+  ct_os_delete_project
+}
+
+# ct_os_deploy_cmd_image IMAGE_NAME
+# --------------------
+# Runs a special command pod, a pod that does nothing but includes utilities
+# useful for testing. A typical usage is a mysql pod that includes the mysql
+# command line client we need for testing.
+# Running commands inside this command pod is done via the ct_os_cmd_image_run
+# function. The pod is not run again if it is already running.
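+# Example (illustrative; this is how ct_os_test_response_internal below uses
+# the command pod to run curl from inside the cluster):
+#   ct_os_deploy_cmd_image python:3.6
+#   ct_os_cmd_image_run "curl -s 'http://<service-IP>:8080/'"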
+# Arguments: image_name - image to be used as a command pod
+function ct_os_deploy_cmd_image() {
+  local image_name=${1}
+  oc get pod command-app &>/dev/null && echo "command POD already running" && return 0
+  echo "command POD not running yet, will start one called command-app"
+  oc create -f - <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  name: command-app
+spec:
+  containers:
+  - image: ${image_name}
+    name: command-container
+    command: ["sleep"]
+    args: ["infinity"]
+  restartPolicy: OnFailure
+EOF
+}
+
+# ct_os_cmd_image_run CMD [ARG ...]
+# --------------------
+# Runs a command CMD inside the special command pod
+# Arguments: cmd - shell command with args to run inside the pod
+function ct_os_cmd_image_run() {
+  oc exec command-app -- bash -c "$@"
+}
+
+# ct_os_test_response_internal
+# ----------------
+# Perform GET request to the application container, checks output with
+# a reg-exp and HTTP response code. That all is done inside an image in the
+# cluster, so the function is used typically in clusters that are not
+# accessible outside.
+# Argument: url - request URL path
+# Argument: expected_code - expected HTTP response code
+# Argument: body_regexp - PCRE regular expression that must match the response body
+# Argument: max_attempts - Optional number of attempts (default: 20), three seconds sleep between
+# Argument: ignore_error_attempts - Optional number of attempts when we ignore error output (default: 10)
+function ct_os_test_response_internal() {
+  local url="$1"
+  local expected_code="$2"
+  local body_regexp="$3"
+  local max_attempts=${4:-20}
+  local ignore_error_attempts=${5:-10}
+
+  : "  Testing the HTTP(S) response for <${url}>"
+  local sleep_time=3
+  local attempt=1
+  local result=1
+  local status
+  local response_code
+  local response_file
+  local util_image_name='python:3.6'
+
+  response_file=$(mktemp /tmp/ct_test_response_XXXXXX)
+  ct_os_deploy_cmd_image "${util_image_name}"
+
+  while [ "${attempt}" -le "${max_attempts}" ]; do
+    ct_os_cmd_image_run "curl --connect-timeout 10 -s -w '%{http_code}' '${url}'" >"${response_file}" && status=0 || status=1
+    if [ "${status}" -eq 0 ]; then
+      response_code=$(tail -c 3 "${response_file}")
+      if [ "${response_code}" -eq "${expected_code}" ]; then
+        result=0
+      fi
+      grep -qP -e "${body_regexp}" "${response_file}" || result=1;
+      # Some services return a 40x code until they are ready, so let's give
+      # them a chance and not end with a failure right away.
+      # Do not wait if we already have the expected outcome, though.
+      if [ "${result}" -eq 0 ] || [ "${attempt}" -gt "${ignore_error_attempts}" ] || [ "${attempt}" -eq "${max_attempts}" ] ; then
+        break
+      fi
+    fi
+    attempt=$(( attempt + 1 ))
+    sleep "${sleep_time}"
+  done
+  rm -f "${response_file}"
+  return "${result}"
+}
+
+# ct_os_get_image_from_pod
+# ------------------------
+# Prints the image identifier from an existing pod to stdout
+# Argument: pod_prefix - prefix or full name of the pod to get the image from
+ct_os_get_image_from_pod() {
+  local pod_prefix=$1 ; shift
+  local pod_name
+  pod_name=$(ct_os_get_pod_name "$pod_prefix")
+  oc get "po/${pod_name}" -o yaml | sed -ne 's/^\s*image:\s*\(.*\)\s*$/\1/ p' | head -1
+}
+
+# ct_os_check_cmd_internal
+# ----------------
+# Runs a specified command, checks the exit code and compares the output with
+# an expected regexp. That all is done inside an image in the cluster, so the
+# function is used typically in clusters that are not accessible outside.
+# The check is repeated until timeout.
+# Argument: util_image_name - name of the image in the cluster that is used for running the cmd
+# Argument: service_name - kubernetes' service name to work with (IP address is taken from this one)
+# Argument: check_command - command that is run within the util_image_name container ('<IP>' is replaced with the service IP)
+# Argument: expected_content_match - regexp that must be in the output (use .* to ignore the check)
+# Argument: timeout - number of seconds to wait till the check succeeds
+function ct_os_check_cmd_internal() {
+  local util_image_name=$1 ; shift
+  local service_name=$1 ; shift
+  local check_command=$1 ; shift
+  local expected_content_match=${1:-.*} ; shift
+  local timeout=${1:-60} ; shift || :
+
+  : "  Service ${service_name} check ..."
+
+  local output
+  local ret
+  local ip
+  local check_command_exp
+
+  ip=$(ct_os_get_service_ip "${service_name}")
+  # shellcheck disable=SC2001
+  check_command_exp=$(echo "$check_command" | sed -e "s/<IP>/$ip/g")
+
+  ct_os_deploy_cmd_image "$(ct_os_get_image_from_pod "${util_image_name##*/}" | head -n 1)"
+  SECONDS=0
+
+  echo -n "Waiting for the ${service_name} service to become ready ..."
+  while true ; do
+    output=$(ct_os_cmd_image_run "$check_command_exp")
+    ret=$?
+    echo "${output}" | grep -qe "${expected_content_match}" || ret=1
+    if [ ${ret} -eq 0 ] ; then
+      echo " PASS"
+      return 0
+    fi
+    echo -n "."
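+    # not ready yet, retry until the timeout elapses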
+    [ ${SECONDS} -gt "${timeout}" ] && break
+    sleep 3
+  done
+  echo " FAIL"
+  return 1
+}
+
+# vim: set tabstop=2:shiftwidth=2:expandtab:
diff --git a/test/test-lib-postgresql.sh b/test/test-lib-postgresql.sh
new file mode 100644
index 0000000..711a865
--- /dev/null
+++ b/test/test-lib-postgresql.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# Functions for tests for the PostgreSQL image in OpenShift.
+#
+# IMAGE_NAME specifies the name of the candidate image used for testing.
+# The image has to be available before this script is executed.
+#
+
+THISDIR=$(dirname "${BASH_SOURCE[0]}")
+
+source "${THISDIR}"/test-lib.sh
+source "${THISDIR}"/test-lib-openshift.sh
+
+function test_postgresql_integration() {
+  local image_name=$1
+  local VERSION=$2
+  local import_image=$3
+  local service_name=${import_image##*/}
+  ct_os_template_exists postgresql-ephemeral && t=postgresql-ephemeral || t=postgresql-persistent
+  ct_os_test_template_app_func "${image_name}" \
+    "${t}" \
+    "${service_name}" \
+    "ct_os_check_cmd_internal '${import_image}' '${service_name}' 'PGPASSWORD=testp pg_isready -t 15 -h <IP> -U testu -d testdb' 'accepting connections' 120" \
+    "-p POSTGRESQL_VERSION=${VERSION} \
+    -p DATABASE_SERVICE_NAME="${service_name}-testing" \
+    -p POSTGRESQL_USER=testu \
+    -p POSTGRESQL_PASSWORD=testp \
+    -p POSTGRESQL_DATABASE=testdb" "" "${import_image}"
+}
+
+# vim: set tabstop=2:shiftwidth=2:expandtab:
diff --git a/test/test-lib.sh b/test/test-lib.sh
new file mode 100644
index 0000000..b959c34
--- /dev/null
+++ b/test/test-lib.sh
@@ -0,0 +1,653 @@
+# shellcheck shell=bash
+#
+# Test a container image.
+#
+# Always use it sourced from a specific container testfile
+#
+# requires definition of CID_FILE_DIR
+# CID_FILE_DIR=$(mktemp --suffix=_test_cidfiles -d)
+# requires definition of TEST_LIST
+# TEST_LIST="\
+# ctest_container_creation
+# ctest_doc_content"
+
+# Container CI tests
+# abbreviated as "ct"
+
+# may be redefined in the specific container testfile
+EXPECTED_EXIT_CODE=0
+
+# ct_cleanup
+# --------------------
+# Cleans up containers used during tests. Stops and removes all containers
+# referenced by cid_files in CID_FILE_DIR. Dumps logs if a container exited
+# unexpectedly. Removes the cid_files and CID_FILE_DIR as well.
+# Uses: $CID_FILE_DIR - path to directory containing cid_files
+# Uses: $EXPECTED_EXIT_CODE - expected container exit code
+function ct_cleanup() {
+  for cid_file in "$CID_FILE_DIR"/* ; do
+    local container
+    container=$(cat "$cid_file")
+
+    : "Stopping and removing container $container..."
+    docker stop "$container"
+    exit_status=$(docker inspect -f '{{.State.ExitCode}}' "$container")
+    if [ "$exit_status" != "$EXPECTED_EXIT_CODE" ]; then
+      : "Dumping logs for $container"
+      docker logs "$container"
+    fi
+    docker rm -v "$container"
+    rm "$cid_file"
+  done
+  rmdir "$CID_FILE_DIR"
+  : "Done."
+}
+
+# ct_enable_cleanup
+# --------------------
+# Enables automatic container cleanup after tests.
+function ct_enable_cleanup() {
+  trap ct_cleanup EXIT SIGINT
+}
+
+# ct_get_cid [name]
+# --------------------
+# Prints the container id from a cid_file based on the name of the file.
+# Argument: name - name of the cid_file where the container id is stored
+# Uses: $CID_FILE_DIR - path to directory containing cid_files
+function ct_get_cid() {
+  local name="$1" ; shift || return 1
+  cat "$CID_FILE_DIR/$name"
+}
+
+# ct_get_cip [id]
+# --------------------
+# Prints the container IP address based on the container id.
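+# Example (illustrative; 'mycontainer' is the name of a cid_file previously
+# created by ct_create_container):
+#   ip=$(ct_get_cip mycontainer)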
+# Argument: id - container id
+function ct_get_cip() {
+  local id="$1" ; shift
+  docker inspect --format='{{.NetworkSettings.IPAddress}}' "$(ct_get_cid "$id")"
+}
+
+# ct_wait_for_cid [cid_file]
+# --------------------
+# Holds the execution until the cid_file is created. Usually run after
+# container creation.
+# Argument: cid_file - name of the cid_file that should be created
+function ct_wait_for_cid() {
+  local cid_file=$1
+  local max_attempts=10
+  local sleep_time=1
+  local attempt=1
+  local result=1
+  while [ $attempt -le $max_attempts ]; do
+    [ -f "$cid_file" ] && [ -s "$cid_file" ] && return 0
+    : "Waiting for container start..."
+    attempt=$(( attempt + 1 ))
+    sleep $sleep_time
+  done
+  return 1
+}
+
+# ct_assert_container_creation_fails [container_args]
+# --------------------
+# The invocation of docker run should fail based on invalid container_args
+# passed to the function. Returns 0 when the container fails to start properly.
+# Argument: container_args - all arguments are passed directly to docker run
+# Uses: $CID_FILE_DIR - path to directory containing cid_files
+function ct_assert_container_creation_fails() {
+  local ret=0
+  local max_attempts=10
+  local attempt=1
+  local cid_file=assert
+  set +e
+  local old_container_args="${CONTAINER_ARGS-}"
+  # we really work with CONTAINER_ARGS as with a string
+  # shellcheck disable=SC2124
+  CONTAINER_ARGS="$@"
+  if ct_create_container "$cid_file" ; then
+    local cid
+    cid=$(ct_get_cid "$cid_file")
+
+    while [ "$(docker inspect -f '{{.State.Running}}' "$cid")" == "true" ] ; do
+      sleep 2
+      attempt=$(( attempt + 1 ))
+      if [ "$attempt" -gt "$max_attempts" ]; then
+        docker stop "$cid"
+        ret=1
+        break
+      fi
+    done
+    exit_status=$(docker inspect -f '{{.State.ExitCode}}' "$cid")
+    if [ "$exit_status" == "0" ]; then
+      ret=1
+    fi
+    docker rm -v "$cid"
+    rm "$CID_FILE_DIR/$cid_file"
+  fi
+  [ -n "$old_container_args" ] && CONTAINER_ARGS="$old_container_args"
+  set -e
+  return "$ret"
+}
+
+# ct_create_container [name, command]
+# --------------------
+# Creates a container using the IMAGE_NAME and CONTAINER_ARGS variables. Also
+# stores the container id to a cid_file located in the CID_FILE_DIR, and waits
+# for the creation of the file.
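+# Example (illustrative):
+#   CONTAINER_ARGS='-e POSTGRESQL_ADMIN_PASSWORD=secret'
+#   ct_create_container test_admin_pass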
+# Argument: name - name of the cid_file where the container id will be stored
+# Argument: command - optional command to be executed in the container
+# Uses: $CID_FILE_DIR - path to directory containing cid_files
+# Uses: $CONTAINER_ARGS - optional arguments passed directly to docker run
+# Uses: $IMAGE_NAME - name of the image being tested
+function ct_create_container() {
+  local cid_file="$CID_FILE_DIR/$1" ; shift
+  # create container with a cidfile in a directory for cleanup
+  # shellcheck disable=SC2086
+  docker run --cidfile="$cid_file" -d ${CONTAINER_ARGS:-} "$IMAGE_NAME" "$@"
+  ct_wait_for_cid "$cid_file" || return 1
+  : "Created container $(cat "$cid_file")"
+}
+
+# ct_scl_usage_old [name, command, expected]
+# --------------------
+# Tests three ways of running the SCL, by looking for an expected string
+# in the output of the command
+# Argument: name - name of the cid_file where the container id is stored
+# Argument: command - executed inside the container
+# Argument: expected - string that is expected to be in the command output
+# Uses: $CID_FILE_DIR - path to directory containing cid_files
+# Uses: $IMAGE_NAME - name of the image being tested
+function ct_scl_usage_old() {
+  local name="$1"
+  local command="$2"
+  local expected="$3"
+  local out=""
+  : "  Testing the image SCL enable"
+  out=$(docker run --rm "${IMAGE_NAME}" /bin/bash -c "${command}")
+  if ! echo "${out}" | grep -q "${expected}"; then
+    echo "ERROR[/bin/bash -c \"${command}\"] Expected '${expected}', got '${out}'" >&2
+    return 1
+  fi
+  out=$(docker exec "$(ct_get_cid "$name")" /bin/bash -c "${command}" 2>&1)
+  if ! echo "${out}" | grep -q "${expected}"; then
+    echo "ERROR[exec /bin/bash -c \"${command}\"] Expected '${expected}', got '${out}'" >&2
+    return 1
+  fi
+  out=$(docker exec "$(ct_get_cid "$name")" /bin/sh -ic "${command}" 2>&1)
+  if ! echo "${out}" | grep -q "${expected}"; then
+    echo "ERROR[exec /bin/sh -ic \"${command}\"] Expected '${expected}', got '${out}'" >&2
+    return 1
+  fi
+}
+
+# ct_doc_content_old [strings]
+# --------------------
+# Looks for the occurrence of strings in the documentation files and checks
+# the format of the files. Files examined: help.1
+# Argument: strings - strings expected to appear in the documentation
+# Uses: $IMAGE_NAME - name of the image being tested
+function ct_doc_content_old() {
+  local tmpdir
+  tmpdir=$(mktemp -d)
+  local f
+  : "  Testing documentation in the container image"
+  # Extract the help files from the container
+  # shellcheck disable=SC2043
+  for f in help.1 ; do
+    docker run --rm "${IMAGE_NAME}" /bin/bash -c "cat /${f}" >"${tmpdir}/$(basename "${f}")"
+    # Check whether the files contain some important information
+    for term in "$@" ; do
+      if ! grep -F -q -e "${term}" "${tmpdir}/$(basename "${f}")" ; then
+        echo "ERROR: File /${f} does not include '${term}'." >&2
+        return 1
+      fi
+    done
+    # Check whether the files use the correct format
+    for term in TH PP SH ; do
+      if ! grep -q "^\.${term}" "${tmpdir}/help.1" ; then
+        echo "ERROR: /help.1 is probably not in troff or groff format, since '${term}' is missing." >&2
+        return 1
+      fi
+    done
+  done
+  : "  Success!"
+}
+
+# full_ca_file_path
+# Prints the full path to the CA file
+function full_ca_file_path()
+{
+  echo "/etc/pki/ca-trust/source/anchors/RH-IT-Root-CA.crt"
+}
+# ct_mount_ca_file
+# ------------------
+# Checks whether the CA file returned by full_ca_file_path exists and
+# prints a mount string for containers, or an empty string otherwise
+function ct_mount_ca_file()
+{
+  # mount the CA file only if the NPM_REGISTRY variable is present.
+  local mount_parameter=""
+  if [ -n "$NPM_REGISTRY" ] && [ -f "$(full_ca_file_path)" ]; then
+    mount_parameter="-v $(full_ca_file_path):$(full_ca_file_path):Z"
+  fi
+  echo "$mount_parameter"
+}
+
+# ct_build_s2i_npm_variables
+# ------------------------------------------
+# Prints "-e NPM_MIRROR=$NPM_REGISTRY" plus the CA-file mount options,
+# or an empty string; uses the $NPM_REGISTRY environment variable
+function ct_build_s2i_npm_variables()
+{
+  local npm_variables=""
+  if [ -n "$NPM_REGISTRY" ] && [ -f "$(full_ca_file_path)" ]; then
+    npm_variables="-e NPM_MIRROR=$NPM_REGISTRY $(ct_mount_ca_file)"
+  fi
+  echo "$npm_variables"
+}
+
+# ct_npm_works
+# --------------------
+# Checks the existence of the npm tool and runs it.
+function ct_npm_works() {
+  local tmpdir
+  tmpdir=$(mktemp -d)
+  : "  Testing npm in the container image"
+  local cid_file="${tmpdir}/cid"
+  if ! docker run --rm "${IMAGE_NAME}" /bin/bash -c "npm --version" >"${tmpdir}/version" ; then
+    echo "ERROR: 'npm --version' does not work inside the image ${IMAGE_NAME}." >&2
+    return 1
+  fi
+
+  # shellcheck disable=SC2046
+  docker run -d $(ct_mount_ca_file) --rm --cidfile="$cid_file" "${IMAGE_NAME}-testapp"
+
+  # Wait for the container to write its CID file
+  ct_wait_for_cid "$cid_file" || return 1
+
+  if ! docker exec "$(cat "$cid_file")" /bin/bash -c "npm --verbose install jquery && test -f node_modules/jquery/src/jquery.js" >"${tmpdir}/jquery" 2>&1 ; then
+    echo "ERROR: npm could not install jquery inside the image ${IMAGE_NAME}." >&2
+    return 1
+  fi
+
+  if [ -n "$NPM_REGISTRY" ] && [ -f "$(full_ca_file_path)" ]; then
+    if ! grep -qo "$NPM_REGISTRY" "${tmpdir}/jquery"; then
+      echo "ERROR: Internal repository is NOT set even though it is requested."
+      return 1
+    fi
+  fi
+
+  if [ -f "$cid_file" ]; then
+    docker stop "$(cat "$cid_file")"
+    rm "$cid_file"
+  fi
+  : "  Success!"
+}
+
+# ct_path_append PATH_VARNAME DIRECTORY
+# -------------------------------------
+# Add DIRECTORY to the colon-separated list of directories stored in the
+# variable named PATH_VARNAME (despite the name, the directory is prepended).
+ct_path_append ()
+{
+  if eval "test -n \"\${$1-}\""; then
+    eval "$1=\$2:\$$1"
+  else
+    eval "$1=\$2"
+  fi
+}
+
+
+# ct_path_foreach PATH ACTION [ARGS ...]
+# --------------------------------------
+# For each DIR in PATH execute ACTION (PATH is a colon-separated list of
+# directories). The particular calls to ACTION will look like
+# '$ ACTION directory [ARGS ...]'
+ct_path_foreach ()
+{
+  local dir dirlist action save_IFS
+  save_IFS=$IFS
+  IFS=:
+  dirlist=$1
+  action=$2
+  shift 2
+  for dir in $dirlist; do "$action" "$dir" "$@" ; done
+  IFS=$save_IFS
+}
+
+
+# ct_run_test_list
+# --------------------
+# Executes the tests specified by TEST_LIST
+# Uses: $TEST_LIST - list of test names
+function ct_run_test_list() {
+  for test_case in $TEST_LIST; do
+    : "Running test $test_case"
+    # shellcheck source=/dev/null
+    [ -f "test/$test_case" ] && source "test/$test_case"
+    # shellcheck source=/dev/null
+    [ -f "../test/$test_case" ] && source "../test/$test_case"
+    $test_case
+  done;
+}
+
+# ct_gen_self_signed_cert_pem
+# ---------------------------
+# Generates a self-signed PEM certificate pair into the specified directory.
+# Argument: output_dir - output directory path
+# Argument: base_name - base name of the certificate files
+# The resulting files will be:
+# <output_dir>/<base_name>-cert-selfsigned.pem -- public PEM cert
+# <output_dir>/<base_name>-key.pem -- PEM private key
+ct_gen_self_signed_cert_pem() {
+  local output_dir=$1 ; shift
+  local base_name=$1 ; shift
+  mkdir -p "${output_dir}"
+  openssl req -newkey rsa:2048 -nodes -keyout "${output_dir}"/"${base_name}"-key.pem -subj '/C=GB/ST=Berkshire/L=Newbury/O=My Server Company' > "${base_name}"-req.pem
+  openssl req -new -x509 -nodes -key "${output_dir}"/"${base_name}"-key.pem -batch > "${output_dir}"/"${base_name}"-cert-selfsigned.pem
+}
+
+# ct_obtain_input FILE|DIR|URL
+# --------------------
+# Either copies a file or a directory to a tmp location for local inputs, or
+# downloads the file from a remote location.
+# The resulting file path is printed, so it can later be used by the calling
+# function.
+# Arguments: input - local file, directory or remote URL
+function ct_obtain_input() {
+  local input=$1
+  local extension="${input##*.}"
+
+  # Try to use the same extension for the temporary file if possible
+  [[ "${extension}" =~ ^[a-z0-9]*$ ]] && extension=".${extension}" || extension=""
+
+  local output
+  output=$(mktemp "/var/tmp/test-input-XXXXXX$extension")
+  if [ -f "${input}" ] ; then
+    cp -f "${input}" "${output}"
+  elif [ -d "${input}" ] ; then
+    rm -f "${output}"
+    cp -r -LH "${input}" "${output}"
+  elif echo "${input}" | grep -qe '^http\(s\)\?://' ; then
+    curl "${input}" > "${output}"
+  else
+    echo "ERROR: file type not known: ${input}" >&2
+    return 1
+  fi
+  echo "${output}"
+}
+
+# ct_test_response
+# ----------------
+# Performs a GET request on the application container and checks the output
+# with a reg-exp and the HTTP response code.
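+# Example (illustrative; 'web' is a cid_file created by ct_create_container):
+#   ct_test_response "http://$(ct_get_cip web):8080/" 200 'Welcome'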
+# Argument: url - request URL path
+# Argument: expected_code - expected HTTP response code
+# Argument: body_regexp - PCRE regular expression that must match the response body
+# Argument: max_attempts - Optional number of attempts (default: 20), with a three second sleep between them
+# Argument: ignore_error_attempts - Optional number of attempts when we ignore error output (default: 10)
+ct_test_response() {
+  local url="$1"
+  local expected_code="$2"
+  local body_regexp="$3"
+  local max_attempts=${4:-20}
+  local ignore_error_attempts=${5:-10}
+
+  : "  Testing the HTTP(S) response for <${url}>"
+  local sleep_time=3
+  local attempt=1
+  local result=1
+  local status
+  local response_code
+  local response_file
+  response_file=$(mktemp /tmp/ct_test_response_XXXXXX)
+  while [ "${attempt}" -le "${max_attempts}" ]; do
+    curl --connect-timeout 10 -s -w '%{http_code}' "${url}" >"${response_file}" && status=0 || status=1
+    if [ "${status}" -eq 0 ]; then
+      response_code=$(tail -c 3 "${response_file}")
+      if [ "${response_code}" -eq "${expected_code}" ]; then
+        result=0
+      fi
+      grep -qP -e "${body_regexp}" "${response_file}" || result=1;
+      # Some services return a 40x code until they are ready, so let's give
+      # them a chance and not end with a failure right away.
+      # Do not wait if we already have the expected outcome, though.
+      if [ "${result}" -eq 0 ] || [ "${attempt}" -gt "${ignore_error_attempts}" ] || [ "${attempt}" -eq "${max_attempts}" ] ; then
+        break
+      fi
+    fi
+    attempt=$(( attempt + 1 ))
+    sleep "${sleep_time}"
+  done
+  rm -f "${response_file}"
+  return "${result}"
+}
+
+# ct_registry_from_os OS
+# ----------------
+# Transforms the operating system string [os] into a registry URL
+# Argument: OS - string containing the os version
+ct_registry_from_os() {
+  local registry=""
+  case $1 in
+    rhel*)
+      registry=registry.redhat.io
+      ;;
+    *)
+      registry=docker.io
+      ;;
+  esac
+  echo "$registry"
+}
+
+# ct_get_public_image_name OS BASE_IMAGE_NAME VERSION
+# ----------------
+# Transforms the arguments into a public image name
+# Argument: OS - string containing the os version
+# Argument: BASE_IMAGE_NAME - string containing the base name of the image as defined in the Makefile
+# Argument: VERSION - string containing the version of the image as defined in the Makefile
+ct_get_public_image_name() {
+  local os=$1; shift
+  local base_image_name=$1; shift
+  local version=$1; shift
+
+  local public_image_name
+  local registry
+
+  registry=$(ct_registry_from_os "$os")
+  if [ "x$os" == "xrhel7" ]; then
+    public_image_name=$registry/rhscl/$base_image_name-${version//./}-rhel7
+  elif [ "x$os" == "xrhel8" ]; then
+    public_image_name=$registry/rhel8/$base_image_name-${version//./}
+  elif [ "x$os" == "xcentos7" ]; then
+    public_image_name=$registry/centos/$base_image_name-${version//./}-centos7
+  fi
+
+  echo "$public_image_name"
+}
+
+# ct_assert_cmd_success CMD
+# ----------------
+# Evaluates [cmd] and fails if it does not succeed.
+# Argument: CMD - Command to be run
+function ct_assert_cmd_success() {
+  echo "Checking '$*' for success ..."
+  if ! eval "$@" &>/dev/null; then
+    echo " FAIL"
+    return 1
+  fi
+  echo " PASS"
+  return 0
+}
+
+# ct_assert_cmd_failure CMD
+# ----------------
+# Evaluates [cmd] and fails if it succeeds.
+# Argument: CMD - Command to be run
+function ct_assert_cmd_failure() {
+  echo "Checking '$*' for failure ..."
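+  # success of the command means the assertion fails (inverse of ct_assert_cmd_success)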
+  if eval "$@" &>/dev/null; then
+    echo " FAIL"
+    return 1
+  fi
+  echo " PASS"
+  return 0
+}
+
+
+# ct_random_string [LENGTH=10]
+# ----------------------------
+# Generates a pseudorandom alphanumeric string of LENGTH characters; the
+# default length is 10. The string is printed on stdout.
+ct_random_string()
+(
+  export LC_ALL=C
+  dd if=/dev/urandom count=1 bs=10k 2>/dev/null \
+    | tr -dc 'a-z0-9' \
+    | fold -w "${1-10}" \
+    | head -n 1
+)
+
+# ct_s2i_usage IMG_NAME [S2I_ARGS]
+# ----------------------------
+# Creates a container and runs the usage script inside
+# Argument: IMG_NAME - name of the image to be used for the container run
+# Argument: S2I_ARGS - Additional list of source-to-image arguments, currently unused.
+ct_s2i_usage()
+{
+  local img_name=$1; shift
+  local s2i_args="$*";
+  local usage_command="/usr/libexec/s2i/usage"
+  docker run --rm "$img_name" bash -c "$usage_command"
+}
+
+# ct_s2i_build_as_df APP_PATH SRC_IMAGE DST_IMAGE [S2I_ARGS]
+# ----------------------------
+# Creates a new s2i app image from local sources in a similar way as the
+# source-to-image tool would.
+# Argument: APP_PATH - local path to the app sources to be used in the test
+# Argument: SRC_IMAGE - image to be used as a base for the s2i build
+# Argument: DST_IMAGE - image name to be used during the tagging of the s2i build result
+# Argument: S2I_ARGS - Additional list of source-to-image arguments.
+#           Only used to check for pull-policy=never and environment variable definitions.
+ct_s2i_build_as_df()
+{
+  local app_path=$1; shift
+  local src_image=$1; shift
+  local dst_image=$1; shift
+  local s2i_args="$*";
+  local local_app=upload/src/
+  local local_scripts=upload/scripts/
+  local user_id=
+  local df_name=
+  local tmpdir=
+  local incremental=false
+  local mount_options=""
+
+  # Run the entire thing inside a subshell so that we do not leak shell options outside of the function
+  (
+    # Error out if any part of the build fails
+    set -e
+
+    # Use /tmp to not pollute cwd
+    tmpdir=$(mktemp -d)
+    df_name=$(mktemp -p "$tmpdir" Dockerfile.XXXX)
+    cd "$tmpdir"
+    # Check if the image is available locally and try to pull it if it is not
+    docker images "$src_image" &>/dev/null || echo "$s2i_args" | grep -q "pull-policy=never" || docker pull "$src_image"
+    user=$(docker inspect -f "{{.Config.User}}" "$src_image")
+    # Default to root if no user is set by the image
+    user=${user:-0}
+    # run the user through the image in case it is non-numeric or does not exist
+    # NOTE: The '-eq' test is used to check if $user is numeric as it will fail if $user is not an integer
+    if ! [ "$user" -eq "$user" ] 2>/dev/null && ! user_id=$(docker run --rm "$src_image" bash -c "id -u $user 2>/dev/null"); then
+      echo "ERROR: id of user $user not found inside image $src_image."
+      echo "Terminating s2i build."
+ return 1 + else + user_id=${user_id:-$user} + fi + echo "$s2i_args" | grep -q "\-\-incremental" && incremental=true + if $incremental; then + inc_tmp=$(mktemp -d --tmpdir incremental.XXXX) + setfacl -m "u:$user_id:rwx" "$inc_tmp" + # Check if the image exists, build should fail (for testing use case) if it does not + docker images "$dst_image" &>/dev/null || (echo "Image $dst_image not found."; false) + # Run the original image with a mounted in volume and get the artifacts out of it + cmd="if [ -s /usr/libexec/s2i/save-artifacts ]; then /usr/libexec/s2i/save-artifacts > \"$inc_tmp/artifacts.tar\"; else touch \"$inc_tmp/artifacts.tar\"; fi" + docker run --rm -v "$inc_tmp:$inc_tmp:Z" "$dst_image" bash -c "$cmd" + # Move the created content into the $tmpdir for the build to pick it up + mv "$inc_tmp/artifacts.tar" "$tmpdir/" + fi + # Strip file:// from APP_PATH and copy its contents into current context + mkdir -p "$local_app" + cp -r "${app_path/file:\/\//}/." "$local_app" + [ -d "$local_app/.s2i/bin/" ] && mv "$local_app/.s2i/bin" "$local_scripts" + # Create a Dockerfile named df_name and fill it with proper content + #FIXME: Some commands could be combined into a single layer but not sure if worth the trouble for testing purposes + cat <"$df_name" +FROM $src_image +LABEL "io.openshift.s2i.build.image"="$src_image" \\ + "io.openshift.s2i.build.source-location"="$app_path" +USER root +COPY $local_app /tmp/src +EOF + [ -d "$local_scripts" ] && echo "COPY $local_scripts /tmp/scripts" >> "$df_name" && + echo "RUN chown -R $user_id:0 /tmp/scripts" >>"$df_name" + echo "RUN chown -R $user_id:0 /tmp/src" >>"$df_name" + # Check for custom environment variables inside .s2i/ folder + if [ -e "$local_app/.s2i/environment" ]; then + # Remove any comments and add the contents as ENV commands to the Dockerfile + sed '/^\s*#.*$/d' "$local_app/.s2i/environment" | while read -r line; do + echo "ENV $line" >>"$df_name" + done + fi + # Filter out env var definitions from $s2i_args and create Dockerfile ENV commands out of them + echo "$s2i_args" | grep -o -e '\(-e\|--env\)[[:space:]=]\S*=\S*' | sed -e 's/-e /ENV /' -e 's/--env[ =]/ENV /' >>"$df_name" + # Check if CA autority is present on host and add it into Dockerfile + [ -f "$(full_ca_file_path)" ] && echo "RUN cd /etc/pki/ca-trust/source/anchors && update-ca-trust extract" >>"$df_name" + + # Add in artifacts if doing an incremental build + if $incremental; then + { echo "RUN mkdir /tmp/artifacts" + echo "ADD artifacts.tar /tmp/artifacts" + echo "RUN chown -R $user_id:0 /tmp/artifacts" ; } >>"$df_name" + fi + + echo "USER $user_id" >>"$df_name" + # If exists, run the custom assemble script, else default to /usr/libexec/s2i/assemble + if [ -x "$local_scripts/assemble" ]; then + echo "RUN /tmp/scripts/assemble" >>"$df_name" + else + echo "RUN /usr/libexec/s2i/assemble" >>"$df_name" + fi + # If exists, set the custom run script as CMD, else default to /usr/libexec/s2i/run + if [ -x "$local_scripts/run" ]; then + echo "CMD /tmp/scripts/run" >>"$df_name" + else + echo "CMD /usr/libexec/s2i/run" >>"$df_name" + fi + + # Check if -v parameter is present in s2i_args and add it into docker build command + mount_options=$(echo "$s2i_args" | grep -o -e '\(-v\)[[:space:]]\.*\S*' || true) + + # Run the build and tag the result + # shellcheck disable=SC2086 + docker build $mount_options -f "$df_name" --no-cache=true -t "$dst_image" . 
+ ) +} + +# ct_check_image_availability PUBLIC_IMAGE_NAME +# ---------------------------- +# Pull an image from the public repositories to see if the image is already available. +# Argument: PUBLIC_IMAGE_NAME - string containing the public name of the image to pull +ct_check_image_availability() { + local public_image_name=$1; + + # Try pulling the image to see if it is accessible + if ! docker pull "$public_image_name" &>/dev/null; then + echo "$public_image_name could not be downloaded via 'docker'" + return 1 + fi +} + +# vim: set tabstop=2:shiftwidth=2:expandtab: