diff --git a/.osbs-repo-config b/.osbs-repo-config
new file mode 100644
index 0000000..d2914e4
--- /dev/null
+++ b/.osbs-repo-config
@@ -0,0 +1,3 @@
+[autorebuild]
+enabled = true
+
diff --git a/8.0 b/8.0
new file mode 120000
index 0000000..945c9b4
--- /dev/null
+++ b/8.0
@@ -0,0 +1 @@
+.
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..3363d8a
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,71 @@
+FROM rhel8/s2i-core:1
+
+# MySQL image for OpenShift.
+#
+# Volumes:
+#  * /var/lib/mysql/data - Datastore for MySQL
+# Environment:
+#  * $MYSQL_USER - Database user name
+#  * $MYSQL_PASSWORD - User's password
+#  * $MYSQL_DATABASE - Name of the database to create
+#  * $MYSQL_ROOT_PASSWORD (Optional) - Password for the 'root' MySQL account
+
+ENV MYSQL_VERSION=8.0 \
+    APP_DATA=/opt/app-root/src \
+    HOME=/var/lib/mysql
+
+ENV SUMMARY="MySQL 8.0 SQL database server" \
+    DESCRIPTION="MySQL is a multi-user, multi-threaded SQL database server. The container \
+image provides a containerized packaging of the MySQL mysqld daemon and client application. \
+The mysqld server daemon accepts connections from clients and provides access to content from \
+MySQL databases on behalf of the clients."
+
+LABEL summary="$SUMMARY" \
+      description="$DESCRIPTION" \
+      io.k8s.description="$DESCRIPTION" \
+      io.k8s.display-name="MySQL 8.0" \
+      io.openshift.expose-services="3306:mysql" \
+      io.openshift.tags="database,mysql,mysql80,mysql-80" \
+      com.redhat.component="mysql-80-container" \
+      name="rhel8/mysql-80" \
+      version="1" \
+      usage="docker run -d -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -p 3306:3306 rhel8/mysql-80" \
+      maintainer="SoftwareCollections.org "
+
+EXPOSE 3306
+
+# This image must forever use UID 27 for mysql user so our volumes are
+# safe in the future. This should *never* change, the last test is there
+# to make sure of that.
+RUN yum -y module enable mysql:$MYSQL_VERSION && \
+    INSTALL_PKGS="policycoreutils rsync tar gettext hostname bind-utils groff-base shadow-utils mysql-server" && \
+    yum --enablerepo rhel-8-server-buildroot-rpms install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \
+    rpm -V $INSTALL_PKGS && \
+    yum clean all && \
+    mkdir -p /var/lib/mysql/data && chown -R mysql /var/lib/mysql && \
+    test "$(id mysql)" = "uid=27(mysql) gid=27(mysql) groups=27(mysql)"
+
+# Get prefix path and path to scripts rather than hard-code them in scripts
+ENV CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/mysql \
+    MYSQL_PREFIX=/usr
+
+COPY 8.0/root-common /
+COPY 8.0/s2i-common/bin/ $STI_SCRIPTS_PATH
+COPY 8.0/root /
+
+# this is needed due to issues with squash
+# when this directory gets rm'd by the container-setup
+# script.
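+# (container-setup recreates the /etc/my.cnf.d include directory with ownership
+# usable by the mysql user; see root-common/usr/libexec/container-setup.)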
+# Also reset permissions of filesystem to default values +RUN rm -rf /etc/my.cnf.d/* && \ + /usr/libexec/container-setup && \ + rpm-file-permissions + +# Not using VOLUME statement since it's not working in OpenShift Online: +# https://github.com/sclorg/httpd-container/issues/30 +# VOLUME ["/var/lib/mysql/data"] + +USER 27 + +ENTRYPOINT ["container-entrypoint"] +CMD ["run-mysqld"] diff --git a/Dockerfile.rhel7 b/Dockerfile.rhel7 new file mode 120000 index 0000000..1d1fe94 --- /dev/null +++ b/Dockerfile.rhel7 @@ -0,0 +1 @@ +Dockerfile \ No newline at end of file diff --git a/README.md b/README.md deleted file mode 100644 index 9a93dc5..0000000 --- a/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# mysql - -The mysql package \ No newline at end of file diff --git a/README.md b/README.md new file mode 120000 index 0000000..cc942f0 --- /dev/null +++ b/README.md @@ -0,0 +1 @@ +root/usr/share/container-scripts/mysql/README.md \ No newline at end of file diff --git a/cccp.yml b/cccp.yml new file mode 100644 index 0000000..f83579e --- /dev/null +++ b/cccp.yml @@ -0,0 +1 @@ +job-id: mysql-57-centos7 diff --git a/container.yaml b/container.yaml new file mode 100644 index 0000000..b18db84 --- /dev/null +++ b/container.yaml @@ -0,0 +1,8 @@ +autorebuild: + from_latest: true + +platforms: + only: + - x86_64 + - ppc64le + - s390x diff --git a/content_sets.yml b/content_sets.yml new file mode 100644 index 0000000..3b36e7d --- /dev/null +++ b/content_sets.yml @@ -0,0 +1,20 @@ +# This is a file defining which content sets are needed to update content in +# this image. Data provided here helps determine which images are vulnerable to +# specific CVEs. Generally you should only need to update this file when: +# 1. You start depending on new product +# 2. You are preparing new product release and your content sets will change +--- +x86_64: +- rhel-7-server-rpms +- rhel-7-server-optional-rpms +- rhel-server-rhscl-7-rpms + +ppc64le: +- rhel-7-for-power-le-rpms +- rhel-7-for-power-le-optional-rpms +- rhel-7-server-for-power-le-rhscl-rpms + +s390x: +- rhel-7-for-system-z-rpms +- rhel-7-for-system-z-optional-rpms +- rhel-7-server-for-system-z-rhscl-rpms diff --git a/help.md b/help.md new file mode 120000 index 0000000..42061c0 --- /dev/null +++ b/help.md @@ -0,0 +1 @@ +README.md \ No newline at end of file diff --git a/root-common/etc/my.cnf b/root-common/etc/my.cnf new file mode 100644 index 0000000..0844075 --- /dev/null +++ b/root-common/etc/my.cnf @@ -0,0 +1,12 @@ +[mysqld] + +# Disabling symbolic-links is recommended to prevent assorted security risks +symbolic-links = 0 + +# http://www.percona.com/blog/2008/05/31/dns-achilles-heel-mysql-installation/ +skip_name_resolve + +# http://www.chriscalender.com/ignoring-the-lostfound-directory-in-your-datadir/ +ignore-db-dir=lost+found + +!includedir /etc/my.cnf.d diff --git a/root-common/usr/bin/cgroup-limits b/root-common/usr/bin/cgroup-limits new file mode 100755 index 0000000..f50bbbb --- /dev/null +++ b/root-common/usr/bin/cgroup-limits @@ -0,0 +1,92 @@ +#!/usr/bin/python3 + +""" +Script for parsing cgroup information + +This script will read some limits from the cgroup system and parse +them, printing out "VARIABLE=VALUE" on each line for every limit that is +successfully read. Output of this script can be directly fed into +bash's export command. 
Recommended usage from a bash script: + + set -o errexit + export_vars=$(cgroup-limits) ; export $export_vars + +Variables currently supported: + MAX_MEMORY_LIMIT_IN_BYTES + Maximum possible limit MEMORY_LIMIT_IN_BYTES can have. This is + currently constant value of 9223372036854775807. + MEMORY_LIMIT_IN_BYTES + Maximum amount of user memory in bytes. If this value is set + to the same value as MAX_MEMORY_LIMIT_IN_BYTES, it means that + there is no limit set. The value is taken from + /sys/fs/cgroup/memory/memory.limit_in_bytes + NUMBER_OF_CORES + Number of detected CPU cores that can be used. This value is + calculated from /sys/fs/cgroup/cpuset/cpuset.cpus + NO_MEMORY_LIMIT + Set to "true" if MEMORY_LIMIT_IN_BYTES is so high that the caller + can act as if no memory limit was set. Undefined otherwise. +""" + +from __future__ import print_function +import sys + + +def _read_file(path): + try: + with open(path, 'r') as f: + return f.read().strip() + except IOError: + return None + + +def get_memory_limit(): + """ + Read memory limit, in bytes. + """ + + limit = _read_file('/sys/fs/cgroup/memory/memory.limit_in_bytes') + if limit is None or not limit.isdigit(): + print("Warning: Can't detect memory limit from cgroups", + file=sys.stderr) + return None + return int(limit) + + +def get_number_of_cores(): + """ + Read number of CPU cores. + """ + + core_count = 0 + + line = _read_file('/sys/fs/cgroup/cpuset/cpuset.cpus') + if line is None: + print("Warning: Can't detect number of CPU cores from cgroups", + file=sys.stderr) + return None + + for group in line.split(','): + core_ids = list(map(int, group.split('-'))) + if len(core_ids) == 2: + core_count += core_ids[1] - core_ids[0] + 1 + else: + core_count += 1 + + return core_count + + +if __name__ == "__main__": + env_vars = { + "MAX_MEMORY_LIMIT_IN_BYTES": 9223372036854775807, + "MEMORY_LIMIT_IN_BYTES": get_memory_limit(), + "NUMBER_OF_CORES": get_number_of_cores() + } + + env_vars = {k: v for k, v in env_vars.items() if v is not None} + + if env_vars.get("MEMORY_LIMIT_IN_BYTES", 0) >= 92233720368547: + env_vars["NO_MEMORY_LIMIT"] = "true" + + for key, value in env_vars.items(): + print("{0}={1}".format(key, value)) diff --git a/root-common/usr/bin/container-entrypoint b/root-common/usr/bin/container-entrypoint new file mode 100755 index 0000000..9d8ad4d --- /dev/null +++ b/root-common/usr/bin/container-entrypoint @@ -0,0 +1,2 @@ +#!/bin/bash +exec "$@" diff --git a/root-common/usr/bin/mysqld-master b/root-common/usr/bin/mysqld-master new file mode 120000 index 0000000..8a0786e --- /dev/null +++ b/root-common/usr/bin/mysqld-master @@ -0,0 +1 @@ +run-mysqld-master \ No newline at end of file diff --git a/root-common/usr/bin/mysqld-slave b/root-common/usr/bin/mysqld-slave new file mode 120000 index 0000000..dc0f58b --- /dev/null +++ b/root-common/usr/bin/mysqld-slave @@ -0,0 +1 @@ +run-mysqld-slave \ No newline at end of file diff --git a/root-common/usr/bin/run-mysqld b/root-common/usr/bin/run-mysqld new file mode 100755 index 0000000..9482a25 --- /dev/null +++ b/root-common/usr/bin/run-mysqld @@ -0,0 +1,39 @@ +#!/bin/bash + +export_vars=$(cgroup-limits); export $export_vars +source ${CONTAINER_SCRIPTS_PATH}/common.sh +set -eu +if [[ -v DEBUG_IGNORE_SCRIPT_FAILURES ]]; then + set +e +fi + +export_setting_variables + +log_volume_info $MYSQL_DATADIR + +# pre-init files +process_extending_files ${APP_DATA}/mysql-pre-init/ ${CONTAINER_SCRIPTS_PATH}/pre-init/ + +if [ ! 
-d "$MYSQL_DATADIR/mysql" ]; then
+  initialize_database "$@"
+else
+  start_local_mysql "$@"
+fi
+
+# set mysql_flags and admin_flags properly
+if [ -z "${MYSQL_ROOT_PASSWORD:-}" ] || is_allowing_connection_with_empty_password ; then
+  mysql_flags="-u root --socket=$MYSQL_LOCAL_SOCKET"
+else
+  mysql_flags="-u root --socket=$MYSQL_LOCAL_SOCKET -p${MYSQL_ROOT_PASSWORD}"
+fi
+admin_flags="--defaults-file=$MYSQL_DEFAULTS_FILE $mysql_flags"
+
+# init files
+process_extending_files ${APP_DATA}/mysql-init/ ${CONTAINER_SCRIPTS_PATH}/init/
+
+# Restart the MySQL server with public IP bindings
+shutdown_local_mysql
+unset_env_vars
+log_volume_info $MYSQL_DATADIR
+log_info 'Running final exec -- Only MySQL server logs after this point'
+exec ${MYSQL_PREFIX}/libexec/mysqld --defaults-file=$MYSQL_DEFAULTS_FILE "$@" 2>&1
diff --git a/root-common/usr/bin/run-mysqld-master b/root-common/usr/bin/run-mysqld-master
new file mode 100755
index 0000000..f8b481e
--- /dev/null
+++ b/root-common/usr/bin/run-mysqld-master
@@ -0,0 +1,49 @@
+#!/bin/bash
+#
+# This is an entrypoint that runs the MySQL server in the 'master' mode.
+#
+
+export_vars=$(cgroup-limits); export $export_vars
+source ${CONTAINER_SCRIPTS_PATH}/common.sh
+set -eu
+if [[ -v DEBUG_IGNORE_SCRIPT_FAILURES ]]; then
+  set +e
+fi
+
+export_setting_variables
+
+log_volume_info $MYSQL_DATADIR
+
+export MYSQL_RUNNING_AS_MASTER=1
+
+# The 'server-id' for master needs to be constant
+export MYSQL_SERVER_ID=1
+log_info "The 'master' server-id is ${MYSQL_SERVER_ID}"
+
+# pre-init files
+process_extending_files ${APP_DATA}/mysql-pre-init/ ${CONTAINER_SCRIPTS_PATH}/pre-init/
+
+if [ ! -d "$MYSQL_DATADIR/mysql" ]; then
+  initialize_database "$@"
+else
+  start_local_mysql "$@"
+fi
+
+log_info 'Setting passwords ...'
+[ -f ${CONTAINER_SCRIPTS_PATH}/passwd-change.sh ] && source ${CONTAINER_SCRIPTS_PATH}/passwd-change.sh
+
+# Setup the 'master' replication on the MySQL server
+mysql $mysql_flags <&1
diff --git a/root-common/usr/bin/run-mysqld-slave b/root-common/usr/bin/run-mysqld-slave
new file mode 100755
index 0000000..d16a86d
--- /dev/null
+++ b/root-common/usr/bin/run-mysqld-slave
@@ -0,0 +1,47 @@
+#!/bin/bash
+#
+# This is an entrypoint that runs the MySQL server in the 'slave' mode.
+#
+
+export_vars=$(cgroup-limits); export $export_vars
+source ${CONTAINER_SCRIPTS_PATH}/common.sh
+set -eu
+if [[ -v DEBUG_IGNORE_SCRIPT_FAILURES ]]; then
+  set +e
+fi
+
+export_setting_variables
+
+log_volume_info $MYSQL_DATADIR
+
+export MYSQL_RUNNING_AS_SLAVE=1
+
+# Generate the unique 'server-id' for this slave
+export MYSQL_SERVER_ID=$(server_id)
+log_info "The 'slave' server-id is ${MYSQL_SERVER_ID}"
+
+# pre-init files
+process_extending_files ${APP_DATA}/mysql-pre-init/ ${CONTAINER_SCRIPTS_PATH}/pre-init/
+
+if [ ! -e "${MYSQL_DATADIR}/mysql" ]; then
+  # Initialize MySQL database and wait for the MySQL master to accept
+  # connections.
+ initialize_database "$@" + wait_for_mysql_master + + mysql $mysql_flags <&1 diff --git a/root-common/usr/bin/usage b/root-common/usr/bin/usage new file mode 100755 index 0000000..feafb93 --- /dev/null +++ b/root-common/usr/bin/usage @@ -0,0 +1,4 @@ +#!/bin/bash + +cat /usr/share/container-scripts/mysql/README.md + diff --git a/root-common/usr/libexec/container-setup b/root-common/usr/libexec/container-setup new file mode 100755 index 0000000..6160d4e --- /dev/null +++ b/root-common/usr/libexec/container-setup @@ -0,0 +1,59 @@ +#!/bin/bash + +# This function returns all config files that daemon uses and their path +# includes /opt. It is used to get correct path to the config file. +mysql_get_config_files_scl() { + scl enable ${ENABLED_COLLECTIONS} -- my_print_defaults --help --verbose | \ + grep --after=1 '^Default options' | \ + tail -n 1 | \ + grep -o '[^ ]*opt[^ ]*my.cnf' +} + +# This function picks the main config file that deamon uses and we ship in rpm +mysql_get_correct_config() { + # we use the same config in non-SCL packages, not necessary to guess + [ -z "${ENABLED_COLLECTIONS}" ] && echo -n "/etc/my.cnf" && return + + # from all config files read by daemon, pick the first that exists + for f in `mysql_get_config_files_scl` ; do + [ -f "$f" ] && echo "$f" + done | head -n 1 +} + +export MYSQL_CONFIG_FILE=$(mysql_get_correct_config) + +[ -z "$MYSQL_CONFIG_FILE" ] && echo "MYSQL_CONFIG_FILE is empty" && exit 1 + +unset -f mysql_get_correct_config mysql_get_config_files_scl + +# we provide own config files for the container, so clean what rpm ships here +mkdir -p ${MYSQL_CONFIG_FILE}.d +rm -f ${MYSQL_CONFIG_FILE}.d/* + +# we may add options during service init, so we need to have this dir writable by daemon user +chown -R mysql:0 ${MYSQL_CONFIG_FILE}.d ${MYSQL_CONFIG_FILE} +restorecon -R ${MYSQL_CONFIG_FILE}.d ${MYSQL_CONFIG_FILE} + +# API of the container are standard paths /etc/my.cnf and /etc/my.cnf.d +# we already include own /etc/my.cnf for container, but for cases the +# actually used config file is not on standard path /etc/my.cnf, we +# need to move it to the location daemon expects it and create symlinks +if [ "$MYSQL_CONFIG_FILE" != "/etc/my.cnf" ] ; then + rm -rf /etc/my.cnf.d + mv /etc/my.cnf ${MYSQL_CONFIG_FILE} + ln -s ${MYSQL_CONFIG_FILE} /etc/my.cnf + ln -s ${MYSQL_CONFIG_FILE}.d /etc/my.cnf.d +fi + +# setup directory for data +mkdir -p /var/lib/mysql/data +chown -R mysql:0 /var/lib/mysql +restorecon -R /var/lib/mysql + +# Loosen permission bits for group to avoid problems running container with +# arbitrary UID +# When only specifying user, group is 0, that's why /var/lib/mysql must have +# owner mysql.0; that allows to avoid a+rwx for this dir +/usr/libexec/fix-permissions /var/lib/mysql ${MYSQL_CONFIG_FILE}.d ${APP_DATA}/.. +usermod -a -G root mysql + diff --git a/root-common/usr/libexec/fix-permissions b/root-common/usr/libexec/fix-permissions new file mode 100755 index 0000000..820e718 --- /dev/null +++ b/root-common/usr/libexec/fix-permissions @@ -0,0 +1,6 @@ +#!/bin/sh +# Fix permissions on the given directory to allow group read/write of +# regular files and execute of directories. 
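+# Group 0 gets these bits because the container may run under an arbitrary,
+# non-root UID whose only group is 0 (see the comments in container-setup,
+# which calls this script).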
+find $@ -exec chown mysql:0 {} \; +find $@ -exec chmod g+rw {} \; +find $@ -type d -exec chmod g+x {} + diff --git a/root-common/usr/share/container-scripts/mysql/cnf/40-paas.cnf b/root-common/usr/share/container-scripts/mysql/cnf/40-paas.cnf new file mode 100644 index 0000000..e79f2c5 --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/cnf/40-paas.cnf @@ -0,0 +1,30 @@ +[mysqld] +# +# Settings configured by the user +# + +# Sets how the table names are stored and compared. Default: 0 +lower_case_table_names = ${MYSQL_LOWER_CASE_TABLE_NAMES} + +# Sets whether queries should be logged +general_log = ${MYSQL_LOG_QUERIES_ENABLED} +general_log_file = ${MYSQL_DATADIR}/mysql-query.log + +# The maximum permitted number of simultaneous client connections. Default: 151 +max_connections = ${MYSQL_MAX_CONNECTIONS} + +# The minimum/maximum lengths of the word to be included in a FULLTEXT index. Default: 4/20 +ft_min_word_len = ${MYSQL_FT_MIN_WORD_LEN} +ft_max_word_len = ${MYSQL_FT_MAX_WORD_LEN} + +# In case the native AIO is broken. Default: 1 +# See http://help.directadmin.com/item.php?id=529 +innodb_use_native_aio = ${MYSQL_AIO} + +[myisamchk] +# The minimum/maximum lengths of the word to be included in a FULLTEXT index. Default: 4/20 +# +# To ensure that myisamchk and the server use the same values for full-text +# parameters, we placed them in both sections. +ft_min_word_len = ${MYSQL_FT_MIN_WORD_LEN} +ft_max_word_len = ${MYSQL_FT_MAX_WORD_LEN} diff --git a/root-common/usr/share/container-scripts/mysql/cnf/50-my-tuning.cnf b/root-common/usr/share/container-scripts/mysql/cnf/50-my-tuning.cnf new file mode 100644 index 0000000..e6b33f4 --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/cnf/50-my-tuning.cnf @@ -0,0 +1,27 @@ +[mysqld] +key_buffer_size = ${MYSQL_KEY_BUFFER_SIZE} +max_allowed_packet = ${MYSQL_MAX_ALLOWED_PACKET} +table_open_cache = ${MYSQL_TABLE_OPEN_CACHE} +sort_buffer_size = ${MYSQL_SORT_BUFFER_SIZE} +read_buffer_size = ${MYSQL_READ_BUFFER_SIZE} +read_rnd_buffer_size = 256K +net_buffer_length = 2K +thread_stack = 256K +myisam_sort_buffer_size = 2M + +# It is recommended that innodb_buffer_pool_size is configured to 50 to 75 percent of system memory. +innodb_buffer_pool_size = ${MYSQL_INNODB_BUFFER_POOL_SIZE} +# Set .._log_file_size to 25 % of buffer pool size +innodb_log_file_size = ${MYSQL_INNODB_LOG_FILE_SIZE} +innodb_log_buffer_size = ${MYSQL_INNODB_LOG_BUFFER_SIZE} + +[mysqldump] +quick +max_allowed_packet = 16M + +[mysql] +no-auto-rehash + +[myisamchk] +key_buffer_size = 8M +sort_buffer_size = 8M diff --git a/root-common/usr/share/container-scripts/mysql/common.sh b/root-common/usr/share/container-scripts/mysql/common.sh new file mode 100644 index 0000000..e988ff8 --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/common.sh @@ -0,0 +1,310 @@ +#!/bin/bash + +source ${CONTAINER_SCRIPTS_PATH}/helpers.sh + +# Data directory where MySQL database files live. The data subdirectory is here +# because .bashrc and my.cnf both live in /var/lib/mysql/ and we don't want a +# volume to override it. +export MYSQL_DATADIR=/var/lib/mysql/data + +# Unix local domain socket to connect to MySQL server +export MYSQL_LOCAL_SOCKET=/tmp/mysql.sock + +# Configuration settings. 
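+# MYSQL_DEFAULTS_FILE defaults to the image's /etc/my.cnf, but users may point
+# it at an alternative configuration file (see the README).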
+export MYSQL_DEFAULTS_FILE=${MYSQL_DEFAULTS_FILE:-/etc/my.cnf} + +function export_setting_variables() { + export MYSQL_BINLOG_FORMAT=${MYSQL_BINLOG_FORMAT:-STATEMENT} + export MYSQL_LOWER_CASE_TABLE_NAMES=${MYSQL_LOWER_CASE_TABLE_NAMES:-0} + export MYSQL_LOG_QUERIES_ENABLED=${MYSQL_LOG_QUERIES_ENABLED:-0} + export MYSQL_MAX_CONNECTIONS=${MYSQL_MAX_CONNECTIONS:-151} + export MYSQL_FT_MIN_WORD_LEN=${MYSQL_FT_MIN_WORD_LEN:-4} + export MYSQL_FT_MAX_WORD_LEN=${MYSQL_FT_MAX_WORD_LEN:-20} + export MYSQL_AIO=${MYSQL_AIO:-1} + export MYSQL_MAX_ALLOWED_PACKET=${MYSQL_MAX_ALLOWED_PACKET:-200M} + export MYSQL_TABLE_OPEN_CACHE=${MYSQL_TABLE_OPEN_CACHE:-400} + export MYSQL_SORT_BUFFER_SIZE=${MYSQL_SORT_BUFFER_SIZE:-256K} + + # Export memory limit variables and calculate limits + local export_vars=$(cgroup-limits) && export $export_vars || exit 1 + if [ -n "${NO_MEMORY_LIMIT:-}" -o -z "${MEMORY_LIMIT_IN_BYTES:-}" ]; then + export MYSQL_KEY_BUFFER_SIZE=${MYSQL_KEY_BUFFER_SIZE:-32M} + export MYSQL_READ_BUFFER_SIZE=${MYSQL_READ_BUFFER_SIZE:-8M} + export MYSQL_INNODB_BUFFER_POOL_SIZE=${MYSQL_INNODB_BUFFER_POOL_SIZE:-32M} + export MYSQL_INNODB_LOG_FILE_SIZE=${MYSQL_INNODB_LOG_FILE_SIZE:-8M} + export MYSQL_INNODB_LOG_BUFFER_SIZE=${MYSQL_INNODB_LOG_BUFFER_SIZE:-8M} + else + export MYSQL_KEY_BUFFER_SIZE=${MYSQL_KEY_BUFFER_SIZE:-$((MEMORY_LIMIT_IN_BYTES/1024/1024/10))M} + export MYSQL_READ_BUFFER_SIZE=${MYSQL_READ_BUFFER_SIZE:-$((MEMORY_LIMIT_IN_BYTES/1024/1024/20))M} + export MYSQL_INNODB_BUFFER_POOL_SIZE=${MYSQL_INNODB_BUFFER_POOL_SIZE:-$((MEMORY_LIMIT_IN_BYTES/1024/1024/2))M} + # We are multiplying by 15 first and dividing by 100 later so we get as much + # precision as possible with whole numbers. Result is 15% of memory. + export MYSQL_INNODB_LOG_FILE_SIZE=${MYSQL_INNODB_LOG_FILE_SIZE:-$((MEMORY_LIMIT_IN_BYTES*15/1024/1024/100))M} + export MYSQL_INNODB_LOG_BUFFER_SIZE=${MYSQL_INNODB_LOG_BUFFER_SIZE:-$((MEMORY_LIMIT_IN_BYTES*15/1024/1024/100))M} + fi + export MYSQL_DATADIR_ACTION=${MYSQL_DATADIR_ACTION:-upgrade-warn} +} + +# this stores whether the database was initialized from empty datadir +export MYSQL_DATADIR_FIRST_INIT=false + +# Be paranoid and stricter than we should be. +# https://dev.mysql.com/doc/refman/en/identifiers.html +mysql_identifier_regex='^[a-zA-Z0-9_]+$' +mysql_password_regex='^[a-zA-Z0-9_~!@#$%^&*()-=<>,.?;:|]+$' + +# Variables that are used to connect to local mysql during initialization +mysql_flags="-u root --socket=$MYSQL_LOCAL_SOCKET" +admin_flags="--defaults-file=$MYSQL_DEFAULTS_FILE $mysql_flags" + +# Make sure env variables don't propagate to mysqld process. +function unset_env_vars() { + log_info 'Cleaning up environment variables MYSQL_USER, MYSQL_PASSWORD, MYSQL_DATABASE and MYSQL_ROOT_PASSWORD ...' + unset MYSQL_USER MYSQL_PASSWORD MYSQL_DATABASE MYSQL_ROOT_PASSWORD +} + +# Poll until MySQL responds to our ping. +function wait_for_mysql() { + pid=$1 ; shift + + while true; do + if [ -d "/proc/$pid" ]; then + mysqladmin $admin_flags ping &>/dev/null && log_info "MySQL started successfully" && return 0 + else + return 1 + fi + log_info "Waiting for MySQL to start ..." + sleep 1 + done +} + +# Start local MySQL server with a defaults file +function start_local_mysql() { + log_info 'Starting MySQL server with disabled networking ...' + ${MYSQL_PREFIX}/libexec/mysqld \ + --defaults-file=$MYSQL_DEFAULTS_FILE \ + --skip-networking --socket=$MYSQL_LOCAL_SOCKET "$@" & + mysql_pid=$! 
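+  # wait_for_mysql polls 'mysqladmin ping' for as long as this PID stays alive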
+ wait_for_mysql $mysql_pid +} + +# Shutdown mysql flushing privileges +function shutdown_local_mysql() { + log_info 'Shutting down MySQL ...' + mysqladmin $admin_flags flush-privileges shutdown +} + +# Initialize the MySQL database (create user accounts and the initial database) +function initialize_database() { + log_info 'Initializing database ...' + if [[ "$MYSQL_VERSION" < "5.7" ]] ; then + # Using --rpm since we need mysql_install_db behaves as in RPM + log_and_run mysql_install_db --rpm --datadir=$MYSQL_DATADIR + else + log_initialization ${MYSQL_PREFIX}/libexec/mysqld --initialize --datadir=$MYSQL_DATADIR --ignore-db-dir=lost+found + fi + + # The '--initialize' option sets an auto generated root password. + mysql_flags="$mysql_flags -p${AUTOGENERATED_ROOT_PASSWORD}" + admin_flags="--defaults-file=$MYSQL_DEFAULTS_FILE $mysql_flags" + + start_local_mysql "$@" + + # The first valid connection after running 'mysql --initialize' must use + # the --connect-expired-password option and set a new root password. + mysql_flags="$mysql_flags --connect-expired-password" + admin_flags="--defaults-file=$MYSQL_DEFAULTS_FILE $mysql_flags" + + # As we have a temporary auto generated root password, the first thing to do is to + # change it. We try first to set it to an empty password, but if this is not allowed + # (due to validate_plugin, for example), we then set it to MYSQL_ROOT_PASSWORD. + if [ -v MYSQL_ROOT_PASSWORD ]; then + log_info "Setting password for MySQL root user ..." + set +e + mysql $mysql_flags -e "ALTER USER 'root'@'localhost' IDENTIFIED BY '';" + RETCODE=$? + set -e + + if [ $RETCODE -eq 0 ]; then + mysql_flags="-u root --socket=$MYSQL_LOCAL_SOCKET" + else + mysql $mysql_flags -e "ALTER USER 'root'@'localhost' IDENTIFIED BY '${MYSQL_ROOT_PASSWORD}';" + mysql_flags="-u root --socket=$MYSQL_LOCAL_SOCKET -p${MYSQL_ROOT_PASSWORD}" + fi + + mysql $mysql_flags </dev/null && log_info "MySQL master is ready" && return 0 + sleep 1 + done +} + +# get_matched_files finds file for image extending +function get_matched_files() { + local custom_dir default_dir + custom_dir="$1" + default_dir="$2" + files_matched="$3" + find "$default_dir" -maxdepth 1 -type f -name "$files_matched" -printf "%f\n" + [ -d "$custom_dir" ] && find "$custom_dir" -maxdepth 1 -type f -name "$files_matched" -printf "%f\n" +} + +# process_extending_files process extending files in $1 and $2 directories +# - source all *.sh files +# (if there are files with same name source only file from $1) +function process_extending_files() { + local custom_dir default_dir + custom_dir=$1 + default_dir=$2 + + while read filename ; do + echo "=> sourcing $filename ..." + # Custom file is prefered + if [ -f $custom_dir/$filename ]; then + source $custom_dir/$filename + else + source $default_dir/$filename + fi + done <<<"$(get_matched_files "$custom_dir" "$default_dir" '*.sh' | sort -u)" +} + +# process extending config files in $1 and $2 directories +# - expand variables in *.cnf and copy the files into /etc/my.cnf.d directory +# (if there are files with same name source only file from $1) +function process_extending_config_files() { + local custom_dir default_dir + custom_dir=$1 + default_dir=$2 + + while read filename ; do + echo "=> sourcing $filename ..." 
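+    # (unlike the *.sh hooks above, these files are not sourced; envsubst expands
+    # variables in them and the result is written into /etc/my.cnf.d)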
+ # Custom file is prefered + if [ -f $custom_dir/$filename ]; then + envsubst < $custom_dir/$filename > /etc/my.cnf.d/$filename + else + envsubst < $default_dir/$filename > /etc/my.cnf.d/$filename + fi + done <<<"$(get_matched_files "$custom_dir" "$default_dir" '*.cnf' | sort -u)" +} + +# Converts string version to the integer format (5.5.33 is converted to 505, +# 10.1.23-MariaDB is converted into 1001, etc. +function version2number() { + local version_major=$(echo "$1" | grep -o -e '^[0-9]*\.[0-9]*') + printf %d%02d ${version_major%%.*} ${version_major##*.} +} + +# Converts the version in format of an integer into major.minor +function number2version() { + local numver=${1} + echo $((numver / 100)).$((numver % 100)) +} + +# Prints version of the mysqld that is currently available (string) +function mysqld_version() { + ${MYSQL_PREFIX}/libexec/mysqld -V | awk '{print $3}' +} + +# Returns version from the daemon in integer format +function mysqld_compat_version() { + version2number $(mysqld_version) +} + +# Returns version from the datadir in the integer format +function get_datadir_version() { + local datadir="$1" + local upgrade_info_file=$(get_mysql_upgrade_info_file "$datadir") + [ -r "$upgrade_info_file" ] || return + local version_text=$(cat "$upgrade_info_file" | head -n 1) + version2number "${version_text}" +} + +# Returns name of the file in the datadir that holds version information about the data +function get_mysql_upgrade_info_file() { + local datadir="$1" + echo "$datadir/mysql_upgrade_info" +} + +# Writes version string of the daemon into mysql_upgrade_info file +# (should be only used when the file is missing and only during limited time; +# once most deployments include this version file, we should leave it on +# scripts to generate the file right after initialization or when upgrading) +function write_mysql_upgrade_info_file() { + local datadir="$1" + local version=$(mysqld_version) + local upgrade_info_file=$(get_mysql_upgrade_info_file "$datadir") + if [ -f "$datadir/mysql_upgrade_info" ] ; then + echo "File ${upgrade_info_file} exists, nothing is done." + else + log_info "Storing version '${version}' information into the data dir '${upgrade_info_file}'" + echo "${version}" > "${upgrade_info_file}" + mysqld_version >"$datadir/mysql_upgrade_info" + fi +} + +# Checks if mysql server is allowing connection for 'root'@'localhost' without password +function is_allowing_connection_with_empty_password() { + set +e + mysql -u root --socket=$MYSQL_LOCAL_SOCKET -e "DO 0;" # NO-OP command, just to test the connection + RETCODE=$? 
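+  # a zero status means the passwordless root login was accepted; hand that
+  # status back to the caller as the function's return value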
+ set -e + return $RETCODE +} diff --git a/root-common/usr/share/container-scripts/mysql/helpers.sh b/root-common/usr/share/container-scripts/mysql/helpers.sh new file mode 100644 index 0000000..c07ddde --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/helpers.sh @@ -0,0 +1,36 @@ +function log_info { + echo "---> `date +%T` $@" +} + +function log_warn { + echo "---> `date +%T` Warning: $@" +} + +function log_and_run { + log_info "Running $@" + "$@" +} + +function log_initialization { + log_info "Running $@" + AUTOGENERATED_ROOT_PASSWORD="$("$@" 2>&1 | grep "temporary password" | awk '{print $NF}')" +} + +function log_volume_info { + CONTAINER_DEBUG=${CONTAINER_DEBUG:-} + if [[ "${CONTAINER_DEBUG,,}" != "true" ]]; then + return + fi + + log_info "Volume info for $@:" + set +e + log_and_run mount + while [ $# -gt 0 ]; do + log_and_run ls -alZ $1 + shift + done + set -e + if [[ -v DEBUG_IGNORE_SCRIPT_FAILURES ]]; then + set +e + fi +} diff --git a/root-common/usr/share/container-scripts/mysql/init/40-datadir-action.sh b/root-common/usr/share/container-scripts/mysql/init/40-datadir-action.sh new file mode 100644 index 0000000..2bc8095 --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/init/40-datadir-action.sh @@ -0,0 +1,111 @@ +upstream_upgrade_info() { + echo -n "For upstream documentation about upgrading, see: " + case ${MYSQL_VERSION} in + 10.0) echo "https://mariadb.com/kb/en/library/upgrading-from-mariadb-55-to-mariadb-100/" ;; + 10.1) echo "https://mariadb.com/kb/en/library/upgrading-from-mariadb-100-to-mariadb-101/" ;; + 10.2) echo "https://mariadb.com/kb/en/library/upgrading-from-mariadb-101-to-mariadb-102/" ;; + 5.6) echo "https://dev.mysql.com/doc/refman/5.6/en/upgrading-from-previous-series.html" ;; + 5.7) echo "https://dev.mysql.com/doc/refman/5.7/en/upgrading-from-previous-series.html" ;; + *) echo "Non expected version '${MYSQL_VERSION}'" ; return 1 ;; + esac +} + +check_datadir_version() { + local datadir="$1" + local datadir_version=$(get_datadir_version "$datadir") + local mysqld_version=$(mysqld_compat_version) + local datadir_version_dot=$(number2version "${datadir_version}") + local mysqld_version_dot=$(number2version "${mysqld_version}") + + for datadir_action in ${MYSQL_DATADIR_ACTION//,/ } ; do + log_info "Running datadir action: ${datadir_action}" + case ${datadir_action} in + upgrade-auto|upgrade-warn) + if [ -z "${datadir_version}" ] || [ "${datadir_version}" -eq 0 ] ; then + # Writing the info file, since historically it was not written + log_warn "Version of the data could not be determined."\ + "It is because the file mysql_upgrade_info is missing in the data directory, which"\ + "is most probably because it was not created when initialization of data directory."\ + "In order to allow seamless updates to the next higher version in the future,"\ + "the file mysql_upgrade_info will be created."\ + "If the data directory was created with a different version than ${mysqld_version_dot},"\ + "it is required to run this container with the MYSQL_DATADIR_ACTION environment variable"\ + "set to 'force', or run 'mysql_upgrade' utility manually; the mysql_upgrade tool"\ + "checks the tables and creates such a file as well. 
$(upstream_upgrade_info)" + write_mysql_upgrade_info_file "${MYSQL_DATADIR}" + continue + # This is currently a dead-code, but should be enabled after the mysql_upgrade_info + # file gets to the deployments (after few months most of the deployments should already have the file) + log_warn "Version of the data could not be determined."\ + "Running such a container is risky."\ + "The current daemon version is ${mysqld_version_dot}."\ + "If you are not sure whether the data directory is compatible with the current"\ + "version ${mysqld_version_dot}, restore the data from a back-up."\ + "If restoring from a back-up is not possible, create a file 'mysql_upgrade_info'"\ + "that includes version information (${mysqld_version_dot} in this case) in the root"\ + "of the data directory."\ + "In order to create the 'mysql_upgrade_info' file, either run this container with"\ + "the MYSQL_DATADIR_ACTION environment variable set to 'force', or run 'mysql_upgrade' utility"\ + "manually; the mysql_upgrade tool checks the tables and creates such a file as well."\ + "That will enable correct upgrade check in the future. $(upstream_upgrade_info)" + fi + + if [ "${datadir_version}" -eq "${mysqld_version}" ] ; then + log_info "MySQL server version check passed, both server and data directory"\ + "are version ${mysqld_version_dot}." + continue + fi + + if [ $(( ${datadir_version} + 1 )) -eq "${mysqld_version}" -o "${datadir_version}" -eq 505 -a "${mysqld_version}" -eq 1000 ] ; then + log_warn "MySQL server is version ${mysqld_version_dot} and datadir is version"\ + "${datadir_version_dot}, which is a compatible combination." + if [ "${MYSQL_DATADIR_ACTION}" == 'upgrade-auto' ] ; then + log_info "The data directory will be upgraded automatically from ${datadir_version_dot}"\ + "to version ${mysqld_version_dot}. $(upstream_upgrade_info)" + log_and_run mysql_upgrade ${mysql_flags} + else + log_warn "Automatic upgrade is not turned on, proceed with the upgrade."\ + "In order to upgrade the data directory, run this container with the MYSQL_DATADIR_ACTION"\ + "environment variable set to 'upgrade-auto' or run mysql_upgrade manually. $(upstream_upgrade_info)" + fi + else + log_warn "MySQL server is version ${mysqld_version_dot} and datadir is version"\ + "${datadir_version_dot}, which are incompatible. Remember, that upgrade is only supported"\ + "by upstream from previous version and it is not allowed to skip versions. $(upstream_upgrade_info)" + if [ "${datadir_version}" -gt "${mysqld_version}" ] ; then + log_warn "Downgrading to the lower version is not supported. Consider"\ + "dumping data and load them again into a fresh instance. $(upstream_upgrade_info)" + fi + log_warn "Consider restoring the database from a back-up. To ignore this"\ + "warning, set 'MYSQL_DATADIR_ACTION' variable to 'upgrade-force', but this may result in data corruption. $(upstream_upgrade_info)" + return 1 + fi + ;; + + upgrade-force) + log_and_run mysql_upgrade ${mysql_flags} --force + ;; + + optimize) + log_and_run mysqlcheck ${mysql_flags} --optimize --all-databases --force + ;; + + analyze) + log_and_run mysqlcheck ${mysql_flags} --analyze --all-databases --force + ;; + + disable) + log_info "Nothing is done about the data directory." + ;; + *) + log_warn "Unknown value of MYSQL_DATADIR_ACTION variable: '${MYSQL_DATADIR_ACTION}', ignoring." 
+ ;; + esac + done +} + +check_datadir_version "${MYSQL_DATADIR}" + +unset -f check_datadir_version upstream_upgrade_info + + diff --git a/root-common/usr/share/container-scripts/mysql/init/50-passwd-change.sh b/root-common/usr/share/container-scripts/mysql/init/50-passwd-change.sh new file mode 100644 index 0000000..2514f2d --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/init/50-passwd-change.sh @@ -0,0 +1,48 @@ +password_change() { + log_info 'Setting passwords ...' + + # Set the password for MySQL user and root everytime this container is started. + # This allows to change the password by editing the deployment configuration. + if [[ -v MYSQL_USER && -v MYSQL_PASSWORD ]]; then +mysql $mysql_flags < "5.6" ] ; then +mysql $mysql_flags < "5.6" ] ; then +mysql $mysql_flags < /etc/my.cnf.d/base.cnf + diff --git a/root-common/usr/share/container-scripts/mysql/pre-init/60-replication-config.sh b/root-common/usr/share/container-scripts/mysql/pre-init/60-replication-config.sh new file mode 100644 index 0000000..a923476 --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/pre-init/60-replication-config.sh @@ -0,0 +1,17 @@ +# mysqld configuration for replication scenarios + +if [ -v MYSQL_RUNNING_AS_MASTER ] || [ -v MYSQL_RUNNING_AS_SLAVE ] ; then + log_info 'Processing basic MySQL configuration for replication (master and slave) files ...' + envsubst < ${CONTAINER_SCRIPTS_PATH}/pre-init/my-repl-gtid.cnf.template > /etc/my.cnf.d/repl-gtid.cnf +fi + +if [ -v MYSQL_RUNNING_AS_MASTER ] ; then + log_info 'Processing basic MySQL configuration for replication (master only) files ...' + envsubst < ${CONTAINER_SCRIPTS_PATH}/pre-init/my-master.cnf.template > /etc/my.cnf.d/master.cnf +fi + +if [ -v MYSQL_RUNNING_AS_SLAVE ] ; then + log_info 'Processing basic MySQL configuration for replication (slave only) files ...' + envsubst < ${CONTAINER_SCRIPTS_PATH}/pre-init/my-slave.cnf.template > /etc/my.cnf.d/slave.cnf +fi + diff --git a/root-common/usr/share/container-scripts/mysql/pre-init/70-s2i-config.sh b/root-common/usr/share/container-scripts/mysql/pre-init/70-s2i-config.sh new file mode 100644 index 0000000..7a8ae5a --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/pre-init/70-s2i-config.sh @@ -0,0 +1,6 @@ +# additional arbitrary mysqld configuration provided by user using s2i + +log_info 'Processing additional arbitrary MySQL configuration provided by s2i ...' 
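+# *.cnf files supplied via s2i in mysql-cfg/ are preferred over same-named
+# defaults shipped in ${CONTAINER_SCRIPTS_PATH}/cnf/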
+ +process_extending_config_files ${APP_DATA}/mysql-cfg/ ${CONTAINER_SCRIPTS_PATH}/cnf/ + diff --git a/root-common/usr/share/container-scripts/mysql/pre-init/my-base.cnf.template b/root-common/usr/share/container-scripts/mysql/pre-init/my-base.cnf.template new file mode 100644 index 0000000..c654f7f --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/pre-init/my-base.cnf.template @@ -0,0 +1,5 @@ +[mysqld] +datadir = ${MYSQL_DATADIR} +basedir = ${MYSQL_PREFIX} +plugin-dir = ${MYSQL_PREFIX}/lib64/mysql/plugin + diff --git a/root-common/usr/share/container-scripts/mysql/pre-init/my-master.cnf.template b/root-common/usr/share/container-scripts/mysql/pre-init/my-master.cnf.template new file mode 100644 index 0000000..f434885 --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/pre-init/my-master.cnf.template @@ -0,0 +1,7 @@ +[mysqld] + +server-id = ${MYSQL_SERVER_ID} +log_bin = ${MYSQL_DATADIR}/mysql-bin.log +binlog_do_db = mysql +binlog_do_db = ${MYSQL_DATABASE} +binlog_format = ${MYSQL_BINLOG_FORMAT} diff --git a/root-common/usr/share/container-scripts/mysql/pre-init/my-repl-gtid.cnf.template b/root-common/usr/share/container-scripts/mysql/pre-init/my-repl-gtid.cnf.template new file mode 100644 index 0000000..63671cb --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/pre-init/my-repl-gtid.cnf.template @@ -0,0 +1,6 @@ +[mysqld] + +gtid_mode = ON +log-slave-updates = ON +enforce-gtid-consistency = ON + diff --git a/root-common/usr/share/container-scripts/mysql/pre-init/my-slave.cnf.template b/root-common/usr/share/container-scripts/mysql/pre-init/my-slave.cnf.template new file mode 100644 index 0000000..5bdf109 --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/pre-init/my-slave.cnf.template @@ -0,0 +1,7 @@ +[mysqld] + +server-id = ${MYSQL_SERVER_ID} +log_bin = ${MYSQL_DATADIR}/mysql-bin.log +relay-log = ${MYSQL_DATADIR}/mysql-relay-bin.log +binlog_do_db = mysql +binlog_do_db = ${MYSQL_DATABASE} diff --git a/root-common/usr/share/container-scripts/mysql/scl_enable b/root-common/usr/share/container-scripts/mysql/scl_enable new file mode 100644 index 0000000..5a25432 --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/scl_enable @@ -0,0 +1,3 @@ +# This will make scl collection binaries work out of box. +unset BASH_ENV PROMPT_COMMAND ENV +source scl_source enable ${ENABLED_COLLECTIONS} diff --git a/root/usr/share/container-scripts/mysql/README.md b/root/usr/share/container-scripts/mysql/README.md new file mode 100644 index 0000000..3fc1b68 --- /dev/null +++ b/root/usr/share/container-scripts/mysql/README.md @@ -0,0 +1,340 @@ +MySQL 8.0 SQL Database Server container image +========================================== + +This container image includes MySQL 8.0 SQL database server for OpenShift and general usage. +Users can choose between RHEL and CentOS based images. +The RHEL image is available in the [Red Hat Container Catalog](https://access.redhat.com/containers/#/registry.access.redhat.com/rhel8/mysql-80) +as registry.access.redhat.com/rhel8/mysql-80. +The CentOS image is then available on [Docker Hub](https://hub.docker.com/r/centos/mysql-80-centos8/) +as centos/mysql-80-centos8. + + +Description +----------- + +This container image provides a containerized packaging of the MySQL mysqld daemon +and client application. The mysqld server daemon accepts connections from clients +and provides access to content from MySQL databases on behalf of the clients. 
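+
+As a quick illustration (assuming you have access to the Red Hat registry), you
+can pull the image and read its `usage` label, which carries a ready-to-run
+`docker run` example:
+
+```
+$ docker pull registry.access.redhat.com/rhel8/mysql-80
+$ docker inspect -f '{{index .Config.Labels "usage"}}' registry.access.redhat.com/rhel8/mysql-80
+```
+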
+You can find more information on the MySQL project from the project Web site +(https://www.mysql.com/). + + +Usage +----- + +For this, we will assume that you are using the MySQL 8.0 container image from the +Red Hat Container Catalog called `rhel8/mysql-80`. +If you want to set only the mandatory environment variables and not store +the database in a host directory, execute the following command: + +``` +$ docker run -d --name mysql_database -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -p 3306:3306 rhel8/mysql-80 +``` + +This will create a container named `mysql_database` running MySQL with database +`db` and user with credentials `user:pass`. Port 3306 will be exposed and mapped +to the host. If you want your database to be persistent across container executions, +also add a `-v /host/db/path:/var/lib/mysql/data` argument. This will be the MySQL +data directory. + +If the database directory is not initialized, the entrypoint script will first +run [`mysql_install_db`](https://dev.mysql.com/doc/refman/en/mysql-install-db.html) +and setup necessary database users and passwords. After the database is initialized, +or if it was already present, `mysqld` is executed and will run as PID 1. You can + stop the detached container by running `docker stop mysql_database`. + + +Environment variables and volumes +--------------------------------- + +The image recognizes the following environment variables that you can set during +initialization by passing `-e VAR=VALUE` to the Docker run command. + +**`MYSQL_USER`** + User name for MySQL account to be created + +**`MYSQL_PASSWORD`** + Password for the user account + +**`MYSQL_DATABASE`** + Database name + +**`MYSQL_ROOT_PASSWORD`** + Password for the root user (optional) + + +The following environment variables influence the MySQL configuration file. They are all optional. + +**`MYSQL_LOWER_CASE_TABLE_NAMES (default: 0)`** + Sets how the table names are stored and compared + +**`MYSQL_MAX_CONNECTIONS (default: 151)`** + The maximum permitted number of simultaneous client connections + +**`MYSQL_MAX_ALLOWED_PACKET (default: 200M)`** + The maximum size of one packet or any generated/intermediate string + +**`MYSQL_FT_MIN_WORD_LEN (default: 4)`** + The minimum length of the word to be included in a FULLTEXT index + +**`MYSQL_FT_MAX_WORD_LEN (default: 20)`** + The maximum length of the word to be included in a FULLTEXT index + +**`MYSQL_AIO (default: 1)`** + Controls the `innodb_use_native_aio` setting value in case the native AIO is broken. 
See http://help.directadmin.com/item.php?id=529
+
+**`MYSQL_TABLE_OPEN_CACHE (default: 400)`**
+ The number of open tables for all threads
+
+**`MYSQL_KEY_BUFFER_SIZE (default: 32M or 10% of available memory)`**
+ The size of the buffer used for index blocks
+
+**`MYSQL_SORT_BUFFER_SIZE (default: 256K)`**
+ The size of the buffer used for sorting
+
+**`MYSQL_READ_BUFFER_SIZE (default: 8M or 5% of available memory)`**
+ The size of the buffer used for a sequential scan
+
+**`MYSQL_INNODB_BUFFER_POOL_SIZE (default: 32M or 50% of available memory)`**
+ The size of the buffer pool where InnoDB caches table and index data
+
+**`MYSQL_INNODB_LOG_FILE_SIZE (default: 8M or 15% of available memory)`**
+ The size of each log file in a log group
+
+**`MYSQL_INNODB_LOG_BUFFER_SIZE (default: 8M or 15% of available memory)`**
+ The size of the buffer that InnoDB uses to write to the log files on disk
+
+**`MYSQL_DEFAULTS_FILE (default: /etc/my.cnf)`**
+ Point to an alternative configuration file
+
+**`MYSQL_BINLOG_FORMAT (default: statement)`**
+ Sets the binlog format, supported values are `row` and `statement`
+
+**`MYSQL_LOG_QUERIES_ENABLED (default: 0)`**
+ To enable query logging set this to `1`
+
+
+You can also set the following mount points by passing the `-v /host:/container` flag to Docker.
+
+**`/var/lib/mysql/data`**
+ MySQL data directory
+
+
+**Notice: When mounting a directory from the host into the container, ensure that the mounted
+directory has the appropriate permissions and that the owner and group of the directory
+match the user UID or name which is running inside the container.**
+
+
+MySQL auto-tuning
+-----------------
+
+When the MySQL image is run with the `--memory` parameter set and you didn't
+specify a value for some parameters, their values will be automatically
+calculated based on the available memory.
+
+**`MYSQL_KEY_BUFFER_SIZE (default: 10%)`**
+ `key_buffer_size`
+
+**`MYSQL_READ_BUFFER_SIZE (default: 5%)`**
+ `read_buffer_size`
+
+**`MYSQL_INNODB_BUFFER_POOL_SIZE (default: 50%)`**
+ `innodb_buffer_pool_size`
+
+**`MYSQL_INNODB_LOG_FILE_SIZE (default: 15%)`**
+ `innodb_log_file_size`
+
+**`MYSQL_INNODB_LOG_BUFFER_SIZE (default: 15%)`**
+ `innodb_log_buffer_size`
+
+
+
+MySQL root user
+---------------------------------
+The root user has no password set by default, only allowing local connections.
+You can set it by setting the `MYSQL_ROOT_PASSWORD` environment variable. This
+will allow you to log in to the root account remotely. Local connections will
+still not require a password.
+
+To disable remote root access, simply unset `MYSQL_ROOT_PASSWORD` and restart
+the container.
+
+
+Changing passwords
+------------------
+
+Since passwords are part of the image configuration, the only supported method
+to change passwords for the database user (`MYSQL_USER`) and root user is by
+changing the environment variables `MYSQL_PASSWORD` and `MYSQL_ROOT_PASSWORD`,
+respectively.
+
+Changing database passwords through SQL statements or any way other than through
+the aforementioned environment variables will cause a mismatch between the
+values stored in the variables and the actual passwords. Whenever a database
+container starts it will reset the passwords to the values stored in the
+environment variables.
+
+
+Default my.cnf file
+-------------------
+With environment variables we are able to customize a lot of different parameters
+of the mysql bootstrap configuration.
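+
+For example (the values below are purely illustrative), the tuning variables
+described above can be combined with a container memory limit; the startup
+scripts only calculate defaults for values you have not set explicitly:
+
+```
+$ docker run -d --name mysql_database --memory=512m \
+    -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db \
+    -e MYSQL_MAX_CONNECTIONS=100 -e MYSQL_INNODB_BUFFER_POOL_SIZE=256M \
+    -p 3306:3306 rhel8/mysql-80
+```
+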
If you'd prefer to use
+your own configuration file, you can override the `MYSQL_DEFAULTS_FILE` env
+variable with the full path of the file you wish to use. For example, the default
+location is `/etc/my.cnf` but you can change it to `/etc/mysql/my.cnf` by setting
+ `MYSQL_DEFAULTS_FILE=/etc/mysql/my.cnf`
+
+
+Extending image
+---------------
+This image can be extended using [source-to-image](https://github.com/openshift/source-to-image).
+
+For example, to build a customized MySQL database image `my-mysql-rhel7`
+with a configuration in `~/image-configuration/` run:
+
+```
+$ s2i build ~/image-configuration/ rhel8/mysql-80 my-mysql-rhel7
+```
+
+The directory passed to `s2i build` can contain these directories:
+
+`mysql-cfg/`
+ When starting the container, files from this directory will be used as
+ a configuration for the `mysqld` daemon.
+ The `envsubst` command is run on these files to still allow customization of
+ the image using environment variables.
+
+`mysql-pre-init/`
+ Shell scripts (`*.sh`) available in this directory are sourced before
+ `mysqld` daemon is started.
+
+`mysql-init/`
+ Shell scripts (`*.sh`) available in this directory are sourced when
+ `mysqld` daemon is started locally. In this phase, use `${mysql_flags}`
+ to connect to the locally running daemon, for example `mysql $mysql_flags < dump.sql`
+
+Variables that can be used in the scripts provided to s2i:
+
+`$mysql_flags`
+ arguments for the `mysql` tool that will connect to the locally running `mysqld` during initialization
+
+`$MYSQL_RUNNING_AS_MASTER`
+ variable defined when the container is run with `run-mysqld-master` command
+
+`$MYSQL_RUNNING_AS_SLAVE`
+ variable defined when the container is run with `run-mysqld-slave` command
+
+`$MYSQL_DATADIR_FIRST_INIT`
+ variable defined when the container was initialized from the empty data dir
+
+During `s2i build` all provided files are copied into the `/opt/app-root/src`
+directory in the resulting image. If some configuration files are present
+in the destination directory, files with the same name are overwritten.
+Also, only one file with the same name can be used for customization, and
+user-provided files are preferred over default files in
+`/usr/share/container-scripts/mysql/`, so it is possible to overwrite them.
+
+The same configuration directory structure can be used to customize the image
+every time the image is started using `docker run`. The directory has to be
+mounted into `/opt/app-root/src/` in the image
+(`-v ./image-configuration/:/opt/app-root/src/`).
+This overwrites customization built into the image.
+
+
+Securing the connection with SSL
+--------------------------------
+In order to secure the connection with SSL, use the extending feature described
+above. In particular, put the SSL certificates into a separate directory:
+
+    sslapp/mysql-certs/server-cert-selfsigned.pem
+    sslapp/mysql-certs/server-key.pem
+
+And then put a separate configuration file into mysql-cfg:
+
+    $> cat sslapp/mysql-cfg/ssl.cnf
+    [mysqld]
+    ssl-key=${APP_DATA}/mysql-certs/server-key.pem
+    ssl-cert=${APP_DATA}/mysql-certs/server-cert-selfsigned.pem
+
+Such a directory `sslapp` can then be mounted into the container with -v,
+or a new container image can be built using s2i.
+
+
+Upgrading and data directory version checking
+---------------------------------------------
+
+MySQL and MariaDB use versions that consist of three numbers X.Y.Z (e.g. 5.6.23).
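+
+The image's scripts record the version of the data in a `mysql_upgrade_info`
+file in the root of the data directory and compare it with the running daemon.
+If you are unsure which version a mounted data directory comes from, you can
+inspect that file directly (path shown for the default data directory):
+
+```
+$ cat /var/lib/mysql/data/mysql_upgrade_info
+```
+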
For version changes in Z part, the server's binary data format stays compatible and thus no
+special upgrade procedure is needed. For upgrades from X.Y to X.Y+1, consider doing manual
+steps as described at
+https://dev.mysql.com/doc/refman/8.0/en/upgrading-from-previous-series.html.
+
+Skipping versions like from X.Y to X.Y+2 or downgrading to a lower version is not supported;
+the only exception is upgrading from MariaDB 5.5 to MariaDB 10.0.
+
+**Important**: Upgrading to a new version is always risky and users are expected to make a full
+back-up of all data beforehand.
+
+A safer solution to upgrade is to dump all data using `mysqldump` or `mysqldbexport` and then
+load the data using `mysql` or `mysqldbimport` into an empty (freshly initialized) database.
+
+Another way of proceeding with the upgrade is starting the new version of the `mysqld` daemon
+and running `mysql_upgrade` right after the start. This so-called in-place upgrade is generally
+faster for large data directories, but it is only possible if upgrading from the very previous version,
+so skipping versions is not supported.
+
+This container detects whether the data needs to be upgraded using `mysql_upgrade` and
+we can control it by setting the `MYSQL_DATADIR_ACTION` variable, which can have one or more of the following values:
+
+ * `upgrade-warn` -- If the data version can be determined and the data come from a different version
+   of the daemon, a warning is printed but the container starts. This is the default value.
+   Since historically the version file `mysql_upgrade_info` was not created, when using this option,
+   the version file is created if it does not exist, but no `mysql_upgrade` will be called.
+   However, this automatic creation will be removed after a few months, since the version file should be
+   created on most deployments at that point.
+ * `upgrade-auto` -- `mysql_upgrade` is run at the beginning of the container start, when the local
+   daemon is running, but only if the data version can be determined and the data come
+   from the very previous version. A warning is printed if the data come from an even older
+   or a newer version. This value effectively enables automatic upgrades,
+   but it is always risky and users should still back up all the data before starting the newer container.
+   Set this option only if you have very good back-ups at any moment and you are fine to fail-over
+   from the back-up.
+ * `upgrade-force` -- `mysql_upgrade --force` is run at the beginning of the container start, when the local
+   daemon is running, no matter what version of the daemon the data come from.
+   This is also the way to create the missing version file `mysql_upgrade_info` if it is not present
+   in the root of the data directory; this file holds information about the version of the data.
+
+There are also some other actions that you may want to run at the beginning of the container start,
+when the local daemon is running, no matter what version of the data is detected:
+
+ * `optimize` -- runs `mysqlcheck --optimize`. It optimizes all the tables.
+ * `analyze` -- runs `mysqlcheck --analyze`. It analyzes all the tables.
+ * `disable` -- nothing is done regarding the data directory version.
+
+Multiple values are separated by comma and run in-order, e.g. `MYSQL_DATADIR_ACTION="optimize,analyze"`.
+
+
+Changing the replication binlog_format
+--------------------------------------
+Some applications may wish to use `row` binlog_formats (for example, those built
+ with change-data-capture in mind).
The default replication/binlog format is + `statement` but to change it you can set the `MYSQL_BINLOG_FORMAT` environment + variable. For example `MYSQL_BINLOG_FORMAT=row`. Now when you run the database + with `master` replication turned on (ie, set the Docker/container `cmd` to be +`run-mysqld-master`) the binlog will emit the actual data for the rows that change +as opposed to the statements (ie, DML like insert...) that caused the change. + + +Troubleshooting +--------------- +The mysqld deamon in the container logs to the standard output, so the log is available in the container log. The log can be examined by running: + + docker logs + + +See also +-------- +Dockerfile and other sources for this container image are available on +https://github.com/sclorg/mysql-container. +In that repository, Dockerfile for CentOS is called Dockerfile, Dockerfile +for RHEL is called Dockerfile.rhel7. diff --git a/s2i-common/bin/assemble b/s2i-common/bin/assemble new file mode 100755 index 0000000..d65b7e0 --- /dev/null +++ b/s2i-common/bin/assemble @@ -0,0 +1,13 @@ +#!/bin/bash + +set -o errexit +set -o nounset +set -o pipefail + +shopt -s dotglob +echo "---> Installing application source ..." +mv /tmp/src/* ./ 2>/dev/null || true + +# Fix source directory permissions +/usr/libexec/fix-permissions ./ + diff --git a/s2i-common/bin/run b/s2i-common/bin/run new file mode 120000 index 0000000..4b21ab5 --- /dev/null +++ b/s2i-common/bin/run @@ -0,0 +1 @@ +/bin/run-mysqld \ No newline at end of file diff --git a/s2i-common/bin/usage b/s2i-common/bin/usage new file mode 100755 index 0000000..d6a3b9a --- /dev/null +++ b/s2i-common/bin/usage @@ -0,0 +1,8 @@ +#!/bin/sh + +set -o errexit +set -o nounset +set -o pipefail + +groff -t -man -ETascii /help.1 + diff --git a/test/mysql-ephemeral-template.json b/test/mysql-ephemeral-template.json new file mode 100644 index 0000000..48d5486 --- /dev/null +++ b/test/mysql-ephemeral-template.json @@ -0,0 +1,273 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "name": "mysql-ephemeral", + "annotations": { + "openshift.io/display-name": "MySQL (Ephemeral)", + "description": "MySQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/root/usr/share/container-scripts/mysql/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", + "iconClass": "icon-mysql-database", + "tags": "database,mysql", + "openshift.io/long-description": "This template provides a standalone MySQL server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. 
The database name, username, and password are chosen via parameters when provisioning this service.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mysql.html", + "openshift.io/support-url": "https://access.redhat.com" + } + }, + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/root/usr/share/container-scripts/mysql/README.md.", + "labels": { + "template": "mysql-ephemeral-template" + }, + "objects": [ + { + "kind": "Secret", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-username": "{.data['database-user']}", + "template.openshift.io/expose-password": "{.data['database-password']}", + "template.openshift.io/expose-root_password": "{.data['database-root-password']}", + "template.openshift.io/expose-database_name": "{.data['database-name']}" + } + }, + "stringData" : { + "database-user" : "${MYSQL_USER}", + "database-password" : "${MYSQL_PASSWORD}", + "database-root-password" : "${MYSQL_ROOT_PASSWORD}", + "database-name" : "${MYSQL_DATABASE}" + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mysql\")].port}" + } + }, + "spec": { + "ports": [ + { + "name": "mysql", + "protocol": "TCP", + "port": 3306, + "targetPort": 3306, + "nodePort": 0 + } + ], + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.alpha.openshift.io/wait-for-ready": "true" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "mysql" + ], + "from": { + "kind": "ImageStreamTag", + "name": "mysql:${MYSQL_VERSION}", + "namespace": "${NAMESPACE}" + }, + "lastTriggeredImage": "" + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "template": { + "metadata": { + "labels": { + "name": "${DATABASE_SERVICE_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "mysql", + "image": " ", + "ports": [ + { + "containerPort": 3306, + "protocol": "TCP" + } + ], + "readinessProbe": { + "timeoutSeconds": 1, + "initialDelaySeconds": 5, + "exec": { + "command": [ "/bin/sh", "-i", "-c", + "MYSQL_PWD=\"$MYSQL_PASSWORD\" mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'"] + } + }, + "livenessProbe": { + "timeoutSeconds": 1, + "initialDelaySeconds": 30, + "tcpSocket": { + "port": 3306 + } + }, + "env": [ + { + "name": "MYSQL_USER", + "valueFrom": { + "secretKeyRef" : { + "name" : "${DATABASE_SERVICE_NAME}", + "key" : "database-user" + } + } + }, + { + "name": "MYSQL_PASSWORD", + "valueFrom": { + "secretKeyRef" : { + "name" : "${DATABASE_SERVICE_NAME}", + "key" : "database-password" + } + } + }, + { + 
"name": "MYSQL_ROOT_PASSWORD", + "valueFrom": { + "secretKeyRef" : { + "name" : "${DATABASE_SERVICE_NAME}", + "key" : "database-root-password" + } + } + }, + { + "name": "MYSQL_DATABASE", + "valueFrom": { + "secretKeyRef" : { + "name" : "${DATABASE_SERVICE_NAME}", + "key" : "database-name" + } + } + } + ], + "resources": { + "limits": { + "memory": "${MEMORY_LIMIT}" + } + }, + "volumeMounts": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "mountPath": "/var/lib/mysql/data" + } + ], + "terminationMessagePath": "/dev/termination-log", + "imagePullPolicy": "IfNotPresent", + "capabilities": {}, + "securityContext": { + "capabilities": {}, + "privileged": false + } + } + ], + "volumes": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "emptyDir": { + "medium": "" + } + } + ], + "restartPolicy": "Always", + "dnsPolicy": "ClusterFirst" + } + } + }, + "status": {} + } + ], + "parameters": [ + { + "name": "MEMORY_LIMIT", + "displayName": "Memory Limit", + "description": "Maximum amount of memory the container can use.", + "value": "512Mi", + "required": true + }, + { + "name": "NAMESPACE", + "displayName": "Namespace", + "description": "The OpenShift Namespace where the ImageStream resides.", + "value": "openshift" + }, + { + "name": "DATABASE_SERVICE_NAME", + "displayName": "Database Service Name", + "description": "The name of the OpenShift Service exposed for the database.", + "value": "mysql", + "required": true + }, + { + "name": "MYSQL_USER", + "displayName": "MySQL Connection Username", + "description": "Username for MySQL user that will be used for accessing the database.", + "generate": "expression", + "from": "user[A-Z0-9]{3}", + "required": true + }, + { + "name": "MYSQL_PASSWORD", + "displayName": "MySQL Connection Password", + "description": "Password for the MySQL connection user.", + "generate": "expression", + "from": "[a-zA-Z0-9]{16}", + "required": true + }, + { + "name": "MYSQL_ROOT_PASSWORD", + "displayName": "MySQL root user Password", + "description": "Password for the MySQL root user.", + "generate": "expression", + "from": "[a-zA-Z0-9]{16}", + "required": true + }, + { + "name": "MYSQL_DATABASE", + "displayName": "MySQL Database Name", + "description": "Name of the MySQL database accessed.", + "value": "sampledb", + "required": true + }, + { + "name": "MYSQL_VERSION", + "displayName": "Version of MySQL Image", + "description": "Version of MySQL image to be used (5.7, or latest).", + "value": "5.7", + "required": true + } + ] +} diff --git a/test/run b/test/run new file mode 100755 index 0000000..6aa7e94 --- /dev/null +++ b/test/run @@ -0,0 +1,629 @@ +#!/bin/bash +# +# Test the MySQL image. +# +# IMAGE_NAME specifies the name of the candidate image used for testing. +# The image has to be available before this script is executed. +# + +set -o errexit +set -o nounset +shopt -s nullglob + +THISDIR=$(dirname ${BASH_SOURCE[0]}) +source ${THISDIR}/test-lib.sh + +TEST_LIST="\ +run_container_creation_tests +run_configuration_tests +run_general_tests +run_change_password_test +run_replication_test +run_doc_test +run_s2i_test +run_ssl_test +run_upgrade_test +" + +if [ -e "${IMAGE_NAME:-}" ] ; then + echo "Error: IMAGE_NAME must be specified" + exit 1 +fi + +CID_FILE_DIR=$(mktemp --suffix=mysql_test_cidfiles -d) +TESTSUITE_RESULT=1 +test_dir="$(readlink -f $(dirname "${BASH_SOURCE[0]}"))" + +s2i_args="--pull-policy=never " + +function cleanup() { + ct_cleanup + + if [ $TESTSUITE_RESULT -eq 0 ] ; then + echo "Tests for ${IMAGE_NAME} succeeded." 
+ else + echo "Tests for ${IMAGE_NAME} failed." + fi +} +trap cleanup EXIT SIGINT + +function mysql_cmd() { + local container_ip="$1"; shift + local login="$1"; shift + local password="$1"; shift + docker run --rm ${CONTAINER_EXTRA_ARGS:-} "$IMAGE_NAME" mysql --host "$container_ip" -u"$login" -p"$password" "$@" db +} + +function test_connection() { + local name=$1 ; shift + local login=$1 ; shift + local password=$1 ; shift + local ip + ip=$(ct_get_cip $name) + echo " Testing MySQL connection to $ip..." + local max_attempts=20 + local sleep_time=2 + local i + local status='' + echo -n " Trying to connect..." + for i in $(seq $max_attempts); do + local status=$(docker inspect -f '{{.State.Status}}' $(ct_get_cid "${name}")) + if [ "${status}" != 'running' ] ; then + break; + fi + echo -n "." + if mysql_cmd "$ip" "$login" "$password" &>/dev/null <<< 'SELECT 1;'; then + echo " OK" + echo " Success!" + return 0 + fi + sleep $sleep_time + done + echo " FAIL" + echo " Giving up: Failed to connect." + if [ "${status}" == 'running' ] ; then + echo " Container is still running." + else + local exit_status=$(docker inspect -f '{{.State.ExitCode}}' ${name}) + echo " Container finised with exit code ${exit_status}." + fi + echo "Logs:" + docker logs $(ct_get_cid $name) + return 1 +} + +function test_mysql() { + local container_ip="$1" + local login="$2" + local password="$3" + + echo " Testing MySQL" + mysql_cmd "$container_ip" "$login" "$password" <<< 'CREATE TABLE tbl (col1 VARCHAR(20), col2 VARCHAR(20));' + mysql_cmd "$container_ip" "$login" "$password" <<< 'INSERT INTO tbl VALUES ("foo1", "bar1");' + mysql_cmd "$container_ip" "$login" "$password" <<< 'INSERT INTO tbl VALUES ("foo2", "bar2");' + mysql_cmd "$container_ip" "$login" "$password" <<< 'INSERT INTO tbl VALUES ("foo3", "bar3");' + mysql_cmd "$container_ip" "$login" "$password" <<< 'SELECT * FROM tbl;' + mysql_cmd "$container_ip" "$login" "$password" <<< 'DROP TABLE tbl;' + echo " Success!" 
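+  # Note: mysql_cmd appends the "db" schema name to every invocation, so the
+  # statements above run against the database created from MYSQL_DATABASE=db.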
+} + +function create_container() { + local name=$1 ; shift + cidfile="$CID_FILE_DIR/$name" + # create container with a cidfile in a directory for cleanup + local container_id + container_id="$(docker run ${DOCKER_ARGS:-} --cidfile $cidfile -d "$@" $IMAGE_NAME ${CONTAINER_ARGS:-})" + echo " Created container $container_id" +} + +function run_change_password_test() { + local tmpdir=$(mktemp -d) + chmod -R a+rwx "${tmpdir}" + + # Create MySQL container with persistent volume and set the initial password + create_container "testpass1" -e MYSQL_USER=user -e MYSQL_PASSWORD=foo \ + -e MYSQL_DATABASE=db -v ${tmpdir}:/var/lib/mysql/data:Z + test_connection testpass1 user foo + docker stop $(ct_get_cid testpass1) >/dev/null + + # Create second container with changed password + create_container "testpass2" -e MYSQL_USER=user -e MYSQL_PASSWORD=bar \ + -e MYSQL_DATABASE=db -v ${tmpdir}:/var/lib/mysql/data:Z + test_connection testpass2 user bar + + # The old password should not work anymore + if mysql_cmd "$(ct_get_cip testpass2)" user foo -e 'SELECT 1;'; then + return 1 + fi +} + +function run_replication_test() { + local cluster_args="-e MYSQL_MASTER_USER=master -e MYSQL_MASTER_PASSWORD=master -e MYSQL_DATABASE=db" + local max_attempts=30 + + # Run the MySQL master + docker run $cluster_args -e MYSQL_USER=user -e MYSQL_PASSWORD=foo \ + -e MYSQL_ROOT_PASSWORD=root \ + -e MYSQL_INNODB_BUFFER_POOL_SIZE=5M \ + -d --cidfile ${CID_FILE_DIR}/master.cid $IMAGE_NAME mysqld-master >/dev/null + local master_ip + master_ip=$(ct_get_cip master.cid) + + # Run the MySQL slave + docker run $cluster_args -e MYSQL_MASTER_SERVICE_NAME=${master_ip} \ + -e MYSQL_INNODB_BUFFER_POOL_SIZE=5M \ + -d --cidfile ${CID_FILE_DIR}/slave.cid $IMAGE_NAME mysqld-slave >/dev/null + local slave_ip + slave_ip=$(ct_get_cip slave.cid) + + # Now wait till the MASTER will see the SLAVE + local i + for i in $(seq $max_attempts); do + result="$(mysql_cmd "$master_ip" root root -e 'SHOW SLAVE HOSTS;' | grep "$slave_ip" || true)" + if [[ -n "${result}" ]]; then + echo "${slave_ip} successfully registered as SLAVE for ${master_ip}" + break + fi + if [[ "${i}" == "${max_attempts}" ]]; then + echo "The ${slave_ip} failed to register in MASTER" + echo "Dumping logs for $(ct_get_cid slave.cid)" + docker logs $(ct_get_cid slave.cid) + return 1 + fi + sleep 1 + done + + # do some real work to test replication in practice + mysql_cmd "$master_ip" root root -e "CREATE TABLE t1 (a INT); INSERT INTO t1 VALUES (24);" + + # read value from slave and check whether it is expectd + for i in $(seq $max_attempts); do + set +e + result="$(mysql_cmd "${slave_ip}" root root -e "select * from t1 \G" | grep -e ^a | grep 24)" + set -e + if [[ ! -z "${result}" ]]; then + echo "${slave_ip} successfully got value from MASTER ${master_ip}" + break + fi + if [[ "${i}" == "${max_attempts}" ]]; then + echo "The ${slave_ip} failed to see value added on MASTER" + echo "Dumping logs for $(ct_get_cid slave.cid)" + docker logs $(ct_get_cid slave.cid) + return 1 + fi + sleep 1 + done +} + +function assert_login_access() { + local container_ip=$1; shift + local USER=$1 ; shift + local PASS=$1 ; shift + local success=$1 ; shift + + if mysql_cmd "$container_ip" "$USER" "$PASS" <<< 'SELECT 1;' ; then + if $success ; then + echo " $USER($PASS) access granted as expected" + return + fi + else + if ! 
$success ; then + echo " $USER($PASS) access denied as expected" + return + fi + fi + echo " $USER($PASS) login assertion failed" + exit 1 +} + +function assert_local_access() { + local id="$1" ; shift + if docker exec $(ct_get_cid "$id") bash -c 'mysql -uroot <<< "SELECT 1;"' ; then + echo " local access granted as expected" + return + fi + echo " local access assertion failed" + return 1 +} + +# Make sure the invocation of docker run fails. +function assert_container_creation_fails() { + + # Time the docker run command. It should fail. If it doesn't fail, + # mysqld will keep running so we kill it with SIGKILL to make sure + # timeout returns a non-zero value. + local ret=0 + timeout -s 9 --preserve-status 60s docker run --rm "$@" $IMAGE_NAME >/dev/null || ret=$? + + # Timeout will exit with a high number. + if [ $ret -gt 30 ]; then + return 1 + fi + echo " Success!" +} + +function try_image_invalid_combinations() { + assert_container_creation_fails -e MYSQL_USER=user -e MYSQL_DATABASE=db "$@" + assert_container_creation_fails -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db "$@" +} + +function run_container_creation_tests() { + echo " Testing image entrypoint usage" + assert_container_creation_fails + try_image_invalid_combinations + try_image_invalid_combinations -e MYSQL_ROOT_PASSWORD=root_pass + + local VERY_LONG_DB_NAME="very_long_database_name_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + local VERY_LONG_USER_NAME="very_long_user_name_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + assert_container_creation_fails -e MYSQL_USER=user -e MYSQL_PASSWORD=pass + assert_container_creation_fails -e MYSQL_USER=\$invalid -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -e MYSQL_ROOT_PASSWORD=root_pass + assert_container_creation_fails -e MYSQL_USER=$VERY_LONG_USER_NAME -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -e MYSQL_ROOT_PASSWORD=root_pass + assert_container_creation_fails -e MYSQL_USER=user -e MYSQL_PASSWORD="\"" -e MYSQL_DATABASE=db -e MYSQL_ROOT_PASSWORD=root_pass + assert_container_creation_fails -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=\$invalid -e MYSQL_ROOT_PASSWORD=root_pass + assert_container_creation_fails -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=$VERY_LONG_DB_NAME -e MYSQL_ROOT_PASSWORD=root_pass + assert_container_creation_fails -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -e MYSQL_ROOT_PASSWORD="\"" + assert_container_creation_fails -e MYSQL_USER=root -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -e MYSQL_ROOT_PASSWORD=pass + echo " Success!" +} + +function test_config_option() { + local container_name="$1" + local configuration="$2" + local option_name="$3" + local option_value="$4" + + if ! 
echo "$configuration" | grep -qx "$option_name[[:space:]]*=[[:space:]]*$option_value"; then + local configs="$(docker exec -t "$(ct_get_cid $container_name)" bash -c 'set +f; shopt -s nullglob; echo /etc/my.cnf /etc/my.cnf.d/* /opt/rh/mysql*/root/etc/my.cnf /opt/rh/mysql*/root/etc/my.cnf.d/* | paste -s')" + echo >&2 "FAIL: option '$option_name' should have value '$option_value', but it wasn't found in any of the configuration files ($configs):" + echo >&2 + echo >&2 "$configuration" + echo >&2 + return 1 + fi + + return 0 +} + +function run_configuration_tests() { + echo " Testing image configuration settings" + + local container_name=config_test + + create_container \ + "$container_name" \ + --env MYSQL_USER=config_test_user \ + --env MYSQL_PASSWORD=config_test \ + --env MYSQL_DATABASE=db \ + --env MYSQL_LOWER_CASE_TABLE_NAMES=1 \ + --env MYSQL_LOG_QUERIES_ENABLED=1 \ + --env MYSQL_MAX_CONNECTIONS=1337 \ + --env MYSQL_FT_MIN_WORD_LEN=8 \ + --env MYSQL_FT_MAX_WORD_LEN=15 \ + --env MYSQL_MAX_ALLOWED_PACKET=10M \ + --env MYSQL_TABLE_OPEN_CACHE=100 \ + --env MYSQL_SORT_BUFFER_SIZE=256K \ + --env MYSQL_KEY_BUFFER_SIZE=16M \ + --env MYSQL_READ_BUFFER_SIZE=16M \ + --env MYSQL_INNODB_BUFFER_POOL_SIZE=16M \ + --env MYSQL_INNODB_LOG_FILE_SIZE=4M \ + --env MYSQL_INNODB_LOG_BUFFER_SIZE=4M \ + --env WORKAROUND_DOCKER_BUG_14203= + # + + test_connection "$container_name" config_test_user config_test + + # TODO: this check is far from perfect and could be improved: + # - we should look for an option in the desired config, not in all of them + # - we should respect section of the config (now we have duplicated options from a different sections) + local configuration + configuration="$(docker exec -t "$(ct_get_cid $container_name)" bash -c 'set +f; shopt -s nullglob; egrep -hv "^(#|\!|\[|$)" /etc/my.cnf /etc/my.cnf.d/* /opt/rh/mysql*/root/etc/my.cnf /opt/rh/mysql*/root/etc/my.cnf.d/*' | sed 's,\(^[[:space:]]\+\|[[:space:]]\+$\),,' | sort -u)" + + test_config_option "$container_name" "$configuration" lower_case_table_names 1 + test_config_option "$container_name" "$configuration" general_log 1 + test_config_option "$container_name" "$configuration" max_connections 1337 + test_config_option "$container_name" "$configuration" ft_min_word_len 8 + test_config_option "$container_name" "$configuration" ft_max_word_len 15 + test_config_option "$container_name" "$configuration" max_allowed_packet 10M + test_config_option "$container_name" "$configuration" table_open_cache 100 + test_config_option "$container_name" "$configuration" sort_buffer_size 256K + test_config_option "$container_name" "$configuration" key_buffer_size 16M + test_config_option "$container_name" "$configuration" read_buffer_size 16M + test_config_option "$container_name" "$configuration" innodb_buffer_pool_size 16M + test_config_option "$container_name" "$configuration" innodb_log_file_size 4M + test_config_option "$container_name" "$configuration" innodb_log_buffer_size 4M + + docker stop "$(ct_get_cid $container_name)" >/dev/null + + echo " Success!" 
+ echo " Testing image auto-calculated configuration settings" + + container_name=dynamic_config_test + + DOCKER_ARGS='--memory=256m' create_container \ + "$container_name" \ + --env MYSQL_USER=config_test_user \ + --env MYSQL_PASSWORD=config_test \ + --env MYSQL_DATABASE=db + + test_connection "$container_name" config_test_user config_test + + configuration="$(docker exec -t "$(ct_get_cid $container_name)" bash -c 'set +f; shopt -s nullglob; egrep -hv "^(#|\!|\[|$)" /etc/my.cnf /etc/my.cnf.d/* /opt/rh/mysql*/root/etc/my.cnf /opt/rh/mysql*/root/etc/my.cnf.d/*' | sed 's,\(^[[:space:]]\+\|[[:space:]]\+$\),,' | sort -u)" + + test_config_option "$container_name" "$configuration" key_buffer_size 25M + test_config_option "$container_name" "$configuration" read_buffer_size 12M + test_config_option "$container_name" "$configuration" innodb_buffer_pool_size 128M + test_config_option "$container_name" "$configuration" innodb_log_file_size 38M + test_config_option "$container_name" "$configuration" innodb_log_buffer_size 38M + + docker stop "$(ct_get_cid $container_name)" >/dev/null + + echo " Success!" +} + +function run_tests() { + local name=$1 ; shift + envs="-e MYSQL_USER=$USER -e MYSQL_PASSWORD=$PASS -e MYSQL_DATABASE=db" + if [ -v ROOT_PASS ]; then + envs="$envs -e MYSQL_ROOT_PASSWORD=$ROOT_PASS" + fi + create_container $name $envs + test_connection "$name" "$USER" "$PASS" + echo " Testing scl usage" + ct_scl_usage_old $name 'mysql --version' "$VERSION" + echo " Testing login accesses" + local container_ip + container_ip=$(ct_get_cip $name) + assert_login_access "$container_ip" "$USER" "$PASS" true + assert_login_access "$container_ip" "$USER" "${PASS}_foo" false + if [ -v ROOT_PASS ]; then + assert_login_access "$container_ip" root "$ROOT_PASS" true + assert_login_access "$container_ip" root "${ROOT_PASS}_foo" false + else + assert_login_access "$container_ip" root 'foo' false + assert_login_access "$container_ip" root '' false + fi + assert_local_access "$name" + echo " Success!" + test_mysql "$container_ip" "$USER" "$PASS" +} + +run_doc_test() { + echo " Testing documentation in the container image" + ct_doc_content_old "MYSQL\_ROOT\_PASSWORD" volume 3306 + echo " Success!" 
+ echo +} + +_s2i_test_image() { + local container_name="$1" + local mount_opts="$2" + echo " Testing s2i app image with invalid configuration" + assert_container_creation_fails -e MYSQL_USER=root -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -e MYSQL_ROOT_PASSWORD=pass + echo " Testing s2i app image with correct configuration" + create_container \ + "$container_name" \ + --env MYSQL_USER=config_test_user \ + --env MYSQL_PASSWORD=config_test \ + --env MYSQL_DATABASE=db \ + --env MYSQL_OPERATIONS_USER=operations_user \ + --env MYSQL_OPERATIONS_PASSWORD=operations_pass \ + ${mount_opts} + + test_connection "$container_name" operations_user operations_pass + + configuration="$(docker exec -t "$(ct_get_cid $container_name)" bash -c 'set +f; shopt -s nullglob; egrep -hv "^(#|\!|\[|$)" /etc/my.cnf /etc/my.cnf.d/* /opt/rh/mysql*/root/etc/my.cnf /opt/rh/mysql*/root/etc/my.cnf.d/*' | sed 's,\(^[[:space:]]\+\|[[:space:]]\+$\),,' | sort -u)" + + docker stop "$(ct_get_cid $container_name)" >/dev/null +} + +run_s2i_test() { + echo " Testing s2i usage" + s2i usage ${s2i_args} ${IMAGE_NAME} &>/dev/null + + echo " Testing s2i build" + s2i build file://${test_dir}/test-app ${IMAGE_NAME} ${IMAGE_NAME}-testapp + local image_name_backup=${IMAGE_NAME} + export IMAGE_NAME=${IMAGE_NAME}-testapp + + local container_name=s2i_config_build + _s2i_test_image "s2i_config_build" "" + + # return back original value for IMAGE_NAME + export IMAGE_NAME=${image_name_backup} + + echo " Testing s2i mount" + test_app_dir=$(mktemp -d) + cp -Lr ${test_dir}/test-app ${test_app_dir}/ + chown -R 27:27 ${test_app_dir} + _s2i_test_image "_s2i_test_mount" "-v ${test_app_dir}/test-app:/opt/app-root/src/:z" + rm -rf ${test_app_dir} + echo " Success!" +} + +gen_self_signed_cert() { + local output_dir=$1 ; shift + local base_name=$1 ; shift + mkdir -p ${output_dir} + openssl req -newkey rsa:2048 -nodes -keyout ${output_dir}/${base_name}-key.pem -subj '/C=GB/ST=Berkshire/L=Newbury/O=My Server Company' > ${base_name}-req.pem + openssl req -new -x509 -nodes -key ${output_dir}/${base_name}-key.pem -batch > ${output_dir}/${base_name}-cert-selfsigned.pem +} + +run_ssl_test() { + echo " Testing ssl usage" + test_app_dir=$(mktemp -d) + mkdir -p ${test_app_dir}/{mysql-certs,mysql-cfg} + gen_self_signed_cert ${test_app_dir}/mysql-certs server + echo "[mysqld] +ssl-key=\${APP_DATA}/mysql-certs/server-key.pem +ssl-cert=\${APP_DATA}/mysql-certs/server-cert-selfsigned.pem +" >${test_app_dir}/mysql-cfg/ssl.cnf + chown -R 27:27 ${test_app_dir} + local ca_cert_path="/opt/app-root/src/mysql-certs/server-cert-selfsigned.pem" + + create_container \ + "_s2i_test_ssl" \ + --env MYSQL_USER=ssl_test_user \ + --env MYSQL_PASSWORD=ssl_test \ + --env MYSQL_DATABASE=db \ + -v ${test_app_dir}:/opt/app-root/src/:z + + test_connection "_s2i_test_ssl" ssl_test_user ssl_test + ip=$(ct_get_cip _s2i_test_ssl) + + # At least MySQL 5.6 requires ssl-ca option on client side, otherwise the ssl is not used + CONTAINER_EXTRA_ARGS="-v ${test_app_dir}:/opt/app-root/src/:z" + + # MySQL requires --ssl-mode to be set in order to require SSL + case ${VERSION} in + 5*) ssl_mode_opt='--ssl-mode=REQUIRED' + esac + + if mysql_cmd "$ip" "ssl_test_user" "ssl_test" ${ssl_mode_opt:-} --ssl-ca=${ca_cert_path} -e 'show status like "Ssl_cipher" \G' | grep 'Value: [A-Z][A-Z0-9-]*' ; then + echo " Success!" + rm -rf ${test_app_dir} + else + echo " FAIL!" 
+ mysql_cmd "$ip" "ssl_test_user" "ssl_test" --ssl-ca=${ca_cert_path} -e 'show status like "%ssl%" \G' + return 1 + fi +} + +function run_general_tests() { + # Set lower buffer pool size to avoid running out of memory. + export CONTAINER_ARGS="run-mysqld --innodb_buffer_pool_size=5242880" + + # Normal tests + USER=user PASS=pass run_tests no_root + USER=user1 PASS=pass1 ROOT_PASS=r00t run_tests root + # Test with arbitrary uid for the container + DOCKER_ARGS="-u 12345" USER=user PASS=pass run_tests no_root_altuid + DOCKER_ARGS="-u 12345" USER=user1 PASS=pass1 ROOT_PASS=r00t run_tests root_altuid +} + +function get_previous_major_version() { + case "${1}" in + 5.5) echo "5.1" ;; + 5.6) echo "5.5" ;; + 5.7) echo "5.6" ;; + 8.0) echo "5.7" ;; + 10.0) echo "5.5" ;; + 10.1) echo "10.0" ;; + 10.2) echo "10.1" ;; + 10.3) echo "10.2" ;; + *) echo "Non expected version '${1}'" ; return 1 ;; + esac +} + +function run_upgrade_test() { + local tmpdir=$(mktemp -d) + echo " Testing upgrade of the container image" + mkdir "${tmpdir}/data" && chmod -R a+rwx "${tmpdir}" + + # Create MySQL container with persistent volume and set the version from too old version + local datadir=${tmpdir}/data + create_container "testupg1" -e MYSQL_USER=user -e MYSQL_PASSWORD=foo \ + -e MYSQL_DATABASE=db -v ${datadir}:/var/lib/mysql/data:Z + test_connection testupg1 user foo + docker stop $(ct_get_cid testupg1) >/dev/null + + # Simulate datadir without version information + rm -f ${datadir}/mysql_upgrade_info + echo " Testing upgrade from data without version" + # This should work, but warning should be printed + create_container "testupg2" -e MYSQL_USER=user -e MYSQL_PASSWORD=foo \ + -e MYSQL_DATABASE=db -v ${datadir}:/var/lib/mysql/data:Z + test_connection testupg2 user foo + docker stop $(ct_get_cid testupg2) >/dev/null + # Check whether some information is provided + if ! docker logs $(ct_get_cid testupg2) 2>&1 | grep -e 'Version of the data could not be determined' &>/dev/null ; then + echo "Information about missing version file is not available in the logs" + return 1 + fi + # Check whether upgrade did not happen + if docker logs $(ct_get_cid testupg2) 2>&1 | grep -e 'Running mysql_upgrade' &>/dev/null ; then + echo "Upgrade should not be run when information about version is missing" + return 1 + fi + + # Create version file that is too old + echo " Testing upgrade from too old data" + echo "5.0.12" >${datadir}/mysql_upgrade_info + # Create another container with same data and upgrade set to 'upgrade-auto' + assert_container_creation_fails -e MYSQL_USER=user -e MYSQL_PASSWORD=foo \ + -e MYSQL_DATABASE=db -v ${datadir}:/var/lib/mysql/data:Z -e MYSQL_DATADIR_ACTION=upgrade-auto 2>/dev/null + + # Create version file that we can upgrade from + echo " Testing upgrade from previous version" + echo "$(get_previous_major_version ${VERSION}).12" >${datadir}/mysql_upgrade_info + # Create another container with same data and upgrade set to 'upgrade-aauto' + create_container "testupg3" -e MYSQL_USER=user -e MYSQL_PASSWORD=foo \ + -e MYSQL_DATABASE=db -v ${datadir}:/var/lib/mysql/data:Z -e MYSQL_DATADIR_ACTION=upgrade-auto + test_connection testupg3 user foo + docker stop $(ct_get_cid testupg3) >/dev/null + # Check whether some upgrade happened + if ! 
docker logs $(ct_get_cid testupg3) 2>&1 | grep -qe 'Running mysql_upgrade' ; then + echo "Upgrade did not happen but it should when upgrading from previous version" + docker logs $(ct_get_cid testupg3) + return 1 + fi + + # Create version file that we don't need to upgrade from + echo " Testing upgrade from the same version" + echo "${VERSION}.12" >${datadir}/mysql_upgrade_info + # Create another container with same data and upgrade set to 'upgrade-aauto' + create_container "testupg4" -e MYSQL_USER=user -e MYSQL_PASSWORD=foo \ + -e MYSQL_DATABASE=db -v ${datadir}:/var/lib/mysql/data:Z -e MYSQL_DATADIR_ACTION=upgrade-auto + test_connection testupg4 user foo + docker stop $(ct_get_cid testupg4) >/dev/null + # Check whether some upgrade happened + if docker logs $(ct_get_cid testupg4) 2>&1 | grep -e 'Running mysql_upgrade' &>/dev/null ; then + echo "Upgrade happened but it should not when upgrading from current version" + return 1 + fi + + # Create second container with same data and upgrade set to 'analyze' + echo " Testing running --analyze" + create_container "testupg5" -e MYSQL_USER=user -e MYSQL_PASSWORD=foo \ + -e MYSQL_DATABASE=db -v ${datadir}:/var/lib/mysql/data:Z -e MYSQL_DATADIR_ACTION=analyze + test_connection testupg5 user foo + docker stop $(ct_get_cid testupg5) >/dev/null + # Check whether analyze happened + if ! docker logs $(ct_get_cid testupg5) 2>&1 | grep -e '--analyze --all-databases' &>/dev/null ; then + echo "Analyze did not happen but it should" + return 1 + fi + + # Create another container with same data and upgrade set to 'optimize' + echo " Testing running --optimize" + create_container "testupg6" -e MYSQL_USER=user -e MYSQL_PASSWORD=foo \ + -e MYSQL_DATABASE=db -v ${datadir}:/var/lib/mysql/data:Z -e MYSQL_DATADIR_ACTION=optimize + test_connection testupg6 user foo + docker stop $(ct_get_cid testupg6) >/dev/null + # Check whether optimize happened + if ! docker logs $(ct_get_cid testupg6) 2>&1 | grep -e '--optimize --all-databases' &>/dev/null ; then + echo "Optimize did not happen but it should" + return 1 + fi + + # Create version file that we cannot upgrade from + echo " Testing upgrade from the future version" + echo "20.1.12" >${datadir}/mysql_upgrade_info + assert_container_creation_fails -e MYSQL_USER=user -e MYSQL_PASSWORD=foo \ + -e MYSQL_DATABASE=db -v ${datadir}:/var/lib/mysql/data:Z -e MYSQL_DATADIR_ACTION=upgrade-auto 2>/dev/null + + echo " Upgrade tests succeeded!" + echo +} + +function run_all_tests() { + for test_case in $TEST_LIST; do + echo "Running test $test_case for ${IMAGE_NAME}" + $test_case + done; +} + +# Run the chosen tests +TEST_LIST=${@:-$TEST_LIST} run_all_tests + +TESTSUITE_RESULT=0 + diff --git a/test/run-openshift b/test/run-openshift new file mode 100755 index 0000000..46747b4 --- /dev/null +++ b/test/run-openshift @@ -0,0 +1,122 @@ +#!/bin/bash +# +# Test the MariaDB image in OpenShift. +# +# IMAGE_NAME specifies a name of the candidate image used for testing. +# The image has to be available before this script is executed. 
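+# Illustrative invocation (example values only; both variables are checked below):
+#   IMAGE_NAME=rhel8/mysql-80 VERSION=8.0 ./test/run-openshift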
+# + +THISDIR=$(dirname ${BASH_SOURCE[0]}) + +source ${THISDIR}/test-lib-openshift.sh + +set -exo nounset + +test -n "${IMAGE_NAME-}" || false 'make sure $IMAGE_NAME is defined' +test -n "${VERSION-}" || false 'make sure $VERSION is defined' + +function check_mysql_os_service_connection() { + local util_image_name="${1}" ; shift + local service_name="${1}" ; shift + local user="${1}" ; shift + local pass="${1}" ; shift + local timeout="${1:-60}" ; shift || : + local pod_ip=$(ct_os_get_service_ip ${service_name}) + + : " Service ${service_name} check ..." + + local cmd="echo 'SELECT 42 as testval\g' | mysql --connect-timeout=15 -h ${pod_ip} -u${user} -p${pass}" + local expected_value='^42' + local output + local ret + SECONDS=0 + + echo -n "Waiting for ${service_name} service becoming ready ..." + while true ; do + output=$(docker run --rm ${util_image_name} bash -c "${cmd}" || :) + echo "${output}" | grep -qe "${expected_value}" && ret=0 || ret=1 + if [ ${ret} -eq 0 ] ; then + echo " PASS" + return 0 + fi + echo -n "." + [ ${SECONDS} -gt ${timeout} ] && break + sleep 3 + done + echo " FAIL" + return 1 +} + +function test_mysql_pure_image() { + local image_name=${1:-centos/mysql-101-centos7} + local image_name_no_namespace=${image_name##*/} + local service_name=${image_name_no_namespace} + + ct_os_new_project + # Create a specific imagestream tag for the image so that oc cannot use anything else + ct_os_upload_image "${image_name}" "$image_name_no_namespace:testing" + + ct_os_deploy_pure_image "$image_name_no_namespace:testing" \ + --name "${service_name}" \ + --env MYSQL_ROOT_PASSWORD=test + + ct_os_wait_pod_ready "${service_name}" 60 + check_mysql_os_service_connection "${image_name}" "${service_name}" root test + + ct_os_delete_project +} + +function test_mysql_template() { + local image_name=${1:-centos/mysql-101-centos7} + local image_name_no_namespace=${image_name##*/} + local service_name=${image_name_no_namespace} + + ct_os_new_project + ct_os_upload_image "${image_name}" "mysql:$VERSION" + + ct_os_deploy_template_image ${THISDIR}/mysql-ephemeral-template.json \ + NAMESPACE="$(oc project -q)" \ + MYSQL_VERSION="$VERSION" \ + DATABASE_SERVICE_NAME="${service_name}" \ + MYSQL_USER=testu \ + MYSQL_PASSWORD=testp \ + MYSQL_DATABASE=testdb + + ct_os_wait_pod_ready "${service_name}" 60 + check_mysql_os_service_connection "${image_name}" "${service_name}" testu testp + + ct_os_delete_project +} + +function test_mysql_s2i() { + local image_name=${1:-centos/mysql-101-centos7} + local app=${2:-https://github.com/sclorg/mysql-container.git} + local context_dir=${3:-test/test-app} + local image_name_no_namespace=${image_name##*/} + local service_name="${image_name_no_namespace}-testing" + + ct_os_new_project + # Create a specific imagestream tag for the image so that oc cannot use anything else + ct_os_upload_image "${image_name}" "$image_name_no_namespace:testing" + + ct_os_deploy_s2i_image "$image_name_no_namespace:testing" "${app}" \ + --context-dir="${context_dir}" \ + --name "${service_name}" \ + --env MYSQL_ROOT_PASSWORD=test \ + --env MYSQL_OPERATIONS_USER=testo \ + --env MYSQL_OPERATIONS_PASSWORD=testo \ + --env MYSQL_DATABASE=testopdb \ + --env MYSQL_USER=testnormal \ + --env MYSQL_PASSWORD=testnormal + + ct_os_wait_pod_ready "${service_name}" 60 + check_mysql_os_service_connection "${image_name}" "${service_name}" testo testo 120 + + ct_os_delete_project +} + +ct_os_cluster_up +test_mysql_pure_image ${IMAGE_NAME} +test_mysql_template ${IMAGE_NAME} +# TODO: Can we make the build 
against examples inside the same PR? +test_mysql_s2i ${IMAGE_NAME} "https://github.com/sclorg/mysql-container.git" test/test-app diff --git a/test/test-app/mysql-cfg/myconfig.cnf b/test/test-app/mysql-cfg/myconfig.cnf new file mode 100644 index 0000000..7764adf --- /dev/null +++ b/test/test-app/mysql-cfg/myconfig.cnf @@ -0,0 +1,3 @@ +[mysqld] +query-cache-limit=262144 + diff --git a/test/test-app/mysql-data/init.sql b/test/test-app/mysql-data/init.sql new file mode 100644 index 0000000..3159982 --- /dev/null +++ b/test/test-app/mysql-data/init.sql @@ -0,0 +1,4 @@ +CREATE TABLE products (id INTEGER, name VARCHAR(256), price FLOAT, variant INTEGER); +CREATE TABLE products_variant (id INTEGER, name VARCHAR(256)); +INSERT INTO products_variant (id, name) VALUES ('1', 'blue'), ('2', 'green'); + diff --git a/test/test-app/mysql-init/80-add-arbitrary-users.sh b/test/test-app/mysql-init/80-add-arbitrary-users.sh new file mode 100644 index 0000000..55ae2d2 --- /dev/null +++ b/test/test-app/mysql-init/80-add-arbitrary-users.sh @@ -0,0 +1,17 @@ +create_arbitrary_users() { + # Do not care what option is compulsory here, just create what is specified + log_info "Creating user specified by MYSQL_OPERATIONS_USER (${MYSQL_OPERATIONS_USER}) ..." +mysql $mysql_flags <&2 + return 1 + fi + echo "${public_ip}" +} + +# ct_os_run_in_pod POD_NAME CMD +# -------------------- +# Runs [cmd] in the pod specified by prefix [pod_prefix]. +# Arguments: pod_name - full name of the pod +# Arguments: cmd - command to be run in the pod +function ct_os_run_in_pod() { + local pod_name="$1" ; shift + + oc exec "$pod_name" -- "$@" +} + +# ct_os_get_service_ip SERVICE_NAME +# -------------------- +# Returns IP of the service specified by [service_name]. +# Arguments: service_name - name of the service +function ct_os_get_service_ip() { + local service_name="${1}" ; shift + oc get "svc/${service_name}" -o yaml | grep clusterIP | \ + cut -d':' -f2 | grep -oe '172\.30\.[0-9\.]*' +} + + +# ct_os_get_all_pods_status +# -------------------- +# Returns status of all pods. +function ct_os_get_all_pods_status() { + oc get pods -o custom-columns=Ready:status.containerStatuses[0].ready,NAME:.metadata.name +} + +# ct_os_get_all_pods_name +# -------------------- +# Returns the full name of all pods. +function ct_os_get_all_pods_name() { + oc get pods --no-headers -o custom-columns=NAME:.metadata.name +} + +# ct_os_get_pod_status POD_PREFIX +# -------------------- +# Returns status of the pod specified by prefix [pod_prefix]. +# Note: Ignores -build and -deploy pods +# Arguments: pod_prefix - prefix or whole ID of the pod +function ct_os_get_pod_status() { + local pod_prefix="${1}" ; shift + ct_os_get_all_pods_status | grep -e "${pod_prefix}" | grep -Ev "(build|deploy)$" \ + | awk '{print $1}' | head -n 1 +} + +# ct_os_get_pod_name POD_PREFIX +# -------------------- +# Returns the full name of pods specified by prefix [pod_prefix]. +# Note: Ignores -build and -deploy pods +# Arguments: pod_prefix - prefix or whole ID of the pod +function ct_os_get_pod_name() { + local pod_prefix="${1}" ; shift + ct_os_get_all_pods_name | grep -e "^${pod_prefix}" | grep -Ev "(build|deploy)$" +} + +# ct_os_get_pod_ip POD_NAME +# -------------------- +# Returns the ip of the pod specified by [pod_name]. 
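+# Illustrative usage: ct_os_get_pod_ip "$(ct_os_get_pod_name mysql | head -n 1)"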
+# Arguments: pod_name - full name of the pod +function ct_os_get_pod_ip() { + local pod_name="${1}" + oc get pod "$pod_name" --no-headers -o custom-columns=IP:status.podIP +} + +# ct_os_check_pod_readiness POD_PREFIX STATUS +# -------------------- +# Checks whether the pod is ready. +# Arguments: pod_prefix - prefix or whole ID of the pod +# Arguments: status - expected status (true, false) +function ct_os_check_pod_readiness() { + local pod_prefix="${1}" ; shift + local status="${1}" ; shift + test "$(ct_os_get_pod_status ${pod_prefix})" == "${status}" +} + +# ct_os_wait_pod_ready POD_PREFIX TIMEOUT +# -------------------- +# Wait maximum [timeout] for the pod becomming ready. +# Arguments: pod_prefix - prefix or whole ID of the pod +# Arguments: timeout - how many seconds to wait seconds +function ct_os_wait_pod_ready() { + local pod_prefix="${1}" ; shift + local timeout="${1}" ; shift + SECONDS=0 + echo -n "Waiting for ${pod_prefix} pod becoming ready ..." + while ! ct_os_check_pod_readiness "${pod_prefix}" "true" ; do + echo -n "." + [ ${SECONDS} -gt ${timeout} ] && echo " FAIL" && return 1 + sleep 3 + done + echo " DONE" +} + +# ct_os_wait_rc_ready POD_PREFIX TIMEOUT +# -------------------- +# Wait maximum [timeout] for the rc having desired number of replicas ready. +# Arguments: pod_prefix - prefix of the replication controller +# Arguments: timeout - how many seconds to wait seconds +function ct_os_wait_rc_ready() { + local pod_prefix="${1}" ; shift + local timeout="${1}" ; shift + SECONDS=0 + echo -n "Waiting for ${pod_prefix} pod becoming ready ..." + while ! test "$((oc get --no-headers statefulsets; oc get --no-headers rc) 2>/dev/null \ + | grep "^${pod_prefix}" | awk '$2==$3 {print "ready"}')" == "ready" ; do + echo -n "." + [ ${SECONDS} -gt ${timeout} ] && echo " FAIL" && return 1 + sleep 3 + done + echo " DONE" +} + +# ct_os_deploy_pure_image IMAGE [ENV_PARAMS, ...] +# -------------------- +# Runs [image] in the openshift and optionally specifies env_params +# as environment variables to the image. +# Arguments: image - prefix or whole ID of the pod to run the cmd in +# Arguments: env_params - environment variables parameters for the images. +function ct_os_deploy_pure_image() { + local image="${1}" ; shift + # ignore error exit code, because oc new-app returns error when image exists + oc new-app ${image} "$@" || : + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# ct_os_deploy_s2i_image IMAGE APP [ENV_PARAMS, ... ] +# -------------------- +# Runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. +# Arguments: image - prefix or whole ID of the pod to run the cmd in +# Arguments: app - url or local path to git repo with the application sources. +# Arguments: env_params - environment variables parameters for the images. +function ct_os_deploy_s2i_image() { + local image="${1}" ; shift + local app="${1}" ; shift + # ignore error exit code, because oc new-app returns error when image exists + oc new-app "${image}~${app}" "$@" || : + + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# ct_os_deploy_template_image TEMPLATE [ENV_PARAMS, ...] +# -------------------- +# Runs template in the openshift and optionally gives env_params to use +# specific values in the template. +# Arguments: template - prefix or whole ID of the pod to run the cmd in +# Arguments: env_params - environment variables parameters for the template. 
+# Example usage: ct_os_deploy_template_image mariadb-ephemeral-template.yaml \ +# DATABASE_SERVICE_NAME=mysql-57-centos7 \ +# DATABASE_IMAGE=mysql-57-centos7 \ +# MYSQL_USER=testu \ +# MYSQL_PASSWORD=testp \ +# MYSQL_DATABASE=testdb +function ct_os_deploy_template_image() { + local template="${1}" ; shift + oc process -f "${template}" "$@" | oc create -f - + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# _ct_os_get_uniq_project_name +# -------------------- +# Returns a uniq name of the OpenShift project. +function _ct_os_get_uniq_project_name() { + local r + while true ; do + r=${RANDOM} + mkdir /var/tmp/os-test-${r} &>/dev/null && echo test-${r} && break + done +} + +# ct_os_new_project [PROJECT] +# -------------------- +# Creates a new project in the openshfit using 'os' command. +# Arguments: project - project name, uses a new random name if omitted +# Expects 'os' command that is properly logged in to the OpenShift cluster. +# Not using mktemp, because we cannot use uppercase characters. +function ct_os_new_project() { + local project_name="${1:-$(_ct_os_get_uniq_project_name)}" ; shift || : + oc new-project ${project_name} + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# ct_os_delete_project [PROJECT] +# -------------------- +# Deletes the specified project in the openshfit +# Arguments: project - project name, uses the current project if omitted +function ct_os_delete_project() { + local project_name="${1:-$(oc project -q)}" ; shift || : + oc delete project "${project_name}" +} + +# ct_os_docker_login +# -------------------- +# Logs in into docker daemon +function ct_os_docker_login() { + # docker login fails with "404 page not found" error sometimes, just try it more times + for i in `seq 12` ; do + docker login -u developer -p $(oc whoami -t) 172.30.1.1:5000 && return 0 || : + sleep 5 + done + return 1 +} + +# ct_os_upload_image IMAGE [IMAGESTREAM] +# -------------------- +# Uploads image from local registry to the OpenShift internal registry. +# Arguments: image - image name to upload +# Arguments: imagestream - name and tag to use for the internal registry. +# In the format of name:tag ($image_name:latest by default) +function ct_os_upload_image() { + local input_name="${1}" ; shift + local image_name=${input_name##*/} + local imagestream=${1:-$image_name:latest} + local output_name="172.30.1.1:5000/$(oc project -q)/$imagestream" + + ct_os_docker_login + docker tag ${input_name} ${output_name} + docker push ${output_name} +} + +# ct_os_install_in_centos +# -------------------- +# Installs os cluster in CentOS +function ct_os_install_in_centos() { + yum install -y centos-release-openshift-origin + yum install -y wget git net-tools bind-utils iptables-services bridge-utils\ + bash-completion origin-clients docker origin-clients +} + +# ct_os_cluster_up [DIR, IS_PUBLIC, CLUSTER_VERSION] +# -------------------- +# Runs the local OpenShift cluster using 'oc cluster up' and logs in as developer. +# Arguments: dir - directory to keep configuration data in, random if omitted +# Arguments: is_public - sets either private or public hostname for web-UI, +# use "true" for allow remote access to the web-UI, +# "false" is default +# Arguments: cluster_version - version of the OpenShift cluster to use, empty +# means default version of `oc`; example value: v3.7.0; +# also can be specified outside by OC_CLUSTER_VERSION +function ct_os_cluster_up() { + ct_os_cluster_running && echo "Cluster already running. 
Nothing is done." && return 0 + mkdir -p /var/tmp/openshift + local dir="${1:-$(mktemp -d /var/tmp/openshift/os-data-XXXXXX)}" ; shift || : + local is_public="${1:-'false'}" ; shift || : + local default_cluster_version=${OC_CLUSTER_VERSION:-} + local cluster_version=${1:-${default_cluster_version}} ; shift || : + if ! grep -qe '--insecure-registry.*172\.30\.0\.0' /etc/sysconfig/docker ; then + sed -i "s|OPTIONS='|OPTIONS='--insecure-registry 172.30.0.0/16 |" /etc/sysconfig/docker + fi + + systemctl stop firewalld + setenforce 0 + iptables -F + + systemctl restart docker + local cluster_ip="127.0.0.1" + [ "${is_public}" == "true" ] && cluster_ip=$(ct_get_public_ip) + + mkdir -p ${dir}/{config,data,pv} + oc cluster up --host-data-dir=${dir}/data --host-config-dir=${dir}/config \ + --host-pv-dir=${dir}/pv --use-existing-config --public-hostname=${cluster_ip} \ + ${cluster_version:+--version=$cluster_version } + oc version + oc login -u system:admin + oc project default + ct_os_wait_rc_ready docker-registry 180 + ct_os_wait_rc_ready router 30 + oc login -u developer -p developer + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# ct_os_cluster_down +# -------------------- +# Shuts down the local OpenShift cluster using 'oc cluster down' +function ct_os_cluster_down() { + oc cluster down +} + +# ct_os_cluster_running +# -------------------- +# Returns 0 if oc cluster is running +function ct_os_cluster_running() { + oc cluster status &>/dev/null +} + +# ct_os_test_s2i_app_func IMAGE APP CONTEXT_DIR CHECK_CMD [OC_ARGS] +# -------------------- +# Runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. Then check the container by arbitrary +# function given as argument (such an argument may include string, +# that will be replaced with actual IP). +# Arguments: image - prefix or whole ID of the pod to run the cmd in (compulsory) +# Arguments: app - url or local path to git repo with the application sources (compulsory) +# Arguments: context_dir - sub-directory inside the repository with the application sources (compulsory) +# Arguments: check_command - CMD line that checks whether the container works (compulsory; '' will be replaced with actual IP) +# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app` +# command, typically environment variables (optional) +function ct_os_test_s2i_app_func() { + local image_name=${1} + local app=${2} + local context_dir=${3} + local check_command=${4} + local oc_args=${5:-} + local image_name_no_namespace=${image_name##*/} + local service_name="${image_name_no_namespace}-testing" + local image_tagged="${image_name_no_namespace}:testing" + + if [ $# -lt 4 ] || [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then + echo "ERROR: ct_os_test_s2i_app_func() requires at least 4 arguments that cannot be emtpy." 
>&2 + return 1 + fi + + ct_os_new_project + # Create a specific imagestream tag for the image so that oc cannot use anything else + ct_os_upload_image "${image_name}" "${image_tagged}" + + local app_param="${app}" + if [ -d "${app}" ] ; then + # for local directory, we need to copy the content, otherwise too smart os command + # pulls the git remote repository instead + app_param=$(ct_obtain_input "${app}") + fi + + ct_os_deploy_s2i_image "${image_tagged}" "${app_param}" \ + --context-dir="${context_dir}" \ + --name "${service_name}" \ + ${oc_args} + + if [ -d "${app}" ] ; then + # in order to avoid weird race seen sometimes, let's wait shortly + # before starting the build explicitly + sleep 5 + oc start-build "${service_name}" --from-dir="${app_param}" + fi + + ct_os_wait_pod_ready "${service_name}" 300 + + local ip=$(ct_os_get_service_ip "${service_name}") + local check_command_exp=$(echo "$check_command" | sed -e "s//$ip/g") + + echo " Checking APP using $check_command_exp ..." + local result=0 + eval "$check_command_exp" || result=1 + + if [ $result -eq 0 ] ; then + echo " Check passed." + else + echo " Check failed." + fi + + ct_os_delete_project + return $result +} + +# ct_os_test_s2i_app IMAGE APP CONTEXT_DIR EXPECTED_OUTPUT [PORT, PROTOCOL, RESPONSE_CODE, OC_ARGS, ... ] +# -------------------- +# Runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. Then check the http response. +# Arguments: image - prefix or whole ID of the pod to run the cmd in (compulsory) +# Arguments: app - url or local path to git repo with the application sources (compulsory) +# Arguments: context_dir - sub-directory inside the repository with the application sources (compulsory) +# Arguments: expected_output - PCRE regular expression that must match the response body (compulsory) +# Arguments: port - which port to use (optional; default: 8080) +# Arguments: protocol - which protocol to use (optional; default: http) +# Arguments: response_code - what http response code to expect (optional; default: 200) +# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app` +# command, typically environment variables (optional) +function ct_os_test_s2i_app() { + local image_name=${1} + local app=${2} + local context_dir=${3} + local expected_output=${4} + local port=${5:-8080} + local protocol=${6:-http} + local response_code=${7:-200} + local oc_args=${8:-} + + if [ $# -lt 4 ] || [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then + echo "ERROR: ct_os_test_s2i_app() requires at least 4 arguments that cannot be emtpy." >&2 + return 1 + fi + + ct_os_test_s2i_app_func "${image_name}" \ + "${app}" \ + "${context_dir}" \ + "ct_test_response '${protocol}://:${port}' '${response_code}' '${expected_output}'" \ + "${oc_args}" +} + +# ct_os_test_template_app_func IMAGE APP IMAGE_IN_TEMPLATE CHECK_CMD [OC_ARGS] +# -------------------- +# Runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. Then check the container by arbitrary +# function given as argument (such an argument may include string, +# that will be replaced with actual IP). 
+# Arguments: image_name - prefix or whole ID of the pod to run the cmd in (compulsory) +# Arguments: template - url or local path to a template to use (compulsory) +# Arguments: name_in_template - image name used in the template +# Arguments: check_command - CMD line that checks whether the container works (compulsory; '' will be replaced with actual IP) +# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app` +# command, typically environment variables (optional) +# Arguments: other_images - some templates need other image to be pushed into the OpenShift registry, +# specify them in this parameter as "|", where "" is a full image name +# (including registry if needed) and "" is a tag under which the image should be available +# in the OpenShift registry. +function ct_os_test_template_app_func() { + local image_name=${1} + local template=${2} + local name_in_template=${3} + local check_command=${4} + local oc_args=${5:-} + local other_images=${6:-} + + if [ $# -lt 4 ] || [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then + echo "ERROR: ct_os_test_template_app_func() requires at least 4 arguments that cannot be emtpy." >&2 + return 1 + fi + + local service_name="${name_in_template}-testing" + local image_tagged="${name_in_template}:${VERSION}" + + ct_os_new_project + # Create a specific imagestream tag for the image so that oc cannot use anything else + ct_os_upload_image "${image_name}" "${image_tagged}" + + # upload also other images, that template might need (list of pairs in the format | + local images_tags_a + local i_t + for i_t in ${other_images} ; do + echo "${i_t}" + IFS='|' read -ra image_tag_a <<< "${i_t}" + docker pull "${image_tag_a[0]}" + ct_os_upload_image "${image_tag_a[0]}" "${image_tag_a[1]}" + done + + local local_template=$(ct_obtain_input "${template}") + oc new-app ${local_template} \ + -p NAME="${service_name}" \ + -p NAMESPACE="$(oc project -q)" \ + ${oc_args} + + oc start-build "${service_name}" + + ct_os_wait_pod_ready "${service_name}" 300 + + local ip=$(ct_os_get_service_ip "${service_name}") + local check_command_exp=$(echo "$check_command" | sed -e "s//$ip/g") + + echo " Checking APP using $check_command_exp ..." + local result=0 + eval "$check_command_exp" || result=1 + + if [ $result -eq 0 ] ; then + echo " Check passed." + else + echo " Check failed." + fi + + ct_os_delete_project + return $result +} + +# params: +# ct_os_test_template_app IMAGE APP IMAGE_IN_TEMPLATE EXPECTED_OUTPUT [PORT, PROTOCOL, RESPONSE_CODE, OC_ARGS, ... ] +# -------------------- +# Runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. Then check the http response. 
+# Arguments: image_name - prefix or whole ID of the pod to run the cmd in (compulsory) +# Arguments: template - url or local path to a template to use (compulsory) +# Arguments: name_in_template - image name used in the template +# Arguments: expected_output - PCRE regular expression that must match the response body (compulsory) +# Arguments: port - which port to use (optional; default: 8080) +# Arguments: protocol - which protocol to use (optional; default: http) +# Arguments: response_code - what http response code to expect (optional; default: 200) +# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app` +# command, typically environment variables (optional) +# Arguments: other_images - some templates need other image to be pushed into the OpenShift registry, +# specify them in this parameter as "|", where "" is a full image name +# (including registry if needed) and "" is a tag under which the image should be available +# in the OpenShift registry. +function ct_os_test_template_app() { + local image_name=${1} + local template=${2} + local name_in_template=${3} + local expected_output=${4} + local port=${5:-8080} + local protocol=${6:-http} + local response_code=${7:-200} + local oc_args=${8:-} + local other_images=${9:-} + + if [ $# -lt 4 ] || [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then + echo "ERROR: ct_os_test_template_app() requires at least 4 arguments that cannot be emtpy." >&2 + return 1 + fi + + ct_os_test_template_app_func "${image_name}" \ + "${template}" \ + "${name_in_template}" \ + "ct_test_response '${protocol}://:${port}' '${response_code}' '${expected_output}'" \ + "${oc_args}" \ + "${other_images}" +} + +# ct_os_test_image_update IMAGE IS CHECK_CMD OC_ARGS +# -------------------- +# Runs an image update test with [image] uploaded to [is] imagestream +# and checks the services using an arbitrary function provided in [check_cmd]. 
+# Arguments: image - prefix or whole ID of the pod to run the cmd in (compulsory) +# Arguments: is - imagestream to upload the images into (compulsory) +# Arguments: check_cmd - command to be run to check functionality of created services (compulsory) +# Arguments: oc_args - arguments to use during oc new-app (compulsory) +ct_os_test_image_update() { + local image_name=$1; shift + local istag=$1; shift + local check_function=$1; shift + local service_name=${image_name##*/} + local old_image="" ip="" check_command_exp="" registry="" + registry=$(ct_registry_from_os "$OS") + old_image="$registry/$image_name" + + echo "Running image update test for: $image_name" + ct_os_new_project + + # Get current image from repository and create an imagestream + docker pull "$old_image:latest" 2>/dev/null + ct_os_upload_image "$old_image" "$istag" + + # Setup example application with curent image + oc new-app "$@" --name "$service_name" + ct_os_wait_pod_ready "$service_name" 60 + + # Check application output + ip=$(ct_os_get_service_ip "$service_name") + check_command_exp=${check_function///$ip} + ct_assert_cmd_success "$check_command_exp" + + # Tag built image into the imagestream and wait for rebuild + ct_os_upload_image "$image_name" "$istag" + ct_os_wait_pod_ready "${service_name}-2" 60 + + # Check application output + ip=$(ct_os_get_service_ip "$service_name") + check_command_exp=${check_function///$ip} + ct_assert_cmd_success "$check_command_exp" + + ct_os_delete_project +} diff --git a/test/test-lib.sh b/test/test-lib.sh new file mode 100644 index 0000000..dfc63d9 --- /dev/null +++ b/test/test-lib.sh @@ -0,0 +1,402 @@ +# +# Test a container image. +# +# Always use sourced from a specific container testfile +# +# reguires definition of CID_FILE_DIR +# CID_FILE_DIR=$(mktemp --suffix=_test_cidfiles -d) +# reguires definition of TEST_LIST +# TEST_LIST="\ +# ctest_container_creation +# ctest_doc_content" + +# Container CI tests +# abbreviated as "ct" + +# may be redefined in the specific container testfile +EXPECTED_EXIT_CODE=0 + +# ct_cleanup +# -------------------- +# Cleans up containers used during tests. Stops and removes all containers +# referenced by cid_files in CID_FILE_DIR. Dumps logs if a container exited +# unexpectedly. Removes the cid_files and CID_FILE_DIR as well. +# Uses: $CID_FILE_DIR - path to directory containing cid_files +# Uses: $EXPECTED_EXIT_CODE - expected container exit code +function ct_cleanup() { + for cid_file in $CID_FILE_DIR/* ; do + local container=$(cat $cid_file) + + : "Stopping and removing container $container..." + docker stop $container + exit_status=$(docker inspect -f '{{.State.ExitCode}}' $container) + if [ "$exit_status" != "$EXPECTED_EXIT_CODE" ]; then + : "Dumping logs for $container" + docker logs $container + fi + docker rm -v $container + rm $cid_file + done + rmdir $CID_FILE_DIR + : "Done." +} + +# ct_enable_cleanup +# -------------------- +# Enables automatic container cleanup after tests. +function ct_enable_cleanup() { + trap ct_cleanup EXIT SIGINT +} + +# ct_get_cid [name] +# -------------------- +# Prints container id from cid_file based on the name of the file. +# Argument: name - name of cid_file where the container id will be stored +# Uses: $CID_FILE_DIR - path to directory containing cid_files +function ct_get_cid() { + local name="$1" ; shift || return 1 + echo $(cat "$CID_FILE_DIR/$name") +} + +# ct_get_cip [id] +# -------------------- +# Prints container ip address based on the container id. 
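+# Illustrative usage: ip=$(ct_get_cip mydb)   # "mydb" is the name given to ct_create_container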
+# Argument: id - container id +function ct_get_cip() { + local id="$1" ; shift + docker inspect --format='{{.NetworkSettings.IPAddress}}' $(ct_get_cid "$id") +} + +# ct_wait_for_cid [cid_file] +# -------------------- +# Holds the execution until the cid_file is created. Usually run after container +# creation. +# Argument: cid_file - name of the cid_file that should be created +function ct_wait_for_cid() { + local cid_file=$1 + local max_attempts=10 + local sleep_time=1 + local attempt=1 + local result=1 + while [ $attempt -le $max_attempts ]; do + [ -f $cid_file ] && [ -s $cid_file ] && return 0 + : "Waiting for container start..." + attempt=$(( $attempt + 1 )) + sleep $sleep_time + done + return 1 +} + +# ct_assert_container_creation_fails [container_args] +# -------------------- +# The invocation of docker run should fail based on invalid container_args +# passed to the function. Returns 0 when container fails to start properly. +# Argument: container_args - all arguments are passed directly to dokcer run +# Uses: $CID_FILE_DIR - path to directory containing cid_files +function ct_assert_container_creation_fails() { + local ret=0 + local max_attempts=10 + local attempt=1 + local cid_file=assert + set +e + local old_container_args="${CONTAINER_ARGS-}" + CONTAINER_ARGS="$@" + ct_create_container $cid_file + if [ $? -eq 0 ]; then + local cid=$(ct_get_cid $cid_file) + + while [ "$(docker inspect -f '{{.State.Running}}' $cid)" == "true" ] ; do + sleep 2 + attempt=$(( $attempt + 1 )) + if [ $attempt -gt $max_attempts ]; then + docker stop $cid + ret=1 + break + fi + done + exit_status=$(docker inspect -f '{{.State.ExitCode}}' $cid) + if [ "$exit_status" == "0" ]; then + ret=1 + fi + docker rm -v $cid + rm $CID_FILE_DIR/$cid_file + fi + [ ! -z $old_container_args ] && CONTAINER_ARGS="$old_container_args" + set -e + return $ret +} + +# ct_create_container [name, command] +# -------------------- +# Creates a container using the IMAGE_NAME and CONTAINER_ARGS variables. Also +# stores the container id to a cid_file located in the CID_FILE_DIR, and waits +# for the creation of the file. +# Argument: name - name of cid_file where the container id will be stored +# Argument: command - optional command to be executed in the container +# Uses: $CID_FILE_DIR - path to directory containing cid_files +# Uses: $CONTAINER_ARGS - optional arguments passed directly to docker run +# Uses: $IMAGE_NAME - name of the image being tested +function ct_create_container() { + local cid_file="$CID_FILE_DIR/$1" ; shift + # create container with a cidfile in a directory for cleanup + docker run --cidfile="$cid_file" -d ${CONTAINER_ARGS:-} $IMAGE_NAME "$@" + ct_wait_for_cid $cid_file || return 1 + : "Created container $(cat $cid_file)" +} + +# ct_scl_usage_old [name, command, expected] +# -------------------- +# Tests three ways of running the SCL, by looking for an expected string +# in the output of the command +# Argument: name - name of cid_file where the container id will be stored +# Argument: command - executed inside the container +# Argument: expected - string that is expected to be in the command output +# Uses: $CID_FILE_DIR - path to directory containing cid_files +# Uses: $IMAGE_NAME - name of the image being tested +function ct_scl_usage_old() { + local name="$1" + local command="$2" + local expected="$3" + local out="" + : " Testing the image SCL enable" + out=$(docker run --rm ${IMAGE_NAME} /bin/bash -c "${command}") + if ! 
+    echo "ERROR[/bin/bash -c "${command}"] Expected '${expected}', got '${out}'" >&2
+    return 1
+  fi
+  out=$(docker exec $(ct_get_cid $name) /bin/bash -c "${command}" 2>&1)
+  if ! echo "${out}" | grep -q "${expected}"; then
+    echo "ERROR[exec /bin/bash -c "${command}"] Expected '${expected}', got '${out}'" >&2
+    return 1
+  fi
+  out=$(docker exec $(ct_get_cid $name) /bin/sh -ic "${command}" 2>&1)
+  if ! echo "${out}" | grep -q "${expected}"; then
+    echo "ERROR[exec /bin/sh -ic "${command}"] Expected '${expected}', got '${out}'" >&2
+    return 1
+  fi
+}
+
+# ct_doc_content_old [strings]
+# --------------------
+# Looks for occurrences of strings in the documentation files and checks
+# the format of the files. Files examined: help.1
+# Argument: strings - strings expected to appear in the documentation
+# Uses: $IMAGE_NAME - name of the image being tested
+function ct_doc_content_old() {
+  local tmpdir=$(mktemp -d)
+  local f
+  : "  Testing documentation in the container image"
+  # Extract the help files from the container
+  for f in help.1 ; do
+    docker run --rm ${IMAGE_NAME} /bin/bash -c "cat /${f}" >${tmpdir}/$(basename ${f})
+    # Check whether the files contain some important information
+    for term in $@ ; do
+      if ! cat ${tmpdir}/$(basename ${f}) | grep -F -q -e "${term}" ; then
+        echo "ERROR: File /${f} does not include '${term}'." >&2
+        return 1
+      fi
+    done
+    # Check whether the files use the correct format
+    for term in TH PP SH ; do
+      if ! grep -q "^\.${term}" ${tmpdir}/help.1 ; then
+        echo "ERROR: /help.1 is probably not in troff or groff format, since '${term}' is missing." >&2
+        return 1
+      fi
+    done
+  done
+  : "  Success!"
+}
+
+
+# ct_path_append PATH_VARNAME DIRECTORY
+# -------------------------------------
+# Appends DIRECTORY to the variable named PATH_VARNAME; the variable must
+# consist of a colon-separated list of directories.
+ct_path_append ()
+{
+  if eval "test -n \"\${$1-}\""; then
+    eval "$1=\$2:\$$1"
+  else
+    eval "$1=\$2"
+  fi
+}
+
+
+# ct_path_foreach PATH ACTION [ARGS ...]
+# --------------------------------------
+# For each DIR in PATH, execute ACTION (PATH is a colon-separated list of
+# directories). The particular calls to ACTION will look like
+# '$ ACTION directory [ARGS ...]'
+ct_path_foreach ()
+{
+  local dir dirlist action save_IFS
+  save_IFS=$IFS
+  IFS=:
+  dirlist=$1
+  action=$2
+  shift 2
+  for dir in $dirlist; do "$action" "$dir" "$@" ; done
+  IFS=$save_IFS
+}
+
+
+# ct_run_test_list
+# --------------------
+# Executes the tests specified by TEST_LIST.
+# Uses: $TEST_LIST - list of test names
+function ct_run_test_list() {
+  for test_case in $TEST_LIST; do
+    : "Running test $test_case"
+    [ -f test/$test_case ] && source test/$test_case
+    [ -f ../test/$test_case ] && source ../test/$test_case
+    $test_case
+  done;
+}
+
+# ct_gen_self_signed_cert_pem
+# ---------------------------
+# Generates a self-signed PEM certificate pair into the specified directory.
+# Argument: output_dir - output directory path
+# Argument: base_name - base name of the certificate files
+# The resulting files will be:
+# <output_dir>/<base_name>-cert-selfsigned.pem -- public PEM certificate
+# <output_dir>/<base_name>-key.pem -- PEM private key
+ct_gen_self_signed_cert_pem() {
+  local output_dir=$1 ; shift
+  local base_name=$1 ; shift
+  mkdir -p ${output_dir}
+  openssl req -newkey rsa:2048 -nodes -keyout ${output_dir}/${base_name}-key.pem -subj '/C=GB/ST=Berkshire/L=Newbury/O=My Server Company' > ${output_dir}/${base_name}-req.pem
+  openssl req -new -x509 -nodes -key ${output_dir}/${base_name}-key.pem -batch > ${output_dir}/${base_name}-cert-selfsigned.pem
+}
+
+# ct_obtain_input FILE|DIR|URL
+# --------------------
+# Either copies a file or a directory to a tmp location for local copies, or
+# downloads the file from a remote location.
+# The resulting file path is printed, so it can later be used by the calling function.
+# Arguments: input - local file, directory or remote URL
+function ct_obtain_input() {
+  local input=$1
+  local extension="${input##*.}"
+
+  # Try to use the same extension for the temporary file if possible
+  [[ "${extension}" =~ ^[a-z0-9]*$ ]] && extension=".${extension}" || extension=""
+
+  local output=$(mktemp "/var/tmp/test-input-XXXXXX$extension")
+  if [ -f "${input}" ] ; then
+    cp "${input}" "${output}"
+  elif [ -d "${input}" ] ; then
+    rm -f "${output}"
+    cp -r -LH "${input}" "${output}"
+  elif echo "${input}" | grep -qe '^http\(s\)\?://' ; then
+    curl "${input}" > "${output}"
+  else
+    echo "ERROR: file type not known: ${input}" >&2
+    return 1
+  fi
+  echo "${output}"
+}
+
+# ct_test_response
+# ----------------
+# Performs a GET request to the application container and checks the output
+# against a regular expression and an HTTP response code.
+# Argument: url - request URL path
+# Argument: expected_code - expected HTTP response code
+# Argument: body_regexp - PCRE regular expression that must match the response body
+# Argument: max_attempts - Optional number of attempts (default: 20), with a three-second sleep between them
+# Argument: ignore_error_attempts - Optional number of attempts during which error output is ignored (default: 10)
+ct_test_response() {
+  local url="$1"
+  local expected_code="$2"
+  local body_regexp="$3"
+  local max_attempts=${4:-20}
+  local ignore_error_attempts=${5:-10}
+
+  : "  Testing the HTTP(S) response for <${url}>"
+  local sleep_time=3
+  local attempt=1
+  local result=1
+  local status
+  local response_code
+  local response_file=$(mktemp /tmp/ct_test_response_XXXXXX)
+  while [ ${attempt} -le ${max_attempts} ]; do
+    curl --connect-timeout 10 -s -w '%{http_code}' "${url}" >${response_file} && status=0 || status=1
+    if [ ${status} -eq 0 ]; then
+      response_code=$(cat ${response_file} | tail -c 3)
+      if [ "${response_code}" -eq "${expected_code}" ]; then
+        result=0
+      fi
+      cat ${response_file} | grep -qP -e "${body_regexp}" || result=1;
+      # Some services return a 40x code until they are ready, so let's give them
+      # some chance and not end with a failure right away.
+      # Do not wait if we already have the expected outcome, though.
+      if [ ${result} -eq 0 -o ${attempt} -gt ${ignore_error_attempts} -o ${attempt} -eq ${max_attempts} ] ; then
+        break
+      fi
+    fi
+    attempt=$(( ${attempt} + 1 ))
+    sleep ${sleep_time}
+  done
+  rm -f ${response_file}
+  return ${result}
+}
+
+# ct_registry_from_os OS
+# ----------------
+# Transforms the operating system string [os] into a registry URL.
+# Argument: OS - string containing the os version
+ct_registry_from_os() {
+  local registry=""
+  case $1 in
+    rhel7)
+      registry=registry.access.redhat.com
+      ;;
+    *)
+      registry=docker.io
+      ;;
+  esac
+  echo "$registry"
+}
+
+# ct_assert_cmd_success CMD
+# ----------------
+# Evaluates [cmd] and fails if it does not succeed.
+# Argument: CMD - Command to be run
+function ct_assert_cmd_success() {
+  echo "Checking '$*' for success ..."
+  if ! eval "$@" &>/dev/null; then
+    echo " FAIL"
+    return 1
+  fi
+  echo " PASS"
+  return 0
+}
+
+# ct_assert_cmd_failure CMD
+# ----------------
+# Evaluates [cmd] and fails if it succeeds.
+# Argument: CMD - Command to be run
+function ct_assert_cmd_failure() {
+  echo "Checking '$*' for failure ..."
+  if eval "$@" &>/dev/null; then
+    echo " FAIL"
+    return 1
+  fi
+  echo " PASS"
+  return 0
+}
+
+
+# ct_random_string [LENGTH=10]
+# ----------------------------
+# Generates a pseudorandom lowercase alphanumeric string of LENGTH characters;
+# the default length is 10. The string is printed on stdout.
+ct_random_string()
+(
+  export LC_ALL=C
+  dd if=/dev/urandom count=1 bs=10k 2>/dev/null \
+    | tr -dc 'a-z0-9' \
+    | fold -w "${1-10}" \
+    | head -n 1
+)
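For reference, a container testfile that consumes test-lib.sh typically looks like the minimal sketch below. The sketch is illustrative only and is not part of the diff above; the test names, the credentials, and the mysql client invocation are assumptions, and IMAGE_NAME is expected to be supplied by the caller.

#!/bin/bash
# Illustrative testfile sketch; names, credentials and paths are assumptions.
IMAGE_NAME=${IMAGE_NAME:?"name of the image to be tested must be provided"}
CID_FILE_DIR=$(mktemp --suffix=_test_cidfiles -d)
TEST_LIST="\
test_container_creation
test_connection"

source "$(dirname "$0")/test-lib.sh"

# Stop and remove the created containers when the script exits
ct_enable_cleanup

function test_container_creation() {
  # Incomplete credentials are expected to make the container fail to start
  ct_assert_container_creation_fails -e MYSQL_USER=user
  # A complete set of variables should start the daemon; the container id is
  # stored under the name "testdb" in CID_FILE_DIR
  CONTAINER_ARGS="-e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db"
  ct_create_container testdb
}

function test_connection() {
  local ip
  ip=$(ct_get_cip testdb)
  # A real test would retry until mysqld accepts connections; the client
  # invocation below only illustrates the helper's interface
  ct_assert_cmd_success "docker run --rm $IMAGE_NAME mysql -h $ip -u user -ppass -e 'SELECT 1;' db"
}

ct_run_test_list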