diff --git a/Dockerfile b/Dockerfile index e4c015c..becd1dc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,4 @@ -FROM fedora:28 - -LABEL MAINTAINER "Honza Horak" +FROM registry.fedoraproject.org/f28/s2i-core:latest # MariaDB image for OpenShift. # @@ -13,29 +11,40 @@ LABEL MAINTAINER "Honza Horak" # * $MYSQL_ROOT_PASSWORD (Optional) - Password for the 'root' MySQL account ENV MYSQL_VERSION=10.2 \ - HOME=/var/lib/mysql + APP_DATA=/opt/app-root/src \ + HOME=/var/lib/mysql \ + NAME=mariadb \ + VERSION=10.2 \ + RELEASE="16" \ + ARCH=x86_64 \ + SUMMARY="MariaDB 10.2 SQL database server" \ + DESCRIPTION="MariaDB is a multi-user, multi-threaded SQL database server. The container \ +image provides a containerized packaging of the MariaDB mysqld daemon and client application. \ +The mysqld server daemon accepts connections from clients and provides access to content from \ +MariaDB databases on behalf of the clients." -LABEL summary="MariaDB is a multi-user, multi-threaded SQL database server" \ +LABEL summary="$SUMMARY" \ + description="$DESCRIPTION" \ io.k8s.description="MariaDB is a multi-user, multi-threaded SQL database server" \ io.k8s.display-name="MariaDB 10.2" \ io.openshift.expose-services="3306:mysql" \ - io.openshift.tags="database,mysql,mariadb,mariadb101,galera" - -ENV NAME=mariadb VERSION=10.2 RELEASE=14 ARCH=x86_64 -LABEL BZComponent="$NAME" \ - Name="$FGC/$NAME" \ - Version="$VERSION" \ - Release="$RELEASE.$DISTTAG" \ - Architecture="$ARCH" + io.openshift.tags="database,mysql,mariadb,mariadb102,galera" \ + com.redhat.component="$NAME" \ + name="$FGC/$NAME" \ + version="$VERSION" \ + release="$RELEASE.$DISTTAG" \ + architecture="$ARCH" \ + usage="docker run -d -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -p 3306:3306 $FGC/$NAME" \ + maintainer="SoftwareCollections.org " EXPOSE 3306 # This image must forever use UID 27 for mysql user so our volumes are # safe in the future. This should *never* change, the last test is there # to make sure of that. -RUN INSTALL_PKGS="rsync tar gettext hostname bind-utils mariadb-server policycoreutils" && \ +RUN INSTALL_PKGS="rsync tar gettext hostname bind-utils groff-base shadow-utils mariadb-server policycoreutils" && \ dnf install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \ - rpm -V --noghost $INSTALL_PKGS && \ + rpm -V $INSTALL_PKGS && \ dnf clean all && \ mkdir -p /var/lib/mysql/data && chown -R mysql.0 /var/lib/mysql && \ test "$(id mysql)" = "uid=27(mysql) gid=27(mysql) groups=27(mysql)" @@ -48,13 +57,17 @@ RUN ln -s /usr/bin/python3 /usr/bin/python ENV CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/mysql \ MYSQL_PREFIX=/usr -ADD root / +COPY 10.2/root-common / +COPY 10.2/s2i-common/bin/ $STI_SCRIPTS_PATH +COPY 10.2/root / # this is needed due to issues with squash # when this directory gets rm'd by the container-setup # script. 
-RUN rm -rf /etc/my.cnf.d/* -RUN /usr/libexec/container-setup +# Also reset permissions of filesystem to default values +RUN rm -rf /etc/my.cnf.d/* && \ + /usr/libexec/container-setup && \ + rpm-file-permissions VOLUME ["/var/lib/mysql/data"] diff --git a/Dockerfile.fedora b/Dockerfile.fedora new file mode 120000 index 0000000..1d1fe94 --- /dev/null +++ b/Dockerfile.fedora @@ -0,0 +1 @@ +Dockerfile \ No newline at end of file diff --git a/cccp.yml b/cccp.yml new file mode 100644 index 0000000..b63cda7 --- /dev/null +++ b/cccp.yml @@ -0,0 +1 @@ +job-id: mariadb-102-centos7 diff --git a/content_sets.yml b/content_sets.yml new file mode 100644 index 0000000..432c091 --- /dev/null +++ b/content_sets.yml @@ -0,0 +1,10 @@ +# This is a file defining which content sets are needed to update content in +# this image. Data provided here helps determine which images are vulnerable to +# specific CVEs. Generally you should only need to update this file when: +# 1. You start depending on new product +# 2. You are preparing new product release and your content sets will change +--- +x86_64: +- rhel-7-server-rpms +- rhel-7-server-optional-rpms +- rhel-server-rhscl-7-rpms diff --git a/help.md b/help.md new file mode 120000 index 0000000..42061c0 --- /dev/null +++ b/help.md @@ -0,0 +1 @@ +README.md \ No newline at end of file diff --git a/root-common/etc/my.cnf b/root-common/etc/my.cnf new file mode 100644 index 0000000..0844075 --- /dev/null +++ b/root-common/etc/my.cnf @@ -0,0 +1,12 @@ +[mysqld] + +# Disabling symbolic-links is recommended to prevent assorted security risks +symbolic-links = 0 + +# http://www.percona.com/blog/2008/05/31/dns-achilles-heel-mysql-installation/ +skip_name_resolve + +# http://www.chriscalender.com/ignoring-the-lostfound-directory-in-your-datadir/ +ignore-db-dir=lost+found + +!includedir /etc/my.cnf.d diff --git a/root-common/usr/bin/cgroup-limits b/root-common/usr/bin/cgroup-limits new file mode 100755 index 0000000..b9d4edc --- /dev/null +++ b/root-common/usr/bin/cgroup-limits @@ -0,0 +1,92 @@ +#!/usr/bin/python + +""" +Script for parsing cgroup information + +This script will read some limits from the cgroup system and parse +them, printing out "VARIABLE=VALUE" on each line for every limit that is +successfully read. Output of this script can be directly fed into +bash's export command. Recommended usage from a bash script: + + set -o errexit + export_vars=$(cgroup-limits) ; export $export_vars + +Variables currently supported: + MAX_MEMORY_LIMIT_IN_BYTES + Maximum possible limit MEMORY_LIMIT_IN_BYTES can have. This is + currently constant value of 9223372036854775807. + MEMORY_LIMIT_IN_BYTES + Maximum amount of user memory in bytes. If this value is set + to the same value as MAX_MEMORY_LIMIT_IN_BYTES, it means that + there is no limit set. The value is taken from + /sys/fs/cgroup/memory/memory.limit_in_bytes + NUMBER_OF_CORES + Number of detected CPU cores that can be used. This value is + calculated from /sys/fs/cgroup/cpuset/cpuset.cpus + NO_MEMORY_LIMIT + Set to "true" if MEMORY_LIMIT_IN_BYTES is so high that the caller + can act as if no memory limit was set. Undefined otherwise. +""" + +from __future__ import print_function +import sys + + +def _read_file(path): + try: + with open(path, 'r') as f: + return f.read().strip() + except IOError: + return None + + +def get_memory_limit(): + """ + Read memory limit, in bytes. 
+ """ + + limit = _read_file('/sys/fs/cgroup/memory/memory.limit_in_bytes') + if limit is None or not limit.isdigit(): + print("Warning: Can't detect memory limit from cgroups", + file=sys.stderr) + return None + return int(limit) + + +def get_number_of_cores(): + """ + Read number of CPU cores. + """ + + core_count = 0 + + line = _read_file('/sys/fs/cgroup/cpuset/cpuset.cpus') + if line is None: + print("Warning: Can't detect number of CPU cores from cgroups", + file=sys.stderr) + return None + + for group in line.split(','): + core_ids = list(map(int, group.split('-'))) + if len(core_ids) == 2: + core_count += core_ids[1] - core_ids[0] + 1 + else: + core_count += 1 + + return core_count + + +if __name__ == "__main__": + env_vars = { + "MAX_MEMORY_LIMIT_IN_BYTES": 9223372036854775807, + "MEMORY_LIMIT_IN_BYTES": get_memory_limit(), + "NUMBER_OF_CORES": get_number_of_cores() + } + + env_vars = {k: v for k, v in env_vars.items() if v is not None} + + if env_vars.get("MEMORY_LIMIT_IN_BYTES", 0) >= 92233720368547: + env_vars["NO_MEMORY_LIMIT"] = "true" + + for key, value in env_vars.items(): + print("{0}={1}".format(key, value)) diff --git a/root-common/usr/bin/container-entrypoint b/root-common/usr/bin/container-entrypoint new file mode 100755 index 0000000..9d8ad4d --- /dev/null +++ b/root-common/usr/bin/container-entrypoint @@ -0,0 +1,2 @@ +#!/bin/bash +exec "$@" diff --git a/root-common/usr/bin/mysqld-master b/root-common/usr/bin/mysqld-master new file mode 120000 index 0000000..8a0786e --- /dev/null +++ b/root-common/usr/bin/mysqld-master @@ -0,0 +1 @@ +run-mysqld-master \ No newline at end of file diff --git a/root-common/usr/bin/mysqld-slave b/root-common/usr/bin/mysqld-slave new file mode 120000 index 0000000..dc0f58b --- /dev/null +++ b/root-common/usr/bin/mysqld-slave @@ -0,0 +1 @@ +run-mysqld-slave \ No newline at end of file diff --git a/root-common/usr/bin/run-mysqld b/root-common/usr/bin/run-mysqld new file mode 100755 index 0000000..7ffd49e --- /dev/null +++ b/root-common/usr/bin/run-mysqld @@ -0,0 +1,28 @@ +#!/bin/bash + +export_vars=$(cgroup-limits); export $export_vars +source ${CONTAINER_SCRIPTS_PATH}/common.sh +set -eu + +export_setting_variables + +log_volume_info $MYSQL_DATADIR + +# pre-init files +process_extending_files ${APP_DATA}/mysql-pre-init/ ${CONTAINER_SCRIPTS_PATH}/pre-init/ + +if [ ! -d "$MYSQL_DATADIR/mysql" ]; then + initialize_database "$@" +else + start_local_mysql "$@" +fi + +# init files +process_extending_files ${APP_DATA}/mysql-init/ ${CONTAINER_SCRIPTS_PATH}/init/ + +# Restart the MySQL server with public IP bindings +shutdown_local_mysql +unset_env_vars +log_volume_info $MYSQL_DATADIR +log_info 'Running final exec -- Only MySQL server logs after this point' +exec ${MYSQL_PREFIX}/libexec/mysqld --defaults-file=$MYSQL_DEFAULTS_FILE "$@" 2>&1 diff --git a/root-common/usr/bin/run-mysqld-master b/root-common/usr/bin/run-mysqld-master new file mode 100755 index 0000000..c15444d --- /dev/null +++ b/root-common/usr/bin/run-mysqld-master @@ -0,0 +1,46 @@ +#!/bin/bash +# +# This is an entrypoint that runs the MySQL server in the 'master' mode. 
+# +export_vars=$(cgroup-limits); export $export_vars +source ${CONTAINER_SCRIPTS_PATH}/common.sh +set -eu + +export_setting_variables + +log_volume_info $MYSQL_DATADIR + +export MYSQL_RUNNING_AS_MASTER=1 + +# The 'server-id' for master needs to be constant +export MYSQL_SERVER_ID=1 +log_info "The 'master' server-id is ${MYSQL_SERVER_ID}" + +# pre-init files +process_extending_files ${APP_DATA}/mysql-pre-init/ ${CONTAINER_SCRIPTS_PATH}/pre-init/ + +if [ ! -d "$MYSQL_DATADIR/mysql" ]; then + initialize_database "$@" +else + start_local_mysql "$@" +fi + +log_info 'Setting passwords ...' +[ -f ${CONTAINER_SCRIPTS_PATH}/passwd-change.sh ] && source ${CONTAINER_SCRIPTS_PATH}/passwd-change.sh + +# Setup the 'master' replication on the MySQL server +mysql $mysql_flags <&1 diff --git a/root-common/usr/bin/run-mysqld-slave b/root-common/usr/bin/run-mysqld-slave new file mode 100755 index 0000000..59f0161 --- /dev/null +++ b/root-common/usr/bin/run-mysqld-slave @@ -0,0 +1,58 @@ +#!/bin/bash +# +# This is an entrypoint that runs the MySQL server in the 'slave' mode. +# +export_vars=$(cgroup-limits); export $export_vars +source ${CONTAINER_SCRIPTS_PATH}/common.sh +set -eu + +export_setting_variables + +log_volume_info $MYSQL_DATADIR + +# Just run normal server if the data directory is already initialized +if [ -d "${MYSQL_DATADIR}/mysql" ]; then + exec /usr/bin/run-mysqld "$@" +fi + +export MYSQL_RUNNING_AS_SLAVE=1 + +# Generate the unique 'server-id' for this master +export MYSQL_SERVER_ID=$(server_id) +log_info "The 'slave' server-id is ${MYSQL_SERVER_ID}" + +# pre-init files +process_extending_files ${APP_DATA}/mysql-pre-init/ ${CONTAINER_SCRIPTS_PATH}/pre-init/ + +# Initialize MySQL database and wait for the MySQL master to accept +# connections. +initialize_database "$@" +wait_for_mysql_master + +# Get binlog file and position from master +STATUS_INFO=$(mysql --host "$MYSQL_MASTER_SERVICE_NAME" "-u${MYSQL_MASTER_USER}" "-p${MYSQL_MASTER_PASSWORD}" replication -e 'SELECT gtid from replication limit 1\G') +GTID_VALUE=$(echo "$STATUS_INFO" | grep 'gtid:' | head -n 1 | sed -e 's/^\s*gtid: //') + +# checking STATUS_INFO here because empty GTID_VALUE is valid value +if [ -z "${STATUS_INFO}" ] ; then + echo "Could not read GTID value from master" + exit 1 +fi + +mysql $mysql_flags <&1 diff --git a/root-common/usr/bin/usage b/root-common/usr/bin/usage new file mode 100755 index 0000000..feafb93 --- /dev/null +++ b/root-common/usr/bin/usage @@ -0,0 +1,4 @@ +#!/bin/bash + +cat /usr/share/container-scripts/mysql/README.md + diff --git a/root-common/usr/libexec/container-setup b/root-common/usr/libexec/container-setup new file mode 100755 index 0000000..6160d4e --- /dev/null +++ b/root-common/usr/libexec/container-setup @@ -0,0 +1,59 @@ +#!/bin/bash + +# This function returns all config files that daemon uses and their path +# includes /opt. It is used to get correct path to the config file. 
+mysql_get_config_files_scl() { + scl enable ${ENABLED_COLLECTIONS} -- my_print_defaults --help --verbose | \ + grep --after=1 '^Default options' | \ + tail -n 1 | \ + grep -o '[^ ]*opt[^ ]*my.cnf' +} + +# This function picks the main config file that the daemon uses and that we ship in the rpm +mysql_get_correct_config() { + # we use the same config in non-SCL packages, so it is not necessary to guess + [ -z "${ENABLED_COLLECTIONS}" ] && echo -n "/etc/my.cnf" && return + + # from all config files read by daemon, pick the first that exists + for f in `mysql_get_config_files_scl` ; do + [ -f "$f" ] && echo "$f" + done | head -n 1 +} + +export MYSQL_CONFIG_FILE=$(mysql_get_correct_config) + +[ -z "$MYSQL_CONFIG_FILE" ] && echo "MYSQL_CONFIG_FILE is empty" && exit 1 + +unset -f mysql_get_correct_config mysql_get_config_files_scl + +# we provide our own config files for the container, so clean what rpm ships here +mkdir -p ${MYSQL_CONFIG_FILE}.d +rm -f ${MYSQL_CONFIG_FILE}.d/* + +# we may add options during service init, so we need to have this dir writable by daemon user +chown -R mysql:0 ${MYSQL_CONFIG_FILE}.d ${MYSQL_CONFIG_FILE} +restorecon -R ${MYSQL_CONFIG_FILE}.d ${MYSQL_CONFIG_FILE} + +# The API of the container is the standard paths /etc/my.cnf and /etc/my.cnf.d +# we already include our own /etc/my.cnf for the container, but for cases where the +# actually used config file is not on the standard path /etc/my.cnf, we +# need to move it to the location the daemon expects and create symlinks +if [ "$MYSQL_CONFIG_FILE" != "/etc/my.cnf" ] ; then + rm -rf /etc/my.cnf.d + mv /etc/my.cnf ${MYSQL_CONFIG_FILE} + ln -s ${MYSQL_CONFIG_FILE} /etc/my.cnf + ln -s ${MYSQL_CONFIG_FILE}.d /etc/my.cnf.d +fi + +# setup directory for data +mkdir -p /var/lib/mysql/data +chown -R mysql:0 /var/lib/mysql +restorecon -R /var/lib/mysql + +# Loosen permission bits for group to avoid problems running container with +# arbitrary UID +# When only specifying user, group is 0, that's why /var/lib/mysql must have +# owner mysql.0; that allows avoiding a+rwx for this dir +/usr/libexec/fix-permissions /var/lib/mysql ${MYSQL_CONFIG_FILE}.d ${APP_DATA}/.. +usermod -a -G root mysql + diff --git a/root-common/usr/libexec/fix-permissions b/root-common/usr/libexec/fix-permissions new file mode 100755 index 0000000..820e718 --- /dev/null +++ b/root-common/usr/libexec/fix-permissions @@ -0,0 +1,6 @@ +#!/bin/sh +# Fix permissions on the given directory to allow group read/write of +# regular files and execute of directories. +find $@ -exec chown mysql:0 {} \; +find $@ -exec chmod g+rw {} \; +find $@ -type d -exec chmod g+x {} + diff --git a/root-common/usr/share/container-scripts/mysql/cnf/40-paas.cnf b/root-common/usr/share/container-scripts/mysql/cnf/40-paas.cnf new file mode 100644 index 0000000..e79f2c5 --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/cnf/40-paas.cnf @@ -0,0 +1,30 @@ +[mysqld] +# +# Settings configured by the user +# + +# Sets how the table names are stored and compared. Default: 0 +lower_case_table_names = ${MYSQL_LOWER_CASE_TABLE_NAMES} + +# Sets whether queries should be logged +general_log = ${MYSQL_LOG_QUERIES_ENABLED} +general_log_file = ${MYSQL_DATADIR}/mysql-query.log + +# The maximum permitted number of simultaneous client connections. Default: 151 +max_connections = ${MYSQL_MAX_CONNECTIONS} + +# The minimum/maximum lengths of the word to be included in a FULLTEXT index. Default: 4/20 +ft_min_word_len = ${MYSQL_FT_MIN_WORD_LEN} +ft_max_word_len = ${MYSQL_FT_MAX_WORD_LEN} + +# In case the native AIO is broken.
Default: 1 +# See http://help.directadmin.com/item.php?id=529 +innodb_use_native_aio = ${MYSQL_AIO} + +[myisamchk] +# The minimum/maximum lengths of the word to be included in a FULLTEXT index. Default: 4/20 +# +# To ensure that myisamchk and the server use the same values for full-text +# parameters, we placed them in both sections. +ft_min_word_len = ${MYSQL_FT_MIN_WORD_LEN} +ft_max_word_len = ${MYSQL_FT_MAX_WORD_LEN} diff --git a/root-common/usr/share/container-scripts/mysql/cnf/50-my-tuning.cnf b/root-common/usr/share/container-scripts/mysql/cnf/50-my-tuning.cnf new file mode 100644 index 0000000..e6b33f4 --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/cnf/50-my-tuning.cnf @@ -0,0 +1,27 @@ +[mysqld] +key_buffer_size = ${MYSQL_KEY_BUFFER_SIZE} +max_allowed_packet = ${MYSQL_MAX_ALLOWED_PACKET} +table_open_cache = ${MYSQL_TABLE_OPEN_CACHE} +sort_buffer_size = ${MYSQL_SORT_BUFFER_SIZE} +read_buffer_size = ${MYSQL_READ_BUFFER_SIZE} +read_rnd_buffer_size = 256K +net_buffer_length = 2K +thread_stack = 256K +myisam_sort_buffer_size = 2M + +# It is recommended that innodb_buffer_pool_size is configured to 50 to 75 percent of system memory. +innodb_buffer_pool_size = ${MYSQL_INNODB_BUFFER_POOL_SIZE} +# Set .._log_file_size to 25 % of buffer pool size +innodb_log_file_size = ${MYSQL_INNODB_LOG_FILE_SIZE} +innodb_log_buffer_size = ${MYSQL_INNODB_LOG_BUFFER_SIZE} + +[mysqldump] +quick +max_allowed_packet = 16M + +[mysql] +no-auto-rehash + +[myisamchk] +key_buffer_size = 8M +sort_buffer_size = 8M diff --git a/root-common/usr/share/container-scripts/mysql/common.sh b/root-common/usr/share/container-scripts/mysql/common.sh new file mode 100644 index 0000000..950eac1 --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/common.sh @@ -0,0 +1,225 @@ +#!/bin/bash + +source ${CONTAINER_SCRIPTS_PATH}/helpers.sh + +# Data directory where MySQL database files live. The data subdirectory is here +# because .bashrc and my.cnf both live in /var/lib/mysql/ and we don't want a +# volume to override it. +export MYSQL_DATADIR=/var/lib/mysql/data + +# Configuration settings. 
+export MYSQL_DEFAULTS_FILE=${MYSQL_DEFAULTS_FILE:-/etc/my.cnf} + +function export_setting_variables() { + export MYSQL_BINLOG_FORMAT=${MYSQL_BINLOG_FORMAT:-STATEMENT} + export MYSQL_LOWER_CASE_TABLE_NAMES=${MYSQL_LOWER_CASE_TABLE_NAMES:-0} + export MYSQL_LOG_QUERIES_ENABLED=${MYSQL_LOG_QUERIES_ENABLED:-0} + export MYSQL_MAX_CONNECTIONS=${MYSQL_MAX_CONNECTIONS:-151} + export MYSQL_FT_MIN_WORD_LEN=${MYSQL_FT_MIN_WORD_LEN:-4} + export MYSQL_FT_MAX_WORD_LEN=${MYSQL_FT_MAX_WORD_LEN:-20} + export MYSQL_AIO=${MYSQL_AIO:-1} + export MYSQL_MAX_ALLOWED_PACKET=${MYSQL_MAX_ALLOWED_PACKET:-200M} + export MYSQL_TABLE_OPEN_CACHE=${MYSQL_TABLE_OPEN_CACHE:-400} + export MYSQL_SORT_BUFFER_SIZE=${MYSQL_SORT_BUFFER_SIZE:-256K} + + # Export memory limit variables and calculate limits + local export_vars=$(cgroup-limits) && export $export_vars || exit 1 + if [ -n "${NO_MEMORY_LIMIT:-}" -o -z "${MEMORY_LIMIT_IN_BYTES:-}" ]; then + export MYSQL_KEY_BUFFER_SIZE=${MYSQL_KEY_BUFFER_SIZE:-32M} + export MYSQL_READ_BUFFER_SIZE=${MYSQL_READ_BUFFER_SIZE:-8M} + export MYSQL_INNODB_BUFFER_POOL_SIZE=${MYSQL_INNODB_BUFFER_POOL_SIZE:-32M} + export MYSQL_INNODB_LOG_FILE_SIZE=${MYSQL_INNODB_LOG_FILE_SIZE:-8M} + export MYSQL_INNODB_LOG_BUFFER_SIZE=${MYSQL_INNODB_LOG_BUFFER_SIZE:-8M} + else + export MYSQL_KEY_BUFFER_SIZE=${MYSQL_KEY_BUFFER_SIZE:-$((MEMORY_LIMIT_IN_BYTES/1024/1024/10))M} + export MYSQL_READ_BUFFER_SIZE=${MYSQL_READ_BUFFER_SIZE:-$((MEMORY_LIMIT_IN_BYTES/1024/1024/20))M} + export MYSQL_INNODB_BUFFER_POOL_SIZE=${MYSQL_INNODB_BUFFER_POOL_SIZE:-$((MEMORY_LIMIT_IN_BYTES/1024/1024/2))M} + # We are multiplying by 15 first and dividing by 100 later so we get as much + # precision as possible with whole numbers. Result is 15% of memory. + export MYSQL_INNODB_LOG_FILE_SIZE=${MYSQL_INNODB_LOG_FILE_SIZE:-$((MEMORY_LIMIT_IN_BYTES*15/1024/1024/100))M} + export MYSQL_INNODB_LOG_BUFFER_SIZE=${MYSQL_INNODB_LOG_BUFFER_SIZE:-$((MEMORY_LIMIT_IN_BYTES*15/1024/1024/100))M} + fi +} + +# this stores whether the database was initialized from empty datadir +export MYSQL_DATADIR_FIRST_INIT=false + +# Be paranoid and stricter than we should be. +# https://dev.mysql.com/doc/refman/en/identifiers.html +mysql_identifier_regex='^[a-zA-Z0-9_]+$' +mysql_password_regex='^[a-zA-Z0-9_~!@#$%^&*()-=<>,.?;:|]+$' + +# Variables that are used to connect to local mysql during initialization +mysql_flags="-u root --socket=/tmp/mysql.sock" +admin_flags="--defaults-file=$MYSQL_DEFAULTS_FILE $mysql_flags" + +# Make sure env variables don't propagate to mysqld process. +function unset_env_vars() { + log_info 'Cleaning up environment variables MYSQL_USER, MYSQL_PASSWORD, MYSQL_DATABASE and MYSQL_ROOT_PASSWORD ...' + unset MYSQL_USER MYSQL_PASSWORD MYSQL_DATABASE MYSQL_ROOT_PASSWORD +} + +# Poll until MySQL responds to our ping. +function wait_for_mysql() { + pid=$1 ; shift + + while [ true ]; do + if [ -d "/proc/$pid" ]; then + mysqladmin --socket=/tmp/mysql.sock ping &>/dev/null && log_info "MySQL started successfully" && return 0 + else + return 1 + fi + log_info "Waiting for MySQL to start ..." + sleep 1 + done +} + +# Start local MySQL server with a defaults file +function start_local_mysql() { + log_info 'Starting MySQL server with disabled networking ...' + ${MYSQL_PREFIX}/libexec/mysqld \ + --defaults-file=$MYSQL_DEFAULTS_FILE \ + --skip-networking --socket=/tmp/mysql.sock "$@" & + mysql_pid=$! + wait_for_mysql $mysql_pid +} + +# Shutdown mysql flushing privileges +function shutdown_local_mysql() { + log_info 'Shutting down MySQL ...' 
+ mysqladmin $admin_flags flush-privileges shutdown +} + +# Initialize the MySQL database (create user accounts and the initial database) +function initialize_database() { + log_info 'Initializing database ...' + log_info 'Running mysql_install_db ...' + # Using --rpm since we need mysql_install_db behaves as in RPM + # Using empty --basedir to work-around https://bugzilla.redhat.com/show_bug.cgi?id=1406391 + mysql_install_db --rpm --datadir=$MYSQL_DATADIR --basedir='' + start_local_mysql "$@" + + if [ -v MYSQL_RUNNING_AS_SLAVE ]; then + log_info 'Initialization finished' + return 0 + fi + + if [ -v MYSQL_RUNNING_AS_MASTER ]; then + # Save master status into a separate database. + STATUS_INFO=$(mysql $admin_flags -e 'SHOW MASTER STATUS\G') + BINLOG_POSITION=$(echo "$STATUS_INFO" | grep 'Position:' | head -n 1 | sed -e 's/^\s*Position: //') + BINLOG_FILE=$(echo "$STATUS_INFO" | grep 'File:' | head -n 1 | sed -e 's/^\s*File: //') + GTID_INFO=$(mysql $admin_flags -e "SELECT BINLOG_GTID_POS('$BINLOG_FILE', '$BINLOG_POSITION') AS gtid_value \G") + GTID_VALUE=$(echo "$GTID_INFO" | grep 'gtid_value:' | head -n 1 | sed -e 's/^\s*gtid_value: //') + + mysqladmin $admin_flags create replication + mysql $admin_flags < "10.0" ] ; then +mysql $mysql_flags </dev/null && log_info "MySQL master is ready" && return 0 + sleep 1 + done +} + +# get_matched_files finds file for image extending +function get_matched_files() { + local custom_dir default_dir + custom_dir="$1" + default_dir="$2" + files_matched="$3" + find "$default_dir" -maxdepth 1 -type f -name "$files_matched" -printf "%f\n" + [ -d "$custom_dir" ] && find "$custom_dir" -maxdepth 1 -type f -name "$files_matched" -printf "%f\n" +} + +# process_extending_files process extending files in $1 and $2 directories +# - source all *.sh files +# (if there are files with same name source only file from $1) +function process_extending_files() { + local custom_dir default_dir + custom_dir=$1 + default_dir=$2 + + while read filename ; do + echo "=> sourcing $filename ..." + # Custom file is prefered + if [ -f $custom_dir/$filename ]; then + source $custom_dir/$filename + else + source $default_dir/$filename + fi + done <<<"$(get_matched_files "$custom_dir" "$default_dir" '*.sh' | sort -u)" +} + +# process extending config files in $1 and $2 directories +# - expand variables in *.cnf and copy the files into /etc/my.cnf.d directory +# (if there are files with same name source only file from $1) +function process_extending_config_files() { + local custom_dir default_dir + custom_dir=$1 + default_dir=$2 + + while read filename ; do + echo "=> sourcing $filename ..." 
+ # Custom file is prefered + if [ -f $custom_dir/$filename ]; then + envsubst < $custom_dir/$filename > /etc/my.cnf.d/$filename + else + envsubst < $default_dir/$filename > /etc/my.cnf.d/$filename + fi + done <<<"$(get_matched_files "$custom_dir" "$default_dir" '*.cnf' | sort -u)" +} diff --git a/root-common/usr/share/container-scripts/mysql/helpers.sh b/root-common/usr/share/container-scripts/mysql/helpers.sh new file mode 100644 index 0000000..4e832fc --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/helpers.sh @@ -0,0 +1,24 @@ +function log_info { + echo "---> `date +%T` $@" +} + +function log_and_run { + log_info "Running $@" + "$@" +} + +function log_volume_info { + CONTAINER_DEBUG=${CONTAINER_DEBUG:-} + if [[ "${CONTAINER_DEBUG,,}" != "true" ]]; then + return + fi + + log_info "Volume info for $@:" + set +e + log_and_run mount + while [ $# -gt 0 ]; do + log_and_run ls -alZ $1 + shift + done + set -e +} diff --git a/root-common/usr/share/container-scripts/mysql/init/50-passwd-change.sh b/root-common/usr/share/container-scripts/mysql/init/50-passwd-change.sh new file mode 100644 index 0000000..9fa0018 --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/init/50-passwd-change.sh @@ -0,0 +1,49 @@ +password_change() { + log_info 'Setting passwords ...' + + # Set the password for MySQL user and root everytime this container is started. + # This allows to change the password by editing the deployment configuration. + if [[ -v MYSQL_USER && -v MYSQL_PASSWORD ]]; then +mysql $mysql_flags < "10.0" ] ; then +mysql $mysql_flags < "10.0" ] ; then +mysql $mysql_flags < /etc/my.cnf.d/base.cnf + diff --git a/root-common/usr/share/container-scripts/mysql/pre-init/60-replication-config.sh b/root-common/usr/share/container-scripts/mysql/pre-init/60-replication-config.sh new file mode 100644 index 0000000..a923476 --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/pre-init/60-replication-config.sh @@ -0,0 +1,17 @@ +# mysqld configuration for replication scenarios + +if [ -v MYSQL_RUNNING_AS_MASTER ] || [ -v MYSQL_RUNNING_AS_SLAVE ] ; then + log_info 'Processing basic MySQL configuration for replication (master and slave) files ...' + envsubst < ${CONTAINER_SCRIPTS_PATH}/pre-init/my-repl-gtid.cnf.template > /etc/my.cnf.d/repl-gtid.cnf +fi + +if [ -v MYSQL_RUNNING_AS_MASTER ] ; then + log_info 'Processing basic MySQL configuration for replication (master only) files ...' + envsubst < ${CONTAINER_SCRIPTS_PATH}/pre-init/my-master.cnf.template > /etc/my.cnf.d/master.cnf +fi + +if [ -v MYSQL_RUNNING_AS_SLAVE ] ; then + log_info 'Processing basic MySQL configuration for replication (slave only) files ...' + envsubst < ${CONTAINER_SCRIPTS_PATH}/pre-init/my-slave.cnf.template > /etc/my.cnf.d/slave.cnf +fi + diff --git a/root-common/usr/share/container-scripts/mysql/pre-init/70-s2i-config.sh b/root-common/usr/share/container-scripts/mysql/pre-init/70-s2i-config.sh new file mode 100644 index 0000000..7a8ae5a --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/pre-init/70-s2i-config.sh @@ -0,0 +1,6 @@ +# additional arbitrary mysqld configuration provided by user using s2i + +log_info 'Processing additional arbitrary MySQL configuration provided by s2i ...' 
+ +process_extending_config_files ${APP_DATA}/mysql-cfg/ ${CONTAINER_SCRIPTS_PATH}/cnf/ + diff --git a/root-common/usr/share/container-scripts/mysql/pre-init/my-base.cnf.template b/root-common/usr/share/container-scripts/mysql/pre-init/my-base.cnf.template new file mode 100644 index 0000000..c654f7f --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/pre-init/my-base.cnf.template @@ -0,0 +1,5 @@ +[mysqld] +datadir = ${MYSQL_DATADIR} +basedir = ${MYSQL_PREFIX} +plugin-dir = ${MYSQL_PREFIX}/lib64/mysql/plugin + diff --git a/root-common/usr/share/container-scripts/mysql/pre-init/my-master.cnf.template b/root-common/usr/share/container-scripts/mysql/pre-init/my-master.cnf.template new file mode 100644 index 0000000..f434885 --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/pre-init/my-master.cnf.template @@ -0,0 +1,7 @@ +[mysqld] + +server-id = ${MYSQL_SERVER_ID} +log_bin = ${MYSQL_DATADIR}/mysql-bin.log +binlog_do_db = mysql +binlog_do_db = ${MYSQL_DATABASE} +binlog_format = ${MYSQL_BINLOG_FORMAT} diff --git a/root-common/usr/share/container-scripts/mysql/pre-init/my-repl-gtid.cnf.template b/root-common/usr/share/container-scripts/mysql/pre-init/my-repl-gtid.cnf.template new file mode 100644 index 0000000..a74a74c --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/pre-init/my-repl-gtid.cnf.template @@ -0,0 +1,4 @@ +[mysqld] + +log-slave-updates = ON + diff --git a/root-common/usr/share/container-scripts/mysql/pre-init/my-slave.cnf.template b/root-common/usr/share/container-scripts/mysql/pre-init/my-slave.cnf.template new file mode 100644 index 0000000..5bdf109 --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/pre-init/my-slave.cnf.template @@ -0,0 +1,7 @@ +[mysqld] + +server-id = ${MYSQL_SERVER_ID} +log_bin = ${MYSQL_DATADIR}/mysql-bin.log +relay-log = ${MYSQL_DATADIR}/mysql-relay-bin.log +binlog_do_db = mysql +binlog_do_db = ${MYSQL_DATABASE} diff --git a/root-common/usr/share/container-scripts/mysql/scl_enable b/root-common/usr/share/container-scripts/mysql/scl_enable new file mode 100644 index 0000000..5a25432 --- /dev/null +++ b/root-common/usr/share/container-scripts/mysql/scl_enable @@ -0,0 +1,3 @@ +# This will make scl collection binaries work out of box. +unset BASH_ENV PROMPT_COMMAND ENV +source scl_source enable ${ENABLED_COLLECTIONS} diff --git a/root/etc/my.cnf b/root/etc/my.cnf deleted file mode 100644 index 0844075..0000000 --- a/root/etc/my.cnf +++ /dev/null @@ -1,12 +0,0 @@ -[mysqld] - -# Disabling symbolic-links is recommended to prevent assorted security risks -symbolic-links = 0 - -# http://www.percona.com/blog/2008/05/31/dns-achilles-heel-mysql-installation/ -skip_name_resolve - -# http://www.chriscalender.com/ignoring-the-lostfound-directory-in-your-datadir/ -ignore-db-dir=lost+found - -!includedir /etc/my.cnf.d diff --git a/root/help.1 b/root/help.1 deleted file mode 100644 index 59c37b8..0000000 --- a/root/help.1 +++ /dev/null @@ -1,332 +0,0 @@ -.\"t -.\" WARNING: Do not edit this file manually, it is generated from README.md automatically. -.\" -.\"t -.\" Automatically generated by Pandoc 1.16.0.2 -.\" -.TH "MARIADB\-101\-RHEL7" "1" "February 22, 2017" "Container Image Pages" "" -.hy -.SH MariaDB Docker image -.PP -This container image includes MariaDB server 10.1 for OpenShift and -general usage. -Users can choose between RHEL and CentOS based images. -.PP -Dockerfile for CentOS is called Dockerfile, Dockerfile for RHEL is -called Dockerfile.rhel7. 
-.SS Environment variables and volumes -.PP -The image recognizes the following environment variables that you can -set during initialization by passing \f[C]\-e\ VAR=VALUE\f[] to the -Docker run command. -.PP -.TS -tab(@); -l l. -T{ -Variable name -T}@T{ -Description -T} -_ -T{ -\f[C]MYSQL_USER\f[] -T}@T{ -User name for MySQL account to be created -T} -T{ -\f[C]MYSQL_PASSWORD\f[] -T}@T{ -Password for the user account -T} -T{ -\f[C]MYSQL_DATABASE\f[] -T}@T{ -Database name -T} -T{ -\f[C]MYSQL_ROOT_PASSWORD\f[] -T}@T{ -Password for the root user (optional) -T} -.TE -.PP -The following environment variables influence the MySQL configuration -file. -They are all optional. -.PP -.TS -tab(@); -lw(17.2n) lw(35.5n) lw(17.2n). -T{ -Variable name -T}@T{ -Description -T}@T{ -Default -T} -_ -T{ -\f[C]MYSQL_LOWER_CASE_TABLE_NAMES\f[] -T}@T{ -Sets how the table names are stored and compared -T}@T{ -0 -T} -T{ -\f[C]MYSQL_MAX_CONNECTIONS\f[] -T}@T{ -The maximum permitted number of simultaneous client connections -T}@T{ -151 -T} -T{ -\f[C]MYSQL_MAX_ALLOWED_PACKET\f[] -T}@T{ -The maximum size of one packet or any generated/intermediate string -T}@T{ -200M -T} -T{ -\f[C]MYSQL_FT_MIN_WORD_LEN\f[] -T}@T{ -The minimum length of the word to be included in a FULLTEXT index -T}@T{ -4 -T} -T{ -\f[C]MYSQL_FT_MAX_WORD_LEN\f[] -T}@T{ -The maximum length of the word to be included in a FULLTEXT index -T}@T{ -20 -T} -T{ -\f[C]MYSQL_AIO\f[] -T}@T{ -Controls the \f[C]innodb_use_native_aio\f[] setting value in case the -native AIO is broken. -See http://help.directadmin.com/item.php?id=529 -T}@T{ -1 -T} -T{ -\f[C]MYSQL_TABLE_OPEN_CACHE\f[] -T}@T{ -The number of open tables for all threads -T}@T{ -400 -T} -T{ -\f[C]MYSQL_KEY_BUFFER_SIZE\f[] -T}@T{ -The size of the buffer used for index blocks -T}@T{ -32M (or 10% of available memory) -T} -T{ -\f[C]MYSQL_SORT_BUFFER_SIZE\f[] -T}@T{ -The size of the buffer used for sorting -T}@T{ -256K -T} -T{ -\f[C]MYSQL_READ_BUFFER_SIZE\f[] -T}@T{ -The size of the buffer used for a sequential scan -T}@T{ -8M (or 5% of available memory) -T} -T{ -\f[C]MYSQL_INNODB_BUFFER_POOL_SIZE\f[] -T}@T{ -The size of the buffer pool where InnoDB caches table and index data -T}@T{ -32M (or 50% of available memory) -T} -T{ -\f[C]MYSQL_INNODB_LOG_FILE_SIZE\f[] -T}@T{ -The size of each log file in a log group -T}@T{ -8M (or 15% of available available) -T} -T{ -\f[C]MYSQL_INNODB_LOG_BUFFER_SIZE\f[] -T}@T{ -The size of the buffer that InnoDB uses to write to the log files on -disk -T}@T{ -8M (or 15% of available memory) -T} -T{ -\f[C]MYSQL_DEFAULTS_FILE\f[] -T}@T{ -Point to an alternative configuration file -T}@T{ -/etc/my.cnf -T} -T{ -\f[C]MYSQL_BINLOG_FORMAT\f[] -T}@T{ -Set sets the binlog format, supported values are \f[C]row\f[] and -\f[C]statement\f[] -T}@T{ -statement -T} -.TE -.PP -You can also set the following mount points by passing the -\f[C]\-v\ /host:/container\f[] flag to Docker. -.PP -.TS -tab(@); -l l. -T{ -Volume mount point -T}@T{ -Description -T} -_ -T{ -\f[C]/var/lib/mysql/data\f[] -T}@T{ -MySQL data directory -T} -.TE -.PP -\f[B]Notice: When mouting a directory from the host into the container, -ensure that the mounted directory has the appropriate permissions and -that the owner and group of the directory matches the user UID or name -which is running inside the container.\f[] -.SS Usage -.PP -For this, we will assume that you are using the -\f[C]rhscl/mariadb\-100\-rhel7\f[] image. 
-If you want to set only the mandatory environment variables and not -store the database in a host directory, execute the following command: -.IP -.nf -\f[C] -$\ docker\ run\ \-d\ \-\-name\ mariadb_database\ \-e\ MYSQL_USER=user\ \-e\ MYSQL_PASSWORD=pass\ \-e\ MYSQL_DATABASE=db\ \-p\ 3306:3306\ rhscl/mariadb\-100\-rhel7 -\f[] -.fi -.PP -This will create a container named \f[C]mariadb_database\f[] running -MySQL with database \f[C]db\f[] and user with credentials -\f[C]user:pass\f[]. -Port 3306 will be exposed and mapped to the host. -If you want your database to be persistent across container executions, -also add a \f[C]\-v\ /host/db/path:/var/lib/mysql/data\f[] argument. -This will be the MySQL data directory. -.PP -If the database directory is not initialized, the entrypoint script will -first run -\f[C]mysql_install_db\f[] (https://dev.mysql.com/doc/refman/5.6/en/mysql-install-db.html) -and setup necessary database users and passwords. -After the database is initialized, or if it was already present, -\f[C]mysqld\f[] is executed and will run as PID 1. -You can stop the detached container by running -\f[C]docker\ stop\ mariadb_database\f[]. -.SS MariaDB auto\-tuning -.PP -When the MySQL image is run with the \f[C]\-\-memory\f[] parameter set -and you didn\[aq]t specify value for some parameters, their values will -be automatically calculated based on the available memory. -.PP -.TS -tab(@); -l l l. -T{ -Variable name -T}@T{ -Configuration parameter -T}@T{ -Relative value -T} -_ -T{ -\f[C]MYSQL_KEY_BUFFER_SIZE\f[] -T}@T{ -\f[C]key_buffer_size\f[] -T}@T{ -10% -T} -T{ -\f[C]MYSQL_READ_BUFFER_SIZE\f[] -T}@T{ -\f[C]read_buffer_size\f[] -T}@T{ -5% -T} -T{ -\f[C]MYSQL_INNODB_BUFFER_POOL_SIZE\f[] -T}@T{ -\f[C]innodb_buffer_pool_size\f[] -T}@T{ -50% -T} -T{ -\f[C]MYSQL_INNODB_LOG_FILE_SIZE\f[] -T}@T{ -\f[C]innodb_log_file_size\f[] -T}@T{ -15% -T} -T{ -\f[C]MYSQL_INNODB_LOG_BUFFER_SIZE\f[] -T}@T{ -\f[C]innodb_log_buffer_size\f[] -T}@T{ -15% -T} -.TE -.SS MySQL root user -.PP -The root user has no password set by default, only allowing local -connections. -You can set it by setting the \f[C]MYSQL_ROOT_PASSWORD\f[] environment -variable. -This will allow you to login to the root account remotely. -Local connections will still not require a password. -.PP -To disable remote root access, simply unset \f[C]MYSQL_ROOT_PASSWORD\f[] -and restart the container. -.SS Changing passwords -.PP -Since passwords are part of the image configuration, the only supported -method to change passwords for the database user (\f[C]MYSQL_USER\f[]) -and root user is by changing the environment variables -\f[C]MYSQL_PASSWORD\f[] and \f[C]MYSQL_ROOT_PASSWORD\f[], respectively. -.PP -Changing database passwords through SQL statements or any way other than -through the environment variables aforementioned will cause a mismatch -between the values stored in the variables and the actual passwords. -Whenever a database container starts it will reset the passwords to the -values stored in the environment variables. -.SS Default my.cnf file -.PP -With environment variables we are able to customize a lot of different -parameters or configurations for the mysql bootstrap configurations. -If you\[aq]d prefer to use your own configuration file, you can override -the \f[C]MYSQL_DEFAULTS_FILE\f[] env variable with the full path of the -file you wish to use. 
-For example, the default location is \f[C]/etc/my.cnf\f[] but you can -change it to \f[C]/etc/mysql/my.cnf\f[] by setting -\f[C]MYSQL_DEFAULTS_FILE=/etc/mysql/my.cnf\f[] -.SS Changing the replication binlog_format -.PP -Some applications may wish to use \f[C]row\f[] binlog_formats (for -example, those built with change\-data\-capture in mind). -The default replication/binlog format is \f[C]statement\f[] but to -change it you can set the \f[C]MYSQL_BINLOG_FORMAT\f[] environment -variable. -For example \f[C]MYSQL_BINLOG_FORMAT=row\f[]. -Now when you run the database with \f[C]master\f[] replication turned on -(ie, set the Docker/container \f[C]cmd\f[] to be -\f[C]run\-mysqld\-master\f[]) the binlog will emit the actual data for -the rows that change as opposed to the statements (ie, DML like -insert...) that caused the change. -.SH AUTHORS -Red Hat. diff --git a/root/usr/bin/cgroup-limits b/root/usr/bin/cgroup-limits deleted file mode 100755 index b9d4edc..0000000 --- a/root/usr/bin/cgroup-limits +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/python - -""" -Script for parsing cgroup information - -This script will read some limits from the cgroup system and parse -them, printing out "VARIABLE=VALUE" on each line for every limit that is -successfully read. Output of this script can be directly fed into -bash's export command. Recommended usage from a bash script: - - set -o errexit - export_vars=$(cgroup-limits) ; export $export_vars - -Variables currently supported: - MAX_MEMORY_LIMIT_IN_BYTES - Maximum possible limit MEMORY_LIMIT_IN_BYTES can have. This is - currently constant value of 9223372036854775807. - MEMORY_LIMIT_IN_BYTES - Maximum amount of user memory in bytes. If this value is set - to the same value as MAX_MEMORY_LIMIT_IN_BYTES, it means that - there is no limit set. The value is taken from - /sys/fs/cgroup/memory/memory.limit_in_bytes - NUMBER_OF_CORES - Number of detected CPU cores that can be used. This value is - calculated from /sys/fs/cgroup/cpuset/cpuset.cpus - NO_MEMORY_LIMIT - Set to "true" if MEMORY_LIMIT_IN_BYTES is so high that the caller - can act as if no memory limit was set. Undefined otherwise. -""" - -from __future__ import print_function -import sys - - -def _read_file(path): - try: - with open(path, 'r') as f: - return f.read().strip() - except IOError: - return None - - -def get_memory_limit(): - """ - Read memory limit, in bytes. - """ - - limit = _read_file('/sys/fs/cgroup/memory/memory.limit_in_bytes') - if limit is None or not limit.isdigit(): - print("Warning: Can't detect memory limit from cgroups", - file=sys.stderr) - return None - return int(limit) - - -def get_number_of_cores(): - """ - Read number of CPU cores. 
- """ - - core_count = 0 - - line = _read_file('/sys/fs/cgroup/cpuset/cpuset.cpus') - if line is None: - print("Warning: Can't detect number of CPU cores from cgroups", - file=sys.stderr) - return None - - for group in line.split(','): - core_ids = list(map(int, group.split('-'))) - if len(core_ids) == 2: - core_count += core_ids[1] - core_ids[0] + 1 - else: - core_count += 1 - - return core_count - - -if __name__ == "__main__": - env_vars = { - "MAX_MEMORY_LIMIT_IN_BYTES": 9223372036854775807, - "MEMORY_LIMIT_IN_BYTES": get_memory_limit(), - "NUMBER_OF_CORES": get_number_of_cores() - } - - env_vars = {k: v for k, v in env_vars.items() if v is not None} - - if env_vars.get("MEMORY_LIMIT_IN_BYTES", 0) >= 92233720368547: - env_vars["NO_MEMORY_LIMIT"] = "true" - - for key, value in env_vars.items(): - print("{0}={1}".format(key, value)) diff --git a/root/usr/bin/container-entrypoint b/root/usr/bin/container-entrypoint deleted file mode 100755 index 9d8ad4d..0000000 --- a/root/usr/bin/container-entrypoint +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -exec "$@" diff --git a/root/usr/bin/mysqld-master b/root/usr/bin/mysqld-master deleted file mode 120000 index 8a0786e..0000000 --- a/root/usr/bin/mysqld-master +++ /dev/null @@ -1 +0,0 @@ -run-mysqld-master \ No newline at end of file diff --git a/root/usr/bin/mysqld-slave b/root/usr/bin/mysqld-slave deleted file mode 120000 index dc0f58b..0000000 --- a/root/usr/bin/mysqld-slave +++ /dev/null @@ -1 +0,0 @@ -run-mysqld-slave \ No newline at end of file diff --git a/root/usr/bin/run-mysqld b/root/usr/bin/run-mysqld deleted file mode 100755 index cd899a7..0000000 --- a/root/usr/bin/run-mysqld +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -export_vars=$(cgroup-limits); export $export_vars -source ${CONTAINER_SCRIPTS_PATH}/common.sh -set -eu - -[ -f ${CONTAINER_SCRIPTS_PATH}/validate-variables.sh ] && source ${CONTAINER_SCRIPTS_PATH}/validate-variables.sh - -# Process the MySQL configuration files -log_info 'Processing MySQL configuration files ...' -envsubst < ${CONTAINER_SCRIPTS_PATH}/my-base.cnf.template > /etc/my.cnf.d/base.cnf -envsubst < ${CONTAINER_SCRIPTS_PATH}/my-paas.cnf.template > /etc/my.cnf.d/paas.cnf -envsubst < ${CONTAINER_SCRIPTS_PATH}/my-tuning.cnf.template > /etc/my.cnf.d/tuning.cnf - -if [ ! -d "$MYSQL_DATADIR/mysql" ]; then - initialize_database "$@" -else - start_local_mysql "$@" -fi - -if [ -f ${CONTAINER_SCRIPTS_PATH}/passwd-change.sh ]; then - log_info 'Setting passwords ...' - source ${CONTAINER_SCRIPTS_PATH}/passwd-change.sh -fi -if [ -f ${CONTAINER_SCRIPTS_PATH}/post-init.sh ]; then - log_info 'Sourcing post-init.sh ...' - source ${CONTAINER_SCRIPTS_PATH}/post-init.sh -fi - -# Restart the MySQL server with public IP bindings -shutdown_local_mysql -unset_env_vars -log_volume_info $MYSQL_DATADIR -log_info 'Running final exec -- Only MySQL server logs after this point' -exec ${MYSQL_PREFIX}/libexec/mysqld --defaults-file=$MYSQL_DEFAULTS_FILE "$@" 2>&1 diff --git a/root/usr/bin/run-mysqld-master b/root/usr/bin/run-mysqld-master deleted file mode 100755 index 054889e..0000000 --- a/root/usr/bin/run-mysqld-master +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash -# -# This is an entrypoint that runs the MySQL server in the 'master' mode. 
-# -export_vars=$(cgroup-limits); export $export_vars -source ${CONTAINER_SCRIPTS_PATH}/common.sh -set -eu - -export MYSQL_RUNNING_AS_MASTER=1 - -[ -f ${CONTAINER_SCRIPTS_PATH}/validate_replication_variables.sh ] && source ${CONTAINER_SCRIPTS_PATH}/validate_replication_variables.sh -[ -f ${CONTAINER_SCRIPTS_PATH}/validate_variables.sh ] && source ${CONTAINER_SCRIPTS_PATH}/validate_variables.sh - -# The 'server-id' for master needs to be constant -export MYSQL_SERVER_ID=1 -log_info "The 'master' server-id is ${MYSQL_SERVER_ID}" - -# Process the MySQL configuration files -log_info 'Processing MySQL configuration files ...' -envsubst < ${CONTAINER_SCRIPTS_PATH}/my-base.cnf.template > /etc/my.cnf.d/base.cnf -envsubst < ${CONTAINER_SCRIPTS_PATH}/my-paas.cnf.template > /etc/my.cnf.d/paas.cnf -envsubst < ${CONTAINER_SCRIPTS_PATH}/my-master.cnf.template > /etc/my.cnf.d/master.cnf -envsubst < ${CONTAINER_SCRIPTS_PATH}/my-repl-gtid.cnf.template > /etc/my.cnf.d/repl-gtid.cnf -envsubst < ${CONTAINER_SCRIPTS_PATH}/my-tuning.cnf.template > /etc/my.cnf.d/tuning.cnf - -if [ ! -d "$MYSQL_DATADIR/mysql" ]; then - initialize_database "$@" -else - start_local_mysql "$@" -fi - -log_info 'Setting passwords ...' -[ -f ${CONTAINER_SCRIPTS_PATH}/passwd-change.sh ] && source ${CONTAINER_SCRIPTS_PATH}/passwd-change.sh - -# Setup the 'master' replication on the MySQL server -mysql $mysql_flags <&1 diff --git a/root/usr/bin/run-mysqld-slave b/root/usr/bin/run-mysqld-slave deleted file mode 100755 index 51acce5..0000000 --- a/root/usr/bin/run-mysqld-slave +++ /dev/null @@ -1,60 +0,0 @@ -#!/bin/bash -# -# This is an entrypoint that runs the MySQL server in the 'slave' mode. -# -export_vars=$(cgroup-limits); export $export_vars -source ${CONTAINER_SCRIPTS_PATH}/common.sh -set -eu - -# Just run normal server if the data directory is already initialized -if [ -d "${MYSQL_DATADIR}/mysql" ]; then - exec /usr/bin/run-mysqld "$@" -fi - -export MYSQL_RUNNING_AS_SLAVE=1 - -[ -f ${CONTAINER_SCRIPTS_PATH}/validate_replication_variables.sh ] && source ${CONTAINER_SCRIPTS_PATH}/validate_replication_variables.sh - -# Generate the unique 'server-id' for this master -export MYSQL_SERVER_ID=$(server_id) -log_info "The 'slave' server-id is ${MYSQL_SERVER_ID}" - -# Process the MySQL configuration files -envsubst < ${CONTAINER_SCRIPTS_PATH}/my-base.cnf.template > /etc/my.cnf.d/base.cnf -envsubst < ${CONTAINER_SCRIPTS_PATH}/my-paas.cnf.template > /etc/my.cnf.d/paas.cnf -envsubst < ${CONTAINER_SCRIPTS_PATH}/my-slave.cnf.template > /etc/my.cnf.d/slave.cnf -envsubst < ${CONTAINER_SCRIPTS_PATH}/my-repl-gtid.cnf.template > /etc/my.cnf.d/repl-gtid.cnf -envsubst < ${CONTAINER_SCRIPTS_PATH}/my-tuning.cnf.template > /etc/my.cnf.d/tuning.cnf - -# Initialize MySQL database and wait for the MySQL master to accept -# connections. 
-initialize_database "$@" -wait_for_mysql_master - -# Get binlog file and position from master -STATUS_INFO=$(mysql --host "$MYSQL_MASTER_SERVICE_NAME" "-u${MYSQL_MASTER_USER}" "-p${MYSQL_MASTER_PASSWORD}" replication -e 'SELECT gtid from replication limit 1\G') -GTID_VALUE=$(echo "$STATUS_INFO" | grep 'gtid:' | head -n 1 | sed -e 's/^\s*gtid: //') - -# checking STATUS_INFO here because empty GTID_VALUE is valid value -if [ -z "${STATUS_INFO}" ] ; then - echo "Could not read GTID value from master" - exit 1 -fi - -mysql $mysql_flags <&1 diff --git a/root/usr/libexec/container-setup b/root/usr/libexec/container-setup deleted file mode 100755 index 29c6ed2..0000000 --- a/root/usr/libexec/container-setup +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash - -# This function returns all config files that daemon uses and their path -# includes /opt. It is used to get correct path to the config file. -mysql_get_config_files_scl() { - scl enable ${ENABLED_COLLECTIONS} -- my_print_defaults --help --verbose | \ - grep --after=1 '^Default options' | \ - tail -n 1 | \ - grep -o '[^ ]*opt[^ ]*my.cnf' -} - -# This function picks the main config file that deamon uses and we ship in rpm -mysql_get_correct_config() { - # we use the same config in non-SCL packages, not necessary to guess - [ -z "${ENABLED_COLLECTIONS}" ] && echo -n "/etc/my.cnf" && return - - # from all config files read by daemon, pick the first that exists - for f in `mysql_get_config_files_scl` ; do - [ -f "$f" ] && echo "$f" - done | head -n 1 -} - -export MYSQL_CONFIG_FILE=$(mysql_get_correct_config) - -[ -z "$MYSQL_CONFIG_FILE" ] && echo "MYSQL_CONFIG_FILE is empty" && exit 1 - -unset -f mysql_get_correct_config mysql_get_config_files_scl - -# we provide own config files for the container, so clean what rpm ships here -mkdir -p ${MYSQL_CONFIG_FILE}.d -rm -f ${MYSQL_CONFIG_FILE}.d/* - -# we may add options during service init, so we need to have this dir writable by daemon user -chown -R mysql:0 ${MYSQL_CONFIG_FILE}.d ${MYSQL_CONFIG_FILE} -restorecon -R ${MYSQL_CONFIG_FILE}.d ${MYSQL_CONFIG_FILE} - -# API of the container are standard paths /etc/my.cnf and /etc/my.cnf.d -# we already include own /etc/my.cnf for container, but for cases the -# actually used config file is not on standard path /etc/my.cnf, we -# need to move it to the location daemon expects it and create symlinks -if [ "$MYSQL_CONFIG_FILE" != "/etc/my.cnf" ] ; then - rm -rf /etc/my.cnf.d - mv /etc/my.cnf ${MYSQL_CONFIG_FILE} - ln -s ${MYSQL_CONFIG_FILE} /etc/my.cnf - ln -s ${MYSQL_CONFIG_FILE}.d /etc/my.cnf.d -fi - -# setup directory for data -mkdir -p /var/lib/mysql/data -chown -R mysql:0 /var/lib/mysql -restorecon -R /var/lib/mysql - -# Loosen permission bits for group to avoid problems running container with -# arbitrary UID -# When only specifying user, group is 0, that's why /var/lib/mysql must have -# owner mysql.0; that allows to avoid a+rwx for this dir -chmod g+w -R /var/lib/mysql ${MYSQL_CONFIG_FILE}.d - diff --git a/root/usr/share/container-scripts/mysql/README.md b/root/usr/share/container-scripts/mysql/README.md index 656dbd9..dcc31d8 100644 --- a/root/usr/share/container-scripts/mysql/README.md +++ b/root/usr/share/container-scripts/mysql/README.md @@ -1,64 +1,34 @@ -MariaDB Docker image -==================== +MariaDB 10.2 SQL Database Server Docker image +============================================= -This container image includes MariaDB server 10.1 for OpenShift and general usage. 
+This container image includes MariaDB 10.2 SQL database server for OpenShift and general usage. Users can choose between RHEL and CentOS based images. +The RHEL image is available in the [Red Hat Container Catalog](https://access.redhat.com/containers/#/registry.access.redhat.com/rhscl/mariadb-102-rhel7) +as registry.access.redhat.com/rhscl/mariadb-102-rhel7. +The CentOS image is then available on [Docker Hub](https://hub.docker.com/r/centos/mariadb-102-centos7/) +as centos/mariadb-102-centos7. -Dockerfile for CentOS is called Dockerfile, Dockerfile for RHEL is called -Dockerfile.rhel7. -Environment variables and volumes ----------------------------------- - -The image recognizes the following environment variables that you can set during -initialization by passing `-e VAR=VALUE` to the Docker run command. - -| Variable name | Description | -| :--------------------- | ----------------------------------------- | -| `MYSQL_USER` | User name for MySQL account to be created | -| `MYSQL_PASSWORD` | Password for the user account | -| `MYSQL_DATABASE` | Database name | -| `MYSQL_ROOT_PASSWORD` | Password for the root user (optional) | +Description +----------- -The following environment variables influence the MySQL configuration file. They are all optional. +This container image provides a containerized packaging of the MariaDB mysqld daemon +and client application. The mysqld server daemon accepts connections from clients +and provides access to content from MySQL databases on behalf of the clients. +You can find more information on the MariaDB project from the project Web site +(https://mariadb.org/). -| Variable name | Description | Default -| :------------------------------ | ----------------------------------------------------------------- | ------------------------------- -| `MYSQL_LOWER_CASE_TABLE_NAMES` | Sets how the table names are stored and compared | 0 -| `MYSQL_MAX_CONNECTIONS` | The maximum permitted number of simultaneous client connections | 151 -| `MYSQL_MAX_ALLOWED_PACKET` | The maximum size of one packet or any generated/intermediate string | 200M -| `MYSQL_FT_MIN_WORD_LEN` | The minimum length of the word to be included in a FULLTEXT index | 4 -| `MYSQL_FT_MAX_WORD_LEN` | The maximum length of the word to be included in a FULLTEXT index | 20 -| `MYSQL_AIO` | Controls the `innodb_use_native_aio` setting value in case the native AIO is broken. See http://help.directadmin.com/item.php?id=529 | 1 -| `MYSQL_TABLE_OPEN_CACHE` | The number of open tables for all threads | 400 -| `MYSQL_KEY_BUFFER_SIZE` | The size of the buffer used for index blocks | 32M (or 10% of available memory) -| `MYSQL_SORT_BUFFER_SIZE` | The size of the buffer used for sorting | 256K -| `MYSQL_READ_BUFFER_SIZE` | The size of the buffer used for a sequential scan | 8M (or 5% of available memory) -| `MYSQL_INNODB_BUFFER_POOL_SIZE`| The size of the buffer pool where InnoDB caches table and index data | 32M (or 50% of available memory) -| `MYSQL_INNODB_LOG_FILE_SIZE` | The size of each log file in a log group | 8M (or 15% of available available) -| `MYSQL_INNODB_LOG_BUFFER_SIZE` | The size of the buffer that InnoDB uses to write to the log files on disk | 8M (or 15% of available memory) -| `MYSQL_DEFAULTS_FILE` | Point to an alternative configuration file | /etc/my.cnf -| `MYSQL_BINLOG_FORMAT` | Set sets the binlog format, supported values are `row` and `statement` | statement - -You can also set the following mount points by passing the `-v /host:/container` flag to Docker. 
- -| Volume mount point | Description | -| :----------------------- | -------------------- | -| `/var/lib/mysql/data` | MySQL data directory | - -**Notice: When mouting a directory from the host into the container, ensure that the mounted -directory has the appropriate permissions and that the owner and group of the directory -matches the user UID or name which is running inside the container.** Usage ---------------------------------- +----- -For this, we will assume that you are using the `rhscl/mariadb-100-rhel7` image. +For this, we will assume that you are using the MariaDB 10.2 container image from the +Red Hat Container Catalog called `rhscl/mariadb-102-rhel7`. If you want to set only the mandatory environment variables and not store the database in a host directory, execute the following command: ``` -$ docker run -d --name mariadb_database -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -p 3306:3306 rhscl/mariadb-100-rhel7 +$ docker run -d --name mariadb_database -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -p 3306:3306 rhscl/mariadb-102-rhel7 ``` This will create a container named `mariadb_database` running MySQL with database @@ -74,6 +44,87 @@ or if it was already present, `mysqld` is executed and will run as PID 1. You ca stop the detached container by running `docker stop mariadb_database`. +Environment variables and volumes +--------------------------------- + +The image recognizes the following environment variables that you can set during +initialization by passing `-e VAR=VALUE` to the Docker run command. + +**`MYSQL_USER`** + User name for MySQL account to be created + +**`MYSQL_PASSWORD`** + Password for the user account + +**`MYSQL_DATABASE`** + Database name + +**`MYSQL_ROOT_PASSWORD`** + Password for the root user (optional) + + +The following environment variables influence the MySQL configuration file. They are all optional. + +**`MYSQL_LOWER_CASE_TABLE_NAMES (default: 0)`** + Sets how the table names are stored and compared + +**`MYSQL_MAX_CONNECTIONS (default: 151)`** + The maximum permitted number of simultaneous client connections + +**`MYSQL_MAX_ALLOWED_PACKET (default: 200M)`** + The maximum size of one packet or any generated/intermediate string + +**`MYSQL_FT_MIN_WORD_LEN (default: 4)`** + The minimum length of the word to be included in a FULLTEXT index + +**`MYSQL_FT_MAX_WORD_LEN (default: 20)`** + The maximum length of the word to be included in a FULLTEXT index + +**`MYSQL_AIO (default: 1)`** + Controls the `innodb_use_native_aio` setting value in case the native AIO is broken. 
See http://help.directadmin.com/item.php?id=529 + +**`MYSQL_TABLE_OPEN_CACHE (default: 400)`** + The number of open tables for all threads + +**`MYSQL_KEY_BUFFER_SIZE (default: 32M or 10% of available memory)`** + The size of the buffer used for index blocks + +**`MYSQL_SORT_BUFFER_SIZE (default: 256K)`** + The size of the buffer used for sorting + +**`MYSQL_READ_BUFFER_SIZE (default: 8M or 5% of available memory)`** + The size of the buffer used for a sequential scan + +**`MYSQL_INNODB_BUFFER_POOL_SIZE (default: 32M or 50% of available memory)`** + The size of the buffer pool where InnoDB caches table and index data + +**`MYSQL_INNODB_LOG_FILE_SIZE (default: 8M or 15% of available memory)`** + The size of each log file in a log group + +**`MYSQL_INNODB_LOG_BUFFER_SIZE (default: 8M or 15% of available memory)`** + The size of the buffer that InnoDB uses to write to the log files on disk + +**`MYSQL_DEFAULTS_FILE (default: /etc/my.cnf)`** + Point to an alternative configuration file + +**`MYSQL_BINLOG_FORMAT (default: statement)`** + Sets the binlog format, supported values are `row` and `statement` + +**`MYSQL_LOG_QUERIES_ENABLED (default: 0)`** + To enable query logging, set this to `1` + + +You can also set the following mount points by passing the `-v /host:/container` flag to Docker. + +**`/var/lib/mysql/data`** + MySQL data directory + + +**Notice: When mounting a directory from the host into the container, ensure that the mounted +directory has the appropriate permissions and that the owner and group of the directory +matches the user UID or name which is running inside the container.** + + MariaDB auto-tuning ------------------- @@ -81,13 +132,21 @@ When the MySQL image is run with the `--memory` parameter set and you didn't specify value for some parameters, their values will be automatically calculated based on the available memory. -| Variable name | Configuration parameter | Relative value -| :-------------------------------| ------------------------- | -------------- -| `MYSQL_KEY_BUFFER_SIZE` | `key_buffer_size` | 10% -| `MYSQL_READ_BUFFER_SIZE` | `read_buffer_size` | 5% -| `MYSQL_INNODB_BUFFER_POOL_SIZE` | `innodb_buffer_pool_size` | 50% -| `MYSQL_INNODB_LOG_FILE_SIZE` | `innodb_log_file_size` | 15% -| `MYSQL_INNODB_LOG_BUFFER_SIZE` | `innodb_log_buffer_size` | 15% +**`MYSQL_KEY_BUFFER_SIZE (default: 10%)`** + `key_buffer_size` + +**`MYSQL_READ_BUFFER_SIZE (default: 5%)`** + `read_buffer_size` + +**`MYSQL_INNODB_BUFFER_POOL_SIZE (default: 50%)`** + `innodb_buffer_pool_size` + +**`MYSQL_INNODB_LOG_FILE_SIZE (default: 15%)`** + `innodb_log_file_size` + +**`MYSQL_INNODB_LOG_BUFFER_SIZE (default: 15%)`** + `innodb_log_buffer_size` + MySQL root user @@ -115,6 +174,7 @@ values stored in the variables and the actual passwords. Whenever a database container starts it will reset the passwords to the values stored in the environment variables. + Default my.cnf file ------------------- With environment variables we are able to customize a lot of different parameters @@ -124,6 +184,82 @@ variable with the full path of the file you wish to use. For example, the defaul location is `/etc/my.cnf` but you can change it to `/etc/mysql/my.cnf` by setting `MYSQL_DEFAULTS_FILE=/etc/mysql/my.cnf` + +Extending image +--------------- +This image can be extended using [source-to-image](https://github.com/openshift/source-to-image).
+Extending image
+---------------
+This image can be extended using [source-to-image](https://github.com/openshift/source-to-image).
+
+For example, to build a customized MariaDB database image `my-mariadb-rhel7`
+with a configuration in `~/image-configuration/` run:
+
+```
+$ s2i build ~/image-configuration/ rhscl/mariadb-102-rhel7 my-mariadb-rhel7
+```
+
+The directory passed to `s2i build` can contain these directories:
+
+`mysql-cfg/`
+  When starting the container, files from this directory will be used as
+  a configuration for the `mysqld` daemon.
+  The `envsubst` command is run on these files to still allow customization of
+  the image using environment variables.
+
+`mysql-pre-init/`
+  Shell scripts (`*.sh`) available in this directory are sourced before the
+  `mysqld` daemon is started.
+
+`mysql-init/`
+  Shell scripts (`*.sh`) available in this directory are sourced when the
+  `mysqld` daemon is started locally. In this phase, use `${mysql_flags}`
+  to connect to the locally running daemon, for example `mysql $mysql_flags < dump.sql`
+
+Variables that can be used in the scripts provided to s2i:
+
+`$mysql_flags`
+  arguments for the `mysql` tool that will connect to the locally running `mysqld` during initialization
+
+`$MYSQL_RUNNING_AS_MASTER`
+  variable defined when the container is run with the `run-mysqld-master` command
+
+`$MYSQL_RUNNING_AS_SLAVE`
+  variable defined when the container is run with the `run-mysqld-slave` command
+
+`$MYSQL_DATADIR_FIRST_INIT`
+  variable defined when the container was initialized from an empty data dir
+
+During `s2i build` all provided files are copied into the `/opt/app-root/src`
+directory in the resulting image. If some configuration files are present
+in the destination directory, files with the same name are overwritten.
+Only one file with a given name can be used for customization, and user-provided
+files are preferred over the default files in
+`/usr/share/container-scripts/mysql/`, so it is possible to overwrite them.
+
+The same configuration directory structure can be used to customize the image
+every time the image is started using `docker run`. The directory has to be
+mounted into `/opt/app-root/src/` in the image
+(`-v ./image-configuration/:/opt/app-root/src/`).
+This overrides the customization built into the image.
+
+
+Securing the connection with SSL
+--------------------------------
+In order to secure the connection with SSL, use the extending feature described
+above. In particular, put the SSL certificates into a separate directory:
+
+    sslapp/mysql-certs/server-cert-selfsigned.pem
+    sslapp/mysql-certs/server-key.pem
+
+And then put a separate configuration file into mysql-cfg:
+
+    $> cat sslapp/mysql-cfg/ssl.cnf
+    [mysqld]
+    ssl-key=${APP_DATA}/mysql-certs/server-key.pem
+    ssl-cert=${APP_DATA}/mysql-certs/server-cert-selfsigned.pem
+
+Such a directory `sslapp` can then be mounted into the container with `-v`,
+or a new container image can be built from it using s2i.
+
 
 Changing the replication binlog_format
 --------------------------------------
 Some applications may wish to use `row` binlog_formats (for example, those built
@@ -133,3 +269,18 @@ Some applications may wish to use `row` binlog_formats (for example, those built
 with `master` replication turned on (ie, set the Docker/container `cmd` to be
 `run-mysqld-master`) the binlog will emit the actual data for the rows that change
 as opposed to the statements (ie, DML like insert...) that caused the change.
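+
+For example (an untested sketch; a full replication setup needs additional
+variables and a slave container, as exercised by the test suite in this
+repository), a master emitting row-based binlogs could be started with:
+
+```
+$ docker run -d --name mariadb_master \
+    -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db \
+    -e MYSQL_ROOT_PASSWORD=root_pass \
+    -e MYSQL_BINLOG_FORMAT=row \
+    rhscl/mariadb-102-rhel7 run-mysqld-master
+```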
+
+
+Troubleshooting
+---------------
+The mysqld daemon in the container logs to the standard output, so the log is available in the container log.
+The log can be examined by running:
+
+    docker logs <container>
+
+
+See also
+--------
+Dockerfile and other sources for this container image are available on
+https://github.com/sclorg/mariadb-container.
+In that repository, the Dockerfile for CentOS is called Dockerfile and the
+Dockerfile for RHEL is called Dockerfile.rhel7.
diff --git a/root/usr/share/container-scripts/mysql/common.sh b/root/usr/share/container-scripts/mysql/common.sh
deleted file mode 100644
index e63a750..0000000
--- a/root/usr/share/container-scripts/mysql/common.sh
+++ /dev/null
@@ -1,164 +0,0 @@
-#!/bin/bash
-
-source ${CONTAINER_SCRIPTS_PATH}/helpers.sh
-
-# Data directory where MySQL database files live. The data subdirectory is here
-# because .bashrc and my.cnf both live in /var/lib/mysql/ and we don't want a
-# volume to override it.
-export MYSQL_DATADIR=/var/lib/mysql/data
-
-# Configuration settings.
-export MYSQL_DEFAULTS_FILE=${MYSQL_DEFAULTS_FILE:-/etc/my.cnf}
-export MYSQL_BINLOG_FORMAT=${MYSQL_BINLOG_FORMAT:-STATEMENT}
-export MYSQL_LOWER_CASE_TABLE_NAMES=${MYSQL_LOWER_CASE_TABLE_NAMES:-0}
-export MYSQL_MAX_CONNECTIONS=${MYSQL_MAX_CONNECTIONS:-151}
-export MYSQL_FT_MIN_WORD_LEN=${MYSQL_FT_MIN_WORD_LEN:-4}
-export MYSQL_FT_MAX_WORD_LEN=${MYSQL_FT_MAX_WORD_LEN:-20}
-export MYSQL_AIO=${MYSQL_AIO:-1}
-export MYSQL_MAX_ALLOWED_PACKET=${MYSQL_MAX_ALLOWED_PACKET:-200M}
-export MYSQL_TABLE_OPEN_CACHE=${MYSQL_TABLE_OPEN_CACHE:-400}
-export MYSQL_SORT_BUFFER_SIZE=${MYSQL_SORT_BUFFER_SIZE:-256K}
-
-if [ -n "${NO_MEMORY_LIMIT:-}" -o -z "${MEMORY_LIMIT_IN_BYTES:-}" ]; then
-  key_buffer_size='32M'
-  read_buffer_size='8M'
-  innodb_buffer_pool_size='32M'
-  innodb_log_file_size='8M'
-  innodb_log_buffer_size='8M'
-else
-  key_buffer_size="$(python -c "print(int((${MEMORY_LIMIT_IN_BYTES}/(1024*1024))*0.1))")M"
-  read_buffer_size="$(python -c "print(int((${MEMORY_LIMIT_IN_BYTES}/(1024*1024))*0.05))")M"
-  innodb_buffer_pool_size="$(python -c "print(int((${MEMORY_LIMIT_IN_BYTES}/(1024*1024))*0.5))")M"
-  innodb_log_file_size="$(python -c "print(int((${MEMORY_LIMIT_IN_BYTES}/(1024*1024))*0.15))")M"
-  innodb_log_buffer_size="$(python -c "print(int((${MEMORY_LIMIT_IN_BYTES}/(1024*1024))*0.15))")M"
-fi
-export MYSQL_KEY_BUFFER_SIZE=${MYSQL_KEY_BUFFER_SIZE:-$key_buffer_size}
-export MYSQL_READ_BUFFER_SIZE=${MYSQL_READ_BUFFER_SIZE:-$read_buffer_size}
-export MYSQL_INNODB_BUFFER_POOL_SIZE=${MYSQL_INNODB_BUFFER_POOL_SIZE:-$innodb_buffer_pool_size}
-export MYSQL_INNODB_LOG_FILE_SIZE=${MYSQL_INNODB_LOG_FILE_SIZE:-$innodb_log_file_size}
-export MYSQL_INNODB_LOG_BUFFER_SIZE=${MYSQL_INNODB_LOG_BUFFER_SIZE:-$innodb_log_buffer_size}
-
-# Be paranoid and stricter than we should be.
-# https://dev.mysql.com/doc/refman/en/identifiers.html
-mysql_identifier_regex='^[a-zA-Z0-9_]+$'
-mysql_password_regex='^[a-zA-Z0-9_~!@#$%^&*()-=<>,.?;:|]+$'
-
-# Variables that are used to connect to local mysql during initialization
-mysql_flags="-u root --socket=/tmp/mysql.sock"
-admin_flags="--defaults-file=$MYSQL_DEFAULTS_FILE $mysql_flags"
-
-# Make sure env variables don't propagate to mysqld process.
-function unset_env_vars() {
-  log_info 'Cleaning up environment variables MYSQL_USER, MYSQL_PASSWORD, MYSQL_DATABASE and MYSQL_ROOT_PASSWORD ...'
-  unset MYSQL_USER MYSQL_PASSWORD MYSQL_DATABASE MYSQL_ROOT_PASSWORD
-}
-
-# Poll until MySQL responds to our ping.
-function wait_for_mysql() { - pid=$1 ; shift - - while [ true ]; do - if [ -d "/proc/$pid" ]; then - mysqladmin --socket=/tmp/mysql.sock ping &>/dev/null && log_info "MySQL started successfully" && return 0 - else - return 1 - fi - log_info "Waiting for MySQL to start ..." - sleep 1 - done -} - -# Start local MySQL server with a defaults file -function start_local_mysql() { - log_info 'Starting MySQL server with disabled networking ...' - ${MYSQL_PREFIX}/libexec/mysqld \ - --defaults-file=$MYSQL_DEFAULTS_FILE \ - --skip-networking --socket=/tmp/mysql.sock "$@" & - mysql_pid=$! - wait_for_mysql $mysql_pid -} - -# Shutdown mysql flushing privileges -function shutdown_local_mysql() { - log_info 'Shutting down MySQL ...' - mysqladmin $admin_flags flush-privileges shutdown -} - -# Initialize the MySQL database (create user accounts and the initial database) -function initialize_database() { - log_info 'Initializing database ...' - log_info 'Running mysql_install_db ...' - # Using --rpm since we need mysql_install_db behaves as in RPM - # Using empty --basedir to work-around https://bugzilla.redhat.com/show_bug.cgi?id=1406391 - mysql_install_db --rpm --datadir=$MYSQL_DATADIR --basedir='' - start_local_mysql "$@" - - if [ -v MYSQL_RUNNING_AS_SLAVE ]; then - log_info 'Initialization finished' - return 0 - fi - - if [ -v MYSQL_RUNNING_AS_MASTER ]; then - # Save master status into a separate database. - STATUS_INFO=$(mysql $admin_flags -e 'SHOW MASTER STATUS\G') - BINLOG_POSITION=$(echo "$STATUS_INFO" | grep 'Position:' | head -n 1 | sed -e 's/^\s*Position: //') - BINLOG_FILE=$(echo "$STATUS_INFO" | grep 'File:' | head -n 1 | sed -e 's/^\s*File: //') - GTID_INFO=$(mysql $admin_flags -e "SELECT BINLOG_GTID_POS('$BINLOG_FILE', '$BINLOG_POSITION') AS gtid_value \G") - GTID_VALUE=$(echo "$GTID_INFO" | grep 'gtid_value:' | head -n 1 | sed -e 's/^\s*gtid_value: //') - - mysqladmin $admin_flags create replication - mysql $admin_flags </dev/null && log_info "MySQL master is ready" && return 0 - sleep 1 - done -} diff --git a/root/usr/share/container-scripts/mysql/helpers.sh b/root/usr/share/container-scripts/mysql/helpers.sh deleted file mode 100644 index 4e832fc..0000000 --- a/root/usr/share/container-scripts/mysql/helpers.sh +++ /dev/null @@ -1,24 +0,0 @@ -function log_info { - echo "---> `date +%T` $@" -} - -function log_and_run { - log_info "Running $@" - "$@" -} - -function log_volume_info { - CONTAINER_DEBUG=${CONTAINER_DEBUG:-} - if [[ "${CONTAINER_DEBUG,,}" != "true" ]]; then - return - fi - - log_info "Volume info for $@:" - set +e - log_and_run mount - while [ $# -gt 0 ]; do - log_and_run ls -alZ $1 - shift - done - set -e -} diff --git a/root/usr/share/container-scripts/mysql/my-base.cnf.template b/root/usr/share/container-scripts/mysql/my-base.cnf.template deleted file mode 100644 index c654f7f..0000000 --- a/root/usr/share/container-scripts/mysql/my-base.cnf.template +++ /dev/null @@ -1,5 +0,0 @@ -[mysqld] -datadir = ${MYSQL_DATADIR} -basedir = ${MYSQL_PREFIX} -plugin-dir = ${MYSQL_PREFIX}/lib64/mysql/plugin - diff --git a/root/usr/share/container-scripts/mysql/my-master.cnf.template b/root/usr/share/container-scripts/mysql/my-master.cnf.template deleted file mode 100644 index f434885..0000000 --- a/root/usr/share/container-scripts/mysql/my-master.cnf.template +++ /dev/null @@ -1,7 +0,0 @@ -[mysqld] - -server-id = ${MYSQL_SERVER_ID} -log_bin = ${MYSQL_DATADIR}/mysql-bin.log -binlog_do_db = mysql -binlog_do_db = ${MYSQL_DATABASE} -binlog_format = ${MYSQL_BINLOG_FORMAT} diff --git 
a/root/usr/share/container-scripts/mysql/my-paas.cnf.template b/root/usr/share/container-scripts/mysql/my-paas.cnf.template deleted file mode 100644 index 11ddd1f..0000000 --- a/root/usr/share/container-scripts/mysql/my-paas.cnf.template +++ /dev/null @@ -1,26 +0,0 @@ -[mysqld] -# -# Settings configured by the user -# - -# Sets how the table names are stored and compared. Default: 0 -lower_case_table_names = ${MYSQL_LOWER_CASE_TABLE_NAMES} - -# The maximum permitted number of simultaneous client connections. Default: 151 -max_connections = ${MYSQL_MAX_CONNECTIONS} - -# The minimum/maximum lengths of the word to be included in a FULLTEXT index. Default: 4/20 -ft_min_word_len = ${MYSQL_FT_MIN_WORD_LEN} -ft_max_word_len = ${MYSQL_FT_MAX_WORD_LEN} - -# In case the native AIO is broken. Default: 1 -# See http://help.directadmin.com/item.php?id=529 -innodb_use_native_aio = ${MYSQL_AIO} - -[myisamchk] -# The minimum/maximum lengths of the word to be included in a FULLTEXT index. Default: 4/20 -# -# To ensure that myisamchk and the server use the same values for full-text -# parameters, we placed them in both sections. -ft_min_word_len = ${MYSQL_FT_MIN_WORD_LEN} -ft_max_word_len = ${MYSQL_FT_MAX_WORD_LEN} diff --git a/root/usr/share/container-scripts/mysql/my-repl-gtid.cnf.template b/root/usr/share/container-scripts/mysql/my-repl-gtid.cnf.template deleted file mode 100644 index a74a74c..0000000 --- a/root/usr/share/container-scripts/mysql/my-repl-gtid.cnf.template +++ /dev/null @@ -1,4 +0,0 @@ -[mysqld] - -log-slave-updates = ON - diff --git a/root/usr/share/container-scripts/mysql/my-slave.cnf.template b/root/usr/share/container-scripts/mysql/my-slave.cnf.template deleted file mode 100644 index 5bdf109..0000000 --- a/root/usr/share/container-scripts/mysql/my-slave.cnf.template +++ /dev/null @@ -1,7 +0,0 @@ -[mysqld] - -server-id = ${MYSQL_SERVER_ID} -log_bin = ${MYSQL_DATADIR}/mysql-bin.log -relay-log = ${MYSQL_DATADIR}/mysql-relay-bin.log -binlog_do_db = mysql -binlog_do_db = ${MYSQL_DATABASE} diff --git a/root/usr/share/container-scripts/mysql/my-tuning.cnf.template b/root/usr/share/container-scripts/mysql/my-tuning.cnf.template deleted file mode 100644 index e90b69a..0000000 --- a/root/usr/share/container-scripts/mysql/my-tuning.cnf.template +++ /dev/null @@ -1,28 +0,0 @@ -[mysqld] -key_buffer_size = ${MYSQL_KEY_BUFFER_SIZE} -max_allowed_packet = ${MYSQL_MAX_ALLOWED_PACKET} -table_open_cache = ${MYSQL_TABLE_OPEN_CACHE} -sort_buffer_size = ${MYSQL_SORT_BUFFER_SIZE} -read_buffer_size = ${MYSQL_READ_BUFFER_SIZE} -read_rnd_buffer_size = 256K -net_buffer_length = 2K -thread_stack = 256K -myisam_sort_buffer_size = 2M - -# It is recommended that innodb_buffer_pool_size is configured to 50 to 75 percent of system memory. -innodb_buffer_pool_size = ${MYSQL_INNODB_BUFFER_POOL_SIZE} -innodb_additional_mem_pool_size = 2M -# Set .._log_file_size to 25 % of buffer pool size -innodb_log_file_size = ${MYSQL_INNODB_LOG_FILE_SIZE} -innodb_log_buffer_size = ${MYSQL_INNODB_LOG_BUFFER_SIZE} - -[mysqldump] -quick -max_allowed_packet = 16M - -[mysql] -no-auto-rehash - -[myisamchk] -key_buffer_size = 8M -sort_buffer_size = 8M diff --git a/root/usr/share/container-scripts/mysql/passwd-change.sh b/root/usr/share/container-scripts/mysql/passwd-change.sh deleted file mode 100644 index ce06f6a..0000000 --- a/root/usr/share/container-scripts/mysql/passwd-change.sh +++ /dev/null @@ -1,23 +0,0 @@ -# Set the password for MySQL user and root everytime this container is started. 
-# This allows to change the password by editing the deployment configuration. -if [[ -v MYSQL_USER && -v MYSQL_PASSWORD ]]; then - mysql $mysql_flags < Installing application source ..." +mv /tmp/src/* ./ 2>/dev/null || true + +# Fix source directory permissions +/usr/libexec/fix-permissions ./ + diff --git a/s2i-common/bin/run b/s2i-common/bin/run new file mode 120000 index 0000000..4b21ab5 --- /dev/null +++ b/s2i-common/bin/run @@ -0,0 +1 @@ +/bin/run-mysqld \ No newline at end of file diff --git a/s2i-common/bin/usage b/s2i-common/bin/usage new file mode 100755 index 0000000..d6a3b9a --- /dev/null +++ b/s2i-common/bin/usage @@ -0,0 +1,8 @@ +#!/bin/sh + +set -o errexit +set -o nounset +set -o pipefail + +groff -t -man -ETascii /help.1 + diff --git a/sources b/sources deleted file mode 100644 index e69de29..0000000 --- a/sources +++ /dev/null diff --git a/test/mariadb-ephemeral-template.json b/test/mariadb-ephemeral-template.json new file mode 100644 index 0000000..c236fd8 --- /dev/null +++ b/test/mariadb-ephemeral-template.json @@ -0,0 +1,254 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "name": "mariadb-ephemeral", + "annotations": { + "openshift.io/display-name": "MariaDB (Ephemeral)", + "description": "MariaDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.2/root/usr/share/container-scripts/mysql/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", + "iconClass": "icon-mariadb", + "tags": "database,mariadb", + "openshift.io/long-description": "This template provides a standalone MariaDB server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. 
The database name, username, and password are chosen via parameters when provisioning this service.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.2/root/usr/share/container-scripts/mysql/README.md", + "openshift.io/support-url": "https://access.redhat.com" + } + }, + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.2/root/usr/share/container-scripts/mysql/README.md.", + "labels": { + "template": "mariadb-ephemeral-template" + }, + "objects": [ + { + "kind": "Secret", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-username": "{.data['database-user']}", + "template.openshift.io/expose-password": "{.data['database-password']}", + "template.openshift.io/expose-root_password": "{.data['database-root-password']}", + "template.openshift.io/expose-database_name": "{.data['database-name']}" + } + }, + "stringData" : { + "database-user" : "${MYSQL_USER}", + "database-password" : "${MYSQL_PASSWORD}", + "database-root-password" : "${MYSQL_ROOT_PASSWORD}", + "database-name" : "${MYSQL_DATABASE}" + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mariadb\")].port}" + } + }, + "spec": { + "ports": [ + { + "name": "mariadb", + "port": 3306 + } + ], + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.alpha.openshift.io/wait-for-ready": "true" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "mariadb" + ], + "from": { + "kind": "ImageStreamTag", + "name": "mariadb:${MARIADB_VERSION}", + "namespace": "${NAMESPACE}" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "template": { + "metadata": { + "labels": { + "name": "${DATABASE_SERVICE_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "mariadb", + "image": " ", + "ports": [ + { + "containerPort": 3306 + } + ], + "readinessProbe": { + "timeoutSeconds": 1, + "initialDelaySeconds": 5, + "exec": { + "command": [ "/bin/sh", "-i", "-c", + "MYSQL_PWD=\"$MYSQL_PASSWORD\" mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'"] + } + }, + "livenessProbe": { + "timeoutSeconds": 1, + "initialDelaySeconds": 30, + "tcpSocket": { + "port": 3306 + } + }, + "env": [ + { + "name": "MYSQL_USER", + "valueFrom": { + "secretKeyRef" : { + "name" : "${DATABASE_SERVICE_NAME}", + "key" : "database-user" + } + } + }, + { + "name": "MYSQL_PASSWORD", + "valueFrom": { + "secretKeyRef" : { + "name" : "${DATABASE_SERVICE_NAME}", + "key" : "database-password" + } + } + }, + { + "name": "MYSQL_ROOT_PASSWORD", + "valueFrom": { + "secretKeyRef" : { + "name" : "${DATABASE_SERVICE_NAME}", + "key" : "database-root-password" + 
} + } + }, + { + "name": "MYSQL_DATABASE", + "valueFrom": { + "secretKeyRef" : { + "name" : "${DATABASE_SERVICE_NAME}", + "key" : "database-name" + } + } + } + ], + "resources": { + "limits": { + "memory": "${MEMORY_LIMIT}" + } + }, + "volumeMounts": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "mountPath": "/var/lib/mysql/data" + } + ], + "imagePullPolicy": "IfNotPresent" + } + ], + "volumes": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "emptyDir": { + "medium": "" + } + } + ] + } + } + } + } + ], + "parameters": [ + { + "name": "MEMORY_LIMIT", + "displayName": "Memory Limit", + "description": "Maximum amount of memory the container can use.", + "value": "512Mi", + "required": true + }, + { + "name": "NAMESPACE", + "displayName": "Namespace", + "description": "The OpenShift Namespace where the ImageStream resides.", + "value": "openshift" + }, + { + "name": "DATABASE_SERVICE_NAME", + "displayName": "Database Service Name", + "description": "The name of the OpenShift Service exposed for the database.", + "value": "mariadb", + "required": true + }, + { + "name": "MYSQL_USER", + "displayName": "MariaDB Connection Username", + "description": "Username for MariaDB user that will be used for accessing the database.", + "generate": "expression", + "from": "user[A-Z0-9]{3}", + "required": true + }, + { + "name": "MYSQL_PASSWORD", + "displayName": "MariaDB Connection Password", + "description": "Password for the MariaDB connection user.", + "generate": "expression", + "from": "[a-zA-Z0-9]{16}", + "required": true + }, + { + "name": "MYSQL_ROOT_PASSWORD", + "displayName": "MariaDB root Password", + "description": "Password for the MariaDB root user.", + "generate": "expression", + "from": "[a-zA-Z0-9]{16}", + "required": true + }, + { + "name": "MYSQL_DATABASE", + "displayName": "MariaDB Database Name", + "description": "Name of the MariaDB database accessed.", + "value": "sampledb", + "required": true + }, + { + "name": "MARIADB_VERSION", + "displayName": "Version of MariaDB Image", + "description": "Version of MariaDB image to be used (10.1, 10.2 or latest).", + "value": "10.2", + "required": true + } + ] +} diff --git a/test/run b/test/run index df15715..f42f9d6 100755 --- a/test/run +++ b/test/run @@ -10,43 +10,41 @@ set -o errexit set -o nounset shopt -s nullglob -IMAGE_NAME=${IMAGE_NAME-centos/mariadb-101-centos7-candidate} +THISDIR=$(dirname ${BASH_SOURCE[0]}) +source ${THISDIR}/test-lib.sh -CIDFILE_DIR=$(mktemp --suffix=mysql_test_cidfiles -d) +TEST_LIST="\ +run_container_creation_tests +run_configuration_tests +run_general_tests +run_change_password_test +run_replication_test +run_doc_test +run_s2i_test +run_ssl_test +" -function cleanup() { - local cidfile - for cidfile in $CIDFILE_DIR/* ; do - local CONTAINER - CONTAINER=$(cat $cidfile) - - echo "Stopping and removing container $CONTAINER..." - docker stop $CONTAINER >/dev/null - local exit_status - exit_status=$(docker inspect -f '{{.State.ExitCode}}' $CONTAINER) - if [ "$exit_status" != "0" ]; then - echo "Inspecting container $CONTAINER" - docker inspect $CONTAINER - echo "Dumping logs for $CONTAINER" - docker logs $CONTAINER - fi - docker rm -v $CONTAINER >/dev/null - rm $cidfile - echo "Done." 
- done - rmdir $CIDFILE_DIR -} -trap cleanup EXIT SIGINT +if [ -e "${IMAGE_NAME:-}" ] ; then + echo "Error: IMAGE_NAME must be specified" + exit 1 +fi -function get_cid() { - local id="$1" ; shift || return 1 - echo $(cat "$CIDFILE_DIR/$id") -} +CID_FILE_DIR=$(mktemp --suffix=mysql_test_cidfiles -d) +TESTSUITE_RESULT=1 +test_dir="$(readlink -f $(dirname "${BASH_SOURCE[0]}"))" -function get_container_ip() { - local id="$1" ; shift - docker inspect --format='{{.NetworkSettings.IPAddress}}' $(get_cid "$id") +s2i_args="--pull-policy=never " + +function cleanup() { + ct_cleanup + + if [ $TESTSUITE_RESULT -eq 0 ] ; then + echo "Tests succeeded." + else + echo "Tests failed." + fi } +trap cleanup EXIT SIGINT function mysql_cmd() { local container_ip="$1"; shift @@ -60,7 +58,7 @@ function test_connection() { local login=$1 ; shift local password=$1 ; shift local ip - ip=$(get_container_ip $name) + ip=$(ct_get_cip $name) echo " Testing MySQL connection to $ip..." local max_attempts=20 local sleep_time=2 @@ -74,7 +72,7 @@ function test_connection() { sleep $sleep_time done echo " Giving up: Failed to connect. Logs:" - docker logs $(get_cid $name) + docker logs $(ct_get_cid $name) return 1 } @@ -95,7 +93,7 @@ function test_mysql() { function create_container() { local name=$1 ; shift - cidfile="$CIDFILE_DIR/$name" + cidfile="$CID_FILE_DIR/$name" # create container with a cidfile in a directory for cleanup local container_id container_id="$(docker run ${DOCKER_ARGS:-} --cidfile $cidfile -d "$@" $IMAGE_NAME ${CONTAINER_ARGS:-})" @@ -104,13 +102,13 @@ function create_container() { function run_change_password_test() { local tmpdir=$(mktemp -d) - mkdir "${tmpdir}/data" && chmod -R a+rwx "${tmpdir}" + chmod -R a+rwx "${tmpdir}" # Create MySQL container with persistent volume and set the initial password create_container "testpass1" -e MYSQL_USER=user -e MYSQL_PASSWORD=foo \ -e MYSQL_DATABASE=db -v ${tmpdir}:/var/lib/mysql/data:Z test_connection testpass1 user foo - docker stop $(get_cid testpass1) >/dev/null + docker stop $(ct_get_cid testpass1) >/dev/null # Create second container with changed password create_container "testpass2" -e MYSQL_USER=user -e MYSQL_PASSWORD=bar \ @@ -118,7 +116,7 @@ function run_change_password_test() { test_connection testpass2 user bar # The old password should not work anymore - if mysql_cmd "$(get_container_ip testpass2)" user foo -e 'SELECT 1;'; then + if mysql_cmd "$(ct_get_cip testpass2)" user foo -e 'SELECT 1;'; then return 1 fi } @@ -131,16 +129,16 @@ function run_replication_test() { docker run $cluster_args -e MYSQL_USER=user -e MYSQL_PASSWORD=foo \ -e MYSQL_ROOT_PASSWORD=root \ -e MYSQL_INNODB_BUFFER_POOL_SIZE=5M \ - -d --cidfile ${CIDFILE_DIR}/master.cid $IMAGE_NAME mysqld-master >/dev/null + -d --cidfile ${CID_FILE_DIR}/master.cid $IMAGE_NAME mysqld-master >/dev/null local master_ip - master_ip=$(get_container_ip master.cid) + master_ip=$(ct_get_cip master.cid) # Run the MySQL slave docker run $cluster_args -e MYSQL_MASTER_SERVICE_NAME=${master_ip} \ -e MYSQL_INNODB_BUFFER_POOL_SIZE=5M \ - -d --cidfile ${CIDFILE_DIR}/slave.cid $IMAGE_NAME mysqld-slave >/dev/null + -d --cidfile ${CID_FILE_DIR}/slave.cid $IMAGE_NAME mysqld-slave >/dev/null local slave_ip - slave_ip=$(get_container_ip slave.cid) + slave_ip=$(ct_get_cip slave.cid) # Now wait till the MASTER will see the SLAVE local i @@ -152,8 +150,8 @@ function run_replication_test() { fi if [[ "${i}" == "${max_attempts}" ]]; then echo "The ${slave_ip} failed to register in MASTER" - echo "Dumping logs for $(get_cid 
slave.cid)" - docker logs $(get_cid slave.cid) + echo "Dumping logs for $(ct_get_cid slave.cid)" + docker logs $(ct_get_cid slave.cid) return 1 fi sleep 1 @@ -173,8 +171,8 @@ function run_replication_test() { fi if [[ "${i}" == "${max_attempts}" ]]; then echo "The ${slave_ip} failed to see value added on MASTER" - echo "Dumping logs for $(get_cid slave.cid)" - docker logs $(get_cid slave.cid) + echo "Dumping logs for $(ct_get_cid slave.cid)" + docker logs $(ct_get_cid slave.cid) return 1 fi sleep 1 @@ -204,7 +202,12 @@ function assert_login_access() { function assert_local_access() { local id="$1" ; shift - docker exec $(get_cid "$id") bash -c 'mysql <<< "SELECT 1;"' + if docker exec $(ct_get_cid "$id") bash -c 'mysql -uroot <<< "SELECT 1;"' ; then + echo " local access granted as expected" + return + fi + echo " local access assertion failed" + return 1 } # Make sure the invocation of docker run fails. @@ -234,9 +237,10 @@ function run_container_creation_tests() { try_image_invalid_combinations -e MYSQL_ROOT_PASSWORD=root_pass local VERY_LONG_DB_NAME="very_long_database_name_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + local VERY_LONG_USER_NAME="very_long_user_name_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" assert_container_creation_fails -e MYSQL_USER=user -e MYSQL_PASSWORD=pass assert_container_creation_fails -e MYSQL_USER=\$invalid -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -e MYSQL_ROOT_PASSWORD=root_pass - assert_container_creation_fails -e MYSQL_USER=very_long_username -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -e MYSQL_ROOT_PASSWORD=root_pass + assert_container_creation_fails -e MYSQL_USER=$VERY_LONG_USER_NAME -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -e MYSQL_ROOT_PASSWORD=root_pass assert_container_creation_fails -e MYSQL_USER=user -e MYSQL_PASSWORD="\"" -e MYSQL_DATABASE=db -e MYSQL_ROOT_PASSWORD=root_pass assert_container_creation_fails -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=\$invalid -e MYSQL_ROOT_PASSWORD=root_pass assert_container_creation_fails -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=$VERY_LONG_DB_NAME -e MYSQL_ROOT_PASSWORD=root_pass @@ -252,7 +256,7 @@ function test_config_option() { local option_value="$4" if ! 
echo "$configuration" | grep -qx "$option_name[[:space:]]*=[[:space:]]*$option_value"; then - local configs="$(docker exec -t "$(get_cid $container_name)" bash -c 'set +f; shopt -s nullglob; echo /etc/my.cnf /etc/my.cnf.d/* /opt/rh/mysql*/root/etc/my.cnf /opt/rh/mysql*/root/etc/my.cnf.d/* | paste -s')" + local configs="$(docker exec -t "$(ct_get_cid $container_name)" bash -c 'set +f; shopt -s nullglob; echo /etc/my.cnf /etc/my.cnf.d/* /opt/rh/mysql*/root/etc/my.cnf /opt/rh/mysql*/root/etc/my.cnf.d/* | paste -s')" echo >&2 "FAIL: option '$option_name' should have value '$option_value', but it wasn't found in any of the configuration files ($configs):" echo >&2 echo >&2 "$configuration" @@ -274,6 +278,7 @@ function run_configuration_tests() { --env MYSQL_PASSWORD=config_test \ --env MYSQL_DATABASE=db \ --env MYSQL_LOWER_CASE_TABLE_NAMES=1 \ + --env MYSQL_LOG_QUERIES_ENABLED=1 \ --env MYSQL_MAX_CONNECTIONS=1337 \ --env MYSQL_FT_MIN_WORD_LEN=8 \ --env MYSQL_FT_MAX_WORD_LEN=15 \ @@ -294,9 +299,10 @@ function run_configuration_tests() { # - we should look for an option in the desired config, not in all of them # - we should respect section of the config (now we have duplicated options from a different sections) local configuration - configuration="$(docker exec -t "$(get_cid $container_name)" bash -c 'set +f; shopt -s nullglob; egrep -hv "^(#|\!|\[|$)" /etc/my.cnf /etc/my.cnf.d/* /opt/rh/mysql*/root/etc/my.cnf /opt/rh/mysql*/root/etc/my.cnf.d/*' | sed 's,\(^[[:space:]]\+\|[[:space:]]\+$\),,' | sort -u)" + configuration="$(docker exec -t "$(ct_get_cid $container_name)" bash -c 'set +f; shopt -s nullglob; egrep -hv "^(#|\!|\[|$)" /etc/my.cnf /etc/my.cnf.d/* /opt/rh/mysql*/root/etc/my.cnf /opt/rh/mysql*/root/etc/my.cnf.d/*' | sed 's,\(^[[:space:]]\+\|[[:space:]]\+$\),,' | sort -u)" test_config_option "$container_name" "$configuration" lower_case_table_names 1 + test_config_option "$container_name" "$configuration" general_log 1 test_config_option "$container_name" "$configuration" max_connections 1337 test_config_option "$container_name" "$configuration" ft_min_word_len 8 test_config_option "$container_name" "$configuration" ft_max_word_len 15 @@ -309,7 +315,7 @@ function run_configuration_tests() { test_config_option "$container_name" "$configuration" innodb_log_file_size 4M test_config_option "$container_name" "$configuration" innodb_log_buffer_size 4M - docker stop "$(get_cid $container_name)" >/dev/null + docker stop "$(ct_get_cid $container_name)" >/dev/null echo " Success!" 
echo " Testing image auto-calculated configuration settings" @@ -324,7 +330,7 @@ function run_configuration_tests() { test_connection "$container_name" config_test_user config_test - configuration="$(docker exec -t "$(get_cid $container_name)" bash -c 'set +f; shopt -s nullglob; egrep -hv "^(#|\!|\[|$)" /etc/my.cnf /etc/my.cnf.d/* /opt/rh/mysql*/root/etc/my.cnf /opt/rh/mysql*/root/etc/my.cnf.d/*' | sed 's,\(^[[:space:]]\+\|[[:space:]]\+$\),,' | sort -u)" + configuration="$(docker exec -t "$(ct_get_cid $container_name)" bash -c 'set +f; shopt -s nullglob; egrep -hv "^(#|\!|\[|$)" /etc/my.cnf /etc/my.cnf.d/* /opt/rh/mysql*/root/etc/my.cnf /opt/rh/mysql*/root/etc/my.cnf.d/*' | sed 's,\(^[[:space:]]\+\|[[:space:]]\+$\),,' | sort -u)" test_config_option "$container_name" "$configuration" key_buffer_size 25M test_config_option "$container_name" "$configuration" read_buffer_size 12M @@ -332,35 +338,11 @@ function run_configuration_tests() { test_config_option "$container_name" "$configuration" innodb_log_file_size 38M test_config_option "$container_name" "$configuration" innodb_log_buffer_size 38M - docker stop "$(get_cid $container_name)" >/dev/null + docker stop "$(ct_get_cid $container_name)" >/dev/null echo " Success!" } -test_scl_usage() { - local name="$1" - local run_cmd="$2" - local expected="$3" - - echo " Testing the image SCL enable" - local out - out=$(docker run --rm ${IMAGE_NAME} /bin/bash -c "${run_cmd}") - if ! echo "${out}" | grep -q "${expected}"; then - echo "ERROR[/bin/bash -c "${run_cmd}"] Expected '${expected}', got '${out}'" - return 1 - fi - out=$(docker exec $(get_cid $name) /bin/bash -c "${run_cmd}" 2>&1) - if ! echo "${out}" | grep -q "${expected}"; then - echo "ERROR[exec /bin/bash -c "${run_cmd}"] Expected '${expected}', got '${out}'" - return 1 - fi - out=$(docker exec $(get_cid $name) /bin/sh -ic "${run_cmd}" 2>&1) - if ! echo "${out}" | grep -q "${expected}"; then - echo "ERROR[exec /bin/sh -ic "${run_cmd}"] Expected '${expected}', got '${out}'" - return 1 - fi -} - function run_tests() { local name=$1 ; shift envs="-e MYSQL_USER=$USER -e MYSQL_PASSWORD=$PASS -e MYSQL_DATABASE=db" @@ -370,10 +352,10 @@ function run_tests() { create_container $name $envs test_connection "$name" "$USER" "$PASS" echo " Testing scl usage" - test_scl_usage $name 'mysql --version' '10.1' + ct_scl_usage_old $name 'mysql --version' "$VERSION" echo " Testing login accesses" local container_ip - container_ip=$(get_container_ip $name) + container_ip=$(ct_get_cip $name) assert_login_access "$container_ip" "$USER" "$PASS" true assert_login_access "$container_ip" "$USER" "${PASS}_foo" false if [ -v ROOT_PASS ]; then @@ -392,18 +374,16 @@ run_doc_test() { local tmpdir=$(mktemp -d) local f echo " Testing documentation in the container image" - # Extract the help files from the container - for f in /usr/share/container-scripts/mysql/README.md help.1 ; do - docker run --rm ${IMAGE_NAME} /bin/bash -c "cat /${f}" >${tmpdir}/$(basename ${f}) - # Check whether the files include some important information - for term in MYSQL_ROOT_PASSWORD volume 3306 ; do - if ! cat ${tmpdir}/$(basename ${f}) | grep -q -e "${term}" ; then - echo "ERROR: File /${f} does not include '${term}'." - return 1 - fi - done + # Extract the help.1 file from the container + docker run --rm ${IMAGE_NAME} /bin/bash -c "cat /help.1" >${tmpdir}/help.1 + # Check whether the help.1 file includes some important information + for term in "MYSQL\_ROOT\_PASSWORD" volume 3306 ; do + if ! 
cat ${tmpdir}/help.1 | grep -F -q -e "${term}" ; then + echo "ERROR: File /help.1 does not include '${term}'." + return 1 + fi done - # Check whether the files use the correct format + # Check whether the file uses the correct format if ! file ${tmpdir}/help.1 | grep -q roff ; then echo "ERROR: /help.1 is not in troff or groff format" return 1 @@ -412,26 +392,111 @@ run_doc_test() { echo } -# Tests. +_s2i_test_image() { + local container_name="$1" + local mount_opts="$2" + echo " Testing s2i app image with invalid configuration" + assert_container_creation_fails -e MYSQL_USER=root -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -e MYSQL_ROOT_PASSWORD=pass + echo " Testing s2i app image with correct configuration" + create_container \ + "$container_name" \ + --env MYSQL_USER=config_test_user \ + --env MYSQL_PASSWORD=config_test \ + --env MYSQL_DATABASE=db \ + --env MYSQL_OPERATIONS_USER=operations_user \ + --env MYSQL_OPERATIONS_PASSWORD=operations_pass \ + ${mount_opts} -run_container_creation_tests + test_connection "$container_name" operations_user operations_pass -run_configuration_tests + configuration="$(docker exec -t "$(ct_get_cid $container_name)" bash -c 'set +f; shopt -s nullglob; egrep -hv "^(#|\!|\[|$)" /etc/my.cnf /etc/my.cnf.d/* /opt/rh/mysql*/root/etc/my.cnf /opt/rh/mysql*/root/etc/my.cnf.d/*' | sed 's,\(^[[:space:]]\+\|[[:space:]]\+$\),,' | sort -u)" -# Set lower buffer pool size to avoid running out of memory. -export CONTAINER_ARGS="run-mysqld --innodb_buffer_pool_size=5242880" + docker stop "$(ct_get_cid $container_name)" >/dev/null +} -# Normal tests -USER=user PASS=pass run_tests no_root -USER=user1 PASS=pass1 ROOT_PASS=r00t run_tests root -# Test with arbitrary uid for the container -DOCKER_ARGS="-u 12345" USER=user PASS=pass run_tests no_root_altuid -DOCKER_ARGS="-u 12345" USER=user1 PASS=pass1 ROOT_PASS=r00t run_tests root_altuid +run_s2i_test() { + echo " Testing s2i usage" + s2i usage ${s2i_args} ${IMAGE_NAME} &>/dev/null -# Test the password change -run_change_password_test + echo " Testing s2i build" + s2i build file://${test_dir}/test-app ${IMAGE_NAME} ${IMAGE_NAME}-testapp + local image_name_backup=${IMAGE_NAME} + export IMAGE_NAME=${IMAGE_NAME}-testapp -# Replication tests -run_replication_test + local container_name=s2i_config_build + _s2i_test_image "s2i_config_build" "" + + # return back original value for IMAGE_NAME + export IMAGE_NAME=${image_name_backup} + + echo " Testing s2i mount" + test_app_dir=$(mktemp -d) + cp -Lr ${test_dir}/test-app ${test_app_dir}/ + chown -R 27:27 ${test_app_dir} + _s2i_test_image "_s2i_test_mount" "-v ${test_app_dir}/test-app:/opt/app-root/src/:z" + rm -rf ${test_app_dir} + echo " Success!" 
+} + +gen_self_signed_cert() { + local output_dir=$1 ; shift + local base_name=$1 ; shift + mkdir -p ${output_dir} + openssl req -newkey rsa:2048 -nodes -keyout ${output_dir}/${base_name}-key.pem -subj '/C=GB/ST=Berkshire/L=Newbury/O=My Server Company' > ${base_name}-req.pem + openssl req -new -x509 -nodes -key ${output_dir}/${base_name}-key.pem -batch > ${output_dir}/${base_name}-cert-selfsigned.pem +} + +run_ssl_test() { + echo " Testing ssl usage" + test_app_dir=$(mktemp -d) + mkdir -p ${test_app_dir}/{mysql-certs,mysql-cfg} + gen_self_signed_cert ${test_app_dir}/mysql-certs server + echo "[mysqld] +ssl-key=\${APP_DATA}/mysql-certs/server-key.pem +ssl-cert=\${APP_DATA}/mysql-certs/server-cert-selfsigned.pem +" >${test_app_dir}/mysql-cfg/ssl.cnf + chown -R 27:27 ${test_app_dir} + + create_container \ + "_s2i_test_ssl" \ + --env MYSQL_USER=ssl_test_user \ + --env MYSQL_PASSWORD=ssl_test \ + --env MYSQL_DATABASE=db \ + -v ${test_app_dir}:/opt/app-root/src/:z + + test_connection "_s2i_test_ssl" ssl_test_user ssl_test + ip=$(ct_get_cip _s2i_test_ssl) + if mysql_cmd "$ip" "ssl_test_user" "ssl_test" --ssl -e 'show status like "Ssl_cipher" \G' | grep 'Value: [A-Z][A-Z0-9-]*' ; then + echo " Success!" + rm -rf ${test_app_dir} + else + echo " FAIL!" + mysql_cmd "$ip" "ssl_test_user" "ssl_test" --ssl -e 'status \G' + return 1 + fi +} + +function run_general_tests() { + # Set lower buffer pool size to avoid running out of memory. + export CONTAINER_ARGS="run-mysqld --innodb_buffer_pool_size=5242880" + + # Normal tests + USER=user PASS=pass run_tests no_root + USER=user1 PASS=pass1 ROOT_PASS=r00t run_tests root + # Test with arbitrary uid for the container + DOCKER_ARGS="-u 12345" USER=user PASS=pass run_tests no_root_altuid + DOCKER_ARGS="-u 12345" USER=user1 PASS=pass1 ROOT_PASS=r00t run_tests root_altuid +} + +function run_all_tests() { + for test_case in $TEST_LIST; do + : "Running test $test_case" + $test_case + done; +} + +# Run the chosen tests +TEST_LIST=${@:-$TEST_LIST} run_all_tests + +TESTSUITE_RESULT=0 -run_doc_test diff --git a/test/run-openshift b/test/run-openshift new file mode 100755 index 0000000..3ba10a7 --- /dev/null +++ b/test/run-openshift @@ -0,0 +1,122 @@ +#!/bin/bash +# +# Test the MariaDB image in OpenShift. +# +# IMAGE_NAME specifies a name of the candidate image used for testing. +# The image has to be available before this script is executed. +# + +THISDIR=$(dirname ${BASH_SOURCE[0]}) + +source ${THISDIR}/test-lib-openshift.sh + +set -exo nounset + +test -n "${IMAGE_NAME-}" || false 'make sure $IMAGE_NAME is defined' +test -n "${VERSION-}" || false 'make sure $VERSION is defined' + +function check_mysql_os_service_connection() { + local util_image_name="${1}" ; shift + local service_name="${1}" ; shift + local user="${1}" ; shift + local pass="${1}" ; shift + local timeout="${1:-60}" ; shift || : + local pod_ip=$(ct_os_get_service_ip ${service_name}) + + : " Service ${service_name} check ..." + + local cmd="echo 'SELECT 42 as testval\g' | mysql --connect-timeout=15 -h ${pod_ip} -u${user} -p${pass}" + local expected_value='^42' + local output + local ret + SECONDS=0 + + echo -n "Waiting for ${service_name} service becoming ready ..." + while true ; do + output=$(docker run --rm ${util_image_name} bash -c "${cmd}" || :) + echo "${output}" | grep -qe "${expected_value}" && ret=0 || ret=1 + if [ ${ret} -eq 0 ] ; then + echo " PASS" + return 0 + fi + echo -n "." 
+ [ ${SECONDS} -gt ${timeout} ] && break + sleep 3 + done + echo " FAIL" + return 1 +} + +function test_mysql_pure_image() { + local image_name=${1:-centos/mariadb-101-centos7} + local image_name_no_namespace=${image_name##*/} + local service_name=${image_name_no_namespace} + + ct_os_new_project + # Create a specific imagestream tag for the image so that oc cannot use anything else + ct_os_upload_image "${image_name}" "$image_name_no_namespace:testing" + + ct_os_deploy_pure_image "$image_name_no_namespace:testing" \ + --name "${service_name}" \ + --env MYSQL_ROOT_PASSWORD=test + + ct_os_wait_pod_ready "${service_name}" 60 + check_mysql_os_service_connection "${image_name}" "${service_name}" root test + + ct_os_delete_project +} + +function test_mysql_template() { + local image_name=${1:-centos/mariadb-101-centos7} + local image_name_no_namespace=${image_name##*/} + local service_name=${image_name_no_namespace} + + ct_os_new_project + ct_os_upload_image "${image_name}" "mariadb:$VERSION" + + ct_os_deploy_template_image ${THISDIR}/mariadb-ephemeral-template.json \ + NAMESPACE="$(oc project -q)" \ + MARIADB_VERSION="$VERSION" \ + DATABASE_SERVICE_NAME="${service_name}" \ + MYSQL_USER=testu \ + MYSQL_PASSWORD=testp \ + MYSQL_DATABASE=testdb + + ct_os_wait_pod_ready "${service_name}" 60 + check_mysql_os_service_connection "${image_name}" "${service_name}" testu testp + + ct_os_delete_project +} + +function test_mysql_s2i() { + local image_name=${1:-centos/mariadb-101-centos7} + local app=${2:-https://github.com/sclorg/mariadb-container.git} + local context_dir=${3:-test/test-app} + local image_name_no_namespace=${image_name##*/} + local service_name="${image_name_no_namespace}-testing" + + ct_os_new_project + # Create a specific imagestream tag for the image so that oc cannot use anything else + ct_os_upload_image "${image_name}" "$image_name_no_namespace:testing" + + ct_os_deploy_s2i_image "$image_name_no_namespace:testing" "${app}" \ + --context-dir="${context_dir}" \ + --name "${service_name}" \ + --env MYSQL_ROOT_PASSWORD=test \ + --env MYSQL_OPERATIONS_USER=testo \ + --env MYSQL_OPERATIONS_PASSWORD=testo \ + --env MYSQL_DATABASE=testopdb \ + --env MYSQL_USER=testnormal \ + --env MYSQL_PASSWORD=testnormal + + ct_os_wait_pod_ready "${service_name}" 60 + check_mysql_os_service_connection "${image_name}" "${service_name}" testo testo 120 + + ct_os_delete_project +} + +ct_os_cluster_up +test_mysql_pure_image ${IMAGE_NAME} +test_mysql_template ${IMAGE_NAME} +# TODO: Can we make the build against examples inside the same PR? 
+test_mysql_s2i ${IMAGE_NAME} "https://github.com/sclorg/mariadb-container.git" test/test-app diff --git a/test/test-app/mysql-cfg/myconfig.cnf b/test/test-app/mysql-cfg/myconfig.cnf new file mode 100644 index 0000000..7764adf --- /dev/null +++ b/test/test-app/mysql-cfg/myconfig.cnf @@ -0,0 +1,3 @@ +[mysqld] +query-cache-limit=262144 + diff --git a/test/test-app/mysql-data/init.sql b/test/test-app/mysql-data/init.sql new file mode 100644 index 0000000..3159982 --- /dev/null +++ b/test/test-app/mysql-data/init.sql @@ -0,0 +1,4 @@ +CREATE TABLE products (id INTEGER, name VARCHAR(256), price FLOAT, variant INTEGER); +CREATE TABLE products_variant (id INTEGER, name VARCHAR(256)); +INSERT INTO products_variant (id, name) VALUES ('1', 'blue'), ('2', 'green'); + diff --git a/test/test-app/mysql-init/80-add-arbitrary-users.sh b/test/test-app/mysql-init/80-add-arbitrary-users.sh new file mode 100644 index 0000000..55ae2d2 --- /dev/null +++ b/test/test-app/mysql-init/80-add-arbitrary-users.sh @@ -0,0 +1,17 @@ +create_arbitrary_users() { + # Do not care what option is compulsory here, just create what is specified + log_info "Creating user specified by MYSQL_OPERATIONS_USER (${MYSQL_OPERATIONS_USER}) ..." +mysql $mysql_flags <&2 + return 1 + fi + echo "${public_ip}" +} + +# ct_os_run_in_pod POD_NAME CMD +# -------------------- +# Runs [cmd] in the pod specified by prefix [pod_prefix]. +# Arguments: pod_name - full name of the pod +# Arguments: cmd - command to be run in the pod +function ct_os_run_in_pod() { + local pod_name="$1" ; shift + + oc exec "$pod_name" -- "$@" +} + +# ct_os_get_service_ip SERVICE_NAME +# -------------------- +# Returns IP of the service specified by [service_name]. +# Arguments: service_name - name of the service +function ct_os_get_service_ip() { + local service_name="${1}" ; shift + oc get "svc/${service_name}" -o yaml | grep clusterIP | \ + cut -d':' -f2 | grep -oe '172\.30\.[0-9\.]*' +} + + +# ct_os_get_all_pods_status +# -------------------- +# Returns status of all pods. +function ct_os_get_all_pods_status() { + oc get pods -o custom-columns=Ready:status.containerStatuses[0].ready,NAME:.metadata.name +} + +# ct_os_get_all_pods_name +# -------------------- +# Returns the full name of all pods. +function ct_os_get_all_pods_name() { + oc get pods --no-headers -o custom-columns=NAME:.metadata.name +} + +# ct_os_get_pod_status POD_PREFIX +# -------------------- +# Returns status of the pod specified by prefix [pod_prefix]. +# Note: Ignores -build and -deploy pods +# Arguments: pod_prefix - prefix or whole ID of the pod +function ct_os_get_pod_status() { + local pod_prefix="${1}" ; shift + ct_os_get_all_pods_status | grep -e "${pod_prefix}" | grep -Ev "(build|deploy)$" \ + | awk '{print $1}' | head -n 1 +} + +# ct_os_get_pod_name POD_PREFIX +# -------------------- +# Returns the full name of pods specified by prefix [pod_prefix]. +# Note: Ignores -build and -deploy pods +# Arguments: pod_prefix - prefix or whole ID of the pod +function ct_os_get_pod_name() { + local pod_prefix="${1}" ; shift + ct_os_get_all_pods_name | grep -e "^${pod_prefix}" | grep -Ev "(build|deploy)$" +} + +# ct_os_get_pod_ip POD_NAME +# -------------------- +# Returns the ip of the pod specified by [pod_name]. +# Arguments: pod_name - full name of the pod +function ct_os_get_pod_ip() { + local pod_name="${1}" + oc get pod "$pod_name" --no-headers -o custom-columns=IP:status.podIP +} + +# ct_os_check_pod_readiness POD_PREFIX STATUS +# -------------------- +# Checks whether the pod is ready. 
+# Arguments: pod_prefix - prefix or whole ID of the pod +# Arguments: status - expected status (true, false) +function ct_os_check_pod_readiness() { + local pod_prefix="${1}" ; shift + local status="${1}" ; shift + test "$(ct_os_get_pod_status ${pod_prefix})" == "${status}" +} + +# ct_os_wait_pod_ready POD_PREFIX TIMEOUT +# -------------------- +# Wait maximum [timeout] for the pod becomming ready. +# Arguments: pod_prefix - prefix or whole ID of the pod +# Arguments: timeout - how many seconds to wait seconds +function ct_os_wait_pod_ready() { + local pod_prefix="${1}" ; shift + local timeout="${1}" ; shift + SECONDS=0 + echo -n "Waiting for ${pod_prefix} pod becoming ready ..." + while ! ct_os_check_pod_readiness "${pod_prefix}" "true" ; do + echo -n "." + [ ${SECONDS} -gt ${timeout} ] && echo " FAIL" && return 1 + sleep 3 + done + echo " DONE" +} + +# ct_os_wait_rc_ready POD_PREFIX TIMEOUT +# -------------------- +# Wait maximum [timeout] for the rc having desired number of replicas ready. +# Arguments: pod_prefix - prefix of the replication controller +# Arguments: timeout - how many seconds to wait seconds +function ct_os_wait_rc_ready() { + local pod_prefix="${1}" ; shift + local timeout="${1}" ; shift + SECONDS=0 + echo -n "Waiting for ${pod_prefix} pod becoming ready ..." + while ! test "$((oc get --no-headers statefulsets; oc get --no-headers rc) 2>/dev/null \ + | grep "^${pod_prefix}" | awk '$2==$3 {print "ready"}')" == "ready" ; do + echo -n "." + [ ${SECONDS} -gt ${timeout} ] && echo " FAIL" && return 1 + sleep 3 + done + echo " DONE" +} + +# ct_os_deploy_pure_image IMAGE [ENV_PARAMS, ...] +# -------------------- +# Runs [image] in the openshift and optionally specifies env_params +# as environment variables to the image. +# Arguments: image - prefix or whole ID of the pod to run the cmd in +# Arguments: env_params - environment variables parameters for the images. +function ct_os_deploy_pure_image() { + local image="${1}" ; shift + # ignore error exit code, because oc new-app returns error when image exists + oc new-app ${image} "$@" || : + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# ct_os_deploy_s2i_image IMAGE APP [ENV_PARAMS, ... ] +# -------------------- +# Runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. +# Arguments: image - prefix or whole ID of the pod to run the cmd in +# Arguments: app - url or local path to git repo with the application sources. +# Arguments: env_params - environment variables parameters for the images. +function ct_os_deploy_s2i_image() { + local image="${1}" ; shift + local app="${1}" ; shift + # ignore error exit code, because oc new-app returns error when image exists + oc new-app "${image}~${app}" "$@" || : + + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# ct_os_deploy_template_image TEMPLATE [ENV_PARAMS, ...] +# -------------------- +# Runs template in the openshift and optionally gives env_params to use +# specific values in the template. +# Arguments: template - prefix or whole ID of the pod to run the cmd in +# Arguments: env_params - environment variables parameters for the template. 
+# Example usage: ct_os_deploy_template_image mariadb-ephemeral-template.yaml \ +# DATABASE_SERVICE_NAME=mysql-57-centos7 \ +# DATABASE_IMAGE=mysql-57-centos7 \ +# MYSQL_USER=testu \ +# MYSQL_PASSWORD=testp \ +# MYSQL_DATABASE=testdb +function ct_os_deploy_template_image() { + local template="${1}" ; shift + oc process -f "${template}" "$@" | oc create -f - + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# _ct_os_get_uniq_project_name +# -------------------- +# Returns a uniq name of the OpenShift project. +function _ct_os_get_uniq_project_name() { + local r + while true ; do + r=${RANDOM} + mkdir /var/tmp/os-test-${r} &>/dev/null && echo test-${r} && break + done +} + +# ct_os_new_project [PROJECT] +# -------------------- +# Creates a new project in the openshfit using 'os' command. +# Arguments: project - project name, uses a new random name if omitted +# Expects 'os' command that is properly logged in to the OpenShift cluster. +# Not using mktemp, because we cannot use uppercase characters. +function ct_os_new_project() { + local project_name="${1:-$(_ct_os_get_uniq_project_name)}" ; shift || : + oc new-project ${project_name} + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# ct_os_delete_project [PROJECT] +# -------------------- +# Deletes the specified project in the openshfit +# Arguments: project - project name, uses the current project if omitted +function ct_os_delete_project() { + local project_name="${1:-$(oc project -q)}" ; shift || : + oc delete project "${project_name}" +} + +# ct_os_docker_login +# -------------------- +# Logs in into docker daemon +function ct_os_docker_login() { + # docker login fails with "404 page not found" error sometimes, just try it more times + for i in `seq 12` ; do + docker login -u developer -p $(oc whoami -t) 172.30.1.1:5000 && return 0 || : + sleep 5 + done + return 1 +} + +# ct_os_upload_image IMAGE [IMAGESTREAM] +# -------------------- +# Uploads image from local registry to the OpenShift internal registry. +# Arguments: image - image name to upload +# Arguments: imagestream - name and tag to use for the internal registry. +# In the format of name:tag ($image_name:latest by default) +function ct_os_upload_image() { + local input_name="${1}" ; shift + local image_name=${input_name##*/} + local imagestream=${1:-$image_name:latest} + local output_name="172.30.1.1:5000/$(oc project -q)/$imagestream" + + ct_os_docker_login + docker tag ${input_name} ${output_name} + docker push ${output_name} +} + +# ct_os_install_in_centos +# -------------------- +# Installs os cluster in CentOS +function ct_os_install_in_centos() { + yum install -y centos-release-openshift-origin + yum install -y wget git net-tools bind-utils iptables-services bridge-utils\ + bash-completion origin-clients docker origin-clients +} + +# ct_os_cluster_up [DIR, IS_PUBLIC, CLUSTER_VERSION] +# -------------------- +# Runs the local OpenShift cluster using 'oc cluster up' and logs in as developer. +# Arguments: dir - directory to keep configuration data in, random if omitted +# Arguments: is_public - sets either private or public hostname for web-UI, +# use "true" for allow remote access to the web-UI, +# "false" is default +# Arguments: cluster_version - version of the OpenShift cluster to use, empty +# means default version of `oc`; example value: v3.7.0; +# also can be specified outside by OC_CLUSTER_VERSION +function ct_os_cluster_up() { + ct_os_cluster_running && echo "Cluster already running. 
Nothing is done." && return 0 + mkdir -p /var/tmp/openshift + local dir="${1:-$(mktemp -d /var/tmp/openshift/os-data-XXXXXX)}" ; shift || : + local is_public="${1:-'false'}" ; shift || : + local default_cluster_version=${OC_CLUSTER_VERSION:-} + local cluster_version=${1:-${default_cluster_version}} ; shift || : + if ! grep -qe '--insecure-registry.*172\.30\.0\.0' /etc/sysconfig/docker ; then + sed -i "s|OPTIONS='|OPTIONS='--insecure-registry 172.30.0.0/16 |" /etc/sysconfig/docker + fi + + systemctl stop firewalld + setenforce 0 + iptables -F + + systemctl restart docker + local cluster_ip="127.0.0.1" + [ "${is_public}" == "true" ] && cluster_ip=$(ct_get_public_ip) + + mkdir -p ${dir}/{config,data,pv} + oc cluster up --host-data-dir=${dir}/data --host-config-dir=${dir}/config \ + --host-pv-dir=${dir}/pv --use-existing-config --public-hostname=${cluster_ip} \ + ${cluster_version:+--version=$cluster_version } + oc version + oc login -u system:admin + oc project default + ct_os_wait_rc_ready docker-registry 180 + ct_os_wait_rc_ready router 30 + oc login -u developer -p developer + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# ct_os_cluster_down +# -------------------- +# Shuts down the local OpenShift cluster using 'oc cluster down' +function ct_os_cluster_down() { + oc cluster down +} + +# ct_os_cluster_running +# -------------------- +# Returns 0 if oc cluster is running +function ct_os_cluster_running() { + oc cluster status &>/dev/null +} + +# ct_os_test_s2i_app_func IMAGE APP CONTEXT_DIR CHECK_CMD [OC_ARGS] +# -------------------- +# Runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. Then check the container by arbitrary +# function given as argument (such an argument may include string, +# that will be replaced with actual IP). +# Arguments: image - prefix or whole ID of the pod to run the cmd in (compulsory) +# Arguments: app - url or local path to git repo with the application sources (compulsory) +# Arguments: context_dir - sub-directory inside the repository with the application sources (compulsory) +# Arguments: check_command - CMD line that checks whether the container works (compulsory; '' will be replaced with actual IP) +# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app` +# command, typically environment variables (optional) +function ct_os_test_s2i_app_func() { + local image_name=${1} + local app=${2} + local context_dir=${3} + local check_command=${4} + local oc_args=${5:-} + local image_name_no_namespace=${image_name##*/} + local service_name="${image_name_no_namespace}-testing" + local image_tagged="${image_name_no_namespace}:testing" + + if [ $# -lt 4 ] || [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then + echo "ERROR: ct_os_test_s2i_app_func() requires at least 4 arguments that cannot be emtpy." 
>&2 + return 1 + fi + + ct_os_new_project + # Create a specific imagestream tag for the image so that oc cannot use anything else + ct_os_upload_image "${image_name}" "${image_tagged}" + + local app_param="${app}" + if [ -d "${app}" ] ; then + # for local directory, we need to copy the content, otherwise too smart os command + # pulls the git remote repository instead + app_param=$(ct_obtain_input "${app}") + fi + + ct_os_deploy_s2i_image "${image_tagged}" "${app_param}" \ + --context-dir="${context_dir}" \ + --name "${service_name}" \ + ${oc_args} + + if [ -d "${app}" ] ; then + # in order to avoid weird race seen sometimes, let's wait shortly + # before starting the build explicitly + sleep 5 + oc start-build "${service_name}" --from-dir="${app_param}" + fi + + ct_os_wait_pod_ready "${service_name}" 300 + + local ip=$(ct_os_get_service_ip "${service_name}") + local check_command_exp=$(echo "$check_command" | sed -e "s//$ip/g") + + echo " Checking APP using $check_command_exp ..." + local result=0 + eval "$check_command_exp" || result=1 + + if [ $result -eq 0 ] ; then + echo " Check passed." + else + echo " Check failed." + fi + + ct_os_delete_project + return $result +} + +# ct_os_test_s2i_app IMAGE APP CONTEXT_DIR EXPECTED_OUTPUT [PORT, PROTOCOL, RESPONSE_CODE, OC_ARGS, ... ] +# -------------------- +# Runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. Then check the http response. +# Arguments: image - prefix or whole ID of the pod to run the cmd in (compulsory) +# Arguments: app - url or local path to git repo with the application sources (compulsory) +# Arguments: context_dir - sub-directory inside the repository with the application sources (compulsory) +# Arguments: expected_output - PCRE regular expression that must match the response body (compulsory) +# Arguments: port - which port to use (optional; default: 8080) +# Arguments: protocol - which protocol to use (optional; default: http) +# Arguments: response_code - what http response code to expect (optional; default: 200) +# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app` +# command, typically environment variables (optional) +function ct_os_test_s2i_app() { + local image_name=${1} + local app=${2} + local context_dir=${3} + local expected_output=${4} + local port=${5:-8080} + local protocol=${6:-http} + local response_code=${7:-200} + local oc_args=${8:-} + + if [ $# -lt 4 ] || [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then + echo "ERROR: ct_os_test_s2i_app() requires at least 4 arguments that cannot be emtpy." >&2 + return 1 + fi + + ct_os_test_s2i_app_func "${image_name}" \ + "${app}" \ + "${context_dir}" \ + "ct_test_response '${protocol}://:${port}' '${response_code}' '${expected_output}'" \ + "${oc_args}" +} + +# ct_os_test_template_app_func IMAGE APP IMAGE_IN_TEMPLATE CHECK_CMD [OC_ARGS] +# -------------------- +# Runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. Then check the container by arbitrary +# function given as argument (such an argument may include string, +# that will be replaced with actual IP). 
+ + # ct_os_test_template_app_func IMAGE APP IMAGE_IN_TEMPLATE CHECK_CMD [OC_ARGS] + # -------------------- + # Runs [image] and [app] in OpenShift and optionally specifies env_params + # as environment variables to the image. Then checks the container by an arbitrary + # function given as argument (such an argument may include the <IP> string, + # which will be replaced with the actual IP). + # Arguments: image_name - prefix or whole ID of the pod to run the cmd in (compulsory) + # Arguments: template - url or local path to a template to use (compulsory) + # Arguments: name_in_template - image name used in the template (compulsory) + # Arguments: check_command - CMD line that checks whether the container works (compulsory; '<IP>' will be replaced with the actual IP) + # Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app` + # command, typically environment variables (optional) + # Arguments: other_images - some templates need other images to be pushed into the OpenShift registry, + # specify them in this parameter as "<image>|<tag>", where "<image>" is a full image name + # (including registry if needed) and "<tag>" is a tag under which the image should be available + # in the OpenShift registry. + function ct_os_test_template_app_func() { + local image_name=${1} + local template=${2} + local name_in_template=${3} + local check_command=${4} + local oc_args=${5:-} + local other_images=${6:-} + + if [ $# -lt 4 ] || [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then + echo "ERROR: ct_os_test_template_app_func() requires at least 4 arguments that cannot be empty." >&2 + return 1 + fi + + local service_name="${name_in_template}-testing" + local image_tagged="${name_in_template}:${VERSION}" + + ct_os_new_project + # Create a specific imagestream tag for the image so that oc cannot use anything else + ct_os_upload_image "${image_name}" "${image_tagged}" + + # upload also other images that the template might need (list of pairs in the format <image>|<tag>) + local image_tag_a + local i_t + for i_t in ${other_images} ; do + echo "${i_t}" + IFS='|' read -ra image_tag_a <<< "${i_t}" + docker pull "${image_tag_a[0]}" + ct_os_upload_image "${image_tag_a[0]}" "${image_tag_a[1]}" + done + + local local_template=$(ct_obtain_input "${template}") + oc new-app ${local_template} \ + -p NAME="${service_name}" \ + -p NAMESPACE="$(oc project -q)" \ + ${oc_args} + + oc start-build "${service_name}" + + ct_os_wait_pod_ready "${service_name}" 300 + + local ip=$(ct_os_get_service_ip "${service_name}") + local check_command_exp=$(echo "$check_command" | sed -e "s/<IP>/$ip/g") + + echo " Checking APP using $check_command_exp ..." + local result=0 + eval "$check_command_exp" || result=1 + + if [ $result -eq 0 ] ; then + echo " Check passed." + else + echo " Check failed." + fi + + ct_os_delete_project + return $result +} + + # params: + # ct_os_test_template_app IMAGE APP IMAGE_IN_TEMPLATE EXPECTED_OUTPUT [PORT, PROTOCOL, RESPONSE_CODE, OC_ARGS, ... ] + # -------------------- + # Runs [image] and [app] in OpenShift and optionally specifies env_params + # as environment variables to the image. Then checks the HTTP response.
+# Arguments: image_name - prefix or whole ID of the pod to run the cmd in (compulsory) + # Arguments: template - url or local path to a template to use (compulsory) + # Arguments: name_in_template - image name used in the template (compulsory) + # Arguments: expected_output - PCRE regular expression that must match the response body (compulsory) + # Arguments: port - which port to use (optional; default: 8080) + # Arguments: protocol - which protocol to use (optional; default: http) + # Arguments: response_code - what http response code to expect (optional; default: 200) + # Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app` + # command, typically environment variables (optional) + # Arguments: other_images - some templates need other images to be pushed into the OpenShift registry, + # specify them in this parameter as "<image>|<tag>", where "<image>" is a full image name + # (including registry if needed) and "<tag>" is a tag under which the image should be available + # in the OpenShift registry. + function ct_os_test_template_app() { + local image_name=${1} + local template=${2} + local name_in_template=${3} + local expected_output=${4} + local port=${5:-8080} + local protocol=${6:-http} + local response_code=${7:-200} + local oc_args=${8:-} + local other_images=${9:-} + + if [ $# -lt 4 ] || [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then + echo "ERROR: ct_os_test_template_app() requires at least 4 arguments that cannot be empty." >&2 + return 1 + fi + + ct_os_test_template_app_func "${image_name}" \ + "${template}" \ + "${name_in_template}" \ + "ct_test_response '${protocol}://<IP>:${port}' '${response_code}' '${expected_output}'" \ + "${oc_args}" \ + "${other_images}" +}
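+ +# Example invocation (hypothetical template and values; the check performed is an HTTP request against the exposed service): +#   ct_os_test_template_app "${IMAGE_NAME}" \ +#                           "https://example.com/myapp-template.json" \ +#                           "myapp" \ +#                           "Welcome to the application" \ +#                           8080 http 200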
+ + # ct_os_test_image_update IMAGE IS CHECK_CMD OC_ARGS + # -------------------- + # Runs an image update test with [image] uploaded to [is] imagestream + # and checks the services using an arbitrary function provided in [check_cmd]. + # Arguments: image - prefix or whole ID of the pod to run the cmd in (compulsory) + # Arguments: is - imagestream to upload the images into (compulsory) + # Arguments: check_cmd - command to be run to check functionality of created services (compulsory; '<IP>' will be replaced with the actual IP) + # Arguments: oc_args - arguments to use during oc new-app (compulsory) + ct_os_test_image_update() { + local image_name=$1; shift + local istag=$1; shift + local check_function=$1; shift + local service_name=${image_name##*/} + local old_image="" ip="" check_command_exp="" registry="" + registry=$(ct_registry_from_os "$OS") + old_image="$registry/$image_name" + + echo "Running image update test for: $image_name" + ct_os_new_project + + # Get the current image from the repository and create an imagestream + docker pull "$old_image:latest" 2>/dev/null + ct_os_upload_image "$old_image" "$istag" + + # Set up the example application with the current image + oc new-app "$@" --name "$service_name" + ct_os_wait_pod_ready "$service_name" 60 + + # Check application output + ip=$(ct_os_get_service_ip "$service_name") + check_command_exp=${check_function//<IP>/$ip} + ct_assert_cmd_success "$check_command_exp" + + # Tag the built image into the imagestream and wait for the rebuild + ct_os_upload_image "$image_name" "$istag" + ct_os_wait_pod_ready "${service_name}-2" 60 + + # Check application output + ip=$(ct_os_get_service_ip "$service_name") + check_command_exp=${check_function//<IP>/$ip} + ct_assert_cmd_success "$check_command_exp" + + ct_os_delete_project +}
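+ +# Example invocation (an illustrative sketch only; the exact check command and oc new-app arguments depend on the tested image): +#   ct_os_test_image_update "${IMAGE_NAME}" "mariadb:testing" \ +#       "mysql -h <IP> -uuser -ppass -e 'SELECT 1;' db" \ +#       -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db "mariadb:testing"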
diff --git a/test/test-lib.sh b/test/test-lib.sh new file mode 100644 index 0000000..dfc63d9 --- /dev/null +++ b/test/test-lib.sh @@ -0,0 +1,402 @@ + # + # Test a container image. + # + # Always source this file from a specific container testfile + # + # requires definition of CID_FILE_DIR + # CID_FILE_DIR=$(mktemp --suffix=_test_cidfiles -d) + # requires definition of TEST_LIST + # TEST_LIST="\ + # ctest_container_creation + # ctest_doc_content" + + # Container CI tests + # abbreviated as "ct" + + # may be redefined in the specific container testfile + EXPECTED_EXIT_CODE=0 + + # ct_cleanup + # -------------------- + # Cleans up containers used during tests. Stops and removes all containers + # referenced by cid_files in CID_FILE_DIR. Dumps logs if a container exited + # unexpectedly. Removes the cid_files and CID_FILE_DIR as well. + # Uses: $CID_FILE_DIR - path to directory containing cid_files + # Uses: $EXPECTED_EXIT_CODE - expected container exit code + function ct_cleanup() { + for cid_file in $CID_FILE_DIR/* ; do + local container=$(cat $cid_file) + + : "Stopping and removing container $container..." + docker stop $container + exit_status=$(docker inspect -f '{{.State.ExitCode}}' $container) + if [ "$exit_status" != "$EXPECTED_EXIT_CODE" ]; then + : "Dumping logs for $container" + docker logs $container + fi + docker rm -v $container + rm $cid_file + done + rmdir $CID_FILE_DIR + : "Done." +} + + # ct_enable_cleanup + # -------------------- + # Enables automatic container cleanup after tests. + function ct_enable_cleanup() { + trap ct_cleanup EXIT SIGINT +} + + # ct_get_cid [name] + # -------------------- + # Prints container id from cid_file based on the name of the file. + # Argument: name - name of cid_file where the container id will be stored + # Uses: $CID_FILE_DIR - path to directory containing cid_files + function ct_get_cid() { + local name="$1" ; shift || return 1 + echo $(cat "$CID_FILE_DIR/$name") +} + + # ct_get_cip [id] + # -------------------- + # Prints container ip address based on the container id. + # Argument: id - container id + function ct_get_cip() { + local id="$1" ; shift + docker inspect --format='{{.NetworkSettings.IPAddress}}' $(ct_get_cid "$id") +} + + # ct_wait_for_cid [cid_file] + # -------------------- + # Holds the execution until the cid_file is created. Usually run after container + # creation. + # Argument: cid_file - name of the cid_file that should be created + function ct_wait_for_cid() { + local cid_file=$1 + local max_attempts=10 + local sleep_time=1 + local attempt=1 + while [ $attempt -le $max_attempts ]; do + [ -f $cid_file ] && [ -s $cid_file ] && return 0 + : "Waiting for container start..." + attempt=$(( $attempt + 1 )) + sleep $sleep_time + done + return 1 +} + + # ct_assert_container_creation_fails [container_args] + # -------------------- + # The invocation of docker run should fail based on invalid container_args + # passed to the function. Returns 0 when the container fails to start properly. + # Argument: container_args - all arguments are passed directly to docker run + # Uses: $CID_FILE_DIR - path to directory containing cid_files + function ct_assert_container_creation_fails() { + local ret=0 + local max_attempts=10 + local attempt=1 + local cid_file=assert + set +e + local old_container_args="${CONTAINER_ARGS-}" + CONTAINER_ARGS="$@" + ct_create_container $cid_file + if [ $? -eq 0 ]; then + local cid=$(ct_get_cid $cid_file) + + while [ "$(docker inspect -f '{{.State.Running}}' $cid)" == "true" ] ; do + sleep 2 + attempt=$(( $attempt + 1 )) + if [ $attempt -gt $max_attempts ]; then + docker stop $cid + ret=1 + break + fi + done + exit_status=$(docker inspect -f '{{.State.ExitCode}}' $cid) + if [ "$exit_status" == "0" ]; then + ret=1 + fi + docker rm -v $cid + rm $CID_FILE_DIR/$cid_file + fi + [ -n "${old_container_args}" ] && CONTAINER_ARGS="$old_container_args" + set -e + return $ret +} + + # ct_create_container [name, command] + # -------------------- + # Creates a container using the IMAGE_NAME and CONTAINER_ARGS variables. Also + # stores the container id to a cid_file located in the CID_FILE_DIR, and waits + # for the creation of the file. + # Argument: name - name of cid_file where the container id will be stored + # Argument: command - optional command to be executed in the container + # Uses: $CID_FILE_DIR - path to directory containing cid_files + # Uses: $CONTAINER_ARGS - optional arguments passed directly to docker run + # Uses: $IMAGE_NAME - name of the image being tested + function ct_create_container() { + local cid_file="$CID_FILE_DIR/$1" ; shift + # create container with a cidfile in a directory for cleanup + docker run --cidfile="$cid_file" -d ${CONTAINER_ARGS:-} $IMAGE_NAME "$@" + ct_wait_for_cid $cid_file || return 1 + : "Created container $(cat $cid_file)" +}
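+ +# Example (an illustrative sketch; a testfile typically sets IMAGE_NAME and CID_FILE_DIR before calling these helpers): +#   CONTAINER_ARGS="-e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db" +#   ct_create_container test_basic_usage +#   # an incomplete credential set (hypothetical case) is expected to make creation fail +#   ct_assert_container_creation_fails -e MYSQL_USER=user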
echo "${out}" | grep -q "${expected}"; then + echo "ERROR[/bin/bash -c "${command}"] Expected '${expected}', got '${out}'" >&2 + return 1 + fi + out=$(docker exec $(ct_get_cid $name) /bin/bash -c "${command}" 2>&1) + if ! echo "${out}" | grep -q "${expected}"; then + echo "ERROR[exec /bin/bash -c "${command}"] Expected '${expected}', got '${out}'" >&2 + return 1 + fi + out=$(docker exec $(ct_get_cid $name) /bin/sh -ic "${command}" 2>&1) + if ! echo "${out}" | grep -q "${expected}"; then + echo "ERROR[exec /bin/sh -ic "${command}"] Expected '${expected}', got '${out}'" >&2 + return 1 + fi +} + +# ct_doc_content_old [strings] +# -------------------- +# Looks for occurence of stirngs in the documentation files and checks +# the format of the files. Files examined: help.1 +# Argument: strings - strings expected to appear in the documentation +# Uses: $IMAGE_NAME - name of the image being tested +function ct_doc_content_old() { + local tmpdir=$(mktemp -d) + local f + : " Testing documentation in the container image" + # Extract the help files from the container + for f in help.1 ; do + docker run --rm ${IMAGE_NAME} /bin/bash -c "cat /${f}" >${tmpdir}/$(basename ${f}) + # Check whether the files contain some important information + for term in $@ ; do + if ! cat ${tmpdir}/$(basename ${f}) | grep -F -q -e "${term}" ; then + echo "ERROR: File /${f} does not include '${term}'." >&2 + return 1 + fi + done + # Check whether the files use the correct format + for term in TH PP SH ; do + if ! grep -q "^\.${term}" ${tmpdir}/help.1 ; then + echo "ERROR: /help.1 is probably not in troff or groff format, since '${term}' is missing." >&2 + return 1 + fi + done + done + : " Success!" +} + + +# ct_path_append PATH_VARNAME DIRECTORY +# ------------------------------------- +# Append DIRECTORY to VARIABLE of name PATH_VARNAME, the VARIABLE must consist +# of colon-separated list of directories. +ct_path_append () +{ + if eval "test -n \"\${$1-}\""; then + eval "$1=\$2:\$$1" + else + eval "$1=\$2" + fi +} + + +# ct_path_foreach PATH ACTION [ARGS ...] +# -------------------------------------- +# For each DIR in PATH execute ACTION (path is colon separated list of +# directories). The particular calls to ACTION will look like +# '$ ACTION directory [ARGS ...]' +ct_path_foreach () +{ + local dir dirlist action save_IFS + save_IFS=$IFS + IFS=: + dirlist=$1 + action=$2 + shift 2 + for dir in $dirlist; do "$action" "$dir" "$@" ; done + IFS=$save_IFS +} + + +# ct_run_test_list +# -------------------- +# Execute the tests specified by TEST_LIST +# Uses: $TEST_LIST - list of test names +function ct_run_test_list() { + for test_case in $TEST_LIST; do + : "Running test $test_case" + [ -f test/$test_case ] && source test/$test_case + [ -f ../test/$test_case ] && source ../test/$test_case + $test_case + done; +} + +# ct_gen_self_signed_cert_pem +# --------------------------- +# Generates a self-signed PEM certificate pair into specified directory. 
+ + # ct_gen_self_signed_cert_pem + # --------------------------- + # Generates a self-signed PEM certificate pair into the specified directory. + # Argument: output_dir - output directory path + # Argument: base_name - base name of the certificate files + # Resulting files will be: + # <output_dir>/<base_name>-cert-selfsigned.pem -- public PEM cert + # <output_dir>/<base_name>-key.pem -- PEM private key + ct_gen_self_signed_cert_pem() { + local output_dir=$1 ; shift + local base_name=$1 ; shift + mkdir -p ${output_dir} + openssl req -newkey rsa:2048 -nodes -keyout ${output_dir}/${base_name}-key.pem -subj '/C=GB/ST=Berkshire/L=Newbury/O=My Server Company' > ${output_dir}/${base_name}-req.pem + openssl req -new -x509 -nodes -key ${output_dir}/${base_name}-key.pem -batch > ${output_dir}/${base_name}-cert-selfsigned.pem +} + + # ct_obtain_input FILE|DIR|URL + # -------------------- + # Either copies a file or a directory to a tmp location for local copies, or + # downloads the file from a remote location. + # The resulting file path is printed so it can later be used by the calling function. + # Arguments: input - local file, directory or remote URL + function ct_obtain_input() { + local input=$1 + local extension="${input##*.}" + + # Try to use the same extension for the temporary file if possible + [[ "${extension}" =~ ^[a-z0-9]*$ ]] && extension=".${extension}" || extension="" + + local output=$(mktemp "/var/tmp/test-input-XXXXXX$extension") + if [ -f "${input}" ] ; then + cp "${input}" "${output}" + elif [ -d "${input}" ] ; then + rm -f "${output}" + cp -r -LH "${input}" "${output}" + elif echo "${input}" | grep -qe '^http\(s\)\?://' ; then + curl "${input}" > "${output}" + else + echo "ERROR: file type not known: ${input}" >&2 + return 1 + fi + echo "${output}" +} + + # ct_test_response + # ---------------- + # Performs a GET request to the application container and checks the output against + # a regexp and the expected HTTP response code. + # Argument: url - request URL path + # Argument: expected_code - expected HTTP response code + # Argument: body_regexp - PCRE regular expression that must match the response body + # Argument: max_attempts - Optional number of attempts (default: 20), with a three-second sleep between attempts + # Argument: ignore_error_attempts - Optional number of attempts when we ignore error output (default: 10) + ct_test_response() { + local url="$1" + local expected_code="$2" + local body_regexp="$3" + local max_attempts=${4:-20} + local ignore_error_attempts=${5:-10} + + : " Testing the HTTP(S) response for <${url}>" + local sleep_time=3 + local attempt=1 + local result=1 + local status + local response_code + local response_file=$(mktemp /tmp/ct_test_response_XXXXXX) + while [ ${attempt} -le ${max_attempts} ]; do + curl --connect-timeout 10 -s -w '%{http_code}' "${url}" >${response_file} && status=0 || status=1 + if [ ${status} -eq 0 ]; then + response_code=$(cat ${response_file} | tail -c 3) + if [ "${response_code}" -eq "${expected_code}" ]; then + result=0 + fi + cat ${response_file} | grep -qP -e "${body_regexp}" || result=1; + # Some services return a 40x code until they are ready, so give them + # a chance and do not end with failure right away. + # Do not wait if we already have the expected outcome, though. + if [ ${result} -eq 0 -o ${attempt} -gt ${ignore_error_attempts} -o ${attempt} -eq ${max_attempts} ] ; then + break + fi + fi + attempt=$(( ${attempt} + 1 )) + sleep ${sleep_time} + done + rm -f ${response_file} + return ${result} +}
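+ +# Example (hypothetical URL; retries until the service answers 200 and the body matches the regexp): +#   ip=$(ct_get_cip test_basic_usage) +#   ct_test_response "http://${ip}:8080/" 200 "Welcome to .*"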
+ + # ct_registry_from_os OS + # ---------------- + # Transforms the operating system string [os] into a registry URL + # Argument: OS - string containing the os version + ct_registry_from_os() { + local registry="" + case $1 in + rhel7) + registry=registry.access.redhat.com + ;; + *) + registry=docker.io + ;; + esac + echo "$registry" +} + + # ct_assert_cmd_success CMD + # ---------------- + # Evaluates [cmd] and fails if it does not succeed. + # Argument: CMD - Command to be run + function ct_assert_cmd_success() { + echo "Checking '$*' for success ..." + if ! eval "$@" &>/dev/null; then + echo " FAIL" + return 1 + fi + echo " PASS" + return 0 +} + + # ct_assert_cmd_failure CMD + # ---------------- + # Evaluates [cmd] and fails if it succeeds. + # Argument: CMD - Command to be run + function ct_assert_cmd_failure() { + echo "Checking '$*' for failure ..." + if eval "$@" &>/dev/null; then + echo " FAIL" + return 1 + fi + echo " PASS" + return 0 +} + + + # ct_random_string [LENGTH=10] + # ---------------------------- + # Generates a pseudorandom lowercase alphanumeric string of LENGTH characters; the + # default length is 10. The string is printed on stdout. + ct_random_string() + ( + export LC_ALL=C + dd if=/dev/urandom count=1 bs=10k 2>/dev/null \ + | tr -dc 'a-z0-9' \ + | fold -w "${1-10}" \ + | head -n 1 + )
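+ +# Example (an illustrative sketch combining the helpers above; ct_random_string is handy for unique test names): +#   user="user_$(ct_random_string 6)" +#   ct_assert_cmd_success "docker run --rm ${IMAGE_NAME} echo OK" +#   ct_assert_cmd_failure "docker run --rm ${IMAGE_NAME} false"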