diff --git a/Dockerfile b/Dockerfile
index 04ee116..587f9ee 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -31,7 +31,6 @@ LABEL summary="$SUMMARY" \
       com.redhat.component="$NAME" \
       name="$FGC/$NAME" \
       version="$VERSION" \
-      architecture="$ARCH" \
       usage="docker run -d -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -p 3306:3306 $FGC/$NAME" \
       maintainer="SoftwareCollections.org "
@@ -40,17 +39,13 @@ EXPOSE 3306
 # This image must forever use UID 27 for mysql user so our volumes are
 # safe in the future. This should *never* change, the last test is there
 # to make sure of that.
-RUN INSTALL_PKGS="rsync tar gettext hostname bind-utils groff-base shadow-utils mariadb-server policycoreutils" && \
+RUN INSTALL_PKGS="rsync tar gettext hostname bind-utils groff-base shadow-utils mariadb mariadb-server policycoreutils" && \
     dnf install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \
     rpm -V $INSTALL_PKGS && \
     dnf clean all && \
     mkdir -p /var/lib/mysql/data && chown -R mysql.0 /var/lib/mysql && \
     test "$(id mysql)" = "uid=27(mysql) gid=27(mysql) groups=27(mysql)"
-# On Fedora, we fake missing python binary. In case user installs the python2
-# in the container, this hack will be removed by installing /usr/bin/python from RPM.
-RUN ln -s /usr/bin/python3 /usr/bin/python
-
 # Get prefix path and path to scripts rather than hard-code them in scripts
 ENV CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/mysql \
     MYSQL_PREFIX=/usr
diff --git a/root-common/etc/my.cnf b/root-common/etc/my.cnf
index 0844075..bfdfbe9 100644
--- a/root-common/etc/my.cnf
+++ b/root-common/etc/my.cnf
@@ -9,4 +9,7 @@ skip_name_resolve
 # http://www.chriscalender.com/ignoring-the-lostfound-directory-in-your-datadir/
 ignore-db-dir=lost+found
+# GlusterFS equivalent of 'lost+found'
+ignore-db-dir=.trashcan
+
 !includedir /etc/my.cnf.d
diff --git a/root-common/usr/bin/run-mysqld b/root-common/usr/bin/run-mysqld
index 7ffd49e..2aa2fa3 100755
--- a/root-common/usr/bin/run-mysqld
+++ b/root-common/usr/bin/run-mysqld
@@ -3,6 +3,9 @@
 export_vars=$(cgroup-limits); export $export_vars
 source ${CONTAINER_SCRIPTS_PATH}/common.sh
 set -eu
+if [[ -v DEBUG_IGNORE_SCRIPT_FAILURES ]]; then
+  set +e
+fi
 export_setting_variables
diff --git a/root-common/usr/bin/run-mysqld-master b/root-common/usr/bin/run-mysqld-master
index c15444d..5550cb7 100755
--- a/root-common/usr/bin/run-mysqld-master
+++ b/root-common/usr/bin/run-mysqld-master
@@ -2,9 +2,13 @@
 #
 # This is an entrypoint that runs the MySQL server in the 'master' mode.
 #
+
 export_vars=$(cgroup-limits); export $export_vars
 source ${CONTAINER_SCRIPTS_PATH}/common.sh
 set -eu
+if [[ -v DEBUG_IGNORE_SCRIPT_FAILURES ]]; then
+  set +e
+fi
 export_setting_variables
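The DEBUG_IGNORE_SCRIPT_FAILURES switch added to the entrypoints above only needs to be present in the environment, because [[ -v ... ]] tests for existence rather than value. A minimal sketch of a debugging run; the image name is illustrative:

  # keep the entrypoint running past failing init steps so the container can be inspected
  docker run -d -e DEBUG_IGNORE_SCRIPT_FAILURES=1 \
    -e MYSQL_USER=user -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db \
    centos/mariadb-102-centos7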
diff --git a/root-common/usr/bin/run-mysqld-slave b/root-common/usr/bin/run-mysqld-slave
index 59f0161..8a710ba 100755
--- a/root-common/usr/bin/run-mysqld-slave
+++ b/root-common/usr/bin/run-mysqld-slave
@@ -2,19 +2,18 @@
 #
 # This is an entrypoint that runs the MySQL server in the 'slave' mode.
 #
+
 export_vars=$(cgroup-limits); export $export_vars
 source ${CONTAINER_SCRIPTS_PATH}/common.sh
 set -eu
+if [[ -v DEBUG_IGNORE_SCRIPT_FAILURES ]]; then
+  set +e
+fi
 export_setting_variables
 log_volume_info $MYSQL_DATADIR
-# Just run normal server if the data directory is already initialized
-if [ -d "${MYSQL_DATADIR}/mysql" ]; then
-  exec /usr/bin/run-mysqld "$@"
-fi
-
 export MYSQL_RUNNING_AS_SLAVE=1
 # Generate the unique 'server-id' for this master
@@ -24,35 +23,38 @@ log_info "The 'slave' server-id is ${MYSQL_SERVER_ID}"
 # pre-init files
 process_extending_files ${APP_DATA}/mysql-pre-init/ ${CONTAINER_SCRIPTS_PATH}/pre-init/
-# Initialize MySQL database and wait for the MySQL master to accept
-# connections.
-initialize_database "$@"
-wait_for_mysql_master
+if [ ! -e "${MYSQL_DATADIR}/mysql" ]; then
+  # Initialize MySQL database and wait for the MySQL master to accept
+  # connections.
+  initialize_database "$@"
+  wait_for_mysql_master
-# Get binlog file and position from master
-STATUS_INFO=$(mysql --host "$MYSQL_MASTER_SERVICE_NAME" "-u${MYSQL_MASTER_USER}" "-p${MYSQL_MASTER_PASSWORD}" replication -e 'SELECT gtid from replication limit 1\G')
-GTID_VALUE=$(echo "$STATUS_INFO" | grep 'gtid:' | head -n 1 | sed -e 's/^\s*gtid: //')
+  # Get binlog file and position from master
+  STATUS_INFO=$(mysql --host "$MYSQL_MASTER_SERVICE_NAME" "-u${MYSQL_MASTER_USER}" "-p${MYSQL_MASTER_PASSWORD}" replication -e 'SELECT gtid from replication limit 1\G')
+  GTID_VALUE=$(echo "$STATUS_INFO" | grep 'gtid:' | head -n 1 | sed -e 's/^\s*gtid: //')
-# checking STATUS_INFO here because empty GTID_VALUE is valid value
-if [ -z "${STATUS_INFO}" ] ; then
-  echo "Could not read GTID value from master"
-  exit 1
-fi
+  # checking STATUS_INFO here because empty GTID_VALUE is valid value
+  if [ -z "${STATUS_INFO}" ] ; then
+    echo "Could not read GTID value from master"
+    exit 1
+  fi
-mysql $mysql_flags <&1
+  --report-host=$(hostname -I) "$@" 2>&1
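For context, the slave entrypoint above is driven by the MYSQL_MASTER_* variables it references; a sketch of starting a replica container (service name, credentials and image are illustrative):

  docker run -d -e MYSQL_MASTER_SERVICE_NAME=mariadb-master \
    -e MYSQL_MASTER_USER=master -e MYSQL_MASTER_PASSWORD=pass \
    -e MYSQL_DATABASE=db -e MYSQL_USER=user -e MYSQL_PASSWORD=pass \
    centos/mariadb-102-centos7 run-mysqld-slave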
diff --git a/root-common/usr/share/container-scripts/mysql/common.sh b/root-common/usr/share/container-scripts/mysql/common.sh
index 950eac1..f214017 100644
--- a/root-common/usr/share/container-scripts/mysql/common.sh
+++ b/root-common/usr/share/container-scripts/mysql/common.sh
@@ -39,6 +39,7 @@ function export_setting_variables() {
     export MYSQL_INNODB_LOG_FILE_SIZE=${MYSQL_INNODB_LOG_FILE_SIZE:-$((MEMORY_LIMIT_IN_BYTES*15/1024/1024/100))M}
     export MYSQL_INNODB_LOG_BUFFER_SIZE=${MYSQL_INNODB_LOG_BUFFER_SIZE:-$((MEMORY_LIMIT_IN_BYTES*15/1024/1024/100))M}
   fi
+  export MYSQL_DATADIR_ACTION=${MYSQL_DATADIR_ACTION:-upgrade-warn}
 }
 # this stores whether the database was initialized from empty datadir
@@ -63,9 +64,9 @@ function unset_env_vars() {
 function wait_for_mysql() {
   pid=$1 ; shift
-  while [ true ]; do
+  while true; do
     if [ -d "/proc/$pid" ]; then
-      mysqladmin --socket=/tmp/mysql.sock ping &>/dev/null && log_info "MySQL started successfully" && return 0
+      mysqladmin $admin_flags ping &>/dev/null && log_info "MySQL started successfully" && return 0
     else
       return 1
     fi
@@ -95,10 +96,14 @@ function initialize_database() {
   log_info 'Initializing database ...'
   log_info 'Running mysql_install_db ...'
   # Using --rpm since we need mysql_install_db to behave as in RPM
-  # Using empty --basedir to work-around https://bugzilla.redhat.com/show_bug.cgi?id=1406391
-  mysql_install_db --rpm --datadir=$MYSQL_DATADIR --basedir=''
+  mysql_install_db --rpm --datadir=$MYSQL_DATADIR
   start_local_mysql "$@"
+  # Running mysql_upgrade creates the mysql_upgrade_info file in the data dir,
+  # which is necessary to detect which version of the mysqld daemon created the data.
+  # Checking an empty file should not take longer than a second, and one extra check should not hurt.
+  mysql_upgrade ${admin_flags}
+
   if [ -v MYSQL_RUNNING_AS_SLAVE ]; then
     log_info 'Initialization finished'
     return 0
@@ -162,7 +167,7 @@ EOSQL
 # into the number.
 # See: https://dev.mysql.com/doc/refman/en/replication-options.html#option_mysqld_server-id
 function server_id() {
-  checksum=$(sha256sum <<< $(hostname -i))
+  checksum=$(sha256sum <<< $(hostname -I))
   checksum=${checksum:0:14}
   echo -n $((0x${checksum}%4294967295))
 }
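To see what server_id() produces, here is a worked sketch run outside the container; the address is illustrative, and inside a pod hostname -I prints the container's own IP, so each replica derives a stable, distinct id:

  ip='10.128.2.15 '                     # sample `hostname -I` output, illustrative
  checksum=$(sha256sum <<< "$ip")       # hash the address
  checksum=${checksum:0:14}             # keep the first 14 hex digits
  echo $((0x${checksum} % 4294967295))  # a stable number within the 32-bit server-id range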
@@ -223,3 +228,58 @@ function process_extending_config_files() {
     fi
   done <<<"$(get_matched_files "$custom_dir" "$default_dir" '*.cnf' | sort -u)"
 }
+
+# Converts string version to the integer format (5.5.33 is converted to 505,
+# 10.1.23-MariaDB is converted into 1001, etc.)
+function version2number() {
+  local version_major=$(echo "$1" | grep -o -e '^[0-9]*\.[0-9]*')
+  printf %d%02d ${version_major%%.*} ${version_major##*.}
+}
+
+# Converts the version in format of an integer into major.minor
+function number2version() {
+  local numver=${1}
+  echo $((numver / 100)).$((numver % 100))
+}
+
+# Prints version of the mysqld that is currently available (string)
+function mysqld_version() {
+  ${MYSQL_PREFIX}/libexec/mysqld -V | awk '{print $3}'
+}
+
+# Returns version from the daemon in integer format
+function mysqld_compat_version() {
+  version2number $(mysqld_version)
+}
+
+# Returns version from the datadir in the integer format
+function get_datadir_version() {
+  local datadir="$1"
+  local upgrade_info_file=$(get_mysql_upgrade_info_file "$datadir")
+  [ -r "$upgrade_info_file" ] || return
+  local version_text=$(cat "$upgrade_info_file" | head -n 1)
+  version2number "${version_text}"
+}
+
+# Returns name of the file in the datadir that holds version information about the data
+function get_mysql_upgrade_info_file() {
+  local datadir="$1"
+  echo "$datadir/mysql_upgrade_info"
+}
+
+# Writes version string of the daemon into mysql_upgrade_info file
+# (should be only used when the file is missing and only during limited time;
+# once most deployments include this version file, we should leave it on
+# scripts to generate the file right after initialization or when upgrading)
+function write_mysql_upgrade_info_file() {
+  local datadir="$1"
+  local version=$(mysqld_version)
+  local upgrade_info_file=$(get_mysql_upgrade_info_file "$datadir")
+  if [ -f "$datadir/mysql_upgrade_info" ] ; then
+    echo "File ${upgrade_info_file} exists, nothing is done."
+  else
+    log_info "Storing version '${version}' information into the data dir '${upgrade_info_file}'"
+    echo "${version}" > "${upgrade_info_file}"
+  fi
+}
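A quick sanity check of the conversion helpers above; the outputs follow directly from the printf format in version2number and the arithmetic in number2version:

  version2number "5.5.33"           # prints 505
  version2number "10.1.23-MariaDB"  # prints 1001
  number2version 1002               # prints 10.2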
$(upstream_upgrade_info)" + write_mysql_upgrade_info_file "${MYSQL_DATADIR}" + continue + # This is currently a dead-code, but should be enabled after the mysql_upgrade_info + # file gets to the deployments (after few months most of the deployments should already have the file) + log_warn "Version of the data could not be determined."\ + "Running such a container is risky."\ + "The current daemon version is ${mysqld_version_dot}."\ + "If you are not sure whether the data directory is compatible with the current"\ + "version ${mysqld_version_dot}, restore the data from a back-up."\ + "If restoring from a back-up is not possible, create a file 'mysql_upgrade_info'"\ + "that includes version information (${mysqld_version_dot} in this case) in the root"\ + "of the data directory."\ + "In order to create the 'mysql_upgrade_info' file, either run this container with"\ + "the MYSQL_DATADIR_ACTION environment variable set to 'force', or run 'mysql_upgrade' utility"\ + "manually; the mysql_upgrade tool checks the tables and creates such a file as well."\ + "That will enable correct upgrade check in the future. $(upstream_upgrade_info)" + fi + + if [ "${datadir_version}" -eq "${mysqld_version}" ] ; then + log_info "MySQL server version check passed, both server and data directory"\ + "are version ${mysqld_version_dot}." + continue + fi + + if [ $(( ${datadir_version} + 1 )) -eq "${mysqld_version}" -o "${datadir_version}" -eq 505 -a "${mysqld_version}" -eq 1000 ] ; then + log_warn "MySQL server is version ${mysqld_version_dot} and datadir is version"\ + "${datadir_version_dot}, which is a compatible combination." + if [ "${MYSQL_DATADIR_ACTION}" == 'upgrade-auto' ] ; then + log_info "The data directory will be upgraded automatically from ${datadir_version_dot}"\ + "to version ${mysqld_version_dot}. $(upstream_upgrade_info)" + log_and_run mysql_upgrade ${mysql_flags} + else + log_warn "Automatic upgrade is not turned on, proceed with the upgrade."\ + "In order to upgrade the data directory, run this container with the MYSQL_DATADIR_ACTION"\ + "environment variable set to 'upgrade-auto' or run mysql_upgrade manually. $(upstream_upgrade_info)" + fi + else + log_warn "MySQL server is version ${mysqld_version_dot} and datadir is version"\ + "${datadir_version_dot}, which are incompatible. Remember, that upgrade is only supported"\ + "by upstream from previous version and it is not allowed to skip versions. $(upstream_upgrade_info)" + if [ "${datadir_version}" -gt "${mysqld_version}" ] ; then + log_warn "Downgrading to the lower version is not supported. Consider"\ + "dumping data and load them again into a fresh instance. $(upstream_upgrade_info)" + fi + log_warn "Consider restoring the database from a back-up. To ignore this"\ + "warning, set 'MYSQL_DATADIR_ACTION' variable to 'upgrade-force', but this may result in data corruption. $(upstream_upgrade_info)" + return 1 + fi + ;; + + upgrade-force) + log_and_run mysql_upgrade ${mysql_flags} --force + ;; + + optimize) + log_and_run mysqlcheck ${mysql_flags} --optimize --all-databases --force + ;; + + analyze) + log_and_run mysqlcheck ${mysql_flags} --analyze --all-databases --force + ;; + + disable) + log_info "Nothing is done about the data directory." + ;; + *) + log_warn "Unknown value of MYSQL_DATADIR_ACTION variable: '${MYSQL_DATADIR_ACTION}', ignoring." 
diff --git a/root-common/usr/share/container-scripts/mysql/pre-init/20-validate-variables.sh b/root-common/usr/share/container-scripts/mysql/pre-init/20-validate-variables.sh
index 0ed2184..772151c 100644
--- a/root-common/usr/share/container-scripts/mysql/pre-init/20-validate-variables.sh
+++ b/root-common/usr/share/container-scripts/mysql/pre-init/20-validate-variables.sh
@@ -76,7 +76,6 @@ function validate_variables() {
   fi
 }
-
 if ! [ -v MYSQL_RUNNING_AS_SLAVE ] ; then
   validate_variables
 fi
diff --git a/root/help.1 b/root/help.1
new file mode 100644
index 0000000..f5b80f0
--- /dev/null
+++ b/root/help.1
@@ -0,0 +1,439 @@
+.TH MariaDB 10.2 SQL Database Server Docker image
+.PP
+This container image includes MariaDB 10.2 SQL database server for OpenShift and general usage.
+Users can choose between RHEL and CentOS based images.
+The RHEL image is available in the Red Hat Container Catalog
+\[la]https://access.redhat.com/containers/#/registry.access.redhat.com/rhscl/mariadb-102-rhel7\[ra]
+as registry.access.redhat.com/rhscl/mariadb\-102\-rhel7.
+The CentOS image is then available on Docker Hub
+\[la]https://hub.docker.com/r/centos/mariadb-102-centos7/\[ra]
+as centos/mariadb\-102\-centos7.
+
+.SH Description
+.PP
+This container image provides a containerized packaging of the MariaDB mysqld daemon
+and client application. The mysqld server daemon accepts connections from clients
+and provides access to content from MySQL databases on behalf of the clients.
+You can find more information on the MariaDB project at the project Web site
+(
+\[la]https://mariadb.org/\[ra]).
+
+.SH Usage
+.PP
+For this, we will assume that you are using the MariaDB 10.2 container image from the
+Red Hat Container Catalog called \fB\fCrhscl/mariadb\-102\-rhel7\fR\&.
+If you want to set only the mandatory environment variables and not store
+the database in a host directory, execute the following command:
+
+.PP
+.RS
+
+.nf
+$ docker run \-d \-\-name mariadb\_database \-e MYSQL\_USER=user \-e MYSQL\_PASSWORD=pass \-e MYSQL\_DATABASE=db \-p 3306:3306 rhscl/mariadb\-102\-rhel7
+
+.fi
+.RE
+
+.PP
+This will create a container named \fB\fCmariadb\_database\fR running MySQL with database
+\fB\fCdb\fR and user with credentials \fB\fCuser:pass\fR\&. Port 3306 will be exposed and mapped
+to the host. If you want your database to be persistent across container executions,
+also add a \fB\fC\-v /host/db/path:/var/lib/mysql/data\fR argument. This will be the MySQL
+data directory.
+
+.PP
+If the database directory is not initialized, the entrypoint script will first
+run \fB\fCmysql\_install\_db\fR
+\[la]https://dev.mysql.com/doc/refman/5.6/en/mysql-install-db.html\[ra]
+and set up the necessary database users and passwords. After the database is initialized,
+or if it was already present, \fB\fCmysqld\fR is executed and will run as PID 1. You can
+stop the detached container by running \fB\fCdocker stop mariadb\_database\fR\&.
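Building on the usage example above, the persistent variant mentioned in the text would look like this; the host path is illustrative, and the :Z suffix relabels the volume for SELinux, matching how the test suite later in this diff mounts data directories:

  $ docker run -d --name mariadb_database -e MYSQL_USER=user -e MYSQL_PASSWORD=pass \
      -e MYSQL_DATABASE=db -p 3306:3306 -v /host/db/path:/var/lib/mysql/data:Z rhscl/mariadb-102-rhel7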
+.SH Environment variables and volumes
+.PP
+The image recognizes the following environment variables that you can set during
+initialization by passing \fB\fC\-e VAR=VALUE\fR to the Docker run command.
+
+.PP
+\fB\fB\fCMYSQL\_USER\fR\fP
+.br
+       User name for MySQL account to be created
+
+.PP
+\fB\fB\fCMYSQL\_PASSWORD\fR\fP
+.br
+       Password for the user account
+
+.PP
+\fB\fB\fCMYSQL\_DATABASE\fR\fP
+.br
+       Database name
+
+.PP
+\fB\fB\fCMYSQL\_ROOT\_PASSWORD\fR\fP
+.br
+       Password for the root user (optional)
+
+.PP
+The following environment variables influence the MySQL configuration file. They are all optional.
+
+.PP
+\fB\fB\fCMYSQL\_LOWER\_CASE\_TABLE\_NAMES (default: 0)\fR\fP
+.br
+       Sets how the table names are stored and compared
+
+.PP
+\fB\fB\fCMYSQL\_MAX\_CONNECTIONS (default: 151)\fR\fP
+.br
+       The maximum permitted number of simultaneous client connections
+
+.PP
+\fB\fB\fCMYSQL\_MAX\_ALLOWED\_PACKET (default: 200M)\fR\fP
+.br
+       The maximum size of one packet or any generated/intermediate string
+
+.PP
+\fB\fB\fCMYSQL\_FT\_MIN\_WORD\_LEN (default: 4)\fR\fP
+.br
+       The minimum length of the word to be included in a FULLTEXT index
+
+.PP
+\fB\fB\fCMYSQL\_FT\_MAX\_WORD\_LEN (default: 20)\fR\fP
+.br
+       The maximum length of the word to be included in a FULLTEXT index
+
+.PP
+\fB\fB\fCMYSQL\_AIO (default: 1)\fR\fP
+.br
+       Controls the \fB\fCinnodb\_use\_native\_aio\fR setting value in case the native AIO is broken. See
+\[la]http://help.directadmin.com/item.php?id=529\[ra]
+
+.PP
+\fB\fB\fCMYSQL\_TABLE\_OPEN\_CACHE (default: 400)\fR\fP
+.br
+       The number of open tables for all threads
+
+.PP
+\fB\fB\fCMYSQL\_KEY\_BUFFER\_SIZE (default: 32M or 10% of available memory)\fR\fP
+.br
+       The size of the buffer used for index blocks
+
+.PP
+\fB\fB\fCMYSQL\_SORT\_BUFFER\_SIZE (default: 256K)\fR\fP
+.br
+       The size of the buffer used for sorting
+
+.PP
+\fB\fB\fCMYSQL\_READ\_BUFFER\_SIZE (default: 8M or 5% of available memory)\fR\fP
+.br
+       The size of the buffer used for a sequential scan
+
+.PP
+\fB\fB\fCMYSQL\_INNODB\_BUFFER\_POOL\_SIZE (default: 32M or 50% of available memory)\fR\fP
+.br
+       The size of the buffer pool where InnoDB caches table and index data
+
+.PP
+\fB\fB\fCMYSQL\_INNODB\_LOG\_FILE\_SIZE (default: 8M or 15% of available memory)\fR\fP
+.br
+       The size of each log file in a log group
+
+.PP
+\fB\fB\fCMYSQL\_INNODB\_LOG\_BUFFER\_SIZE (default: 8M or 15% of available memory)\fR\fP
+.br
+       The size of the buffer that InnoDB uses to write to the log files on disk
+
+.PP
+\fB\fB\fCMYSQL\_DEFAULTS\_FILE (default: /etc/my.cnf)\fR\fP
+.br
+       Point to an alternative configuration file
+
+.PP
+\fB\fB\fCMYSQL\_BINLOG\_FORMAT (default: statement)\fR\fP
+.br
+       Sets the binlog format; supported values are \fB\fCrow\fR and \fB\fCstatement\fR
+
+.PP
+\fB\fB\fCMYSQL\_LOG\_QUERIES\_ENABLED (default: 0)\fR\fP
+.br
+       To enable query logging set this to \fB\fC1\fR
+
+.PP
+You can also set the following mount points by passing the \fB\fC\-v /host:/container\fR flag to Docker.
+
+.PP
+\fB\fB\fC/var/lib/mysql/data\fR\fP
+.br
+       MySQL data directory
+
+.PP
+\fBNotice: When mounting a directory from the host into the container, ensure that the mounted
+directory has the appropriate permissions and that the owner and group of the directory
+match the UID or name of the user running inside the container.\fP
+
+.SH MariaDB auto\-tuning
+.PP
+When the MySQL image is run with the \fB\fC\-\-memory\fR parameter set and you didn't
+specify a value for some parameters, their values will be automatically
+calculated based on the available memory.
+
+.PP
+\fB\fB\fCMYSQL\_KEY\_BUFFER\_SIZE (default: 10%)\fR\fP
+.br
+       \fB\fCkey\_buffer\_size\fR
+
+.PP
+\fB\fB\fCMYSQL\_READ\_BUFFER\_SIZE (default: 5%)\fR\fP
+.br
+       \fB\fCread\_buffer\_size\fR
+
+.PP
+\fB\fB\fCMYSQL\_INNODB\_BUFFER\_POOL\_SIZE (default: 50%)\fR\fP
+.br
+       \fB\fCinnodb\_buffer\_pool\_size\fR
+
+.PP
+\fB\fB\fCMYSQL\_INNODB\_LOG\_FILE\_SIZE (default: 15%)\fR\fP
+.br
+       \fB\fCinnodb\_log\_file\_size\fR
+
+.PP
+\fB\fB\fCMYSQL\_INNODB\_LOG\_BUFFER\_SIZE (default: 15%)\fR\fP
+.br
+       \fB\fCinnodb\_log\_buffer\_size\fR
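A sketch of how the auto-tuning just described kicks in; with a 512 MB cgroup limit the percentage-based defaults are derived from that limit instead of the static values:

  $ docker run -d --memory=512m -e MYSQL_USER=user -e MYSQL_PASSWORD=pass \
      -e MYSQL_DATABASE=db rhscl/mariadb-102-rhel7
  # e.g. innodb_buffer_pool_size defaults to ~50% of 512M, key_buffer_size to ~10%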
+.SH MySQL root user
+.PP
+The root user has no password set by default, only allowing local connections.
+You can set it by setting the \fB\fCMYSQL\_ROOT\_PASSWORD\fR environment variable. This
+will allow you to log in to the root account remotely. Local connections will
+still not require a password.
+
+.PP
+To disable remote root access, simply unset \fB\fCMYSQL\_ROOT\_PASSWORD\fR and restart
+the container.
+
+.SH Changing passwords
+.PP
+Since passwords are part of the image configuration, the only supported method
+to change passwords for the database user (\fB\fCMYSQL\_USER\fR) and root user is by
+changing the environment variables \fB\fCMYSQL\_PASSWORD\fR and \fB\fCMYSQL\_ROOT\_PASSWORD\fR,
+respectively.
+
+.PP
+Changing database passwords through SQL statements or any way other than through
+the aforementioned environment variables will cause a mismatch between the
+values stored in the variables and the actual passwords. Whenever a database
+container starts, it will reset the passwords to the values stored in the
+environment variables.
+
+.SH Default my.cnf file
+.PP
+Environment variables allow us to customize many parameters of the mysql
+bootstrap configuration. If you'd prefer to use
+your own configuration file, you can override the \fB\fCMYSQL\_DEFAULTS\_FILE\fR env
+variable with the full path of the file you wish to use. For example, the default
+location is \fB\fC/etc/my.cnf\fR but you can change it to \fB\fC/etc/mysql/my.cnf\fR by setting
+\fB\fCMYSQL\_DEFAULTS\_FILE=/etc/mysql/my.cnf\fR\&.
+
+.SH Extending image
+.PP
+This image can be extended using source\-to\-image
+\[la]https://github.com/openshift/source-to-image\[ra]\&.
+
+.PP
+For example, to build a customized MariaDB database image \fB\fCmy\-mariadb\-rhel7\fR
+with a configuration in \fB\fC\~/image\-configuration/\fR run:
+
+.PP
+.RS
+
+.nf
+$ s2i build \~/image\-configuration/ rhscl/mariadb\-102\-rhel7 my\-mariadb\-rhel7
+
+.fi
+.RE
+
+.PP
+The directory passed to \fB\fCs2i build\fR can contain these directories:
+
+.PP
+\fB\fCmysql\-cfg/\fR
+       When starting the container, files from this directory will be used as
+       a configuration for the \fB\fCmysqld\fR daemon.
+       The \fB\fCenvsubst\fR command is run on these files to still allow customization of
+       the image using environment variables.
+
+.PP
+\fB\fCmysql\-pre\-init/\fR
+       Shell scripts (\fB\fC*.sh\fR) available in this directory are sourced before the
+       \fB\fCmysqld\fR daemon is started.
+
+.PP
+\fB\fCmysql\-init/\fR
+       Shell scripts (\fB\fC*.sh\fR) available in this directory are sourced when the
+       \fB\fCmysqld\fR daemon is started locally. In this phase, use \fB\fC${mysql\_flags}\fR
+       to connect to the locally running daemon, for example \fB\fCmysql $mysql\_flags < dump.sql\fR
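As an illustration of the mysql-init hook just described, a hypothetical script such as mysql-init/80-add-table.sh could be provided to s2i; the file name and SQL are made up for this sketch:

  # sourced while mysqld runs locally during initialization;
  # $mysql_flags already points the client at that daemon
  mysql $mysql_flags <<'EOSQL'
  CREATE TABLE IF NOT EXISTS db.greetings (id INT PRIMARY KEY, msg VARCHAR(64));
  EOSQL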
+.PP
+Variables that can be used in the scripts provided to s2i:
+
+.PP
+\fB\fC$mysql\_flags\fR
+       arguments for the \fB\fCmysql\fR tool that will connect to the locally running \fB\fCmysqld\fR during initialization
+
+.PP
+\fB\fC$MYSQL\_RUNNING\_AS\_MASTER\fR
+       variable defined when the container is run with the \fB\fCrun\-mysqld\-master\fR command
+
+.PP
+\fB\fC$MYSQL\_RUNNING\_AS\_SLAVE\fR
+       variable defined when the container is run with the \fB\fCrun\-mysqld\-slave\fR command
+
+.PP
+\fB\fC$MYSQL\_DATADIR\_FIRST\_INIT\fR
+       variable defined when the container was initialized from the empty data dir
+
+.PP
+During \fB\fCs2i build\fR, all provided files are copied into the \fB\fC/opt/app\-root/src\fR
+directory in the resulting image. If some configuration files are present
+in the destination directory, files with the same name are overwritten.
+Only one file with the same name can be used for customization, and user\-provided
+files are preferred over the default files in
+\fB\fC/usr/share/container\-scripts/mysql/\fR, so it is possible to overwrite them.
+
+.PP
+The same configuration directory structure can be used to customize the image
+every time the image is started using \fB\fCdocker run\fR\&. The directory has to be
+mounted into \fB\fC/opt/app\-root/src/\fR in the image
+(\fB\fC\-v ./image\-configuration/:/opt/app\-root/src/\fR).
+This overwrites the customization built into the image.
+
+.SH Securing the connection with SSL
+.PP
+In order to secure the connection with SSL, use the extending feature described
+above. In particular, put the SSL certificates into a separate directory:
+
+.PP
+.RS
+
+.nf
+sslapp/mysql\-certs/server\-cert\-selfsigned.pem
+sslapp/mysql\-certs/server\-key.pem
+
+.fi
+.RE
+
+.PP
+And then put a separate configuration file into mysql\-cfg:
+
+.PP
+.RS
+
+.nf
+$> cat sslapp/mysql\-cfg/ssl.cnf
+[mysqld]
+ssl\-key=${APP\_DATA}/mysql\-certs/server\-key.pem
+ssl\-cert=${APP\_DATA}/mysql\-certs/server\-cert\-selfsigned.pem
+
+.fi
+.RE
+
+.PP
+Such a directory \fB\fCsslapp\fR can then be mounted into the container with \-v,
+or a new container image can be built using s2i.
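To verify from a client that SSL is actually negotiated with this setup, a check along these lines can be used; the host, credentials and CA path are illustrative, and the test suite later in this diff performs a similar query:

  mysql --host 172.17.0.2 -uuser -ppass \
    --ssl-ca=sslapp/mysql-certs/server-cert-selfsigned.pem \
    -e 'show status like "Ssl_cipher" \G' db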
This so called in\-place upgrade is generally +faster for large data directory, but only possible if upgrading from the very previous version, +so skipping versions is not supported. + +.PP +This container detects whether the data needs to be upgraded using \fB\fCmysql\_upgrade\fR and +we can control it by setting \fB\fCMYSQL\_DATADIR\_ACTION\fR variable, which can have one or more of the following values: +.IP \(bu 2 +\fB\fCupgrade\-warn\fR \-\- If the data version can be determined and the data come from a different version +of the daemon, a warning is printed but the container starts. This is the default value. +Since historically the version file \fB\fCmysql\_upgrade\_info\fR was not created, when using this option, +the version file is created if not exist, but no \fB\fCmysql\_upgrade\fR will be called. +However, this automatic creation will be removed after few months, since the version should be +created on most deployments at that point. +.IP \(bu 2 +\fB\fCupgrade\-auto\fR \-\- \fB\fCmysql\_upgrade\fR is run at the beginning of the container start, when the local +daemon is running, but only if the data version can be determined and the data come +with the very previous version. A warning is printed if the data come from even older +or newer version. This value effectively enables automatic upgrades, +but it is always risky and users should still back\-up all the data before starting the newer container. +Set this option only if you have very good back\-ups at any moment and you are fine to fail\-over +from the back\-up. +.IP \(bu 2 +\fB\fCupgrade\-force\fR \-\- \fB\fCmysql\_upgrade \-\-force\fR is run at the beginning of the container start, when the local +daemon is running, no matter what version of the daemon the data come from. +This is also the way to create the missing version file \fB\fCmysql\_upgrade\_info\fR if not present +in the root of the data directory; this file holds information about the version of the data. + +.PP +There are also some other actions that you may want to run at the beginning of the container start, +when the local daemon is running, no matter what version of the data is detected: +.IP \(bu 2 +\fB\fCoptimize\fR \-\- runs \fB\fCmysqlcheck \-\-optimize\fR\&. It optimizes all the tables. +.IP \(bu 2 +\fB\fCanalyze\fR \-\- runs \fB\fCmysqlcheck \-\-analyze\fR\&. It analyzes all the tables. +.IP \(bu 2 +\fB\fCdisable\fR \-\- nothing is done regarding data directory version. + +.PP +Multiple values are separated by comma and run in\-order, e.g. \fB\fCMYSQL\_DATADIR\_ACTION="optimize,analyze"\fR\&. + +.SH Changing the replication binlog\_format +.PP +Some applications may wish to use \fB\fCrow\fR binlog\_formats (for example, those built + with change\-data\-capture in mind). The default replication/binlog format is + \fB\fCstatement\fR but to change it you can set the \fB\fCMYSQL\_BINLOG\_FORMAT\fR environment + variable. For example \fB\fCMYSQL\_BINLOG\_FORMAT=row\fR\&. Now when you run the database + with \fB\fCmaster\fR replication turned on (ie, set the Docker/container \fB\fCcmd\fR to be +\fB\fCrun\-mysqld\-master\fR) the binlog will emit the actual data for the rows that change +as opposed to the statements (ie, DML like insert...) that caused the change. + +.SH Troubleshooting +.PP +The mysqld deamon in the container logs to the standard output, so the log is available in the container log. 
+.SH Troubleshooting
+.PP
+The mysqld daemon in the container logs to the standard output, so the log is available in the container log.
+The log can be examined by running:
+
+.PP
+.RS
+
+.nf
+docker logs <container>
+
+.fi
+.RE
+
+.SH See also
+.PP
+Dockerfile and other sources for this container image are available on
+
+\[la]https://github.com/sclorg/mariadb-container\[ra]\&.
+In that repository, the Dockerfile for CentOS is called Dockerfile, and the Dockerfile
+for RHEL is called Dockerfile.rhel7.
diff --git a/root/usr/share/container-scripts/mysql/README.md b/root/usr/share/container-scripts/mysql/README.md
index dcc31d8..19fc851 100644
--- a/root/usr/share/container-scripts/mysql/README.md
+++ b/root/usr/share/container-scripts/mysql/README.md
@@ -260,6 +260,60 @@ Such a directory `sslapp` can then be mounted into the container with -v,
 or a new container image can be built using s2i.
+Upgrading and data directory version checking
+---------------------------------------------
+
+MySQL and MariaDB use versions that consist of three numbers X.Y.Z (e.g. 5.6.23).
+For version changes in the Z part, the server's binary data format stays compatible and thus no
+special upgrade procedure is needed. For upgrades from X.Y to X.Y+1, consider doing the manual
+steps as described at
+https://mariadb.com/kb/en/library/upgrading-from-mariadb-101-to-mariadb-102/
+
+Skipping versions like from X.Y to X.Y+2 or downgrading to a lower version is not supported;
+the only exception is upgrading from MariaDB 5.5 to MariaDB 10.0.
+
+**Important**: Upgrading to a new version is always risky and users are expected to make a full
+back-up of all data beforehand.
+
+A safer solution to upgrade is to dump all data using `mysqldump` or `mysqldbexport` and then
+load the data using `mysql` or `mysqldbimport` into an empty (freshly initialized) database.
+
+Another way of proceeding with the upgrade is starting the new version of the `mysqld` daemon
+and running `mysql_upgrade` right after the start. This so-called in-place upgrade is generally
+faster for large data directories, but it is only possible when upgrading from the immediately
+previous version; skipping versions is not supported.
+
+This container detects whether the data needs to be upgraded using `mysql_upgrade`, and
+this behavior is controlled by the `MYSQL_DATADIR_ACTION` variable, which can have one or more of the following values:
+
+ * `upgrade-warn` -- If the data version can be determined and the data come from a different version
+   of the daemon, a warning is printed but the container starts. This is the default value.
+   Since historically the version file `mysql_upgrade_info` was not created, when using this option
+   the version file is created if it does not exist, but `mysql_upgrade` is not called.
+   However, this automatic creation will be removed after a few months, since by then the version file
+   should already exist on most deployments.
+ * `upgrade-auto` -- `mysql_upgrade` is run at the beginning of the container start, when the local
+   daemon is running, but only if the data version can be determined and the data come
+   from the immediately previous version. A warning is printed if the data come from an even older
+   or a newer version. This value effectively enables automatic upgrades,
+   but it is always risky and users should still back up all the data before starting the newer container.
+   Set this option only if you have very good back-ups at any moment and you are prepared to fail over
+   to the back-up.
+ * `upgrade-force` -- `mysql_upgrade --force` is run at the beginning of the container start, when the local
+   daemon is running, no matter what version of the daemon the data come from.
+   This is also the way to create the missing version file `mysql_upgrade_info` if it is not present
+   in the root of the data directory; this file holds information about the version of the data.
+
+There are also some other actions that you may want to run at the beginning of the container start,
+when the local daemon is running, no matter what version of the data is detected:
+
+ * `optimize` -- runs `mysqlcheck --optimize`. It optimizes all the tables.
+ * `analyze` -- runs `mysqlcheck --analyze`. It analyzes all the tables.
+ * `disable` -- nothing is done regarding the data directory version.
+
+Multiple values are separated by commas and run in order, e.g. `MYSQL_DATADIR_ACTION="optimize,analyze"`.
+
+
 Changing the replication binlog_format
 --------------------------------------
 Some applications may wish to use `row` binlog_formats (for example, those built
diff --git a/test/mariadb-ephemeral-template.json b/test/mariadb-ephemeral-template.json
index c236fd8..27db3fd 100644
--- a/test/mariadb-ephemeral-template.json
+++ b/test/mariadb-ephemeral-template.json
@@ -246,7 +246,7 @@
     {
       "name": "MARIADB_VERSION",
       "displayName": "Version of MariaDB Image",
-      "description": "Version of MariaDB image to be used (10.1, 10.2 or latest).",
+      "description": "Version of MariaDB image to be used (10.2 or latest).",
       "value": "10.2",
       "required": true
     }
diff --git a/test/run b/test/run
index f42f9d6..7296cb2 100755
--- a/test/run
+++ b/test/run
@@ -22,6 +22,7 @@ run_replication_test
 run_doc_test
 run_s2i_test
 run_ssl_test
+run_upgrade_test
 "
 if [ -e "${IMAGE_NAME:-}" ] ; then
@@ -39,9 +40,9 @@ function cleanup() {
   ct_cleanup
   if [ $TESTSUITE_RESULT -eq 0 ] ; then
-    echo "Tests succeeded."
+    echo "Tests for ${IMAGE_NAME} succeeded."
   else
-    echo "Tests failed."
+    echo "Tests for ${IMAGE_NAME} failed."
   fi
 }
 trap cleanup EXIT SIGINT
@@ -50,7 +51,7 @@ function mysql_cmd() {
   local container_ip="$1"; shift
   local login="$1"; shift
   local password="$1"; shift
-  docker run --rm "$IMAGE_NAME" mysql --host "$container_ip" -u"$login" -p"$password" "$@" db
+  docker run --rm ${CONTAINER_EXTRA_ARGS:-} "$IMAGE_NAME" mysql --host "$container_ip" -u"$login" -p"$password" "$@" db
 }
 function test_connection() {
@@ -63,15 +64,30 @@ function test_connection() {
   local max_attempts=20
   local sleep_time=2
   local i
+  local status=''
+  echo -n "  Trying to connect..."
   for i in $(seq $max_attempts); do
-    echo "  Trying to connect..."
-    if mysql_cmd "$ip" "$login" "$password" <<< 'SELECT 1;'; then
+    local status=$(docker inspect -f '{{.State.Status}}' $(ct_get_cid "${name}"))
+    if [ "${status}" != 'running' ] ; then
+      break;
+    fi
+    echo -n "."
+    if mysql_cmd "$ip" "$login" "$password" &>/dev/null <<< 'SELECT 1;'; then
+      echo " OK"
       echo "  Success!"
       return 0
     fi
     sleep $sleep_time
   done
-  echo "  Giving up: Failed to connect. Logs:"
+  echo " FAIL"
+  echo "  Giving up: Failed to connect."
+  if [ "${status}" == 'running' ] ; then
+    echo "  Container is still running."
+  else
+    local exit_status=$(docker inspect -f '{{.State.ExitCode}}' ${name})
+    echo "  Container finished with exit code ${exit_status}."
+  fi
+  echo "Logs:"
   docker logs $(ct_get_cid $name)
   return 1
 }
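For context, the suite above is typically driven by exporting the image under test; the exact invocation convention is an assumption here:

  IMAGE_NAME=centos/mariadb-102-centos7 VERSION=10.2 ./test/run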
@@ -97,7 +113,7 @@ function create_container() {
   # create container with a cidfile in a directory for cleanup
   local container_id
   container_id="$(docker run ${DOCKER_ARGS:-} --cidfile $cidfile -d "$@" $IMAGE_NAME ${CONTAINER_ARGS:-})"
-  echo "Created container $container_id"
+  echo "  Created container $container_id"
 }
 function run_change_password_test() {
@@ -223,6 +239,7 @@ function assert_container_creation_fails() {
   if [ $ret -gt 30 ]; then
     return 1
   fi
+  echo "  Success!"
 }
 function try_image_invalid_combinations() {
@@ -371,23 +388,8 @@ function run_tests() {
 }
 run_doc_test() {
-  local tmpdir=$(mktemp -d)
-  local f
   echo "  Testing documentation in the container image"
-  # Extract the help.1 file from the container
-  docker run --rm ${IMAGE_NAME} /bin/bash -c "cat /help.1" >${tmpdir}/help.1
-  # Check whether the help.1 file includes some important information
-  for term in "MYSQL\_ROOT\_PASSWORD" volume 3306 ; do
-    if ! cat ${tmpdir}/help.1 | grep -F -q -e "${term}" ; then
-      echo "ERROR: File /help.1 does not include '${term}'."
-      return 1
-    fi
-  done
-  # Check whether the file uses the correct format
-  if ! file ${tmpdir}/help.1 | grep -q roff ; then
-    echo "ERROR: /help.1 is not in troff or groff format"
-    return 1
-  fi
+  ct_doc_content_old "MYSQL\_ROOT\_PASSWORD" volume 3306
   echo "  Success!"
   echo
 }
@@ -395,9 +397,9 @@
 _s2i_test_image() {
   local container_name="$1"
   local mount_opts="$2"
-  echo "    Testing s2i app image with invalid configuration"
+  echo "  Testing s2i app image with invalid configuration"
   assert_container_creation_fails -e MYSQL_USER=root -e MYSQL_PASSWORD=pass -e MYSQL_DATABASE=db -e MYSQL_ROOT_PASSWORD=pass
-  echo "    Testing s2i app image with correct configuration"
+  echo "  Testing s2i app image with correct configuration"
   create_container \
     "$container_name" \
     --env MYSQL_USER=config_test_user \
@@ -416,10 +418,10 @@
 run_s2i_test() {
   echo "  Testing s2i usage"
-  s2i usage ${s2i_args} ${IMAGE_NAME} &>/dev/null
+  ct_s2i_usage ${IMAGE_NAME} ${s2i_args} &>/dev/null
   echo "  Testing s2i build"
-  s2i build file://${test_dir}/test-app ${IMAGE_NAME} ${IMAGE_NAME}-testapp
+  ct_s2i_build_as_df file://${test_dir}/test-app ${IMAGE_NAME} ${IMAGE_NAME}-testapp
   local image_name_backup=${IMAGE_NAME}
   export IMAGE_NAME=${IMAGE_NAME}-testapp
@@ -456,6 +458,7 @@
 ssl-key=\${APP_DATA}/mysql-certs/server-key.pem
 ssl-cert=\${APP_DATA}/mysql-certs/server-cert-selfsigned.pem
 " >${test_app_dir}/mysql-cfg/ssl.cnf
   chown -R 27:27 ${test_app_dir}
+  local ca_cert_path="/opt/app-root/src/mysql-certs/server-cert-selfsigned.pem"
   create_container \
     "_s2i_test_ssl" \
@@ -466,14 +469,25 @@
   test_connection "_s2i_test_ssl" ssl_test_user ssl_test
   ip=$(ct_get_cip _s2i_test_ssl)
+
+  # At least MySQL 5.6 requires ssl-ca option on client side, otherwise the ssl is not used
+  CONTAINER_EXTRA_ARGS="-v ${test_app_dir}:/opt/app-root/src/:z"
+
+  # MySQL requires --ssl-mode to be set in order to require SSL
+  case ${VERSION} in
+    5*) ssl_mode_opt='--ssl-mode=REQUIRED'
+  esac
+
-  if mysql_cmd "$ip" "ssl_test_user" "ssl_test" --ssl -e 'show status like "Ssl_cipher" \G' | grep 'Value: [A-Z][A-Z0-9-]*' ; then
+  if mysql_cmd "$ip" "ssl_test_user" "ssl_test" ${ssl_mode_opt:-} --ssl-ca=${ca_cert_path} -e 'show status like "Ssl_cipher" \G' | grep 'Value: [A-Z][A-Z0-9-]*' ; then
     echo "  Success!"
     rm -rf ${test_app_dir}
   else
     echo "  FAIL!"
- mysql_cmd "$ip" "ssl_test_user" "ssl_test" --ssl -e 'status \G' + mysql_cmd "$ip" "ssl_test_user" "ssl_test" --ssl-ca=${ca_cert_path} -e 'show status like "%ssl%" \G' return 1 fi + # Clear the global variable content after we are done using it + CONTAINER_EXTRA_ARGS="" } function run_general_tests() { @@ -484,13 +498,128 @@ function run_general_tests() { USER=user PASS=pass run_tests no_root USER=user1 PASS=pass1 ROOT_PASS=r00t run_tests root # Test with arbitrary uid for the container - DOCKER_ARGS="-u 12345" USER=user PASS=pass run_tests no_root_altuid - DOCKER_ARGS="-u 12345" USER=user1 PASS=pass1 ROOT_PASS=r00t run_tests root_altuid + DOCKER_ARGS="--user 12345" USER=user PASS=pass run_tests no_root_altuid + DOCKER_ARGS="--user 12345" USER=user1 PASS=pass1 ROOT_PASS=r00t run_tests root_altuid +} + +function get_previous_major_version() { + case "${1}" in + 5.5) echo "5.1" ;; + 5.6) echo "5.5" ;; + 5.7) echo "5.6" ;; + 8.0) echo "5.7" ;; + 10.0) echo "5.5" ;; + 10.1) echo "10.0" ;; + 10.2) echo "10.1" ;; + 10.3) echo "10.2" ;; + *) echo "Non expected version '${1}'" ; return 1 ;; + esac +} + +function run_upgrade_test() { + local tmpdir=$(mktemp -d) + echo " Testing upgrade of the container image" + mkdir "${tmpdir}/data" && chmod -R a+rwx "${tmpdir}" + + # Create MySQL container with persistent volume and set the version from too old version + local datadir=${tmpdir}/data + create_container "testupg1" -e MYSQL_USER=user -e MYSQL_PASSWORD=foo \ + -e MYSQL_DATABASE=db -v ${datadir}:/var/lib/mysql/data:Z + test_connection testupg1 user foo + docker stop $(ct_get_cid testupg1) >/dev/null + + # Simulate datadir without version information + rm -f ${datadir}/mysql_upgrade_info + echo " Testing upgrade from data without version" + # This should work, but warning should be printed + create_container "testupg2" -e MYSQL_USER=user -e MYSQL_PASSWORD=foo \ + -e MYSQL_DATABASE=db -v ${datadir}:/var/lib/mysql/data:Z + test_connection testupg2 user foo + docker stop $(ct_get_cid testupg2) >/dev/null + # Check whether some information is provided + if ! docker logs $(ct_get_cid testupg2) 2>&1 | grep -e 'Version of the data could not be determined' &>/dev/null ; then + echo "Information about missing version file is not available in the logs" + return 1 + fi + # Check whether upgrade did not happen + if docker logs $(ct_get_cid testupg2) 2>&1 | grep -e 'Running mysql_upgrade' &>/dev/null ; then + echo "Upgrade should not be run when information about version is missing" + return 1 + fi + + # Create version file that is too old + echo " Testing upgrade from too old data" + echo "5.0.12" >${datadir}/mysql_upgrade_info + # Create another container with same data and upgrade set to 'upgrade-auto' + assert_container_creation_fails -e MYSQL_USER=user -e MYSQL_PASSWORD=foo \ + -e MYSQL_DATABASE=db -v ${datadir}:/var/lib/mysql/data:Z -e MYSQL_DATADIR_ACTION=upgrade-auto 2>/dev/null + + # Create version file that we can upgrade from + echo " Testing upgrade from previous version" + echo "$(get_previous_major_version ${VERSION}).12" >${datadir}/mysql_upgrade_info + # Create another container with same data and upgrade set to 'upgrade-aauto' + create_container "testupg3" -e MYSQL_USER=user -e MYSQL_PASSWORD=foo \ + -e MYSQL_DATABASE=db -v ${datadir}:/var/lib/mysql/data:Z -e MYSQL_DATADIR_ACTION=upgrade-auto + test_connection testupg3 user foo + docker stop $(ct_get_cid testupg3) >/dev/null + # Check whether some upgrade happened + if ! 
+  if ! docker logs $(ct_get_cid testupg3) 2>&1 | grep -qe 'Running mysql_upgrade' ; then
+    echo "Upgrade did not happen but it should when upgrading from previous version"
+    docker logs $(ct_get_cid testupg3)
+    return 1
+  fi
+
+  # Create version file that we don't need to upgrade from
+  echo "  Testing upgrade from the same version"
+  echo "${VERSION}.12" >${datadir}/mysql_upgrade_info
+  # Create another container with same data and upgrade set to 'upgrade-auto'
+  create_container "testupg4" -e MYSQL_USER=user -e MYSQL_PASSWORD=foo \
+    -e MYSQL_DATABASE=db -v ${datadir}:/var/lib/mysql/data:Z -e MYSQL_DATADIR_ACTION=upgrade-auto
+  test_connection testupg4 user foo
+  docker stop $(ct_get_cid testupg4) >/dev/null
+  # Check that no upgrade happened
+  if docker logs $(ct_get_cid testupg4) 2>&1 | grep -e 'Running mysql_upgrade' &>/dev/null ; then
+    echo "Upgrade happened but it should not when upgrading from current version"
+    return 1
+  fi
+
+  # Create another container with same data and upgrade set to 'analyze'
+  echo "  Testing running --analyze"
+  create_container "testupg5" -e MYSQL_USER=user -e MYSQL_PASSWORD=foo \
+    -e MYSQL_DATABASE=db -v ${datadir}:/var/lib/mysql/data:Z -e MYSQL_DATADIR_ACTION=analyze
+  test_connection testupg5 user foo
+  docker stop $(ct_get_cid testupg5) >/dev/null
+  # Check whether analyze happened
+  if ! docker logs $(ct_get_cid testupg5) 2>&1 | grep -e '--analyze --all-databases' &>/dev/null ; then
+    echo "Analyze did not happen but it should"
+    return 1
+  fi
+
+  # Create another container with same data and upgrade set to 'optimize'
+  echo "  Testing running --optimize"
+  create_container "testupg6" -e MYSQL_USER=user -e MYSQL_PASSWORD=foo \
+    -e MYSQL_DATABASE=db -v ${datadir}:/var/lib/mysql/data:Z -e MYSQL_DATADIR_ACTION=optimize
+  test_connection testupg6 user foo
+  docker stop $(ct_get_cid testupg6) >/dev/null
+  # Check whether optimize happened
+  if ! docker logs $(ct_get_cid testupg6) 2>&1 | grep -e '--optimize --all-databases' &>/dev/null ; then
+    echo "Optimize did not happen but it should"
+    return 1
+  fi
+
+  # Create version file that we cannot upgrade from
+  echo "  Testing upgrade from the future version"
+  echo "20.1.12" >${datadir}/mysql_upgrade_info
+  assert_container_creation_fails -e MYSQL_USER=user -e MYSQL_PASSWORD=foo \
+    -e MYSQL_DATABASE=db -v ${datadir}:/var/lib/mysql/data:Z -e MYSQL_DATADIR_ACTION=upgrade-auto 2>/dev/null
+
+  echo "  Upgrade tests succeeded!"
+  echo
 }
 function run_all_tests() {
   for test_case in $TEST_LIST; do
-    : "Running test $test_case"
+    echo "Running test $test_case for ${IMAGE_NAME}"
     $test_case
   done;
 }
diff --git a/test/test-lib-openshift.sh b/test/test-lib-openshift.sh
index 53bb9d3..f988ac5 100644
--- a/test/test-lib-openshift.sh
+++ b/test/test-lib-openshift.sh
@@ -223,7 +223,7 @@ function _ct_os_get_uniq_project_name() {
   local r
   while true ; do
     r=${RANDOM}
-    mkdir /var/tmp/os-test-${r} &>/dev/null && echo test-${r} && break
+    mkdir /var/tmp/sclorg-test-${r} &>/dev/null && echo sclorg-test-${r} && break
   done
 }
@@ -234,6 +234,10 @@ function _ct_os_get_uniq_project_name() {
 # Expects 'os' command that is properly logged in to the OpenShift cluster.
 # Not using mktemp, because we cannot use uppercase characters.
 function ct_os_new_project() {
+  if [ "${CT_SKIP_NEW_PROJECT:-false}" == 'true' ] ; then
+    echo "Creating project skipped."
+    return
+  fi
   local project_name="${1:-$(_ct_os_get_uniq_project_name)}" ; shift || :
   oc new-project ${project_name}
   # let the openshift cluster sync to avoid some race condition errors
@@ -245,17 +249,38 @@
 # Deletes the specified project in the openshift
 # Arguments: project - project name, uses the current project if omitted
 function ct_os_delete_project() {
+  if [ "${CT_SKIP_NEW_PROJECT:-false}" == 'true' ] ; then
+    echo "Deleting project skipped, cleaning objects only."
+    ct_delete_all_objects
+    return
+  fi
   local project_name="${1:-$(oc project -q)}" ; shift || :
   oc delete project "${project_name}"
 }
+
+# ct_delete_all_objects
+# -----------------
+# Deletes all objects within the project.
+# Handy when we have one project and want to run more tests.
+function ct_delete_all_objects() {
+  for x in bc builds dc is isimage istag po pv pvc rc routes secrets svc ; do
+    oc delete $x --all
+  done
+  # for some objects it takes longer to be really deleted, so a dummy sleep
+  # to avoid some races when other test can see not-yet-deleted objects and can fail
+  sleep 10
+}
+
 # ct_os_docker_login
 # --------------------
 # Logs in into docker daemon
+# Uses the global REGISTRY_ADDRESS environment variable for an arbitrary registry address.
+# Does not do anything if REGISTRY_ADDRESS is set.
 function ct_os_docker_login() {
+  [ -n "${REGISTRY_ADDRESS:-}" ] && echo "REGISTRY_ADDRESS set, not trying to docker login." && return 0
   # docker login fails with "404 page not found" error sometimes, just try it more times
   for i in `seq 12` ; do
-    docker login -u developer -p $(oc whoami -t) 172.30.1.1:5000 && return 0 || :
+    docker login -u developer -p $(oc whoami -t) ${REGISTRY_ADDRESS:-172.30.1.1:5000} && return 0 || :
     sleep 5
   done
   return 1
@@ -267,11 +292,12 @@ function ct_os_docker_login() {
 # Arguments: image - image name to upload
 # Arguments: imagestream - name and tag to use for the internal registry.
 #                          In the format of name:tag ($image_name:latest by default)
+# Uses the global REGISTRY_ADDRESS environment variable for an arbitrary registry address.
 function ct_os_upload_image() {
   local input_name="${1}" ; shift
   local image_name=${input_name##*/}
   local imagestream=${1:-$image_name:latest}
-  local output_name="172.30.1.1:5000/$(oc project -q)/$imagestream"
+  local output_name="${REGISTRY_ADDRESS:-172.30.1.1:5000}/$(oc project -q)/$imagestream"
   ct_os_docker_login
   docker tag ${input_name} ${output_name}
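The knobs above make it possible to point the helpers at a pre-provisioned cluster and registry instead of a local `oc cluster up`; a sketch, with the addresses illustrative:

  export CT_SKIP_NEW_PROJECT=true                    # reuse the current project, only clean objects
  export REGISTRY_ADDRESS=registry.example.com:5000  # push images here instead of 172.30.1.1:5000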
&& return 0 + mkdir -p /var/tmp/openshift local dir="${1:-$(mktemp -d /var/tmp/openshift/os-data-XXXXXX)}" ; shift || : local is_public="${1:-'false'}" ; shift || : @@ -308,7 +336,7 @@ function ct_os_cluster_up() { sed -i "s|OPTIONS='|OPTIONS='--insecure-registry 172.30.0.0/16 |" /etc/sysconfig/docker fi - systemctl stop firewalld + systemctl stop firewalld || : setenforce 0 iptables -F @@ -316,10 +344,25 @@ function ct_os_cluster_up() { local cluster_ip="127.0.0.1" [ "${is_public}" == "true" ] && cluster_ip=$(ct_get_public_ip) + if [ -n "${cluster_version}" ] ; then + # if $cluster_version is not set, we simply use oc that is available + ct_os_set_path_oc "${cluster_version}" + fi + mkdir -p ${dir}/{config,data,pv} - oc cluster up --host-data-dir=${dir}/data --host-config-dir=${dir}/config \ - --host-pv-dir=${dir}/pv --use-existing-config --public-hostname=${cluster_ip} \ - ${cluster_version:+--version=$cluster_version } + case $(oc version| head -n 1) in + "oc v3.1"?.*) + oc cluster up --base-dir="${dir}/data" --public-hostname="${cluster_ip}" + ;; + "oc v3."*) + oc cluster up --host-data-dir="${dir}/data" --host-config-dir="${dir}/config" \ + --host-pv-dir="${dir}/pv" --use-existing-config --public-hostname="${cluster_ip}" + ;; + *) + echo "ERROR: Unexpected oc version." >&2 + return 1 + ;; + esac oc version oc login -u system:admin oc project default @@ -344,6 +387,96 @@ function ct_os_cluster_running() { oc cluster status &>/dev/null } +# ct_os_logged_in +# --------------- +# Returns 0 if logged in to a cluster (remote or local) +function ct_os_logged_in() { + oc whoami >/dev/null +} + +# ct_os_set_path_oc OC_VERSION +# -------------------- +# This is a trick that helps using correct version of the `oc`: +# The input is version of the openshift in format v3.6.0 etc. +# If the currently available version of oc is not of this version, +# it first takes a look into /usr/local/oc-/bin directory, +# and if not found there it downloads the community release from github. +# In the end the PATH variable is changed, so the other tests can still use just 'oc'. +# Arguments: oc_version - X.Y part of the version of OSE (e.g. 3.9) +function ct_os_set_path_oc() { + local oc_version=$(ct_os_get_latest_ver $1) + local oc_path + + if oc version | grep -q "oc ${oc_version%.*}." ; then + echo "Binary oc found already available in version ${oc_version}: `which oc` Doing noting." + return 0 + fi + + # first check whether we already have oc available in /usr/local + local installed_oc_path="/usr/local/oc-${oc_version%.*}/bin" + + if [ -x "${installed_oc_path}/oc" ] ; then + oc_path="${installed_oc_path}" + echo "Binary oc found in ${installed_oc_path}" >&2 + else + # oc not available in /usr/local, try to download it from github (community release) + oc_path="/tmp/oc-${oc_version}-bin" + ct_os_download_upstream_oc "${oc_version}" "${oc_path}" + fi + if [ -z "${oc_path}/oc" ] ; then + echo "ERROR: oc not found installed, nor downloaded" >&1 + return 1 + fi + export PATH="${oc_path}:${PATH}" + if ! oc version | grep -q "oc ${oc_version%.*}." 
+    echo "ERROR: something went wrong, oc located at ${oc_path}, but oc of version ${oc_version} not found in PATH ($PATH)" >&2
+    return 1
+  else
+    echo "PATH set correctly, binary oc found in version ${oc_version}: `which oc`"
+  fi
+}
+
+# ct_os_get_latest_ver VERSION_PART_X
+# --------------------
+# Returns full version (vX.Y.Z) from part of the version (X.Y)
+# Arguments: vxy - X.Y part of the version
+# Returns vX.Y.Z variant of the version
+function ct_os_get_latest_ver(){
+  local vxy="v$1"
+  for vz in {3..0} ; do
+    curl -sif "https://github.com/openshift/origin/releases/tag/${vxy}.${vz}" >/dev/null && echo "${vxy}.${vz}" && return 0
+  done
+  echo "ERROR: version ${vxy} not found in https://github.com/openshift/origin/tags" >&2
+  return 1
+}
+
+# ct_os_download_upstream_oc OC_VERSION OUTPUT_DIR
+# --------------------
+# Downloads a particular version of openshift-origin-client-tools from
+# github into the specified output directory
+# Arguments: oc_version - version of OSE (e.g. v3.7.2)
+# Arguments: output_dir - output directory
+function ct_os_download_upstream_oc() {
+  local oc_version=$1
+  local output_dir=$2
+
+  # check whether we already have the binary in place
+  [ -x "${output_dir}/oc" ] && return 0
+
+  mkdir -p "${output_dir}"
+  # using html output instead of https://api.github.com/repos/openshift/origin/releases/tags/${oc_version},
+  # because the API limits the number of queries if not authenticated
+  tarball=$(curl -si "https://github.com/openshift/origin/releases/tag/${oc_version}" | grep -o -e "openshift-origin-client-tools-${oc_version}-[a-f0-9]*-linux-64bit.tar.gz" | head -n 1)
+
+  # download, unpack the binaries and then put them into output directory
+  echo "Downloading https://github.com/openshift/origin/releases/download/${oc_version}/${tarball} into ${output_dir}/" >&2
+  curl -sL https://github.com/openshift/origin/releases/download/${oc_version}/"${tarball}" | tar -C "${output_dir}" -xz
+  mv -f "${output_dir}"/"${tarball%.tar.gz}"/* "${output_dir}/"
+
+  rmdir "${output_dir}"/"${tarball%.tar.gz}"
+}
+
 # ct_os_test_s2i_app_func IMAGE APP CONTEXT_DIR CHECK_CMD [OC_ARGS]
 # --------------------
 # Runs [image] and [app] in the openshift and optionally specifies env_params
@@ -362,9 +495,10 @@ function ct_os_test_s2i_app_func() {
   local context_dir=${3}
   local check_command=${4}
   local oc_args=${5:-}
+  local import_image=${6:-}
   local image_name_no_namespace=${image_name##*/}
   local service_name="${image_name_no_namespace}-testing"
-  local image_tagged="${image_name_no_namespace}:testing"
+  local image_tagged="${image_name_no_namespace}:${VERSION}"
   if [ $# -lt 4 ] || [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then
     echo "ERROR: ct_os_test_s2i_app_func() requires at least 4 arguments that cannot be empty." >&2
   ct_os_new_project
   # Create a specific imagestream tag for the image so that oc cannot use anything else
-  ct_os_upload_image "${image_name}" "${image_tagged}"
+  if [ "${CT_SKIP_UPLOAD_IMAGE:-false}" == 'true' ] ; then
+    if [ -n "${import_image}" ] ; then
+      echo "Importing image ${import_image} as ${image_name}:${VERSION}"
+      oc import-image ${image_name}:${VERSION} --from ${import_image} --confirm
+    else
+      echo "Uploading and importing image skipped."
+    fi
+  else
+    if [ -n "${import_image}" ] ; then
+      echo "Warning: Import image ${import_image} requested, but uploading image ${image_name} instead."
+
 # ct_os_test_s2i_app_func IMAGE APP CONTEXT_DIR CHECK_CMD [OC_ARGS]
 # --------------------
 # Runs [image] and [app] in the openshift and optionally specifies env_params
@@ -362,9 +495,10 @@ function ct_os_test_s2i_app_func() {
   local context_dir=${3}
   local check_command=${4}
   local oc_args=${5:-}
+  local import_image=${6:-}
   local image_name_no_namespace=${image_name##*/}
   local service_name="${image_name_no_namespace}-testing"
-  local image_tagged="${image_name_no_namespace}:testing"
+  local image_tagged="${image_name_no_namespace}:${VERSION}"
 
   if [ $# -lt 4 ] || [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then
     echo "ERROR: ct_os_test_s2i_app_func() requires at least 4 arguments that cannot be empty." >&2
@@ -373,7 +507,19 @@ function ct_os_test_s2i_app_func() {
   ct_os_new_project
   # Create a specific imagestream tag for the image so that oc cannot use anything else
-  ct_os_upload_image "${image_name}" "${image_tagged}"
+  if [ "${CT_SKIP_UPLOAD_IMAGE:-false}" == 'true' ] ; then
+    if [ -n "${import_image}" ] ; then
+      echo "Importing image ${import_image} as ${image_name}:${VERSION}"
+      oc import-image ${image_name}:${VERSION} --from ${import_image} --confirm
+    else
+      echo "Uploading and importing image skipped."
+    fi
+  else
+    if [ -n "${import_image}" ] ; then
+      echo "Warning: Import of image ${import_image} requested, but uploading image ${image_name} instead."
+    fi
+    ct_os_upload_image "${image_name}" "${image_tagged}"
+  fi
 
   local app_param="${app}"
   if [ -d "${app}" ] ; then
@@ -435,6 +581,7 @@ function ct_os_test_s2i_app() {
   local protocol=${6:-http}
   local response_code=${7:-200}
   local oc_args=${8:-}
+  local import_image=${9:-}
 
   if [ $# -lt 4 ] || [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then
     echo "ERROR: ct_os_test_s2i_app() requires at least 4 arguments that cannot be empty." >&2
@@ -444,8 +591,8 @@ function ct_os_test_s2i_app() {
   ct_os_test_s2i_app_func "${image_name}" \
                           "${app}" \
                           "${context_dir}" \
-                          "ct_test_response '${protocol}://<IP>:${port}' '${response_code}' '${expected_output}'" \
-                          "${oc_args}"
+                          "ct_os_test_response_internal '${protocol}://<IP>:${port}' '${response_code}' '${expected_output}'" \
+                          "${oc_args}" "${import_image}"
 }
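+
+# Sketch of how the new import_image handling is meant to be driven; the image
+# and registry names are hypothetical and VERSION is assumed to be exported by
+# the test suite. With CT_SKIP_UPLOAD_IMAGE=true the local image is not pushed
+# and the tag is imported from a registry instead:
+#   CT_SKIP_UPLOAD_IMAGE=true \
+#   ct_os_test_s2i_app_func "myorg/python-36" "app-src/" "." \
+#       "ct_os_test_response_internal 'http://<IP>:8080' 200 'Hello'" \
+#       "" "registry.example.com/myorg/python-36:candidate"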
 
 # ct_os_test_template_app_func IMAGE APP IMAGE_IN_TEMPLATE CHECK_CMD [OC_ARGS]
 # --------------------
 # Runs [image] and [app] in the openshift and optionally specifies env_params
@@ -471,6 +618,7 @@ function ct_os_test_template_app_func() {
   local check_command=${4}
   local oc_args=${5:-}
   local other_images=${6:-}
+  local import_image=${7:-}
 
   if [ $# -lt 4 ] || [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then
     echo "ERROR: ct_os_test_template_app_func() requires at least 4 arguments that cannot be empty." >&2
@@ -481,27 +629,39 @@ function ct_os_test_template_app_func() {
   local image_tagged="${name_in_template}:${VERSION}"
 
   ct_os_new_project
+
   # Create a specific imagestream tag for the image so that oc cannot use anything else
-  ct_os_upload_image "${image_name}" "${image_tagged}"
-
-  # upload also other images, that template might need (list of pairs in the format <image>|<tag>)
-  local images_tags_a
-  local i_t
-  for i_t in ${other_images} ; do
-    echo "${i_t}"
-    IFS='|' read -ra image_tag_a <<< "${i_t}"
-    docker pull "${image_tag_a[0]}"
-    ct_os_upload_image "${image_tag_a[0]}" "${image_tag_a[1]}"
-  done
+  if [ "${CT_SKIP_UPLOAD_IMAGE:-false}" == 'true' ] ; then
+    if [ -n "${import_image}" ] ; then
+      echo "Importing image ${import_image} as ${image_name}:${VERSION}"
+      oc import-image ${image_name}:${VERSION} --from ${import_image} --confirm
+    else
+      echo "Uploading and importing image skipped."
+    fi
+  else
+    if [ -n "${import_image}" ] ; then
+      echo "Warning: Import of image ${import_image} requested, but uploading image ${image_name} instead."
+    fi
+    ct_os_upload_image "${image_name}" "${image_tagged}"
+
+    # also upload other images that the template might need (list of pairs in the format <image>|<tag>)
+    local images_tags_a
+    local i_t
+    for i_t in ${other_images} ; do
+      echo "${i_t}"
+      IFS='|' read -ra image_tag_a <<< "${i_t}"
+      docker pull "${image_tag_a[0]}"
+      ct_os_upload_image "${image_tag_a[0]}" "${image_tag_a[1]}"
+    done
+  fi
 
   local local_template=$(ct_obtain_input "${template}")
+  local namespace=${CT_NAMESPACE:-$(oc project -q)}
   oc new-app ${local_template} \
-             -p NAME="${service_name}" \
-             -p NAMESPACE="$(oc project -q)" \
+             --name "${name_in_template}" \
+             -p NAMESPACE="${namespace}" \
              ${oc_args}
 
-  oc start-build "${service_name}"
-
   ct_os_wait_pod_ready "${service_name}" 300
 
   local ip=$(ct_os_get_service_ip "${service_name}")
@@ -549,6 +709,7 @@ function ct_os_test_template_app() {
   local response_code=${7:-200}
   local oc_args=${8:-}
   local other_images=${9:-}
+  local import_image=${10:-}
 
   if [ $# -lt 4 ] || [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then
     echo "ERROR: ct_os_test_template_app() requires at least 4 arguments that cannot be empty." >&2
@@ -558,27 +719,28 @@ function ct_os_test_template_app() {
   ct_os_test_template_app_func "${image_name}" \
                                "${template}" \
                                "${name_in_template}" \
-                               "ct_test_response '${protocol}://<IP>:${port}' '${response_code}' '${expected_output}'" \
+                               "ct_os_test_response_internal '${protocol}://<IP>:${port}' '${response_code}' '${expected_output}'" \
                                "${oc_args}" \
-                               "${other_images}"
+                               "${other_images}" \
+                               "${import_image}"
 }
 
-# ct_os_test_image_update IMAGE IS CHECK_CMD OC_ARGS
+# ct_os_test_image_update IMAGE_NAME OLD_IMAGE ISTAG CHECK_FUNCTION OC_ARGS
 # --------------------
 # Runs an image update test with [image] uploaded to [is] imagestream
-# and checks the services using an arbitrary function provided in [check_cmd].
-# Arguments: image - prefix or whole ID of the pod to run the cmd in (compulsory)
-# Arguments: is - imagestream to upload the images into (compulsory)
-# Arguments: check_cmd - command to be run to check functionality of created services (compulsory)
+# and checks the services using an arbitrary function provided in [check_function].
+# Arguments: image_name - name of the image being tested (compulsory)
+# Arguments: old_image - valid name of the old image from the registry
+# Arguments: istag - imagestream to upload the images into (compulsory)
+# Arguments: check_function - command to be run to check functionality of created services (compulsory)
 # Arguments: oc_args - arguments to use during oc new-app (compulsory)
 ct_os_test_image_update() {
   local image_name=$1; shift
+  local old_image=$1; shift
   local istag=$1; shift
   local check_function=$1; shift
 
   local service_name=${image_name##*/}
-  local old_image="" ip="" check_command_exp="" registry=""
-  registry=$(ct_registry_from_os "$OS")
-  old_image="$registry/$image_name"
+  local ip="" check_command_exp=""
 
   echo "Running image update test for: $image_name"
   ct_os_new_project
@@ -607,3 +769,157 @@ ct_os_test_image_update() {
 
   ct_os_delete_project
 }
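+
+# Usage sketch for ct_os_test_image_update; the image names and the check
+# function are hypothetical placeholders:
+#   ct_os_test_image_update "myorg/mariadb" \
+#                           "registry.example.com/myorg/mariadb:latest" \
+#                           "mariadb:testing" \
+#                           my_check_function \
+#                           "-e MYSQL_ROOT_PASSWORD=test"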
+
+# ct_os_deploy_cmd_image IMAGE_NAME
+# --------------------
+# Runs a special command pod, i.e. a pod that does nothing but includes
+# utilities needed for testing. A typical example is a mysql pod that provides
+# the mysql command-line client we need for testing.
+# Running commands inside this command pod is done via the ct_os_cmd_image_run function.
+# The pod is not run again if it is already running.
+# Arguments: image_name - image to be used as a command pod
+function ct_os_deploy_cmd_image() {
+  local image_name=${1}
+  oc get pod command-app &>/dev/null && echo "command POD already running" && return 0
+  echo "command POD not running yet, will start one called command-app"
+  oc create -f - <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  name: command-app
+spec:
+  containers:
+  - image: ${image_name}
+    name: command-container
+    command: ["sleep"]
+    args: ["3h"]
+  restartPolicy: OnFailure
+EOF
+}
+
+# ct_os_cmd_image_run CMD [ARG ...]
+# --------------------
+# Runs the command CMD inside the special command pod
+# Arguments: cmd - shell command with args to run in the pod
+function ct_os_cmd_image_run() {
+  oc exec command-app -- bash -c "$@"
+}
+
+# ct_os_test_response_internal URL [EXPECTED_CODE, BODY_REGEXP, MAX_ATTEMPTS, IGNORE_ERROR_ATTEMPTS]
+# ----------------
+# Performs a GET request on the given URL from within the cluster, checks the
+# HTTP response code and greps the body for a regexp; the check is repeated
+# until it succeeds or the number of attempts is exhausted.
+# Arguments: url - URL of the application (compulsory)
+# Arguments: expected_code - expected HTTP response code (default 200)
+# Arguments: body_regexp - regexp the response body must match (default .*)
+# Arguments: max_attempts - number of attempts before giving up (default 20)
+# Arguments: ignore_error_attempts - number of attempts that may fail before failures are fatal (default 10)
+function ct_os_test_response_internal() {
+  local url="$1"
+  local expected_code="${2:-200}"
+  local body_regexp="${3:-.*}"
+  local max_attempts="${4:-20}"
+  local ignore_error_attempts="${5:-10}"
+
+  : "  Testing the HTTP(S) response for <${url}>"
+  local sleep_time=3
+  local attempt=1
+  local result=1
+  local status
+  local response_code
+  local response_file=$(mktemp /tmp/ct_test_response_XXXXXX)
+  local util_image_name='python:3.6'
+
+  ct_os_deploy_cmd_image "${util_image_name}"
+
+  while [ ${attempt} -le ${max_attempts} ]; do
+    ct_os_cmd_image_run "curl --connect-timeout 10 -s -w '%{http_code}' '${url}'" >${response_file} && status=0 || status=1
+    if [ ${status} -eq 0 ]; then
+      response_code=$(cat ${response_file} | tail -c 3)
+      if [ "${response_code}" -eq "${expected_code}" ]; then
+        result=0
+      fi
+      cat ${response_file} | grep -qP -e "${body_regexp}" || result=1
+      # Some services return a 40x code until they are fully ready, so give
+      # them a chance instead of failing right away; do not wait any longer
+      # once we already have the expected outcome, though.
+      if [ ${result} -eq 0 -o ${attempt} -gt ${ignore_error_attempts} -o ${attempt} -eq ${max_attempts} ] ; then
+        break
+      fi
+    fi
+    attempt=$(( ${attempt} + 1 ))
+    sleep ${sleep_time}
+  done
+  rm -f ${response_file}
+  return ${result}
+}
+
+# ct_os_get_image_from_pod
+# ------------------------
+# Prints the image identifier from an existing pod to stdout
+# Argument: pod_prefix - prefix or full name of the pod to get the image from
+ct_os_get_image_from_pod() {
+  local pod_prefix=$1 ; shift
+  local pod_name=$(ct_os_get_pod_name $pod_prefix)
+  oc get "po/${pod_name}" -o yaml | sed -ne 's/^\s*image:\s*\(.*\)\s*$/\1/ p' | head -1
+}
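+
+# Usage sketch for ct_os_test_response_internal; the service name and port are
+# hypothetical, and the IP would typically come from ct_os_get_service_ip:
+#   ip=$(ct_os_get_service_ip "myapp-testing")
+#   ct_os_test_response_internal "http://${ip}:8080" 200 "Welcome" 30 15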
+
+# ct_os_check_cmd_internal UTIL_IMAGE_NAME SERVICE_NAME CHECK_COMMAND [EXPECTED_CONTENT_MATCH, TIMEOUT]
+# ----------------
+# Runs the specified command, checks the exit code and compares the output with
+# the expected regexp. All of this is done inside an image in the cluster, so
+# the function is typically used in clusters that are not accessible from outside.
+# The check is repeated until it succeeds or the timeout is reached.
+# Argument: util_image_name - name of the image in the cluster that is used for running the cmd
+# Argument: service_name - kubernetes' service name to work with (the IP address is taken from this one)
+# Argument: check_command - command that is run within the util_image_name container
+# Argument: expected_content_match - regexp that must be in the output (use .* to ignore the check)
+# Argument: timeout - number of seconds to wait till the check succeeds
+function ct_os_check_cmd_internal() {
+  local util_image_name=$1 ; shift
+  local service_name=$1 ; shift
+  local check_command=$1 ; shift
+  local expected_content_match=${1:-.*} ; shift
+  local timeout=${1:-60} ; shift || :
+
+  : "  Service ${service_name} check ..."
+
+  local output
+  local ret
+  local ip=$(ct_os_get_service_ip "${service_name}")
+  local check_command_exp=$(echo "$check_command" | sed -e "s/<IP>/$ip/g")
+
+  ct_os_deploy_cmd_image $(ct_os_get_image_from_pod "${util_image_name}" | head -n 1)
+  SECONDS=0
+
+  echo -n "Waiting for the ${service_name} service to become ready ..."
+  while true ; do
+    output=$(ct_os_cmd_image_run "$check_command_exp")
+    ret=$?
+    echo "${output}" | grep -qe "${expected_content_match}" || ret=1
+    if [ ${ret} -eq 0 ] ; then
+      echo " PASS"
+      return 0
+    fi
+    echo -n "."
+    [ ${SECONDS} -gt ${timeout} ] && break
+    sleep 3
+  done
+  echo " FAIL"
+  return 1
+}
+
diff --git a/test/test-lib.sh b/test/test-lib.sh
index dfc63d9..e372870 100644
--- a/test/test-lib.sh
+++ b/test/test-lib.sh
@@ -208,6 +208,29 @@ function ct_doc_content_old() {
 }
 
 
+# ct_npm_works
+# --------------------
+# Checks that the npm tool exists in the image and works.
+function ct_npm_works() {
+  local tmpdir=$(mktemp -d)
+  : "  Testing npm in the container image"
+  docker run --rm ${IMAGE_NAME} /bin/bash -c "npm --version" >${tmpdir}/version
+
+  if [ $? -ne 0 ] ; then
+    echo "ERROR: 'npm --version' does not work inside the image ${IMAGE_NAME}." >&2
+    return 1
+  fi
+
+  docker run --rm ${IMAGE_NAME} /bin/bash -c "npm install jquery && test -f node_modules/jquery/src/jquery.js"
+  if [ $? -ne 0 ] ; then
+    echo "ERROR: npm could not install jquery inside the image ${IMAGE_NAME}." >&2
+    return 1
+  fi
+
+  : "  Success!"
+}
+
+
 # ct_path_append PATH_VARNAME DIRECTORY
 # -------------------------------------
 # Append DIRECTORY to VARIABLE of name PATH_VARNAME, the VARIABLE must consist
@@ -284,7 +307,7 @@ function ct_obtain_input() {
   local output=$(mktemp "/var/tmp/test-input-XXXXXX$extension")
 
   if [ -f "${input}" ] ; then
-    cp "${input}" "${output}"
+    cp -f "${input}" "${output}"
   elif [ -d "${input}" ] ; then
     rm -f "${output}"
     cp -r -LH "${input}" "${output}"
@@ -400,3 +423,85 @@ ct_random_string()
   | fold -w "${1-10}" \
   | head -n 1 \
   )
+
+# ct_s2i_usage IMG_NAME [S2I_ARGS]
+# ----------------------------
+# Creates a container and runs the usage script inside
+# Argument: IMG_NAME - name of the image to be used for the container run
+# Argument: S2I_ARGS - additional list of source-to-image arguments, currently unused
+ct_s2i_usage()
+{
+  local img_name=$1; shift
+  local s2i_args="$*"
+  local usage_command="/usr/libexec/s2i/usage"
+  docker run --rm "$img_name" bash -c "$usage_command"
+}
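+
+# Usage sketch for the helpers above; the image name is a hypothetical
+# placeholder and IMAGE_NAME is the image the test suite currently exercises:
+#   IMAGE_NAME=myorg/nodejs-8 ct_npm_works
+#   ct_s2i_usage myorg/nodejs-8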
"$local_app" + [ -d "$local_app/.s2i/bin/" ] && mv "$local_app/.s2i/bin" "$local_scripts" + # Create a Dockerfile named df_name and fill it with proper content + #FIXME: Some commands could be combined into a single layer but not sure if worth the trouble for testing purposes + cat <"$df_name" +FROM $src_image +LABEL "io.openshift.s2i.build.image"="$src_image" \\ + "io.openshift.s2i.build.source-location"="$app_path" +USER root +COPY $local_app /tmp/src +EOF + [ -d "$local_scripts" ] && echo "COPY $local_scripts /tmp/scripts" >> "$df_name" && + echo "RUN chown -R $user_id:0 /tmp/scripts" >>"$df_name" + echo "RUN chown -R $user_id:0 /tmp/src" >>"$df_name" + # Check for custom environment variables inside .s2i/ folder + if [ -e "$local_app/.s2i/environment" ]; then + # Remove any comments and add the contents as ENV commands to the Dockerfile + sed '/^\s*#.*$/d' "$local_app/.s2i/environment" | while read -r line; do + echo "ENV $line" >>"$df_name" + done + fi + # Filter out env var definitions from $s2i_args and create Dockerfile ENV commands out of them + echo "$s2i_args" | grep -o -e '\(-e\|--env\)[[:space:]=]\S*=\S*' | sed -e 's/-e /ENV /' -e 's/--env[ =]/ENV /' >>"$df_name" + echo "USER $user_id" >>"$df_name" + # If exists, run the custom assemble script, else default to /usr/libexec/s2i/assemble + if [ -x "$local_scripts/assemble" ]; then + echo "RUN /tmp/scripts/assemble" >>"$df_name" + else + echo "RUN /usr/libexec/s2i/assemble" >>"$df_name" + fi + # If exists, set the custom run script as CMD, else default to /usr/libexec/s2i/run + if [ -x "$local_scripts/run" ]; then + echo "CMD /tmp/scripts/run" >>"$df_name" + else + echo "CMD /usr/libexec/s2i/run" >>"$df_name" + fi + # Run the build and tag the result + docker build -f "$df_name" -t "$dst_image" . + popd +}