diff --git a/3.11 b/3.11 new file mode 120000 index 0000000..945c9b4 --- /dev/null +++ b/3.11 @@ -0,0 +1 @@ +. \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 644a2cb..6d6b464 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM registry.fedoraproject.org/fedora:rawhide +FROM registry.fedoraproject.org/f30/s2i-core:latest # Cassandra image for Fedora. # @@ -6,10 +6,10 @@ FROM registry.fedoraproject.org/fedora:rawhide # * /var/lib/cassandra/data - Datastore for Cassandra # Environment: -LABEL MAINTAINER "Augusto Caringi" - -ENV NAME=cassandra \ +ENV CASSANDRA_VERSION=3.11 \ + NAME=cassandra \ VERSION=0 \ + RELEASE=2 \ ARCH=x86_64 \ SUMMARY="Cassandra is an OpenSource database for high-scale application" \ DESCRIPTION="Cassandra is a partitioned row store. Rows are organized \ @@ -18,27 +18,25 @@ distribute your data across multiple machines in an application-transparent \ matter. Cassandra will automatically re-partition as machines are \ added/removed from the cluster. Row store means that like relational \ databases, Cassandra organizes data by rows and columns. The Cassandra Query \ -Language (CQL) is a close relative of SQL." +Language (CQL) is a close relative of SQL." \ + # Set paths to avoid hard-coding them in scripts. 
+ HOME=/var/lib/cassandra \ + CASSANDRA_CONF_DIR=/etc/cassandra/ \ + CONTAINER_SCRIPTS_BASE=/usr/share/container-scripts \ + CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/cassandra LABEL summary="$SUMMARY" \ description="$DESCRIPTION" \ - com.redhat.component="$NAME" \ + io.k8s.description="$SUMMARY" \ + io.k8s.display-name="Cassandra $CASSANDRA_VERSION" \ + io.openshift.expose-services="7199:cassandra,9042:cql" \ + io.openshift.tags="database,cassandra,cassandra3" \ name="$FGC/$NAME" \ version=0 \ - architecture="$ARCH" \ + com.redhat.component="$NAME" \ usage="docker run cassandra" \ - help="help.1" - -LABEL io.k8s.description="$SUMMARY" \ - io.k8s.display-name="Cassandra 3.11" \ - io.openshift.expose-services="7199:cassandra" \ - io.openshift.tags="database,cassandra,cassandra39" - -ENV CASSANDRA_VERSION=3.11 \ - # Set paths to avoid hard-coding them in scripts. - HOME=/var/lib/cassandra \ - CASSANDRA_CONF_DIR=/etc/cassandra/ \ - CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/cassandra + help="help.1" \ + maintainer="SoftwareCollections.org " # 7000: intra-node communication # 7001: TLS intra-node communication @@ -46,22 +44,20 @@ ENV CASSANDRA_VERSION=3.11 \ # 9042: CQL EXPOSE 7000 7001 7199 9042 -RUN INSTALL_PKGS="cassandra-server cassandra jemalloc" && \ +RUN INSTALL_PKGS="cassandra-server cassandra hostname" && \ dnf install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \ rpm -V $INSTALL_PKGS && \ dnf clean all -VOLUME ["/var/lib/cassandra"] - ADD root / -# Container setup -# RUN chown cassandra:0 /etc/cassandra/cassandra.yaml && \ -# chown -R cassandra.0 /var/lib/cassandra/ && \ - # Loosen permission bits to avoid problems running container with arbitrary UID -# chmod -R g+rwx /var/lib/cassandra - #sed -ri 's/# JVM_OPTS="\$JVM_OPTS -Djava.rmi.server.hostname=/JVM_OPTS="\$JVM_OPTS -Djava.rmi.server.hostname=172.17.0.2/' /usr/share/cassandra/cassandra-env.sh && \ - #sed -ri 's/LOCAL_JMX=yes/LOCAL_JMX=no/' /usr/share/cassandra/cassandra-env.sh +COPY 
./s2i/bin/ $STI_SCRIPTS_PATH + +RUN chown -R cassandra $HOME $CONTAINER_SCRIPTS_BASE&& \ + /usr/libexec/fix-permissions $HOME $CONTAINER_SCRIPTS_BASE $CASSANDRA_CONF_DIR /var/log/cassandra && \ + rpm-file-permissions + +VOLUME ["$HOME"] USER 143 diff --git a/Dockerfile.fedora b/Dockerfile.fedora new file mode 120000 index 0000000..1d1fe94 --- /dev/null +++ b/Dockerfile.fedora @@ -0,0 +1 @@ +Dockerfile \ No newline at end of file diff --git a/help.md b/help.md new file mode 120000 index 0000000..42061c0 --- /dev/null +++ b/help.md @@ -0,0 +1 @@ +README.md \ No newline at end of file diff --git a/root/help.1 b/root/help.1 new file mode 100644 index 0000000..dec13de --- /dev/null +++ b/root/help.1 @@ -0,0 +1,199 @@ +.TH Cassandra container +.PP +This repository contains Dockerfiles for Cassandra images for general usage and OpenShift. +Currently only CentOS based image is available. The CentOS image is then available on +Docker Hub +\[la]https://hub.docker.com/r/centos/cassandra-3-centos7/\[ra] as centos/cassandra\-3\-centos7. + +.SH Description +.PP +This container image provides a containerized packaging of the Cassandra daemon +and client application. The cassandra server daemon accepts connections from clients +and provides access to content from Cassandra databases on behalf of the clients. +You can find more information on the Cassandra project from the project Web site +( +\[la]https://cassandra.apache.org/\[ra]). + +.SH Usage +.PP +For this, we will assume that you are using the \fB\fCcentos/cassandra\-3\-centos7\fR image. 
+If you want to set only the mandatory environment variables and store the database +in the \fB\fC/home/user/database\fR directory on the host filesystem, execute the following command: + +.PP +.RS + +.nf +$ docker run \-d \-e CASSANDRA\_ADMIN\_PASSWORD= \-v /home/user/database:/var/opt/rh/sclo\-cassandra3/lib/cassandra:Z centos/cassandra\-3\-centos7 + +.fi +.RE + +.SH Environment variables and Volumes +.PP +The image recognizes the following environment variables that you can set during +initialization by passing \fB\fC\-e VAR=VALUE\fR to the Docker run command. + +.TS +allbox; +l l +l l . +\fB\fCVariable name\fR \fB\fCDescription\fR +CASSANDRA\_ADMIN\_PASSWORD Password for the admin user +.TE + +.PP +The following environment variables influence the Cassandra configuration file. They are all optional. + +.TS +allbox; +l l l +l l l . +\fB\fCVariable name\fR \fB\fCDescription\fR \fB\fCDefault\fR +CASSANDRA\_CLUSTER\_NAME The name of the cluster. \&'Test Cluster' +T{ +CASSANDRA\_DISK\_OPTIMIZATION\_STRATEGY +T} T{ +The strategy for optimizing disk reads. +T} ssd +CASSANDRA\_ENDPOINT\_SNITCH T{ +Cassandra uses the snitch to locate nodes and route requests. +T} SimpleSnitch +CASSANDRA\_NUM\_TOKENS T{ +Defines the number of tokens randomly assigned to this node. +T} 256 +CASSANDRA\_RPC\_ADDRESS T{ +The listen address for client connections. +T} \&' ' +T{ +CASSANDRA\_KEY\_CACHE\_SIZE\_IN\_MB +T} T{ +Maximum size of the key cache in memory. +T} \&' ' +CASSANDRA\_CONCURRENT\_READS T{ +Allows operations to queue low enough in the stack so that the OS and drives can reorder them. +T} 32 +CASSANDRA\_CONCURRENT\_WRITES T{ +Writes in Cassandra are rarely I/O bound, so the ideal number of concurrent writes depends on the number of CPU cores on the node. The recommended value is 8 × number\_of\_cpu\_cores. +T} 32 +T{ +CASSANDRA\_MEMTABLE\_ALLOCATION\_TYPE +T} T{ +The method Cassandra uses to allocate and manage memtable memory. 
+T} \&'heap\_buffers' +T{ +CASSANDRA\_MEMTABLE\_CLEANUP\_THRESHOLD +T} T{ +Ratio used for automatic memtable flush. +T} 0.5 +T{ +CASSANDRA\_MEMTABLE\_FLUSH\_WRITERS +T} T{ +The number of memtable flush writer threads. +T} 1 +T{ +CASSANDRA\_CONCURRENT\_COMPACTORS +T} T{ +Number of concurrent compaction processes allowed to run simultaneously on a node. +T} \&' ' +T{ +CASSANDRA\_COMPACTION\_THROUGHPUT\_MB\_PER\_SEC +T} T{ +Throttles compaction to the specified Mb/second across the instance. +T} 16 +T{ +CASSANDRA\_COUNTER\_CACHE\_SIZE\_IN\_MB +T} T{ +Maximum size of the counter cache in memory. +T} \&' ' +T{ +CASSANDRA\_INTERNODE\_COMPRESSION +T} T{ +Controls whether traffic between nodes is compressed. +T} all +T{ +CASSANDRA\_GC\_WARN\_THRESHOLD\_IN\_MS +T} T{ +Any GC pause longer than this interval is logged at the WARN level. +T} 1000 +CASSANDRA\_AUTO\_BOOTSTRAP T{ +It causes new (non\-seed) nodes to migrate the right data to themselves automatically. +T} true +.TE + +.PP +More details about each variable can be found at: +\[la]http://docs.datastax.com/en/cassandra/3.0/cassandra/configuration/configCassandra_yaml.html\[ra] + +.PP +You can also set the following mount points by passing the \fB\fC\-v /host:/container\fR flag to Docker. + +.TS +allbox; +l l +l l . +\fB\fCVolume mount point\fR \fB\fCDescription\fR +T{ +/var/opt/rh/sclo\-cassandra3/lib/cassandra +T} Cassandra data directory +.TE + +.PP +\fBNotice: When mounting a directory from the host into the container, ensure that the mounted +directory has the appropriate permissions and that the owner and group of the directory +matches the user UID or name which is running inside the container.\fP + +.SH Ports +.PP +By default, Cassandra uses 7000 for cluster communication (7001 if SSL is enabled), 9042 for native protocol clients, +and 7199 for JMX. The internode communication and native protocol ports are configurable in the Cassandra Configuration +File (cassandra.yaml). 
The JMX port is configurable in cassandra\-env.sh (through JVM options). All ports are TCP. + +.SH Documentation +.PP +See +\[la]http://cassandra.apache.org/doc/latest/\[ra] + +.SH Requirements.IP \(bu 2 +Memory: For production 32 GB to 512 GB; the minimum is 8 GB for Cassandra nodes. For development in non\-loading +testing environments: no less than 4 GB. +.IP \(bu 2 +CPU: For production 16\-core CPU processors are the current price\-performance sweet spot. For development in +non\-loading testing environments: 2\-core CPU processors are sufficient. +.IP \(bu 2 +Disk space: SSDs are recommended for Cassandra nodes. The size depends on the compaction strategy used. With SSDs, +you can use a maximum of 3 to 5 TB per node of disk space for uncompressed data. +.IP \(bu 2 +Network: Recommended bandwidth is 1000 Mb/s (gigabit) or greater. + +.PP +More on hardware requirements on +\[la]https://docs.datastax.com/en/landing_page/doc/landing_page/planning/planningHardware.html\[ra] + +.SH Custom configuration file +.PP +It is allowed to use custom configuration files for cassandra server. + +.PP +To use custom configuration file in container it has to be mounted into \fB\fC/etc/opt/rh/sclo\-cassandra3/cassandra/cassandra.yaml\fR\&. +For example to use configuration file stored in \fB\fC/home/user\fR directory use this option for \fB\fCdocker run\fR command: +\fB\fC\-v /home/user/cassandra.yaml:/etc/opt/rh/sclo\-cassandra3/cassandra/cassandra.yaml:Z\fR\&. + +.PP +To configure multiple JVM options a \fB\fCjvm.options\fR file needs to be mounted into the container. For example to use +configuration file stored in \fB\fC/home/user\fR directory use this option for +\fB\fCdocker run\fR command: \fB\fC\-v /home/user/jvm.options:/etc/opt/rh/sclo\-cassandra3/cassandra/jvm.options:Z\fR\&. + +.SH Troubleshooting +.PP +The cassandra daemon in the container logs to the standard output, so the log is available in the container log. 
The log +can be examined by running: + +.PP +docker logs + +.SH See also +.PP +Dockerfile and other sources for this container image are available on +\[la]https://github.com/sclorg/cassandra-container\[ra]\&. +In that repository, Dockerfile for CentOS is called Dockerfile, Dockerfile for RHEL (Work\-in\-progress) is called Dockerfile.rhel7. diff --git a/root/usr/bin/run-cassandra b/root/usr/bin/run-cassandra index 6579e02..93de386 100755 --- a/root/usr/bin/run-cassandra +++ b/root/usr/bin/run-cassandra @@ -10,7 +10,7 @@ save_env_config_vars if is_authorization_on; then echo "Authorization already enabled for this container, not setting new admin password..." -elif test "$(ls -A "$HOME/cassandra" 2> /dev/null)"; then +elif test "$(ls -A "$HOME/data" 2> /dev/null)"; then # If the container is using a not empty provided volume *and* authorization if turned off, # don't try to set a new CASSANDRA_ADMIN_PASSWORD, just enable authorization echo "Using a populated data directory, CASSANDRA_ADMIN_PASSWORD already set" @@ -28,4 +28,4 @@ else fi fi -exec cassandra -f +exec cassandra -f -R diff --git a/root/usr/libexec/fix-permissions b/root/usr/libexec/fix-permissions new file mode 100755 index 0000000..f383d1b --- /dev/null +++ b/root/usr/libexec/fix-permissions @@ -0,0 +1,7 @@ +#!/bin/sh +# Fix permissions on the given directory to allow group read/write of +# regular files and execute of directories. +find $@ -exec chown cassandra {} \; +find $@ -exec chgrp 0 {} \; +find $@ -exec chmod g+rw {} \; +find $@ -type d -exec chmod g+x {} + diff --git a/root/usr/share/container-scripts/cassandra/README.md b/root/usr/share/container-scripts/cassandra/README.md index 89dd74a..31def4b 100644 --- a/root/usr/share/container-scripts/cassandra/README.md +++ b/root/usr/share/container-scripts/cassandra/README.md @@ -2,6 +2,8 @@ Cassandra container =================== This repository contains Dockerfiles for Cassandra images for general usage and OpenShift. 
+Currently only CentOS based image is available. The CentOS image is then available on +[Docker Hub](https://hub.docker.com/r/centos/cassandra-3-centos7/) as centos/cassandra-3-centos7. Description ----------- @@ -12,6 +14,17 @@ and provides access to content from Cassandra databases on behalf of the clients You can find more information on the Cassandra project from the project Web site (https://cassandra.apache.org/). +Usage +----- + +For this, we will assume that you are using the `centos/cassandra-3-centos7` image. +If you want to set only the mandatory environment variables and store the database +in the `/home/user/database` directory on the host filesystem, execute the following command: + +``` +$ docker run -d -e CASSANDRA_ADMIN_PASSWORD= -v /home/user/database:/var/opt/rh/sclo-cassandra3/lib/cassandra:Z centos/cassandra-3-centos7 +``` + Environment variables and Volumes --------------------------------- @@ -33,51 +46,37 @@ The following environment variables influence the Cassandra configuration file. | CASSANDRA_NUM_TOKENS | Defines the number of tokens randomly assigned to this node. | 256 | CASSANDRA_RPC_ADDRESS | The listen address for client connections. | ' ' | CASSANDRA_KEY_CACHE_SIZE_IN_MB | Maximum size of the key cache in memory. | ' ' -| CASSANDRA_CONCURRENT_READS | Allows operations to queue low enough in the stack so that the OS - and drives can reorder them. | 32 -| CASSANDRA_CONCURRENT_WRITES | Writes in Cassandra are rarely I/O bound, so the ideal number of - concurrent writes depends on the number of CPU cores on the node. - The recommended value is 8 × number_of_cpu_cores. | 32 +| CASSANDRA_CONCURRENT_READS | Allows operations to queue low enough in the stack so that the OS and drives can reorder them. | 32 +| CASSANDRA_CONCURRENT_WRITES | Writes in Cassandra are rarely I/O bound, so the ideal number of concurrent writes depends on the number of CPU cores on the node. The recommended value is 8 × number_of_cpu_cores. 
| 32 | CASSANDRA_MEMTABLE_ALLOCATION_TYPE | The method Cassandra uses to allocate and manage memtable memory. | 'heap_buffers' | CASSANDRA_MEMTABLE_CLEANUP_THRESHOLD | Ratio used for automatic memtable flush. | 0.5 | CASSANDRA_MEMTABLE_FLUSH_WRITERS | The number of memtable flush writer threads. | 1 -| CASSANDRA_CONCURRENT_COMPACTORS | Number of concurrent compaction processes allowed to run - simultaneously on a node. | ' ' +| CASSANDRA_CONCURRENT_COMPACTORS | Number of concurrent compaction processes allowed to run simultaneously on a node. | ' ' | CASSANDRA_COMPACTION_THROUGHPUT_MB_PER_SEC | Throttles compaction to the specified Mb/second across the instance. | 16 | CASSANDRA_COUNTER_CACHE_SIZE_IN_MB | Maximum size of the counter cache in memory. | ' ' | CASSANDRA_INTERNODE_COMPRESSION | Controls whether traffic between nodes is compressed. | all | CASSANDRA_GC_WARN_THRESHOLD_IN_MS | Any GC pause longer than this interval is logged at the WARN level. | 1000 -| CASSANDRA_AUTO_BOOTSTRAP | It causes new (non-seed) nodes migrate the right data to themselves - automatically. | true +| CASSANDRA_AUTO_BOOTSTRAP | It causes new (non-seed) nodes to migrate the right data to themselves automatically. | true -More deatils about each variable can be found at: http://docs.datastax.com/en/cassandra/3.0/cassandra/configuration/configCassandra_yaml.html +More details about each variable can be found at: http://docs.datastax.com/en/cassandra/3.0/cassandra/configuration/configCassandra_yaml.html You can also set the following mount points by passing the `-v /host:/container` flag to Docker. 
-| Volume mount point | Description | -| :----------------------- | ------------------------ | -| /var/lib/cassandra/data | Cassandra data directory | +| Volume mount point | Description | +| :----------------------------------------- | ------------------------ | +| /var/opt/rh/sclo-cassandra3/lib/cassandra | Cassandra data directory | **Notice: When mouting a directory from the host into the container, ensure that the mounted directory has the appropriate permissions and that the owner and group of the directory matches the user UID or name which is running inside the container.** -Usage ------ - -For this, we will assume that you are using the `fedora/cassandra-311` image. -If you want to set only the mandatory environment variables and store the database -in the `/home/user/database` directory on the host filesystem, execute the following command: - -``` -$ docker run -d -v /home/user/database:/var/lib/cassandra/data fedora/cassandra-311 -``` - Ports ----- -By default, Cassandra uses 7000 for cluster communication (7001 if SSL is enabled), 9042 for native protocol clients, and 7199 for JMX. The internode communication and native protocol ports are configurable in the Cassandra Configuration File (cassandra.yaml). The JMX port is configurable in cassandra-env.sh (through JVM options). All ports are TCP. +By default, Cassandra uses 7000 for cluster communication (7001 if SSL is enabled), 9042 for native protocol clients, +and 7199 for JMX. The internode communication and native protocol ports are configurable in the Cassandra Configuration +File (cassandra.yaml). The JMX port is configurable in cassandra-env.sh (through JVM options). All ports are TCP. Documentation @@ -89,9 +88,12 @@ See http://cassandra.apache.org/doc/latest/ Requirements ------------ -* Memory: For production 32 GB to 512 GB; the minimum is 8 GB for Cassandra nodes. For development in non-loading testing environments: no less than 4 GB. 
-* CPU: For production 16-core CPU processors are the current price-performance sweet spot. For development in non-loading testing environments: 2-core CPU processors are sufficient. -* Disk space: SSDs are recommended for Cassandra nodes. The size depends on the compaction strategy used. With SSDs, you can use a maximum of 3 to 5 TB per node of disk space for uncompressed data. +* Memory: For production 32 GB to 512 GB; the minimum is 8 GB for Cassandra nodes. For development in non-loading +testing environments: no less than 4 GB. +* CPU: For production 16-core CPU processors are the current price-performance sweet spot. For development in +non-loading testing environments: 2-core CPU processors are sufficient. +* Disk space: SSDs are recommended for Cassandra nodes. The size depends on the compaction strategy used. With SSDs, +you can use a maximum of 3 to 5 TB per node of disk space for uncompressed data. * Network: Recommended bandwidth is 1000 Mb/s (gigabit) or greater. More on hardware requirements on https://docs.datastax.com/en/landing_page/doc/landing_page/planning/planningHardware.html @@ -102,15 +104,20 @@ Custom configuration file It is allowed to use custom configuration files for cassandra server. -To use custom configuration file in container it has to be mounted into `/etc/cassandra/cassandra.yaml`. For example to use configuration file stored in `/home/user` directory use this option for `docker run` command: `-v /home/user/cassandra.yaml:/etc/cassandra/cassandra.yaml:Z`. +To use custom configuration file in container it has to be mounted into `/etc/opt/rh/sclo-cassandra3/cassandra/cassandra.yaml`. +For example to use configuration file stored in `/home/user` directory use this option for `docker run` command: +`-v /home/user/cassandra.yaml:/etc/opt/rh/sclo-cassandra3/cassandra/cassandra.yaml:Z`. -To configure multiple JVM options a `jvm.options` file needs to be mounted into the container. 
For example to use configuration file stored in `/home/user` directory use this option for `docker run` command: `-v /home/user/jvm.options:/etc/cassandra/jvm.options:Z`. +To configure multiple JVM options a `jvm.options` file needs to be mounted into the container. For example to use +configuration file stored in `/home/user` directory use this option for +`docker run` command: `-v /home/user/jvm.options:/etc/opt/rh/sclo-cassandra3/cassandra/jvm.options:Z`. Troubleshooting --------------- -The cassandra deamon in the container logs to the standard output, so the log is available in the container log. The log can be examined by running: +The cassandra daemon in the container logs to the standard output, so the log is available in the container log. The log +can be examined by running: docker logs @@ -118,4 +125,5 @@ docker logs See also -------- -The Dockerfile is using a cassandra rpm package from Fedora. More information is found at: https://admin.fedoraproject.org/pkgdb/package/rpms/cassandra/ +Dockerfile and other sources for this container image are available on https://github.com/sclorg/cassandra-container. +In that repository, Dockerfile for CentOS is called Dockerfile, Dockerfile for RHEL (Work-in-progress) is called Dockerfile.rhel7. 
diff --git a/root/usr/share/container-scripts/cassandra/common.sh b/root/usr/share/container-scripts/cassandra/common.sh index 7372948..3674798 100644 --- a/root/usr/share/container-scripts/cassandra/common.sh +++ b/root/usr/share/container-scripts/cassandra/common.sh @@ -1,8 +1,9 @@ #!/bin/bash set -o pipefail +set -ex -CASSANDRA_CONF_DIR="/etc/cassandra/" +#CASSANDRA_CONF_DIR="/etc/cassandra/" CASSANDRA_CONF_FILE="cassandra.yaml" HOSTNAME=$(cat /proc/sys/kernel/hostname) @@ -99,7 +100,7 @@ function create_admin_user() { # echo config changed # start cassandra with authentication - cassandra >/dev/null & + cassandra -R >/dev/null & # echo starting server # add admin super user with CASSANDRA_ADMIN_PASSWORD via the default super user diff --git a/root/usr/share/container-scripts/cassandra/scl_enable b/root/usr/share/container-scripts/cassandra/scl_enable new file mode 100644 index 0000000..5a25432 --- /dev/null +++ b/root/usr/share/container-scripts/cassandra/scl_enable @@ -0,0 +1,3 @@ +# This will make scl collection binaries work out of box. +unset BASH_ENV PROMPT_COMMAND ENV +source scl_source enable ${ENABLED_COLLECTIONS} diff --git a/s2i/bin/assemble b/s2i/bin/assemble new file mode 100755 index 0000000..c1472bd --- /dev/null +++ b/s2i/bin/assemble @@ -0,0 +1,14 @@ +#!/bin/bash + +set -o errexit +set -o nounset +set -o pipefail + +shopt -s dotglob +echo "---> Installing application source ..." 
+ + +mv /tmp/src/* ./ + +# Fix source directory permissions +/usr/libexec/fix-permissions ./ diff --git a/s2i/bin/usage b/s2i/bin/usage new file mode 100755 index 0000000..9f41312 --- /dev/null +++ b/s2i/bin/usage @@ -0,0 +1 @@ +groff -t -man -ETascii /help.1 diff --git a/sources b/sources deleted file mode 100644 index e69de29..0000000 --- a/sources +++ /dev/null diff --git a/test/cassandra-ephemeral-template.json b/test/cassandra-ephemeral-template.json new file mode 100644 index 0000000..63f664b --- /dev/null +++ b/test/cassandra-ephemeral-template.json @@ -0,0 +1,216 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "name": "cassandra-ephemeral", + "annotations": { + "openshift.io/display-name": "Cassandra (Ephemeral)", + "description": "Cassandra database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/cassandra-container/blob/master/3.11/root/usr/share/container-scripts/cassandra/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing", + "iconClass": "icon-cassandra", + "tags": "database,cassandra", + "openshift.io/long-description": "This template provides a standalone Cassandra server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. 
The database admin password are chosen via parameters when provisioning this service.", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "openshift.io/documentation-url": "https://github.com/sclorg/cassandra-container/blob/master/3.11/root/usr/share/container-scripts/cassandra/README.md", + "openshift.io/support-url": "https://access.redhat.com" + } + }, + "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Database Name: ${CASSANDRA_DATABASE}\n Connection URL: cql://${DATABASE_SERVICE_NAME}:9042/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/cassandra-container/blob/master/3.11/root/usr/share/container-scripts/cassandra/README.md.", + "labels": { + "template": "cassandra-ephemeral-template" + }, + "objects": [ + { + "kind": "Secret", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-admin_password": "{.data['database-admin-password']}", + "template.openshift.io/expose-database_name": "{.data['database-name']}" + } + }, + "stringData" : { + "database-admin-password" : "${CASSANDRA_ADMIN_PASSWORD}", + "database-name" : "${CASSANDRA_DATABASE}" + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-uri": "cql://{.spec.clusterIP}:{.spec.ports[?(.name==\"cassandra\")].port}" + } + }, + "spec": { + "ports": [ + { + "name": "cassandra", + "port": 9042 + } + ], + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.alpha.openshift.io/wait-for-ready": "true" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + 
"containerNames": [ + "cassandra" + ], + "from": { + "kind": "ImageStreamTag", + "name": "cassandra:${CASSANDRA_VERSION}", + "namespace": "${NAMESPACE}" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${DATABASE_SERVICE_NAME}" + }, + "template": { + "metadata": { + "labels": { + "name": "${DATABASE_SERVICE_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "cassandra", + "image": " ", + "ports": [ + { + "containerPort": 9042 + } + ], + "readinessProbe": { + "timeoutSeconds": 5, + "initialDelaySeconds": 120, + "exec": { + "command": [ "/bin/sh", "-i", "-c", + "cqlsh `hostname -I` -u admin -p $CASSANDRA_ADMIN_PASSWORD -e 'SELECT cluster_name FROM system.local; exit'"] + } + }, + "livenessProbe": { + "timeoutSeconds": 5, + "initialDelaySeconds": 160, + "tcpSocket": { + "port": 9042 + } + }, + "env": [ + { + "name": "CASSANDRA_ADMIN_PASSWORD", + "valueFrom": { + "secretKeyRef" : { + "name" : "${DATABASE_SERVICE_NAME}", + "key" : "database-admin-password" + } + } + }, + { + "name": "CASSANDRA_DATABASE", + "valueFrom": { + "secretKeyRef" : { + "name" : "${DATABASE_SERVICE_NAME}", + "key" : "database-name" + } + } + } + ], + "resources": { + "limits": { + "memory": "${MEMORY_LIMIT}" + } + }, + "volumeMounts": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "mountPath": "/var/opt/rh/sclo-cassandra3/lib/cassandra" + } + ], + "imagePullPolicy": "IfNotPresent" + } + ], + "volumes": [ + { + "name": "${DATABASE_SERVICE_NAME}-data", + "emptyDir": { + "medium": "" + } + } + ] + } + } + } + } + ], + "parameters": [ + { + "name": "MEMORY_LIMIT", + "displayName": "Memory Limit", + "description": "Maximum amount of memory the container can use.", + "value": "512Mi", + "required": true + }, + { + "name": "NAMESPACE", + "displayName": "Namespace", + "description": "The OpenShift Namespace where the ImageStream resides.", + "value": "openshift" + }, + { + "name": "DATABASE_SERVICE_NAME", + "displayName": "Database Service Name", 
+ "description": "The name of the OpenShift Service exposed for the database.", + "value": "cassandra", + "required": true + }, + { + "name": "CASSANDRA_ADMIN_PASSWORD", + "displayName": "Cassandra admin Password", + "description": "Password for the Cassandra admin user.", + "generate": "expression", + "from": "[a-zA-Z0-9]{16}", + "required": true + }, + { + "name": "CASSANDRA_DATABASE", + "displayName": "Cassandra Database Name", + "description": "Name of the Cassandra database accessed.", + "value": "sampledb", + "required": true + }, + { + "name": "CASSANDRA_VERSION", + "displayName": "Version of Cassandra Image", + "description": "Version of Cassandra image to be used (3.11 or latest).", + "value": "3.11", + "required": true + } + ] +} diff --git a/test/common b/test/common deleted file mode 100644 index 54cd9d1..0000000 --- a/test/common +++ /dev/null @@ -1,109 +0,0 @@ -# -# Test a container image. -# -# Always use sourced from a specific container testfile -# -# reguires definition of CID_FILE_DIR -# CID_FILE_DIR=$(mktemp --suffix=_test_cidfiles -d) - -# may be redefined in the specific container testfile -EXPECTED_EXIT_CODE=0 - -function cleanup() { - for cid_file in $CID_FILE_DIR/* ; do - CONTAINER=$(cat $cid_file) - - : "Stopping and removing container $CONTAINER..." - docker stop $CONTAINER - exit_status=$(docker inspect -f '{{.State.ExitCode}}' $CONTAINER) - if [ "$exit_status" != "$EXPECTED_EXIT_CODE" ]; then - : "Dumping logs for $CONTAINER" - docker logs $CONTAINER - fi - docker rm $CONTAINER - rm $cid_file - done - rmdir $CID_FILE_DIR - : "Done." 
-} -trap cleanup EXIT SIGINT - -function get_cid() { - local name="$1" ; shift || return 1 - echo $(cat "$CID_FILE_DIR/$name") -} - -function get_container_ip() { - local id="$1" ; shift - docker inspect --format='{{.NetworkSettings.IPAddress}}' $(get_cid "$id") -} - -function wait_for_cid() { - local max_attempts=10 - local sleep_time=1 - local attempt=1 - local result=1 - while [ $attempt -le $max_attempts ]; do - [ -f $cid_file ] && [ -s $cid_file ] && break - : "Waiting for container start..." - attempt=$(( $attempt + 1 )) - sleep $sleep_time - done -} - -# Make sure the invocation of docker run fails. -function assert_container_creation_fails() { - - # Time the docker run command. It should fail. If it doesn't fail, - # container will keep running so we kill it with SIGKILL to make sure - # timeout returns a non-zero value. - set +e - timeout -s SIGTERM --preserve-status 10s docker run --rm "$@" $IMAGE_NAME - ret=$? - set -e - - # Timeout will exit with a high number. - if [ $ret -gt 128 ]; then - return 1 - fi -} - -# to pass some arguments you need to specify CONTAINER_ARGS variable -function create_container() { - cid_file="$CID_FILE_DIR/$1" ; shift - # create container with a cidfile in a directory for cleanup - docker run ${CONTAINER_ARGS:-} --cidfile="$cid_file" -d $IMAGE_NAME "$@" - : "Created container $(cat $cid_file)" - wait_for_cid -} - -function run_doc_test() { - local tmpdir=$(mktemp -d) - local f - : " Testing documentation in the container image" - # Extract the help files from the container - for f in help.1 ; do - docker run --rm ${IMAGE_NAME} /bin/bash -c "cat /${f}" >${tmpdir}/$(basename ${f}) - # Check whether the files include some important information - for term in $@ ; do - if ! cat ${tmpdir}/$(basename ${f}) | grep -F -q -e "${term}" ; then - echo "ERROR: File /${f} does not include '${term}'." - return 1 - fi - done - done - # Check whether the files use the correct format - if ! 
file ${tmpdir}/help.1 | grep -q roff ; then - echo "ERROR: /help.1 is not in troff or groff format" - return 1 - fi - : " Success!" -} - -function run_all_tests() { - for test_case in $TEST_LIST; do - : "Running test $test_case" - $test_case - done; -} - diff --git a/test/ctest_configuration b/test/ctest_configuration new file mode 100644 index 0000000..ebac953 --- /dev/null +++ b/test/ctest_configuration @@ -0,0 +1,8 @@ +# +# Container configuration test for Cassandra image. +# +# Always use sourced from a specific container testfile + +function ctest_configuration() { + echo " Success!" +} diff --git a/test/ctest_container_creation b/test/ctest_container_creation new file mode 100644 index 0000000..4ce5319 --- /dev/null +++ b/test/ctest_container_creation @@ -0,0 +1,14 @@ +# +# Container creation test for Cassandra image. +# +# Always use sourced from a specific container testfile + +function ctest_container_creation() { + #echo " Testing wrong user variables usage" + #ct_assert_container_creation_fails -e CASSANDRA_CLUSTER_NAME=Test + #echo " Success!" + + echo " Testing good user variables usage" + ct_assert_container_creation_fails -e CASSANDRA_ADMIN_PASSWORD=r00t || [ $? -eq 1 ] + echo " Success!" +} diff --git a/test/ctest_doc_content b/test/ctest_doc_content new file mode 100644 index 0000000..16a09b9 --- /dev/null +++ b/test/ctest_doc_content @@ -0,0 +1,10 @@ +# +# Documentation content test for Cassandra image. +# +# Always use sourced from a specific container testfile + +function ctest_doc_content() { + : " Testing documentation content" + ct_doc_content_old CASSANDRA\\_ADMIN\\_PASSWORD cluster + : " Success!" +} diff --git a/test/ctest_general b/test/ctest_general new file mode 100644 index 0000000..1c28402 --- /dev/null +++ b/test/ctest_general @@ -0,0 +1,10 @@ +# +# General test for Cassandra image. 
+# +# Always use sourced from a specific container testfile + +function ctest_general() { + USER="admin" PASS="r00t" test_general admin + # Test with random uid in container + CONTAINER_ARGS="-u 12345" USER="admin" PASS="r00t" test_general admin_altuid +} diff --git a/test/ctest_multi_node b/test/ctest_multi_node new file mode 100644 index 0000000..feaed38 --- /dev/null +++ b/test/ctest_multi_node @@ -0,0 +1,36 @@ +# +# Container multi node test for Cassandra image. +# +# Always use sourced from a specific container testfile + +function ctest_multi_node() { + # Run the Cassandra node 1 + USER="admin" + PASS="r00t" + + # Create first node + #CONTAINER_ARGS="-e CASSANDRA_ADMIN_PASSWORD=$PASS -e CASSANDRA_AUTO_BOOTSTRAP=false -e CASSANDRA_SEEDS=127.0.0.1,172.17.0.3" + CONTAINER_ARGS="-e CASSANDRA_ADMIN_PASSWORD=$PASS -e CASSANDRA_AUTO_BOOTSTRAP=false" + ct_create_container node1.cid + local node1_ip + node1_ip=$(ct_get_cip node1.cid) + sleep 20 + test_connection node1.cid + + # Create second node + CONTAINER_ARGS="-e CASSANDRA_ADMIN_PASSWORD=$PASS -e CASSANDRA_AUTO_BOOTSTRAP=false -e CASSANDRA_SEEDS=127.0.0.1,$node1_ip" + ct_create_container node2.cid + local node2_ip + node2_ip=$(ct_get_cip node2.cid) + sleep 20 + test_connection node2.cid + + # Give some time to Cassandra nodes start + sleep 10 + + # Test nodetool + docker exec $(ct_get_cid "node1.cid") bash -c 'nodetool status' + docker exec $(ct_get_cid "node2.cid") bash -c 'nodetool status' + + echo " Success!" +} diff --git a/test/lib b/test/lib new file mode 100644 index 0000000..5aab708 --- /dev/null +++ b/test/lib @@ -0,0 +1,57 @@ +# +# Library of shared functions for Cassandra image tests. +# +# Always use sourced from a specific container testfile + +test -n "${IMAGE_NAME-}" || false 'make sure $IMAGE_NAME is defined' +test -n "${VERSION-}" || false 'make sure $VERSION is defined' + +# get_version_number [version_string] +# -------------------- +# Extracts the version number from provided version string. 
+# e.g. 3.0upg => 3.0 +function get_version_number() { + echo $1 | sed -e 's/^\([0-9.]*\).*/\1/' +} + +function cassandra_cmd() { + docker run --rm $IMAGE_NAME cqlsh $CONTAINER_IP -u "$USER" -p "$PASS" -e "${@}" +} + +function test_connection() { + local name=$1 ; shift + CONTAINER_IP=$(ct_get_cip $name) + echo " Testing Cassandra connection to $CONTAINER_IP..." + local max_attempts=30 + local sleep_time=2 + for i in $(seq $max_attempts); do + echo " Trying to connect..." + set +e + cassandra_cmd "show version" + status=$? + set -e + if [ $status -eq 0 ]; then + sleep $sleep_time + cassandra_cmd "show version" || continue + echo " Success!" + return 0 + fi + sleep $sleep_time + done + echo " Giving up: Failed to connect. Logs:" + docker logs $(ct_get_cid $name) + return 1 +} + +function test_general() { + local name=$1 ; shift + CONTAINER_ARGS="-e CASSANDRA_ADMIN_PASSWORD=$PASS" + ct_create_container $name + CONTAINER_IP=$(ct_get_cip $name) + test_connection $name + echo " Testing scl usage" + ct_scl_usage_old $name 'cassandra -v' $(get_version_number $VERSION) + test_cassandra $name + cid=$(ct_get_cid $name) + docker stop $cid +} diff --git a/test/run b/test/run index 0067ce5..3f2579b 100755 --- a/test/run +++ b/test/run @@ -9,199 +9,42 @@ set -exo nounset shopt -s nullglob -IMAGE_NAME=${IMAGE_NAME-mycass} - -source test/common +. test/test-lib.sh +. 
test/lib +# Disabled test: ctest_multi_node TEST_LIST="\ -run_container_creation_tests -run_configuration_tests -run_general_tests -run_mount_config_test" -# the change password tests does not work or make sense in cassandra -#run_change_password_test -# the doc test is not working yet -#run_doc_test CASSANDRA_ADMIN_PASSWORD volume 9042" - -test $# -eq 1 -a "${1-}" == --list && echo "$TEST_LIST" && exit 0 -test -n "${IMAGE_NAME-}" || false 'make sure $IMAGE_NAME is defined' - -CID_FILE_DIR=$(mktemp --suffix=cassandra_test_cidfiles -d) - -# used in cleanup function -EXPECTED_EXIT_CODE=143 - -function cqlsh_cmd() { - docker run --rm "$IMAGE_NAME" cqlsh "$@" -} - -function remove_container() { - local name="$1" ; shift - CONTAINER=$(get_cid ${name}) - : "Stopping and removing container $CONTAINER..." - docker exec $CONTAINER nodetool stopdaemon || [ "$?" == "137" ] - while [ "$(docker inspect -f '{{.State.Running}}' $CONTAINER)" == "true" ] ; do - sleep 2 - done - exit_status=$(docker inspect -f '{{.State.ExitCode}}' $CONTAINER) - if [ "$exit_status" != "0" ]; then - : "Dumping logs for $CONTAINER" - docker logs $CONTAINER | tail - fi - docker rm $CONTAINER - rm $CID_FILE_DIR/$name - : "Removed." -} - -function test_config_option() { - local setting=$1 ; shift - local value=$1 ; shift - local name="configuration_${setting}" - - CONTAINER_ARGS=" --e CASSANDRA_${setting^^}=${value} +ctest_container_creation +ctest_configuration +ctest_general +ctest_doc_content " - create_container $name - - test_connection $name - - # If nothing is found, grep returns 1 and test fails. - docker exec $(get_cid ${name}) bash -c "cat /etc/cassandra/cassandra.yaml | grep -q ${setting}:\ ${value}" - - remove_container $name -} - -function test_connection() { - local name="$1" ; shift - local ip=$(get_container_ip $name) - : " Testing cqlsh connection to $ip..." - local max_attempts=20 - local sleep_time=2 - local i - for i in $(seq $max_attempts); do - : " Trying to connect..." 
- if cqlsh_cmd "$ip" "$@" <<< 'exit'; then - : " Success!" - return 0 - fi - sleep $sleep_time - done - echo "ERROR: Giving up, failed to connect." - return 1 -} - -function test_general() { - : " Testing general usage ('$1')" - local name=$1 ; shift - create_container $name +if [ -e "${IMAGE_NAME:-}" ] ; then + echo "Error: IMAGE_NAME must be specified" + exit 1 +fi - test_connection $name - test_cqlsh $name - - remove_container $name -} - -function test_cqlsh() { - local name="$1" ; shift - local ip=$(get_container_ip $name) - : " Testing basic cqlsh commands" - cqlsh_cmd "$ip" <<< 'CREATE KEYSPACE cycling WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };' - cqlsh_cmd "$ip" <<< 'CREATE TABLE cycling.cyclist_name ( id UUID, fname text, lname text, PRIMARY KEY (id));' - cqlsh_cmd "$ip" <<< 'INSERT INTO cycling.cyclist_name (id, fname, lname) VALUES (7562c0f3-2f6c-41da-b276-88abac471eaf, 'john', 'smith');' - cqlsh_cmd "$ip" <<< 'INSERT INTO cycling.cyclist_name (id, fname, lname) VALUES (e69be414-f7eb-4e5a-b635-446ee5849810, 'john', 'doe');' - cqlsh_cmd "$ip" <<< 'INSERT INTO cycling.cyclist_name (id, fname, lname) VALUES (d4aa012a-8d71-4189-bcc8-24a858b713b6, 'john', 'smith');' - cqlsh_cmd "$ip" <<< 'SELECT * FROM cycling.cyclist_name WHERE lname = 'smith' ALLOW FILTERING;' - cqlsh_cmd "$ip" <<< 'DROP TABLE cycling.cyclist_name;' - cqlsh_cmd "$ip" <<< 'DROP KEYSPACE cycling;' - : " Success!" -} - -function run_container_creation_tests() { - -# there are no invalid combinations of variables in cassandra yet -# : " Testing invalid combinations of variables" -# assert_container_creation_fails -# : " Success!" - - : " Testing invalid values of variables" - assert_container_creation_fails -e CASSANDRA_CLUSTER_NAME=cool cluster - : " Success!" 
-} - -function run_configuration_tests() { - : " Testing image configuration settings" - test_config_option num_tokens 256 - # not allowing to have a space character in the cluster name - test_config_option cluster_name cool_cluster - : " Success!" -} +CID_FILE_DIR=$(mktemp --suffix=cassandra_test_cidfiles -d) -function run_general_tests() { - CONTAINER_ARGS= test_general no_admin - # Test with arbitrary uid for the container - # the permissions are not set for arbitrary user to run the container - #CONTAINER_ARGS="-u 12345" run_tests no_admin_altuid -} +TEST_DIR="$(readlink -f $(dirname "${BASH_SOURCE[0]}"))" -function run_mount_config_test() { - local name="mount_config" - : " Testing config file mount" - local tmpdir=$(mktemp -d) - chmod a+rwx $tmpdir - config_file=${tmpdir}/cassandra.yaml - echo 'cluster_name: cool_cluster -commitlog_sync: periodic -commitlog_sync_period_in_ms: 10000 -partitioner: org.apache.cassandra.dht.Murmur3Partitioner -endpoint_snitch: SimpleSnitch -start_native_transport: true -seed_provider: - - class_name: org.apache.cassandra.locator.SimpleSeedProvider - parameters: - - seeds: "172.17.0.2"' > $config_file - chmod a+r ${config_file} - CONTAINER_ARGS=" --v ${config_file}:/etc/cassandra/cassandra.yaml:Z -" - create_container $name - # need this to wait for the container to start up - test_connection $name - : " Testing if mounted config file works" - docker exec $(get_cid ${name}) nodetool describecluster | grep -q Name:\ cool_cluster - rm -r $tmpdir - : " Success!" 
+function cleanup() { + ct_cleanup } +trap cleanup EXIT SIGINT -function run_change_password_test() { - local name="change_password" - local admin_password='adminPassword' - local volume_dir - volume_dir=`mktemp -d --tmpdir cassandra-testdata.XXXXX` - chmod a+rwx ${volume_dir} - CONTAINER_ARGS=" --e CASSANDRA_ADMIN_PASSWORD=${admin_password} --v ${volume_dir}:/var/lib/cassandra/data:Z -" - create_container $name - # need this to wait for the container to start up - test_connection $name -u admin -p ${admin_password} - - echo " Changing passwords" - docker stop $(get_cid ${name}) - CONTAINER_ARGS=" --e CASSANDRA_ADMIN_PASSWORD=NEW_${admin_password} --v ${volume_dir}:/var/lib/cassandra/data:Z -" - create_container "${name}_NEW" - # need this to wait for the container to start up - test_connection "${name}_NEW" -u admin -p NEW_${admin_password} - # need to remove volume_dir with sudo because of permissions of files written - # by the Docker container - sudo rm -rf ${volume_dir} - echo " Success!" +function test_cassandra() { + echo " Testing Cassandra" + cassandra_cmd "CREATE KEYSPACE IF NOT EXISTS k1 WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = true;" + cassandra_cmd "USE K1; CREATE TABLE IF NOT EXISTS person (id text, name text, surname text, email text, PRIMARY KEY (id));" + cassandra_cmd "USE K1; INSERT INTO person (id, name, surname, email) VALUES ('003', 'Harry', 'Potter', 'harry@example.com');" + cassandra_cmd "USE K1; SELECT email FROM person WHERE id='003';" + cassandra_cmd "USE K1; DELETE FROM person WHERE id='003';" + cassandra_cmd "USE K1; DROP TABLE person" + echo " Success!" } # Run the chosen tests -TEST_LIST=${@:-$TEST_LIST} run_all_tests +TEST_LIST=${@:-$TEST_LIST} ct_run_test_list diff --git a/test/run-openshift b/test/run-openshift new file mode 100755 index 0000000..ee52c59 --- /dev/null +++ b/test/run-openshift @@ -0,0 +1,99 @@ +#!/bin/bash +# +# Test the Cassandra image in OpenShift. 
+# +# IMAGE_NAME specifies a name of the candidate image used for testing. +# The image has to be available before this script is executed. +# + +THISDIR=$(dirname ${BASH_SOURCE[0]}) + +source ${THISDIR}/test-lib-openshift.sh + +set -exo nounset + +test -n "${IMAGE_NAME-}" || false 'make sure $IMAGE_NAME is defined' +test -n "${VERSION-}" || false 'make sure $VERSION is defined' + +ct_os_enable_print_logs + +function check_cassandra_os_service_connection() { + local util_image_name="${1}" ; shift + local service_name="${1}" ; shift + local user="${1}" ; shift + local pass="${1}" ; shift + local timeout="${1:-120}" ; shift || : + local pod_ip=$(ct_os_get_service_ip ${service_name}) + + : " Service ${service_name} check ..." + + local cmd="echo 'SELECT cluster_name FROM system.local; exit' | cqlsh --connect-timeout=15 ${pod_ip} -u${user} -p${pass}" + local expected_value='Test Cluster' + local output + local ret + SECONDS=0 + + echo -n "Waiting for ${service_name} service becoming ready ..." + while true ; do + output=$(docker run --rm ${util_image_name} bash -c "${cmd}" || :) + echo "${output}" | grep -qe "${expected_value}" && ret=0 || ret=1 + if [ ${ret} -eq 0 ] ; then + echo " PASS" + return 0 + fi + echo -n "." 
+ [ ${SECONDS} -gt ${timeout} ] && break + sleep 3 + done + echo " FAIL" + return 1 +} + +function test_cassandra_pure_image() { + local image_name=$1 + local image_name_no_namespace=${image_name##*/} + local service_name=${image_name_no_namespace} + + ct_os_new_project + # Create a specific imagestream tag for the image so that oc cannot use anything else + ct_os_upload_image "${image_name}" "$image_name_no_namespace:testing" + + ct_os_deploy_pure_image "${image_name_no_namespace}:testing" \ + --name "${service_name}" \ + --env CASSANDRA_ADMIN_PASSWORD=r00t MAX_HEAP_SIZE="2G" HEAP_NEWSIZE="200M" + + sleep 60 + ct_os_wait_pod_ready "${service_name}" 180 + check_cassandra_os_service_connection "${image_name}" "${service_name}" admin r00t + + ct_os_delete_project +} + +function test_cassandra_template() { + local image_name=$1 + local image_name_no_namespace=${image_name##*/} + local service_name=${image_name_no_namespace} + + ct_os_new_project + ct_os_upload_image "${image_name}" "cassandra:$VERSION" + + ct_os_deploy_template_image ${THISDIR}/cassandra-ephemeral-template.json \ + NAMESPACE="$(oc project -q)" \ + MEMORY_LIMIT=3Gi \ + CASSANDRA_VERSION="$VERSION" \ + DATABASE_SERVICE_NAME="${service_name}" \ + CASSANDRA_ADMIN_PASSWORD=testdb + + sleep 60 + ct_os_wait_pod_ready "${service_name}" 180 + + check_cassandra_os_service_connection "${image_name}" "${service_name}" admin testdb + + ct_os_delete_project +} + +ct_os_cluster_up +test_cassandra_pure_image ${IMAGE_NAME} +test_cassandra_template ${IMAGE_NAME} +#test_cassandra_s2i ${IMAGE_NAME} "https://github.com/hhorak/cassandra-container.git#s2i-support-3" test/test-app +ct_os_cluster_down diff --git a/test/test-app/cassandra-data/init.cql b/test/test-app/cassandra-data/init.cql new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/test/test-app/cassandra-data/init.cql diff --git a/test/test-lib-openshift.sh b/test/test-lib-openshift.sh new file mode 100644 index 0000000..f988ac5 --- /dev/null +++ 
b/test/test-lib-openshift.sh @@ -0,0 +1,925 @@ +# Set of functions for testing docker images in OpenShift using 'oc' command + +# ct_os_get_status +# -------------------- +# Returns status of all objects to make debugging easier. +function ct_os_get_status() { + oc get all + oc status +} + +# ct_os_print_logs +# -------------------- +# Returns status of all objects and logs from all pods. +function ct_os_print_logs() { + ct_os_get_status + while read pod_name; do + echo "INFO: printing logs for pod ${pod_name}" + oc logs ${pod_name} + done < <(oc get pods --no-headers=true -o custom-columns=NAME:.metadata.name) +} + +# ct_os_enable_print_logs +# -------------------- +# Enables automatic printing of pod logs on ERR. +function ct_os_enable_print_logs() { + set -E + trap ct_os_print_logs ERR +} + +# ct_get_public_ip +# -------------------- +# Returns best guess for the IP that the node is accessible from other computers. +# This is a bit funny heuristic, simply goes through all IPv4 addresses that +# hostname -I returns and de-prioritizes IP addresses commonly used for local +# addressing. The rest of addresses are taken as public with higher probability. +function ct_get_public_ip() { + local hostnames=$(hostname -I) + local public_ip='' + local found_ip + for guess_exp in '127\.0\.0\.1' '192\.168\.[0-9\.]*' '172\.[0-9\.]*' \ + '10\.[0-9\.]*' '[0-9\.]*' ; do + found_ip=$(echo "${hostnames}" | grep -oe "${guess_exp}") + if [ -n "${found_ip}" ] ; then + hostnames=$(echo "${hostnames}" | sed -e "s/${found_ip}//") + public_ip="${found_ip}" + fi + done + if [ -z "${public_ip}" ] ; then + echo "ERROR: public IP could not be guessed." >&2 + return 1 + fi + echo "${public_ip}" +} + +# ct_os_run_in_pod POD_NAME CMD +# -------------------- +# Runs [cmd] in the pod specified by prefix [pod_prefix]. 
+# Arguments: pod_name - full name of the pod +# Arguments: cmd - command to be run in the pod +function ct_os_run_in_pod() { + local pod_name="$1" ; shift + + oc exec "$pod_name" -- "$@" +} + +# ct_os_get_service_ip SERVICE_NAME +# -------------------- +# Returns IP of the service specified by [service_name]. +# Arguments: service_name - name of the service +function ct_os_get_service_ip() { + local service_name="${1}" ; shift + oc get "svc/${service_name}" -o yaml | grep clusterIP | \ + cut -d':' -f2 | grep -oe '172\.30\.[0-9\.]*' +} + + +# ct_os_get_all_pods_status +# -------------------- +# Returns status of all pods. +function ct_os_get_all_pods_status() { + oc get pods -o custom-columns=Ready:status.containerStatuses[0].ready,NAME:.metadata.name +} + +# ct_os_get_all_pods_name +# -------------------- +# Returns the full name of all pods. +function ct_os_get_all_pods_name() { + oc get pods --no-headers -o custom-columns=NAME:.metadata.name +} + +# ct_os_get_pod_status POD_PREFIX +# -------------------- +# Returns status of the pod specified by prefix [pod_prefix]. +# Note: Ignores -build and -deploy pods +# Arguments: pod_prefix - prefix or whole ID of the pod +function ct_os_get_pod_status() { + local pod_prefix="${1}" ; shift + ct_os_get_all_pods_status | grep -e "${pod_prefix}" | grep -Ev "(build|deploy)$" \ + | awk '{print $1}' | head -n 1 +} + +# ct_os_get_pod_name POD_PREFIX +# -------------------- +# Returns the full name of pods specified by prefix [pod_prefix]. +# Note: Ignores -build and -deploy pods +# Arguments: pod_prefix - prefix or whole ID of the pod +function ct_os_get_pod_name() { + local pod_prefix="${1}" ; shift + ct_os_get_all_pods_name | grep -e "^${pod_prefix}" | grep -Ev "(build|deploy)$" +} + +# ct_os_get_pod_ip POD_NAME +# -------------------- +# Returns the ip of the pod specified by [pod_name]. 
+# Arguments: pod_name - full name of the pod
+function ct_os_get_pod_ip() {
+  local pod_name="${1}"
+  oc get pod "$pod_name" --no-headers -o custom-columns=IP:status.podIP
+}
+
+# ct_os_check_pod_readiness POD_PREFIX STATUS
+# --------------------
+# Checks whether the pod is ready.
+# Arguments: pod_prefix - prefix or whole ID of the pod
+# Arguments: status - expected status (true, false)
+function ct_os_check_pod_readiness() {
+  local pod_prefix="${1}" ; shift
+  local status="${1}" ; shift
+  test "$(ct_os_get_pod_status ${pod_prefix})" == "${status}"
+}
+
+# ct_os_wait_pod_ready POD_PREFIX TIMEOUT
+# --------------------
+# Wait maximum [timeout] seconds for the pod becoming ready.
+# Arguments: pod_prefix - prefix or whole ID of the pod
+# Arguments: timeout - how many seconds to wait
+function ct_os_wait_pod_ready() {
+  local pod_prefix="${1}" ; shift
+  local timeout="${1}" ; shift
+  SECONDS=0
+  echo -n "Waiting for ${pod_prefix} pod becoming ready ..."
+  while ! ct_os_check_pod_readiness "${pod_prefix}" "true" ; do
+    echo -n "."
+    [ ${SECONDS} -gt ${timeout} ] && echo " FAIL" && return 1
+    sleep 3
+  done
+  echo " DONE"
+}
+
+# ct_os_wait_rc_ready POD_PREFIX TIMEOUT
+# --------------------
+# Wait maximum [timeout] seconds for the rc having the desired number of replicas ready.
+# Arguments: pod_prefix - prefix of the replication controller
+# Arguments: timeout - how many seconds to wait
+function ct_os_wait_rc_ready() {
+  local pod_prefix="${1}" ; shift
+  local timeout="${1}" ; shift
+  SECONDS=0
+  echo -n "Waiting for ${pod_prefix} pod becoming ready ..."
+  while ! test "$((oc get --no-headers statefulsets; oc get --no-headers rc) 2>/dev/null \
+    | grep "^${pod_prefix}" | awk '$2==$3 {print "ready"}')" == "ready" ; do
+    echo -n "."
+    [ ${SECONDS} -gt ${timeout} ] && echo " FAIL" && return 1
+    sleep 3
+  done
+  echo " DONE"
+}
+
+# ct_os_deploy_pure_image IMAGE [ENV_PARAMS, ...] 
+# -------------------- +# Runs [image] in the openshift and optionally specifies env_params +# as environment variables to the image. +# Arguments: image - prefix or whole ID of the pod to run the cmd in +# Arguments: env_params - environment variables parameters for the images. +function ct_os_deploy_pure_image() { + local image="${1}" ; shift + # ignore error exit code, because oc new-app returns error when image exists + oc new-app ${image} "$@" || : + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# ct_os_deploy_s2i_image IMAGE APP [ENV_PARAMS, ... ] +# -------------------- +# Runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. +# Arguments: image - prefix or whole ID of the pod to run the cmd in +# Arguments: app - url or local path to git repo with the application sources. +# Arguments: env_params - environment variables parameters for the images. +function ct_os_deploy_s2i_image() { + local image="${1}" ; shift + local app="${1}" ; shift + # ignore error exit code, because oc new-app returns error when image exists + oc new-app "${image}~${app}" "$@" || : + + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# ct_os_deploy_template_image TEMPLATE [ENV_PARAMS, ...] +# -------------------- +# Runs template in the openshift and optionally gives env_params to use +# specific values in the template. +# Arguments: template - prefix or whole ID of the pod to run the cmd in +# Arguments: env_params - environment variables parameters for the template. 
+# Example usage: ct_os_deploy_template_image mariadb-ephemeral-template.yaml \
+#                    DATABASE_SERVICE_NAME=mysql-57-centos7 \
+#                    DATABASE_IMAGE=mysql-57-centos7 \
+#                    MYSQL_USER=testu \
+#                    MYSQL_PASSWORD=testp \
+#                    MYSQL_DATABASE=testdb
+function ct_os_deploy_template_image() {
+  local template="${1}" ; shift
+  oc process -f "${template}" "$@" | oc create -f -
+  # let openshift cluster to sync to avoid some race condition errors
+  sleep 3
+}
+
+# _ct_os_get_uniq_project_name
+# --------------------
+# Returns a unique name for an OpenShift project.
+function _ct_os_get_uniq_project_name() {
+  local r
+  while true ; do
+    r=${RANDOM}
+    mkdir /var/tmp/sclorg-test-${r} &>/dev/null && echo sclorg-test-${r} && break
+  done
+}
+
+# ct_os_new_project [PROJECT]
+# --------------------
+# Creates a new project in OpenShift using the 'oc' command.
+# Arguments: project - project name, uses a new random name if omitted
+# Expects the 'oc' command to be properly logged in to the OpenShift cluster.
+# Not using mktemp, because we cannot use uppercase characters.
+function ct_os_new_project() {
+  if [ "${CT_SKIP_NEW_PROJECT:-false}" == 'true' ] ; then
+    echo "Creating project skipped."
+    return
+  fi
+  local project_name="${1:-$(_ct_os_get_uniq_project_name)}" ; shift || :
+  oc new-project ${project_name}
+  # let openshift cluster to sync to avoid some race condition errors
+  sleep 3
+}
+
+# ct_os_delete_project [PROJECT]
+# --------------------
+# Deletes the specified project in OpenShift.
+# Arguments: project - project name, uses the current project if omitted
+function ct_os_delete_project() {
+  if [ "${CT_SKIP_NEW_PROJECT:-false}" == 'true' ] ; then
+    echo "Deleting project skipped, cleaning objects only."
+    ct_delete_all_objects
+    return
+  fi
+  local project_name="${1:-$(oc project -q)}" ; shift || :
+  oc delete project "${project_name}"
+}
+
+# ct_delete_all_objects
+# -----------------
+# Deletes all objects within the project. 
+# Handy when we have one project and want to run more tests.
+function ct_delete_all_objects() {
+  for x in bc builds dc is isimage istag po pv pvc rc routes secrets svc ; do
+    oc delete $x --all
+  done
+  # for some objects it takes longer to be really deleted, so a dummy sleep
+  # to avoid some races when other tests can see not-yet-deleted objects and can fail
+  sleep 10
+}
+
+# ct_os_docker_login
+# --------------------
+# Logs in to the docker daemon; does nothing if REGISTRY_ADDRESS is set.
+# NOTE(review): the skip-check below reads REGISTRY_ADDRESS but the login reads the
+# misspelled REGISRTY_ADDRESS, and the bare string after && is missing an `echo` -- verify.
+function ct_os_docker_login() {
+  [ -n "${REGISTRY_ADDRESS:-}" ] && "REGISTRY_ADDRESS set, not trying to docker login." && return 0
+  # docker login fails with "404 page not found" error sometimes, just try it more times
+  for i in `seq 12` ; do
+    docker login -u developer -p $(oc whoami -t) ${REGISRTY_ADDRESS:-172.30.1.1:5000} && return 0 || :
+    sleep 5
+  done
+  return 1
+}
+
+# ct_os_upload_image IMAGE [IMAGESTREAM]
+# --------------------
+# Uploads image from local registry to the OpenShift internal registry.
+# Arguments: image - image name to upload
+# Arguments: imagestream - name and tag to use for the internal registry.
+#                          In the format of name:tag ($image_name:latest by default)
+# Uses the global REGISRTY_ADDRESS (sic) environment variable for an arbitrary registry address. 
+function ct_os_upload_image() { + local input_name="${1}" ; shift + local image_name=${input_name##*/} + local imagestream=${1:-$image_name:latest} + local output_name="${REGISRTY_ADDRESS:-172.30.1.1:5000}/$(oc project -q)/$imagestream" + + ct_os_docker_login + docker tag ${input_name} ${output_name} + docker push ${output_name} +} + +# ct_os_install_in_centos +# -------------------- +# Installs os cluster in CentOS +function ct_os_install_in_centos() { + yum install -y centos-release-openshift-origin + yum install -y wget git net-tools bind-utils iptables-services bridge-utils\ + bash-completion origin-clients docker origin-clients +} + +# ct_os_cluster_up [DIR, IS_PUBLIC, CLUSTER_VERSION] +# -------------------- +# Runs the local OpenShift cluster using 'oc cluster up' and logs in as developer. +# Arguments: dir - directory to keep configuration data in, random if omitted +# Arguments: is_public - sets either private or public hostname for web-UI, +# use "true" for allow remote access to the web-UI, +# "false" is default +# Arguments: cluster_version - version of the OpenShift cluster to use, empty +# means default version of `oc`; example value: 3.7; +# also can be specified outside by OC_CLUSTER_VERSION +function ct_os_cluster_up() { + ct_os_cluster_running && echo "Cluster already running. Nothing is done." && return 0 + ct_os_logged_in && echo "Already logged in to a cluster. Nothing is done." && return 0 + + mkdir -p /var/tmp/openshift + local dir="${1:-$(mktemp -d /var/tmp/openshift/os-data-XXXXXX)}" ; shift || : + local is_public="${1:-'false'}" ; shift || : + local default_cluster_version=${OC_CLUSTER_VERSION:-} + local cluster_version=${1:-${default_cluster_version}} ; shift || : + if ! 
grep -qe '--insecure-registry.*172\.30\.0\.0' /etc/sysconfig/docker ; then + sed -i "s|OPTIONS='|OPTIONS='--insecure-registry 172.30.0.0/16 |" /etc/sysconfig/docker + fi + + systemctl stop firewalld || : + setenforce 0 + iptables -F + + systemctl restart docker + local cluster_ip="127.0.0.1" + [ "${is_public}" == "true" ] && cluster_ip=$(ct_get_public_ip) + + if [ -n "${cluster_version}" ] ; then + # if $cluster_version is not set, we simply use oc that is available + ct_os_set_path_oc "${cluster_version}" + fi + + mkdir -p ${dir}/{config,data,pv} + case $(oc version| head -n 1) in + "oc v3.1"?.*) + oc cluster up --base-dir="${dir}/data" --public-hostname="${cluster_ip}" + ;; + "oc v3."*) + oc cluster up --host-data-dir="${dir}/data" --host-config-dir="${dir}/config" \ + --host-pv-dir="${dir}/pv" --use-existing-config --public-hostname="${cluster_ip}" + ;; + *) + echo "ERROR: Unexpected oc version." >&2 + return 1 + ;; + esac + oc version + oc login -u system:admin + oc project default + ct_os_wait_rc_ready docker-registry 180 + ct_os_wait_rc_ready router 30 + oc login -u developer -p developer + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# ct_os_cluster_down +# -------------------- +# Shuts down the local OpenShift cluster using 'oc cluster down' +function ct_os_cluster_down() { + oc cluster down +} + +# ct_os_cluster_running +# -------------------- +# Returns 0 if oc cluster is running +function ct_os_cluster_running() { + oc cluster status &>/dev/null +} + +# ct_os_logged_in +# --------------- +# Returns 0 if logged in to a cluster (remote or local) +function ct_os_logged_in() { + oc whoami >/dev/null +} + +# ct_os_set_path_oc OC_VERSION +# -------------------- +# This is a trick that helps using correct version of the `oc`: +# The input is version of the openshift in format v3.6.0 etc. 
+# If the currently available version of oc is not of this version, +# it first takes a look into /usr/local/oc-/bin directory, +# and if not found there it downloads the community release from github. +# In the end the PATH variable is changed, so the other tests can still use just 'oc'. +# Arguments: oc_version - X.Y part of the version of OSE (e.g. 3.9) +function ct_os_set_path_oc() { + local oc_version=$(ct_os_get_latest_ver $1) + local oc_path + + if oc version | grep -q "oc ${oc_version%.*}." ; then + echo "Binary oc found already available in version ${oc_version}: `which oc` Doing noting." + return 0 + fi + + # first check whether we already have oc available in /usr/local + local installed_oc_path="/usr/local/oc-${oc_version%.*}/bin" + + if [ -x "${installed_oc_path}/oc" ] ; then + oc_path="${installed_oc_path}" + echo "Binary oc found in ${installed_oc_path}" >&2 + else + # oc not available in /usr/local, try to download it from github (community release) + oc_path="/tmp/oc-${oc_version}-bin" + ct_os_download_upstream_oc "${oc_version}" "${oc_path}" + fi + if [ -z "${oc_path}/oc" ] ; then + echo "ERROR: oc not found installed, nor downloaded" >&1 + return 1 + fi + export PATH="${oc_path}:${PATH}" + if ! oc version | grep -q "oc ${oc_version%.*}." 
; then + echo "ERROR: something went wrong, oc located at ${oc_path}, but oc of version ${oc_version} not found in PATH ($PATH)" >&1 + return 1 + else + echo "PATH set correctly, binary oc found in version ${oc_version}: `which oc`" + fi +} + +# ct_os_get_latest_ver VERSION_PART_X +# -------------------- +# Returns full version (vX.Y.Z) from part of the version (X.Y) +# Arguments: vxy - X.Y part of the version +# Returns vX.Y.Z variant of the version +function ct_os_get_latest_ver(){ + local vxy="v$1" + for vz in {3..0} ; do + curl -sif "https://github.com/openshift/origin/releases/tag/${vxy}.${vz}" >/dev/null && echo "${vxy}.${vz}" && return 0 + done + echo "ERROR: version ${vxy} not found in https://github.com/openshift/origin/tags" >&2 + return 1 +} + +# ct_os_download_upstream_oc OC_VERSION OUTPUT_DIR +# -------------------- +# Downloads a particular version of openshift-origin-client-tools from +# github into specified output directory +# Arguments: oc_version - version of OSE (e.g. 
v3.7.2) +# Arguments: output_dir - output directory +function ct_os_download_upstream_oc() { + local oc_version=$1 + local output_dir=$2 + + # check whether we already have the binary in place + [ -x "${output_dir}/oc" ] && return 0 + + mkdir -p "${output_dir}" + # using html output instead of https://api.github.com/repos/openshift/origin/releases/tags/${oc_version}, + # because API is limited for number of queries if not authenticated + tarball=$(curl -si "https://github.com/openshift/origin/releases/tag/${oc_version}" | grep -o -e "openshift-origin-client-tools-${oc_version}-[a-f0-9]*-linux-64bit.tar.gz" | head -n 1) + + # download, unpack the binaries and then put them into output directory + echo "Downloading https://github.com/openshift/origin/releases/download/${oc_version}/${tarball} into ${output_dir}/" >&2 + curl -sL https://github.com/openshift/origin/releases/download/${oc_version}/"${tarball}" | tar -C "${output_dir}" -xz + mv -f "${output_dir}"/"${tarball%.tar.gz}"/* "${output_dir}/" + + rmdir "${output_dir}"/"${tarball%.tar.gz}" +} + + +# ct_os_test_s2i_app_func IMAGE APP CONTEXT_DIR CHECK_CMD [OC_ARGS] +# -------------------- +# Runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. Then check the container by arbitrary +# function given as argument (such an argument may include string, +# that will be replaced with actual IP). 
# Arguments: image - prefix or whole ID of the pod to run the cmd in (compulsory)
# Arguments: app - url or local path to git repo with the application sources (compulsory)
# Arguments: context_dir - sub-directory inside the repository with the application sources (compulsory)
# Arguments: check_command - CMD line that checks whether the container works (compulsory; '<IP>' will be replaced with actual IP)
# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app`
#            command, typically environment variables (optional)
# Arguments: import_image - if set, import this image instead of uploading one (optional)
function ct_os_test_s2i_app_func() {
  local image_name=${1}
  local app=${2}
  local context_dir=${3}
  local check_command=${4}
  local oc_args=${5:-}
  local import_image=${6:-}
  # strip the registry/namespace prefix to derive service and imagestream names
  local image_name_no_namespace=${image_name##*/}
  local service_name="${image_name_no_namespace}-testing"
  local image_tagged="${image_name_no_namespace}:${VERSION}"

  # NOTE(review): "emtpy" typo in the message below is left as-is here (code is
  # unchanged in this documentation pass)
  if [ $# -lt 4 ] || [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then
    echo "ERROR: ct_os_test_s2i_app_func() requires at least 4 arguments that cannot be emtpy." >&2
    return 1
  fi

  ct_os_new_project
  # Create a specific imagestream tag for the image so that oc cannot use anything else
  if [ "${CT_SKIP_UPLOAD_IMAGE:-false}" == 'true' ] ; then
    if [ -n "${import_image}" ] ; then
      echo "Importing image ${import_image} as ${image_name}:${VERSION}"
      oc import-image ${image_name}:${VERSION} --from ${import_image} --confirm
    else
      echo "Uploading and importing image skipped."
    fi
  else
    if [ -n "${import_image}" ] ; then
      echo "Warning: Import image ${import_image} requested, but uploading image ${image_name} instead."
    fi
    ct_os_upload_image "${image_name}" "${image_tagged}"
  fi

  local app_param="${app}"
  if [ -d "${app}" ] ; then
    # for local directory, we need to copy the content, otherwise too smart os command
    # pulls the git remote repository instead
    app_param=$(ct_obtain_input "${app}")
  fi

  ct_os_deploy_s2i_image "${image_tagged}" "${app_param}" \
                          --context-dir="${context_dir}" \
                          --name "${service_name}" \
                          ${oc_args}

  if [ -d "${app}" ] ; then
    # in order to avoid weird race seen sometimes, let's wait shortly
    # before starting the build explicitly
    sleep 5
    oc start-build "${service_name}" --from-dir="${app_param}"
  fi

  ct_os_wait_pod_ready "${service_name}" 300

  # NOTE(review): "s//$ip/g" looks like a mangled "s/<IP>/$ip/g" -- in sed an
  # empty regex reuses the previous pattern, which is not what is intended here;
  # confirm against the upstream copy of this library
  local ip=$(ct_os_get_service_ip "${service_name}")
  local check_command_exp=$(echo "$check_command" | sed -e "s//$ip/g")

  echo " Checking APP using $check_command_exp ..."
  local result=0
  # run the user-supplied check; any non-zero exit marks the whole test failed
  eval "$check_command_exp" || result=1

  if [ $result -eq 0 ] ; then
    echo " Check passed."
  else
    echo " Check failed."
  fi

  # tear the project down regardless of the check result
  ct_os_delete_project
  return $result
}

# ct_os_test_s2i_app IMAGE APP CONTEXT_DIR EXPECTED_OUTPUT [PORT, PROTOCOL, RESPONSE_CODE, OC_ARGS, ... ]
# --------------------
# Runs [image] and [app] in the openshift and optionally specifies env_params
# as environment variables to the image. Then check the http response.
# Arguments: image - prefix or whole ID of the pod to run the cmd in (compulsory)
# Arguments: app - url or local path to git repo with the application sources (compulsory)
# Arguments: context_dir - sub-directory inside the repository with the application sources (compulsory)
# Arguments: expected_output - PCRE regular expression that must match the response body (compulsory)
# Arguments: port - which port to use (optional; default: 8080)
# Arguments: protocol - which protocol to use (optional; default: http)
# Arguments: response_code - what http response code to expect (optional; default: 200)
# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app`
#            command, typically environment variables (optional)
# Arguments: import_image - if set, import this image instead of uploading one (optional)
#
# Thin convenience wrapper: builds an HTTP-response check command and delegates
# the whole deployment/verification flow to ct_os_test_s2i_app_func.
function ct_os_test_s2i_app() {
  local image_name=${1}
  local app=${2}
  local context_dir=${3}
  local expected_output=${4}
  local port=${5:-8080}
  local protocol=${6:-http}
  local response_code=${7:-200}
  local oc_args=${8:-}
  local import_image=${9:-}

  if [ $# -lt 4 ] || [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then
    # fixed typo in the error message: "emtpy" -> "empty"
    echo "ERROR: ct_os_test_s2i_app() requires at least 4 arguments that cannot be empty." >&2
    return 1
  fi

  # NOTE(review): the URL below reads '${protocol}://:${port}' -- an <IP>
  # placeholder between '//' and ':' appears to have been stripped from this
  # copy; left unchanged pending confirmation against upstream
  ct_os_test_s2i_app_func "${image_name}" \
                          "${app}" \
                          "${context_dir}" \
                          "ct_os_test_response_internal '${protocol}://:${port}' '${response_code}' '${expected_output}'" \
                          "${oc_args}" "${import_image}"
}

# ct_os_test_template_app_func IMAGE APP IMAGE_IN_TEMPLATE CHECK_CMD [OC_ARGS]
# --------------------
# Runs [image] and [app] in the openshift and optionally specifies env_params
# as environment variables to the image. Then check the container by arbitrary
# function given as argument (such an argument may include string,
# that will be replaced with actual IP).
# Arguments: image_name - prefix or whole ID of the pod to run the cmd in (compulsory)
# Arguments: template - url or local path to a template to use (compulsory)
# Arguments: name_in_template - image name used in the template
# Arguments: check_command - CMD line that checks whether the container works (compulsory; '<IP>' will be replaced with actual IP)
# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app`
#            command, typically environment variables (optional)
# Arguments: other_images - some templates need other image to be pushed into the OpenShift registry,
#            specify them in this parameter as "<image>|<tag>", where "<image>" is a full image name
#            (including registry if needed) and "<tag>" is a tag under which the image should be available
#            in the OpenShift registry.
# Arguments: import_image - if set, import this image instead of uploading one (optional)
function ct_os_test_template_app_func() {
  local image_name=${1}
  local template=${2}
  local name_in_template=${3}
  local check_command=${4}
  local oc_args=${5:-}
  local other_images=${6:-}
  local import_image=${7:-}

  if [ $# -lt 4 ] || [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then
    # fixed typo in the error message: "emtpy" -> "empty"
    echo "ERROR: ct_os_test_template_app_func() requires at least 4 arguments that cannot be empty." >&2
    return 1
  fi

  local service_name="${name_in_template}-testing"
  local image_tagged="${name_in_template}:${VERSION}"

  ct_os_new_project

  # Create a specific imagestream tag for the image so that oc cannot use anything else
  if [ "${CT_SKIP_UPLOAD_IMAGE:-false}" == 'true' ] ; then
    if [ -n "${import_image}" ] ; then
      echo "Importing image ${import_image} as ${image_name}:${VERSION}"
      oc import-image ${image_name}:${VERSION} --from ${import_image} --confirm
    else
      echo "Uploading and importing image skipped."
    fi
  else
    if [ -n "${import_image}" ] ; then
      echo "Warning: Import image ${import_image} requested, but uploading image ${image_name} instead."
    fi
    ct_os_upload_image "${image_name}" "${image_tagged}"

    # upload also other images, that template might need (list of pairs in the format <image>|<tag>)
    # bugfix: declare the variable that `read -ra` below actually populates
    # ("image_tag_a"); the original declared an unused "images_tags_a" and let
    # image_tag_a leak into the global scope
    local image_tag_a
    local i_t
    for i_t in ${other_images} ; do
      echo "${i_t}"
      IFS='|' read -ra image_tag_a <<< "${i_t}"
      docker pull "${image_tag_a[0]}"
      ct_os_upload_image "${image_tag_a[0]}" "${image_tag_a[1]}"
    done
  fi

  # materialize the template locally (handles URLs and directories) and deploy it
  local local_template=$(ct_obtain_input "${template}")
  local namespace=${CT_NAMESPACE:-$(oc project -q)}
  oc new-app ${local_template} \
     --name "${name_in_template}" \
     -p NAMESPACE="${namespace}" \
     ${oc_args}

  ct_os_wait_pod_ready "${service_name}" 300

  # NOTE(review): "s//$ip/g" looks like a mangled "s/<IP>/$ip/g" -- an empty
  # regex reuses sed's previous pattern, which is not intended here; left
  # unchanged pending confirmation against the upstream copy of this library
  local ip=$(ct_os_get_service_ip "${service_name}")
  local check_command_exp=$(echo "$check_command" | sed -e "s//$ip/g")

  echo " Checking APP using $check_command_exp ..."
  local result=0
  eval "$check_command_exp" || result=1

  if [ $result -eq 0 ] ; then
    echo " Check passed."
  else
    echo " Check failed."
  fi

  # tear the project down regardless of the check result
  ct_os_delete_project
  return $result
}

# params:
# ct_os_test_template_app IMAGE APP IMAGE_IN_TEMPLATE EXPECTED_OUTPUT [PORT, PROTOCOL, RESPONSE_CODE, OC_ARGS, ... ]
# --------------------
# Runs [image] and [app] in the openshift and optionally specifies env_params
# as environment variables to the image. Then check the http response.
# Arguments: image_name - prefix or whole ID of the pod to run the cmd in (compulsory)
# Arguments: template - url or local path to a template to use (compulsory)
# Arguments: name_in_template - image name used in the template
# Arguments: expected_output - PCRE regular expression that must match the response body (compulsory)
# Arguments: port - which port to use (optional; default: 8080)
# Arguments: protocol - which protocol to use (optional; default: http)
# Arguments: response_code - what http response code to expect (optional; default: 200)
# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app`
#            command, typically environment variables (optional)
# Arguments: other_images - some templates need other image to be pushed into the OpenShift registry,
#            specify them in this parameter as "<image>|<tag>", where "<image>" is a full image name
#            (including registry if needed) and "<tag>" is a tag under which the image should be available
#            in the OpenShift registry.
# Arguments: import_image - if set, import this image instead of uploading one (optional)
#
# Thin wrapper over ct_os_test_template_app_func with an HTTP-response check.
function ct_os_test_template_app() {
  local image_name=${1}
  local template=${2}
  local name_in_template=${3}
  local expected_output=${4}
  local port=${5:-8080}
  local protocol=${6:-http}
  local response_code=${7:-200}
  local oc_args=${8:-}
  local other_images=${9:-}
  local import_image=${10:-}

  # NOTE(review): "emtpy" typo and the missing <IP> placeholder in the URL below
  # are left untouched in this documentation-only pass
  if [ $# -lt 4 ] || [ -z "${1}" -o -z "${2}" -o -z "${3}" -o -z "${4}" ]; then
    echo "ERROR: ct_os_test_template_app() requires at least 4 arguments that cannot be emtpy." >&2
    return 1
  fi

  ct_os_test_template_app_func "${image_name}" \
                               "${template}" \
                               "${name_in_template}" \
                               "ct_os_test_response_internal '${protocol}://:${port}' '${response_code}' '${expected_output}'" \
                               "${oc_args}" \
                               "${other_images}" \
                               "${import_image}"
}

# ct_os_test_image_update IMAGE_NAME OLD_IMAGE ISTAG CHECK_FUNCTION OC_ARGS
# --------------------
# Runs an image update test with [image] uploaded to [is] imagestream
# and checks the services using an arbitrary function provided in [check_function].
# Arguments: image_name - prefix or whole ID of the pod to run the cmd in (compulsory)
# Arguments: old_image - valid name of the image from the registry
# Arguments: istag - imagestream to upload the images into (compulsory)
# Arguments: check_function - command to be run to check functionality of created services (compulsory)
# Arguments: oc_args - arguments to use during oc new-app (compulsory)
ct_os_test_image_update() {
  local image_name=$1; shift
  local old_image=$1; shift
  local istag=$1; shift
  local check_function=$1; shift
  local service_name=${image_name##*/}
  local ip="" check_command_exp=""

  echo "Running image update test for: $image_name"
  ct_os_new_project

  # Get current image from repository and create an imagestream
  docker pull "$old_image:latest" 2>/dev/null
  ct_os_upload_image "$old_image" "$istag"

  # Setup example application with curent image
  oc new-app "$@" --name "$service_name"
  ct_os_wait_pod_ready "$service_name" 60

  # Check application output
  # NOTE(review): ${check_function///$ip} looks like a mangled
  # ${check_function//<IP>/$ip} substitution -- confirm against upstream
  ip=$(ct_os_get_service_ip "$service_name")
  check_command_exp=${check_function///$ip}
  ct_assert_cmd_success "$check_command_exp"

  # Tag built image into the imagestream and wait for rebuild
  ct_os_upload_image "$image_name" "$istag"
  ct_os_wait_pod_ready "${service_name}-2" 60

  # Check application output
  ip=$(ct_os_get_service_ip "$service_name")
  check_command_exp=${check_function///$ip}
  ct_assert_cmd_success "$check_command_exp"

  ct_os_delete_project
}

# ct_os_deploy_cmd_image IMAGE_NAME
# --------------------
# Runs a special command pod, a pod that does nothing, but includes utilities for testing.
# A typical usage is a mysql pod that includes mysql commandline, that we need for testing.
# Running commands inside this command pod is done via ct_os_cmd_image_run function.
# The pod is not run again if already running.
# Arguments: image_name - image to be used as a command pod
function ct_os_deploy_cmd_image() {
  local image_name=${1}
  oc get pod command-app &>/dev/null && echo "command POD already running" && return 0
  echo "command POD not running yet, will start one called command-app"
  # NOTE(review): the text below is garbled in this copy of the file. The
  # heredoc with the pod definition (presumably `oc create -f - <<EOF ... EOF`),
  # the whole ct_os_cmd_image_run helper, and the beginning of
  # ct_os_test_response_internal (its argument parsing and max_attempts
  # default, referenced by the loop below) appear to have been lost between
  # the '<' and the dangling '"'. Restore from the upstream library before use.
  oc create -f - <"
  local sleep_time=3
  local attempt=1
  local result=1
  local status
  local response_code
  local response_file=$(mktemp /tmp/ct_test_response_XXXXXX)
  local util_image_name='python:3.6'

  ct_os_deploy_cmd_image "${util_image_name}"

  # poll the URL from inside the command pod until the expected code/body show
  # up or the attempt budget is exhausted
  while [ ${attempt} -le ${max_attempts} ]; do
    ct_os_cmd_image_run "curl --connect-timeout 10 -s -w '%{http_code}' '${url}'" >${response_file} && status=0 || status=1
    if [ ${status} -eq 0 ]; then
      # curl -w appends the HTTP code as the last 3 bytes of the output
      response_code=$(cat ${response_file} | tail -c 3)
      if [ "${response_code}" -eq "${expected_code}" ]; then
        result=0
      fi
      cat ${response_file} | grep -qP -e "${body_regexp}" || result=1;
      # Some services return 40x code until they are ready, so let's give them
      # some chance and not end with failure right away
      # Do not wait if we already have expected outcome though
      if [ ${result} -eq 0 -o ${attempt} -gt ${ignore_error_attempts} -o ${attempt} -eq ${max_attempts} ] ; then
        break
      fi
    fi
    attempt=$(( ${attempt} + 1 ))
    sleep ${sleep_time}
  done
  rm -f ${response_file}
  return ${result}
}

# ct_os_get_image_from_pod
# ------------------------
# Print image identifier from an existing pod to stdout
# Argument: pod_prefix - prefix or full name of the pod to get image from
ct_os_get_image_from_pod() {
  local pod_prefix=$1 ; shift
  local pod_name=$(ct_os_get_pod_name $pod_prefix)
  # extract the 'image:' value from the pod yaml; head -1 keeps the first match
  oc get "po/${pod_name}" -o yaml | sed -ne 's/^\s*image:\s*\(.*\)\s*$/\1/ p' | head -1
}

# ct_os_check_cmd_internal
# ----------------
# Runs a specified command, checks exit code and compares the output with expected regexp.
# That all is done inside an image in the cluster, so the function is used
# typically in clusters that are not accessible outside.
# The check is repeated until timeout.
# Argument: util_image_name - name of the image in the cluster that is used for running the cmd
# Argument: service_name - kubernetes' service name to work with (IP address is taken from this one)
# Argument: check_command - command that is run within the util_image_name container
# Argument: expected_content_match - regexp that must be in the output (use .* to ignore check)
# Argument: timeout - number of seconds to wait till the check succeeds
function ct_os_check_cmd_internal() {
  local util_image_name=$1 ; shift
  local service_name=$1 ; shift
  local check_command=$1 ; shift
  local expected_content_match=${1:-.*} ; shift
  # '|| :' tolerates the missing optional argument when timeout is defaulted
  local timeout=${1:-60} ; shift || :

  : " Service ${service_name} check ..."

  local output
  local ret
  # NOTE(review): "s//$ip/g" looks like a mangled "s/<IP>/$ip/g" -- confirm
  # against the upstream copy of this library
  local ip=$(ct_os_get_service_ip "${service_name}")
  local check_command_exp=$(echo "$check_command" | sed -e "s//$ip/g")

  ct_os_deploy_cmd_image $(ct_os_get_image_from_pod "${util_image_name}" | head -n 1)
  # SECONDS is bash's built-in elapsed-time counter; reset it for the timeout
  SECONDS=0

  echo -n "Waiting for ${service_name} service becoming ready ..."
  while true ; do
    output=$(ct_os_cmd_image_run "$check_command_exp")
    ret=$?
    # a successful exit code alone is not enough -- the output must also match
    echo "${output}" | grep -qe "${expected_content_match}" || ret=1
    if [ ${ret} -eq 0 ] ; then
      echo " PASS"
      return 0
    fi
    echo -n "."
    [ ${SECONDS} -gt ${timeout} ] && break
    sleep 3
  done
  echo " FAIL"
  return 1
}

# NOTE(review): the lines below are an embedded git-diff header -- this chunk
# is a patch that concatenates two files; everything from here on belongs to
# the new file test/test-lib.sh
diff --git a/test/test-lib.sh b/test/test-lib.sh
new file mode 100644
index 0000000..e372870
--- /dev/null
+++ b/test/test-lib.sh
@@ -0,0 +1,507 @@
#
# Test a container image.
#
# Always use sourced from a specific container testfile
#
# requires definition of CID_FILE_DIR
# CID_FILE_DIR=$(mktemp --suffix=_test_cidfiles -d)
# requires definition of TEST_LIST
# TEST_LIST="\
# ctest_container_creation
# ctest_doc_content"

# Container CI tests
# abbreviated as "ct"

# may be redefined in the specific container testfile
EXPECTED_EXIT_CODE=0

# ct_cleanup
# --------------------
# Cleans up containers used during tests. Stops and removes all containers
# referenced by cid_files in CID_FILE_DIR. Dumps logs if a container exited
# unexpectedly. Removes the cid_files and CID_FILE_DIR as well.
# Uses: $CID_FILE_DIR - path to directory containing cid_files
# Uses: $EXPECTED_EXIT_CODE - expected container exit code
function ct_cleanup() {
  for cid_file in $CID_FILE_DIR/* ; do
    local container=$(cat $cid_file)

    : "Stopping and removing container $container..."
    docker stop $container
    exit_status=$(docker inspect -f '{{.State.ExitCode}}' $container)
    # only dump logs when the container did not exit the way the test expected
    if [ "$exit_status" != "$EXPECTED_EXIT_CODE" ]; then
      : "Dumping logs for $container"
      docker logs $container
    fi
    docker rm -v $container
    rm $cid_file
  done
  rmdir $CID_FILE_DIR
  : "Done."
}

# ct_enable_cleanup
# --------------------
# Enables automatic container cleanup after tests.
function ct_enable_cleanup() {
  trap ct_cleanup EXIT SIGINT
}

# ct_get_cid [name]
# --------------------
# Prints container id from cid_file based on the name of the file.
# Argument: name - name of cid_file where the container id will be stored
# Uses: $CID_FILE_DIR - path to directory containing cid_files
function ct_get_cid() {
  local name="$1" ; shift || return 1
  echo $(cat "$CID_FILE_DIR/$name")
}

# ct_get_cip [id]
# --------------------
# Prints container ip address based on the container id.
# Argument: id - container id
function ct_get_cip() {
  local id="$1" ; shift
  docker inspect --format='{{.NetworkSettings.IPAddress}}' $(ct_get_cid "$id")
}

# ct_wait_for_cid [cid_file]
# --------------------
# Holds the execution until the cid_file is created. Usually run after container
# creation.
# Argument: cid_file - name of the cid_file that should be created
function ct_wait_for_cid() {
  local cid_file=$1
  local max_attempts=10
  local sleep_time=1
  local attempt=1
  # NOTE(review): 'result' is declared but never used in this function
  local result=1
  while [ $attempt -le $max_attempts ]; do
    # succeed only once the cid file exists AND is non-empty
    [ -f $cid_file ] && [ -s $cid_file ] && return 0
    : "Waiting for container start..."
    attempt=$(( $attempt + 1 ))
    sleep $sleep_time
  done
  return 1
}

# ct_assert_container_creation_fails [container_args]
# --------------------
# The invocation of docker run should fail based on invalid container_args
# passed to the function. Returns 0 when container fails to start properly.
# Argument: container_args - all arguments are passed directly to docker run
# Uses: $CID_FILE_DIR - path to directory containing cid_files
function ct_assert_container_creation_fails() {
  local ret=0
  local max_attempts=10
  local attempt=1
  local cid_file=assert
  # temporarily disable errexit; we expect commands below to fail
  set +e
  local old_container_args="${CONTAINER_ARGS-}"
  CONTAINER_ARGS="$@"
  ct_create_container $cid_file
  if [ $?
-eq 0 ]; then
    local cid=$(ct_get_cid $cid_file)

    # give the container a bounded amount of time to die on its own
    while [ "$(docker inspect -f '{{.State.Running}}' $cid)" == "true" ] ; do
      sleep 2
      attempt=$(( $attempt + 1 ))
      if [ $attempt -gt $max_attempts ]; then
        docker stop $cid
        ret=1
        break
      fi
    done
    # a zero exit code means the container ran fine -- which is a failure here
    exit_status=$(docker inspect -f '{{.State.ExitCode}}' $cid)
    if [ "$exit_status" == "0" ]; then
      ret=1
    fi
    docker rm -v $cid
    rm $CID_FILE_DIR/$cid_file
  fi
  [ ! -z $old_container_args ] && CONTAINER_ARGS="$old_container_args"
  set -e
  return $ret
}

# ct_create_container [name, command]
# --------------------
# Creates a container using the IMAGE_NAME and CONTAINER_ARGS variables. Also
# stores the container id to a cid_file located in the CID_FILE_DIR, and waits
# for the creation of the file.
# Argument: name - name of cid_file where the container id will be stored
# Argument: command - optional command to be executed in the container
# Uses: $CID_FILE_DIR - path to directory containing cid_files
# Uses: $CONTAINER_ARGS - optional arguments passed directly to docker run
# Uses: $IMAGE_NAME - name of the image being tested
function ct_create_container() {
  local cid_file="$CID_FILE_DIR/$1" ; shift
  # create container with a cidfile in a directory for cleanup
  docker run --cidfile="$cid_file" -d ${CONTAINER_ARGS:-} $IMAGE_NAME "$@"
  ct_wait_for_cid $cid_file || return 1
  : "Created container $(cat $cid_file)"
}

# ct_scl_usage_old [name, command, expected]
# --------------------
# Tests three ways of running the SCL, by looking for an expected string
# in the output of the command
# Argument: name - name of cid_file where the container id will be stored
# Argument: command - executed inside the container
# Argument: expected - string that is expected to be in the command output
# Uses: $CID_FILE_DIR - path to directory containing cid_files
# Uses: $IMAGE_NAME - name of the image being tested
function ct_scl_usage_old() {
  local name="$1"
  local command="$2"
  local
expected="$3"
  local out=""
  : " Testing the image SCL enable"
  # 1) plain `docker run` with bash -c
  out=$(docker run --rm ${IMAGE_NAME} /bin/bash -c "${command}")
  if ! echo "${out}" | grep -q "${expected}"; then
    echo "ERROR[/bin/bash -c "${command}"] Expected '${expected}', got '${out}'" >&2
    return 1
  fi
  # 2) `docker exec` into the already-running container with bash -c
  out=$(docker exec $(ct_get_cid $name) /bin/bash -c "${command}" 2>&1)
  if ! echo "${out}" | grep -q "${expected}"; then
    echo "ERROR[exec /bin/bash -c "${command}"] Expected '${expected}', got '${out}'" >&2
    return 1
  fi
  # 3) `docker exec` with an interactive sh (-ic), which sources profile scripts
  out=$(docker exec $(ct_get_cid $name) /bin/sh -ic "${command}" 2>&1)
  if ! echo "${out}" | grep -q "${expected}"; then
    echo "ERROR[exec /bin/sh -ic "${command}"] Expected '${expected}', got '${out}'" >&2
    return 1
  fi
}

# ct_doc_content_old [strings]
# --------------------
# Looks for occurrence of strings in the documentation files and checks
# the format of the files. Files examined: help.1
# Argument: strings - strings expected to appear in the documentation
# Uses: $IMAGE_NAME - name of the image being tested
function ct_doc_content_old() {
  local tmpdir=$(mktemp -d)
  local f
  : " Testing documentation in the container image"
  # Extract the help files from the container
  for f in help.1 ; do
    docker run --rm ${IMAGE_NAME} /bin/bash -c "cat /${f}" >${tmpdir}/$(basename ${f})
    # Check whether the files contain some important information
    for term in $@ ; do
      if ! cat ${tmpdir}/$(basename ${f}) | grep -F -q -e "${term}" ; then
        echo "ERROR: File /${f} does not include '${term}'." >&2
        return 1
      fi
    done
    # Check whether the files use the correct format
    # (.TH/.PP/.SH are standard man-page macros)
    for term in TH PP SH ; do
      if ! grep -q "^\.${term}" ${tmpdir}/help.1 ; then
        echo "ERROR: /help.1 is probably not in troff or groff format, since '${term}' is missing." >&2
        return 1
      fi
    done
  done
  : " Success!"
}


# ct_npm_works
# --------------------
# Checks existence of the npm tool and runs it.
function ct_npm_works() {
  # NOTE(review): tmpdir is created but never removed, and only used for the
  # first check's output
  local tmpdir=$(mktemp -d)
  : " Testing npm in the container image"
  docker run --rm ${IMAGE_NAME} /bin/bash -c "npm --version" >${tmpdir}/version

  if [ $? -ne 0 ] ; then
    echo "ERROR: 'npm --version' does not work inside the image ${IMAGE_NAME}." >&2
    return 1
  fi

  # a real install proves npm can reach the registry and write node_modules
  docker run --rm ${IMAGE_NAME} /bin/bash -c "npm install jquery && test -f node_modules/jquery/src/jquery.js"
  if [ $? -ne 0 ] ; then
    echo "ERROR: npm could not install jquery inside the image ${IMAGE_NAME}." >&2
    return 1
  fi

  : " Success!"
}


# ct_path_append PATH_VARNAME DIRECTORY
# -------------------------------------
# Append DIRECTORY to VARIABLE of name PATH_VARNAME, the VARIABLE must consist
# of colon-separated list of directories.
# NOTE(review): despite the name and the comment above, the directory is
# PREPENDED ($1=$2:$old_value) when the variable is already set
ct_path_append ()
{
  if eval "test -n \"\${$1-}\""; then
    eval "$1=\$2:\$$1"
  else
    eval "$1=\$2"
  fi
}


# ct_path_foreach PATH ACTION [ARGS ...]
# --------------------------------------
# For each DIR in PATH execute ACTION (path is colon separated list of
# directories). The particular calls to ACTION will look like
# '$ ACTION directory [ARGS ...]'
ct_path_foreach ()
{
  local dir dirlist action save_IFS
  # temporarily switch IFS to ':' so the unquoted expansion splits on colons
  save_IFS=$IFS
  IFS=:
  dirlist=$1
  action=$2
  shift 2
  for dir in $dirlist; do "$action" "$dir" "$@" ; done
  IFS=$save_IFS
}


# ct_run_test_list
# --------------------
# Execute the tests specified by TEST_LIST
# Uses: $TEST_LIST - list of test names
function ct_run_test_list() {
  for test_case in $TEST_LIST; do
    : "Running test $test_case"
    # the test file may live in ./test or ../test depending on the caller's cwd
    [ -f test/$test_case ] && source test/$test_case
    [ -f ../test/$test_case ] && source ../test/$test_case
    $test_case
  done;
}

# ct_gen_self_signed_cert_pem
# ---------------------------
# Generates a self-signed PEM certificate pair into specified directory.
# Argument: output_dir - output directory path
# Argument: base_name - base name of the certificate files
# Resulted files will be those:
#   <output_dir>/<base_name>-cert-selfsigned.pem -- public PEM cert
#   <output_dir>/<base_name>-key.pem -- PEM private key
ct_gen_self_signed_cert_pem() {
  local output_dir=$1 ; shift
  local base_name=$1 ; shift
  mkdir -p "${output_dir}"
  # bugfix: the CSR used to be written to "${base_name}-req.pem" in the current
  # working directory, polluting the caller's cwd instead of using output_dir
  openssl req -newkey rsa:2048 -nodes -keyout "${output_dir}/${base_name}-key.pem" -subj '/C=GB/ST=Berkshire/L=Newbury/O=My Server Company' > "${output_dir}/${base_name}-req.pem"
  openssl req -new -x509 -nodes -key "${output_dir}/${base_name}-key.pem" -batch > "${output_dir}/${base_name}-cert-selfsigned.pem"
}

# ct_obtain_input FILE|DIR|URL
# --------------------
# Either copies a file or a directory to a tmp location for local copies, or
# downloads the file from remote location.
# Resulted file path is printed, so it can be later used by calling function.
# Arguments: input - local file, directory or remote URL
# Returns: 1 (with an ERROR on stderr) when the input is none of the above
function ct_obtain_input() {
  local input=$1
  local extension="${input##*.}"

  # Try to use same extension for the temporary file if possible
  [[ "${extension}" =~ ^[a-z0-9]*$ ]] && extension=".${extension}" || extension=""

  local output=$(mktemp "/var/tmp/test-input-XXXXXX$extension")
  if [ -f "${input}" ] ; then
    cp -f "${input}" "${output}"
  elif [ -d "${input}" ] ; then
    # mktemp created a file; replace it with a copy of the directory
    # (-L/-H follow symlinks while copying)
    rm -f "${output}"
    cp -r -LH "${input}" "${output}"
  elif echo "${input}" | grep -qe '^http\(s\)\?://' ; then
    curl "${input}" > "${output}"
  else
    echo "ERROR: file type not known: ${input}" >&2
    return 1
  fi
  echo "${output}"
}

# ct_test_response
# ----------------
# Perform GET request to the application container, checks output with
# a reg-exp and HTTP response code.
# Argument: url - request URL path
# Argument: expected_code - expected HTTP response code
# Argument: body_regexp - PCRE regular expression that must match the response body
# Argument: max_attempts - Optional number of attempts (default: 20), three seconds sleep between
# Argument: ignore_error_attempts - Optional number of attempts when we ignore error output (default: 10)
ct_test_response() {
  local url="$1"
  local expected_code="$2"
  local body_regexp="$3"
  local max_attempts=${4:-20}
  local ignore_error_attempts=${5:-10}

  : " Testing the HTTP(S) response for <${url}>"
  local sleep_time=3
  local attempt=1
  local result=1
  local status
  local response_code
  local response_file=$(mktemp /tmp/ct_test_response_XXXXXX)
  while [ ${attempt} -le ${max_attempts} ]; do
    curl --connect-timeout 10 -s -w '%{http_code}' "${url}" >${response_file} && status=0 || status=1
    if [ ${status} -eq 0 ]; then
      # curl -w '%{http_code}' appends the status code as the last 3 bytes
      response_code=$(cat ${response_file} | tail -c 3)
      if [ "${response_code}" -eq "${expected_code}" ]; then
        result=0
      fi
      # both the status code above and the body match must hold
      cat ${response_file} | grep -qP -e "${body_regexp}" || result=1;
      # Some services return 40x code until they are ready, so let's give them
      # some chance and not end with failure right away
      # Do not wait if we already have expected outcome though
      if [ ${result} -eq 0 -o ${attempt} -gt ${ignore_error_attempts} -o ${attempt} -eq ${max_attempts} ] ; then
        break
      fi
    fi
    attempt=$(( ${attempt} + 1 ))
    sleep ${sleep_time}
  done
  rm -f ${response_file}
  return ${result}
}

# ct_registry_from_os OS
# ----------------
# Transform operating system string [os] into registry url
# Argument: OS - string containing the os version
ct_registry_from_os() {
  local registry=""
  case $1 in
    rhel7)
      registry=registry.access.redhat.com
      ;;
    *)
      # anything that is not RHEL is assumed to come from Docker Hub
      registry=docker.io
      ;;
  esac
  echo "$registry"
}

# ct_assert_cmd_success CMD
# ----------------
# Evaluates [cmd] and fails if it does not succeed.
# Argument: CMD - Command to be run
# Prints a progress line, then PASS/FAIL depending on the command's exit code.
function ct_assert_cmd_success() {
  echo "Checking '$*' for success ..."
  # success path first; any failure of the evaluated command falls through
  if eval "$@" &>/dev/null; then
    echo " PASS"
    return 0
  fi
  echo " FAIL"
  return 1
}

# ct_assert_cmd_failure CMD
# ----------------
# Evaluates [cmd] and fails if it succeeds.
# Argument: CMD - Command to be run
function ct_assert_cmd_failure() {
  echo "Checking '$*' for failure ..."
  # the command failing is the expected (PASS) outcome here
  if ! eval "$@" &>/dev/null; then
    echo " PASS"
    return 0
  fi
  echo " FAIL"
  return 1
}


# ct_random_string [LENGTH=10]
# ----------------------------
# Generate pseudorandom alphanumeric string of LENGTH bytes, the
# default length is 10. The string is printed on stdout.
# Runs in a subshell so the LC_ALL export does not leak to the caller.
ct_random_string()
(
  export LC_ALL=C
  # draw 10k of entropy, keep [a-z0-9], cut to the requested width
  dd if=/dev/urandom count=1 bs=10k 2>/dev/null | tr -dc 'a-z0-9' | fold -w "${1-10}" | head -n 1
)

# ct_s2i_usage IMG_NAME [S2I_ARGS]
# ----------------------------
# Create a container and run the usage script inside
# Argument: IMG_NAME - name of the image to be used for the container run
# Argument: S2I_ARGS - Additional list of source-to-image arguments, currently unused.
ct_s2i_usage()
{
  local image=$1; shift
  local extra_args="$*";
  local usage_script="/usr/libexec/s2i/usage"
  docker run --rm "$image" bash -c "$usage_script"
}

# ct_s2i_build_as_df APP_PATH SRC_IMAGE DST_IMAGE [S2I_ARGS]
# ----------------------------
# Create a new s2i app image from local sources in a similar way as source-to-image would have used.
# Argument: APP_PATH - local path to the app sources to be used in the test
# Argument: SRC_IMAGE - image to be used as a base for the s2i build
# Argument: DST_IMAGE - image name to be used during the tagging of the s2i build result
# Argument: S2I_ARGS - Additional list of source-to-image arguments.
#                      Only used to check for pull-policy=never and environment variable definitions.
ct_s2i_build_as_df()
{
  local app_path=$1; shift
  local src_image=$1; shift
  local dst_image=$1; shift
  local s2i_args="$*";
  local local_app=upload/src/
  local local_scripts=upload/scripts/
  local user_id=
  local df_name=
  local tmpdir=
  # Use /tmp to not pollute cwd
  tmpdir=$(mktemp -d)
  df_name=$(mktemp -p "$tmpdir" Dockerfile.XXXX)
  pushd "$tmpdir"
  # Check if the image is available locally and try to pull it if it is not
  docker images "$src_image" &>/dev/null || echo "$s2i_args" | grep -q "pull-policy=never" || docker pull "$src_image"
  user_id=$(docker inspect -f "{{.ContainerConfig.User}}" "$src_image")
  # Strip file:// from APP_PATH and copy its contents into current context
  mkdir -p "$local_app"
  cp -r "${app_path/file:\/\//}/." "$local_app"
  # if the app ships custom s2i scripts, move them aside so they can be COPY'd
  [ -d "$local_app/.s2i/bin/" ] && mv "$local_app/.s2i/bin" "$local_scripts"
  # Create a Dockerfile named df_name and fill it with proper content
  #FIXME: Some commands could be combined into a single layer but not sure if worth the trouble for testing purposes
  # NOTE(review): 'cat <"$df_name"' below looks like a mangled
  # 'cat <<EOF >"$df_name"' (the heredoc terminator EOF is still present a few
  # lines down) -- as written this READS the empty Dockerfile instead of
  # writing to it; restore from the upstream copy of this library
  cat <"$df_name"
FROM $src_image
LABEL "io.openshift.s2i.build.image"="$src_image" \\
      "io.openshift.s2i.build.source-location"="$app_path"
USER root
COPY $local_app /tmp/src
EOF
  # custom scripts (if any) are copied and chowned to the image's user
  [ -d "$local_scripts" ] && echo "COPY $local_scripts /tmp/scripts" >> "$df_name" &&
  echo "RUN chown -R $user_id:0 /tmp/scripts" >>"$df_name"
  echo "RUN chown -R $user_id:0 /tmp/src" >>"$df_name"
  # Check for custom environment variables inside .s2i/ folder
  if [ -e "$local_app/.s2i/environment" ]; then
    # Remove any comments and add the contents as ENV commands to the Dockerfile
    sed '/^\s*#.*$/d' "$local_app/.s2i/environment" | while read -r line; do
      echo "ENV $line" >>"$df_name"
    done
  fi
  # Filter out env var definitions from $s2i_args and create Dockerfile ENV commands out of them
  echo "$s2i_args" | grep -o -e '\(-e\|--env\)[[:space:]=]\S*=\S*' | sed -e 's/-e /ENV /' -e 's/--env[ =]/ENV /' >>"$df_name"
  # drop root privileges again before assemble/run, mirroring s2i behaviour
  echo "USER $user_id" >>"$df_name"
  # If exists, run the custom assemble script, else default to /usr/libexec/s2i/assemble
  if [ -x "$local_scripts/assemble" ]; then
    echo "RUN /tmp/scripts/assemble" >>"$df_name"
  else
    echo "RUN /usr/libexec/s2i/assemble" >>"$df_name"
  fi
  # If exists, set the custom run script as CMD, else default to /usr/libexec/s2i/run
  if [ -x "$local_scripts/run" ]; then
    echo "CMD /tmp/scripts/run" >>"$df_name"
  else
    echo "CMD /usr/libexec/s2i/run" >>"$df_name"
  fi
  # Run the build and tag the result
  docker build -f "$df_name" -t "$dst_image" .
  popd
}