
github.com/nextcloud/server.git
author    Jörn Friedrich Dreyer <jfd@butonic.de>  2015-09-28 17:38:01 +0300
committer Jörn Friedrich Dreyer <jfd@butonic.de>  2015-10-06 17:27:25 +0300
commit    5646e39248d9ac2bade491dc99c88a10b3dfbc34 (patch)
tree      a330a10aa0979d00b3449205c95cf484af6d03b2 /tests/objectstore
parent    bb4246c9a8dc40d844ad94492cb40ece5bd35ef6 (diff)
test objectstore with ceph docker
- use default config for swift primary storage test config
- allow testsuite to complete
- fix timeout, script cleanup, enable debug for now
- use btrfs loopback device, requires privileged container and absolute path
- throw exception when storage has problems
- debug by echo ... sleep more, more debug
Diffstat (limited to 'tests/objectstore')
-rwxr-xr-x  tests/objectstore/entrypoint.sh        271
-rwxr-xr-x  tests/objectstore/start-swift-ceph.sh  108
-rwxr-xr-x  tests/objectstore/stop-swift-ceph.sh    38
3 files changed, 417 insertions, 0 deletions
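For orientation, a minimal sketch of how the three scripts below are meant to be chained in a CI job. The wrapper itself is hypothetical, and EXECUTOR_NUMBER is assumed to be exported by the CI environment (Jenkins does this); the scripts do not set it themselves.

    # hypothetical CI wrapper, not part of this patch
    export EXECUTOR_NUMBER=${EXECUTOR_NUMBER:-0}
    ./tests/objectstore/start-swift-ceph.sh   # boots the ceph/keystone container, writes swift.config.php
    # ... run the files_external/objectstore test suite against swift.config.php ...
    ./tests/objectstore/stop-swift-ceph.sh    # kills the container and removes the config again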
diff --git a/tests/objectstore/entrypoint.sh b/tests/objectstore/entrypoint.sh
new file mode 100755
index 00000000000..b490d760a2f
--- /dev/null
+++ b/tests/objectstore/entrypoint.sh
@@ -0,0 +1,271 @@
+#!/bin/bash
+set -e
+
+: ${CLUSTER:=ceph}
+: ${RGW_NAME:=$(hostname -s)}
+: ${MON_NAME:=$(hostname -s)}
+: ${RGW_CIVETWEB_PORT:=80}
+: ${OSD_SIZE:=100}
+
+: ${KEYSTONE_ADMIN_TOKEN:=admin}
+: ${KEYSTONE_ADMIN_PORT:=35357}
+: ${KEYSTONE_PUBLIC_PORT:=5001}
+
+: ${KEYSTONE_SERVICE:=${CLUSTER}}
+: ${KEYSTONE_ENDPOINT_REGION:=region}
+
+: ${KEYSTONE_ADMIN_USER:=admin}
+: ${KEYSTONE_ADMIN_TENANT:=admin}
+: ${KEYSTONE_ADMIN_PASS:=admin}
+
+ip_address=$(head -n1 /etc/hosts | cut -d" " -f1)
+: ${MON_IP:=${ip_address}}
+subnet=$(ip route | grep "src ${ip_address}" | cut -d" " -f1)
+: ${CEPH_NETWORK:=${subnet}}
+
+#######
+# MON #
+#######
+
+if [ ! -n "$CEPH_NETWORK" ]; then
+ echo "ERROR- CEPH_NETWORK must be defined as the name of the network for the OSDs"
+ exit 1
+fi
+
+if [ ! -n "$MON_IP" ]; then
+ echo "ERROR- MON_IP must be defined as the IP address of the monitor"
+ exit 1
+fi
+
+# bootstrap MON
+if [ ! -e /etc/ceph/ceph.conf ]; then
+ fsid=$(uuidgen)
+ cat <<ENDHERE >/etc/ceph/${CLUSTER}.conf
+[global]
+fsid = $fsid
+mon initial members = ${MON_NAME}
+mon host = ${MON_IP}
+auth cluster required = cephx
+auth service required = cephx
+auth client required = cephx
+osd crush chooseleaf type = 0
+osd journal size = 100
+osd pool default pg num = 8
+osd pool default pgp num = 8
+osd pool default size = 1
+public network = ${CEPH_NETWORK}
+cluster network = ${CEPH_NETWORK}
+debug ms = 1
+
+[mon]
+debug mon = 20
+debug paxos = 20
+debug auth = 20
+
+[osd]
+debug osd = 20
+debug filestore = 20
+debug journal = 20
+debug monc = 20
+
+[mds]
+debug mds = 20
+debug mds balancer = 20
+debug mds log = 20
+debug mds migrator = 20
+
+[client.radosgw.gateway]
+rgw keystone url = http://${MON_IP}:${KEYSTONE_ADMIN_PORT}
+rgw keystone admin token = ${KEYSTONE_ADMIN_TOKEN}
+rgw keystone accepted roles = _member_
+ENDHERE
+
+ # Generate administrator key
+ ceph-authtool /etc/ceph/${CLUSTER}.client.admin.keyring --create-keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'
+
+ # Generate the mon. key
+ ceph-authtool /etc/ceph/${CLUSTER}.mon.keyring --create-keyring --gen-key -n mon. --cap mon 'allow *'
+
+ # Generate initial monitor map
+ monmaptool --create --add ${MON_NAME} ${MON_IP} --fsid ${fsid} /etc/ceph/monmap
+fi
+
+# If we don't have a monitor keyring, this is a new monitor
+if [ ! -e /var/lib/ceph/mon/${CLUSTER}-${MON_NAME}/keyring ]; then
+
+ if [ ! -e /etc/ceph/${CLUSTER}.client.admin.keyring ]; then
+ echo "ERROR- /etc/ceph/${CLUSTER}.client.admin.keyring must exist; get it from your existing mon"
+ exit 2
+ fi
+
+ if [ ! -e /etc/ceph/${CLUSTER}.mon.keyring ]; then
+ echo "ERROR- /etc/ceph/${CLUSTER}.mon.keyring must exist. You can extract it from your current monitor by running 'ceph auth get mon. -o /tmp/${CLUSTER}.mon.keyring'"
+ exit 3
+ fi
+
+ if [ ! -e /etc/ceph/monmap ]; then
+ echo "ERROR- /etc/ceph/monmap must exist. You can extract it from your current monitor by running 'ceph mon getmap -o /tmp/monmap'"
+ exit 4
+ fi
+
+ # Import the client.admin keyring and the monitor keyring into a new, temporary one
+ ceph-authtool /tmp/${CLUSTER}.mon.keyring --create-keyring --import-keyring /etc/ceph/${CLUSTER}.client.admin.keyring
+ ceph-authtool /tmp/${CLUSTER}.mon.keyring --import-keyring /etc/ceph/${CLUSTER}.mon.keyring
+
+ # Make the monitor directory
+ mkdir -p /var/lib/ceph/mon/${CLUSTER}-${MON_NAME}
+
+ # Prepare the monitor daemon's directory with the map and keyring
+ ceph-mon --mkfs -i ${MON_NAME} --monmap /etc/ceph/monmap --keyring /tmp/${CLUSTER}.mon.keyring
+
+ # Clean up the temporary key
+ rm /tmp/${CLUSTER}.mon.keyring
+fi
+
+# start MON
+ceph-mon -i ${MON_NAME} --public-addr ${MON_IP}:6789
+
+# change replica size
+ceph osd pool set rbd size 1
+
+
+#######
+# OSD #
+#######
+
+if [ ! -e /var/lib/ceph/osd/${CLUSTER}-0/keyring ]; then
+ # bootstrap OSD
+ mkdir -p /var/lib/ceph/osd/${CLUSTER}-0
+ # HACK create btrfs loopback device
+ echo "creating osd storage image"
+ dd if=/dev/zero of=/tmp/osddata bs=1M count=${OSD_SIZE}
+ mkfs.btrfs /tmp/osddata
+ echo "mounting via loopback"
+ mount -o loop /tmp/osddata /var/lib/ceph/osd/${CLUSTER}-0
+ echo "now mounted:"
+ mount
+ # end HACK
+ echo "creating osd"
+ ceph osd create
+ echo "creating osd filesystem"
+ ceph-osd -i 0 --mkfs
+ echo "creating osd keyring"
+ ceph auth get-or-create osd.0 osd 'allow *' mon 'allow profile osd' -o /var/lib/ceph/osd/${CLUSTER}-0/keyring
+ echo "configuring osd crush"
+ ceph osd crush add 0 1 root=default host=$(hostname -s)
+ echo "adding osd keyring"
+ ceph-osd -i 0 -k /var/lib/ceph/osd/${CLUSTER}-0/keyring
+fi
+
+# start OSD
+echo "starting osd"
+ceph-osd --cluster=${CLUSTER} -i 0
+
+#sleep 10
+
+#######
+# MDS #
+#######
+
+if [ ! -e /var/lib/ceph/mds/${CLUSTER}-0/keyring ]; then
+ # create ceph filesystem
+ echo "creating osd pool"
+ ceph osd pool create cephfs_data 8
+ echo "creating osd pool metadata"
+ ceph osd pool create cephfs_metadata 8
+ echo "creating cephfs"
+ ceph fs new cephfs cephfs_metadata cephfs_data
+
+ # bootstrap MDS
+ mkdir -p /var/lib/ceph/mds/${CLUSTER}-0
+ echo "creating mds auth"
+ ceph auth get-or-create mds.0 mds 'allow' osd 'allow *' mon 'allow profile mds' > /var/lib/ceph/mds/${CLUSTER}-0/keyring
+fi
+
+# start MDS
+echo "starting mds"
+ceph-mds --cluster=${CLUSTER} -i 0
+
+#sleep 10
+
+
+#######
+# RGW #
+#######
+
+if [ ! -e /var/lib/ceph/radosgw/${RGW_NAME}/keyring ]; then
+ # bootstrap RGW
+ mkdir -p /var/lib/ceph/radosgw/${RGW_NAME}
+ echo "creating rgw auth"
+ ceph auth get-or-create client.radosgw.gateway osd 'allow rwx' mon 'allow rw' -o /var/lib/ceph/radosgw/${RGW_NAME}/keyring
+fi
+
+# start RGW
+echo "starting rgw"
+radosgw -c /etc/ceph/ceph.conf -n client.radosgw.gateway -k /var/lib/ceph/radosgw/${RGW_NAME}/keyring --rgw-socket-path="" --rgw-frontends="civetweb port=${RGW_CIVETWEB_PORT}"
+
+
+#######
+# API #
+#######
+
+# start ceph-rest-api
+echo "starting rest api"
+ceph-rest-api -n client.admin &
+
+############
+# Keystone #
+############
+
+if [ ! -e /etc/keystone/${CLUSTER}.conf ]; then
+ cat <<ENDHERE > /etc/keystone/${CLUSTER}.conf
+[DEFAULT]
+admin_token=${KEYSTONE_ADMIN_TOKEN}
+admin_port=${KEYSTONE_ADMIN_PORT}
+public_port=${KEYSTONE_PUBLIC_PORT}
+
+[database]
+connection = sqlite:////var/lib/keystone/keystone.db
+ENDHERE
+
+ # start Keystone
+ echo "starting keystone"
+ keystone-all --config-file /etc/keystone/${CLUSTER}.conf &
+
+ # wait until up
+ while ! nc ${MON_IP} ${KEYSTONE_ADMIN_PORT} </dev/null; do
+ sleep 1
+ done
+
+ export OS_SERVICE_TOKEN=${KEYSTONE_ADMIN_TOKEN}
+ export OS_SERVICE_ENDPOINT=http://${MON_IP}:${KEYSTONE_ADMIN_PORT}/v2.0
+
+ echo "creating keystone service ${KEYSTONE_SERVICE}"
+ keystone service-create --name ${KEYSTONE_SERVICE} --type object-store
+ echo "creating keystone endpoint ${KEYSTONE_SERVICE}"
+ keystone endpoint-create --service ${KEYSTONE_SERVICE} \
+ --region ${KEYSTONE_ENDPOINT_REGION} \
+ --publicurl http://${MON_IP}:${RGW_CIVETWEB_PORT}/swift/v1 \
+ --internalurl http://${MON_IP}:${RGW_CIVETWEB_PORT}/swift/v1 \
+ --adminurl http://${MON_IP}:${RGW_CIVETWEB_PORT}/swift/v1
+
+ echo "creating keystone user ${KEYSTONE_ADMIN_USER}"
+ keystone user-create --name=${KEYSTONE_ADMIN_USER} --pass=${KEYSTONE_ADMIN_PASS} --email=dev@null.com
+ echo "creating keystone tenant ${KEYSTONE_ADMIN_TENANT}"
+ keystone tenant-create --name=${KEYSTONE_ADMIN_TENANT} --description=admin
+ echo "adding keystone role _member_"
+ keystone user-role-add --user=${KEYSTONE_ADMIN_USER} --tenant=${KEYSTONE_ADMIN_TENANT} --role=_member_
+
+ echo "creating keystone role admin"
+ keystone role-create --name=admin
+ echo "adding keystone role admin"
+ keystone user-role-add --user=${KEYSTONE_ADMIN_USER} --tenant=${KEYSTONE_ADMIN_TENANT} --role=admin
+fi
+
+
+#########
+# WATCH #
+#########
+
+echo "watching ceph"
+exec ceph -w
\ No newline at end of file
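The entrypoint wires RGW to keystone ('rgw keystone url' above), so a client first obtains a token from keystone v2.0 and then speaks the Swift API to RGW. A smoke-test sketch using the default ports and admin/admin/admin credentials from the script; the container IP and the TOKEN extraction are assumptions:

    HOST=172.17.0.2   # hypothetical container IP

    # request a keystone v2.0 token for the default admin user and tenant
    curl -s -X POST "http://$HOST:5001/v2.0/tokens" \
      -H 'Content-Type: application/json' \
      -d '{"auth":{"tenantName":"admin","passwordCredentials":{"username":"admin","password":"admin"}}}'

    # with the token id from the JSON response, list containers through the RGW swift endpoint
    curl -s -H "X-Auth-Token: $TOKEN" "http://$HOST:80/swift/v1"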
diff --git a/tests/objectstore/start-swift-ceph.sh b/tests/objectstore/start-swift-ceph.sh
new file mode 100755
index 00000000000..91a813f8bb3
--- /dev/null
+++ b/tests/objectstore/start-swift-ceph.sh
@@ -0,0 +1,108 @@
+#!/bin/bash
+#
+# ownCloud
+#
+# This script starts a docker container to run the files_external tests
+# against. It also changes the files_external config to use the docker
+# container as the testing environment. This is reverted in the stop step.
+#
+# Set environment variable DEBUG to print config file
+#
+# @author Morris Jobke
+# @author Robin McCorkell
+# @copyright 2015 ownCloud
+
+if ! command -v docker >/dev/null 2>&1; then
+ echo "No docker executable found - skipped docker setup"
+ exit 0;
+fi
+
+echo "Docker executable found - setup docker"
+
+
+docker_image=xenopathic/ceph-keystone
+
+echo "Fetch recent ${docker_image} docker image"
+docker pull ${docker_image}
+
+# retrieve current folder to place the config in the parent folder
+thisFolder="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+port=5034
+
+user=test
+pass=testing
+tenant=testenant
+region=testregion
+service=testceph
+
+container=`docker run -d \
+ -e KEYSTONE_PUBLIC_PORT=${port} \
+ -e KEYSTONE_ADMIN_USER=${user} \
+ -e KEYSTONE_ADMIN_PASS=${pass} \
+ -e KEYSTONE_ADMIN_TENANT=${tenant} \
+ -e KEYSTONE_ENDPOINT_REGION=${region} \
+ -e KEYSTONE_SERVICE=${service} \
+ -e OSD_SIZE=300 \
+ -v ${thisFolder}/entrypoint.sh:/entrypoint.sh \
+ --privileged \
+ --entrypoint /entrypoint.sh ${docker_image}`
+
+host=`docker inspect $container | grep IPAddress | cut -d '"' -f 4`
+
+
+echo "${docker_image} container: $container"
+
+# put container IDs into a file to drop them after the test run (keep in mind that multiple tests run in parallel on the same host)
+echo $container >> $thisFolder/dockerContainerCeph.$EXECUTOR_NUMBER.swift
+
+echo -n "Waiting for ceph initialization"
+starttime=$(date +%s)
+# support for GNU netcat and BSD netcat
+while ! (nc -c -w 1 ${host} 80 </dev/null >&/dev/null \
+ || nc -w 1 ${host} 80 </dev/null >&/dev/null); do
+ sleep 1
+ echo -n '.'
+ if (( $(date +%s) > starttime + 160 )); then
+ echo
+ echo "[ERROR] Waited 160 seconds, no response" >&2
+ exit 1
+ fi
+done
+echo
+sleep 20 # the keystone server also needs some time to fully initialize
+
+cat > $thisFolder/swift.config.php <<DELIM
+<?php
+\$CONFIG = array (
+'objectstore' => array(
+ 'class' => 'OC\\Files\\ObjectStore\\Swift',
+ 'arguments' => array(
+ 'username' => '$user',
+ 'password' => '$pass',
+ 'container' => 'owncloud-autotest$EXECUTOR_NUMBER',
+ 'autocreate' => true,
+ 'region' => '$region',
+ 'url' => 'http://$host:$port/v2.0',
+ 'tenantName' => '$tenant',
+ 'serviceName' => '$service',
+ ),
+),
+);
+
+DELIM
+
+if [ -n "$DEBUG" ]; then
+ echo "############## DEBUG info ###############"
+ echo "### Docker info"
+ docker info
+ echo "### Docker images"
+ docker images
+ echo "### current mountpoints"
+ mount
+ echo "### contents of $thisFolder/swift.config.php"
+ cat $thisFolder/swift.config.php
+ echo "### contents of $thisFolder/dockerContainerCeph.$EXECUTOR_NUMBER.swift"
+ cat $thisFolder/dockerContainerCeph.$EXECUTOR_NUMBER.swift
+ echo "############## DEBUG info end ###########"
+fi
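The DEBUG switch mentioned in the header is a plain environment variable. A sketch of a manual run outside CI; EXECUTOR_NUMBER normally comes from the CI executor, so it is set by hand here:

    EXECUTOR_NUMBER=0 DEBUG=1 ./tests/objectstore/start-swift-ceph.sh
    # inspect the container the script recorded
    docker logs "$(cat tests/objectstore/dockerContainerCeph.0.swift)"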
diff --git a/tests/objectstore/stop-swift-ceph.sh b/tests/objectstore/stop-swift-ceph.sh
new file mode 100755
index 00000000000..fcf5fdfdcd7
--- /dev/null
+++ b/tests/objectstore/stop-swift-ceph.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+#
+# ownCloud
+#
+# This script stops the docker container the files_external tests were run
+# against. It also reverts the config changes made in the start step.
+#
+# @author Morris Jobke
+# @author Robin McCorkell
+# @copyright 2015 ownCloud
+
+if ! command -v docker >/dev/null 2>&1; then
+ echo "No docker executable found - skipped docker stop"
+ exit 0;
+fi
+
+echo "Docker executable found - stop and remove docker containers"
+
+# retrieve current folder to remove the config from the parent folder
+thisFolder=`echo $0 | replace "stop-swift-ceph.sh" ""`
+
+if [ -z "$thisFolder" ]; then
+ thisFolder="."
+fi;
+
+# stopping and removing docker containers
+for container in `cat $thisFolder/dockerContainerCeph.$EXECUTOR_NUMBER.swift`; do
+ if [ -n "$DEBUG" ]; then
+ docker logs $container
+ fi
+ echo "Stopping and removing docker container $container"
+ # kills running container and removes it
+ docker rm -f $container
+done;
+
+# cleanup
+rm $thisFolder/swift.config.php
+rm $thisFolder/dockerContainerCeph.$EXECUTOR_NUMBER.swift
\ No newline at end of file
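One caveat: the replace utility used above to derive thisFolder ships with the MySQL client package and may not be installed. A sketch of the same derivation using the idiom the start script already uses:

    thisFolder="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"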