Add a new e2e test; fix some bugs/usability problems

Daniel Smith
2014-06-13 16:26:38 -07:00
parent a40529b379
commit 5626703634
5 changed files with 103 additions and 27 deletions

hack/e2e-suite/guestbook.sh Executable file

@@ -0,0 +1,52 @@
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Assumes a running Kubernetes test cluster; verifies that the guestbook example
# works. Assumes that we're being called by hack/e2e-test.sh (we use some env
# vars it sets up).
set -e
HAVE_JQ=$(which jq)
if [[ -z ${HAVE_JQ} ]]; then
  echo "Please install jq, e.g.: sudo apt-get install jq"
  exit 1
fi
source "${KUBE_REPO_ROOT}/cluster/util.sh"
GUESTBOOK="${KUBE_REPO_ROOT}/examples/guestbook"
# Launch the guestbook example
$CLOUDCFG -c "${GUESTBOOK}/redis-master.json" create /pods
$CLOUDCFG -c "${GUESTBOOK}/redis-master-service.json" create /services
$CLOUDCFG -c "${GUESTBOOK}/redis-slave-controller.json" create /replicationControllers
sleep 5
POD_LIST_1=$($CLOUDCFG -json list pods | jq ".items[].id")
echo "Pods running: ${POD_LIST_1}"
$CLOUDCFG stop redisSlaveController
# Needed until issue #103 gets fixed
sleep 25
$CLOUDCFG rm redisSlaveController
$CLOUDCFG delete services/redismaster
$CLOUDCFG delete pods/redis-master-2
POD_LIST_2=$($CLOUDCFG -json list pods | jq ".items[].id")
echo "Pods running after shutdown: ${POD_LIST_2}"
exit 0

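The jq filter used in guestbook.sh, ".items[].id", simply plucks the pod IDs out of the JSON pod listing so the script can print a before/after snapshot. A minimal illustration of that filter; the pod list below is a hand-written sample for this sketch, not captured apiserver output:

# Illustration only: hand-written sample input, not real apiserver output.
echo '{"items": [{"id": "redis-master-2"}, {"id": "some-other-pod"}]}' | jq ".items[].id"
# Prints one quoted id per line:
# "redis-master-2"
# "some-other-pod"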
hack/e2e-test.sh

@@ -17,6 +17,11 @@
 # Starts a Kubernetes cluster, runs the e2e test suite, and shuts it
 # down.
+# For debugging of this test's components, it's helpful to leave the test
+# cluster running.
+ALREADY_UP=${1:-0}
+LEAVE_UP=${2:-0}
 # Exit on error
 set -e
@@ -27,26 +32,30 @@ export CLOUDCFG="${KUBE_REPO_ROOT}/cluster/cloudcfg.sh"
source "${KUBE_REPO_ROOT}/cluster/util.sh"
# Build a release
$(dirname $0)/../release/release.sh
if [[ ${ALREADY_UP} -ne 1 ]]; then
# Build a release
$(dirname $0)/../release/release.sh
# Now bring a test cluster up with that release.
$(dirname $0)/../cluster/kube-up.sh
# Now bring a test cluster up with that release.
$(dirname $0)/../cluster/kube-up.sh
fi
# Detect the project into $PROJECT if it isn't set
detect-project
set +e
# Open up port 80 & 8080 so common containers on minions can be reached
gcutil addfirewall \
--norespect_terminal_width \
--project ${PROJECT} \
--target_tags ${MINION_TAG} \
--allowed tcp:80 \
--allowed tcp:8080 \
--network ${NETWORK} \
${MINION_TAG}-http-alt
if [[ ${ALREADY_UP} -ne 1 ]]; then
# Open up port 80 & 8080 so common containers on minions can be reached
gcutil addfirewall \
--norespect_terminal_width \
--project ${PROJECT} \
--target_tags ${MINION_TAG} \
--allowed tcp:80 \
--allowed tcp:8080 \
--network ${NETWORK} \
${MINION_TAG}-http-alt
fi
# Auto shutdown cluster when we exit
function shutdown-test-cluster () {
@@ -58,17 +67,25 @@ function shutdown-test-cluster () {
     ${MINION_TAG}-http-alt &
   $(dirname $0)/../cluster/kube-down.sh > /dev/null &
 }
-trap shutdown-test-cluster EXIT
+if [[ ${LEAVE_UP} -ne 1 ]]; then
+  trap shutdown-test-cluster EXIT
+fi
 any_failed=0
-for test_file in "$(dirname $0)/e2e-suite/*.sh"; do
-  $test_file
-  if [[ -z $? ]]; then
-    echo "${test_file}: passed!"
+for test_file in $(ls $(dirname $0)/e2e-suite/); do
+  "$(dirname $0)/e2e-suite/${test_file}"
+  result="$?"
+  if [[ "${result}" -eq "0" ]]; then
+    echo "${test_file} returned ${result}; passed!"
   else
-    echo "${test_file}: FAILED!"
+    echo "${test_file} returned ${result}; FAIL!"
     any_failed=1
   fi
 done
+if [[ ${any_failed} -ne 0 ]]; then
+  echo "At least one test failed."
+fi
 exit ${any_failed}
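The two new positional parameters make the driver fit the debugging workflow described in the comment at the top of the file: point it at a cluster that is already running and tell it to leave the cluster up afterwards. A usage sketch; the argument meanings follow from ALREADY_UP=${1:-0} and LEAVE_UP=${2:-0} above, everything else is illustrative:

# Full run: build a release, bring up a fresh cluster, open the firewall,
# run everything in hack/e2e-suite/, then tear the cluster down on exit.
hack/e2e-test.sh

# Debugging a test: the cluster is already up (first arg = 1), and the
# shutdown trap is skipped so the cluster stays up for inspection (second arg = 1).
hack/e2e-test.sh 1 1

Since the loop now runs every file in e2e-suite/ and treats a non-zero exit status as a failure, adding a test amounts to dropping another executable script into that directory. A hypothetical skeleton, not part of this commit, relying only on the environment the driver already exports (CLOUDCFG):

#!/bin/bash
# hack/e2e-suite/example.sh (hypothetical). Exit 0 on success, non-zero on failure.
set -e
# With set -e, a failing command makes the script exit non-zero and the suite marks it FAILED.
$CLOUDCFG -json list pods > /dev/null
exit 0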