main
James E. Blair 13 years ago
commit 1ec915e6b5

@@ -1,214 +1,46 @@
#!/usr/bin/env bash
# **exercise.sh** - using the cloud can be fun
# we will use the ``nova`` cli tool provided by the ``python-novaclient``
# package
#
# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit
# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following along as the install occurs.
set -o xtrace
# Settings
# ========
# Use openrc + stackrc + localrc for settings
source ./openrc
# Get a token for clients that don't support service catalog
# ==========================================================
# manually create a token by querying keystone (sending JSON data). Keystone
# returns a token and catalog of endpoints. We use python to parse the token
# and save it.
TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_API_KEY\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"`
# Launching a server
# ==================
# List servers for tenant:
nova list
# Images
# ------
# Nova has a **deprecated** way of listing images.
nova image-list
# But we recommend using glance directly
glance -A $TOKEN index
# Let's grab the id of the first AMI image to launch
IMAGE=`glance -A $TOKEN index | egrep ami | cut -d" " -f1`
# Security Groups
# ---------------
SECGROUP=test_secgroup
# List of secgroups:
nova secgroup-list
# Create a secgroup
nova secgroup-create $SECGROUP "test_secgroup description"
# determine flavor
# ----------------
# List of flavors:
nova flavor-list
# and grab the first flavor in the list to launch
FLAVOR=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2`
NAME="myserver"
nova boot --flavor $FLAVOR --image $IMAGE $NAME --security_groups=$SECGROUP
# Testing
# =======
# First check if it spins up (becomes active and responds to ping on
# internal ip). If you run this script from a nova node, you should
# bypass security groups and have direct access to the server.
# Waiting for boot
# ----------------
# Max time to wait while vm goes from build to active state
ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10}
# Max time till the vm is bootable
BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}
# Max time to wait for proper association and dis-association.
ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}
# check that the status is active within ACTIVE_TIMEOUT seconds
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $NAME | grep status | grep -q ACTIVE; do sleep 1; done"; then
echo "server didn't become active!"
exit 1
fi
# get the IP of the server
IP=`nova show $NAME | grep "private network" | cut -d"|" -f3`
# for single node deployments, we can ping private ips
MULTI_HOST=${MULTI_HOST:-0}
if [ "$MULTI_HOST" = "0" ]; then
# sometimes the first ping fails (10 seconds isn't enough time for the VM's
# network to respond?), so let's ping for a default of 15 seconds with a
# timeout of a second for each ping.
if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $IP; do sleep 1; done"; then
echo "Couldn't ping server"
exit 1
fi
else
# On a multi-host system, without vm net access, do a sleep to wait for the boot
sleep $BOOT_TIMEOUT
fi
# Security Groups & Floating IPs
# ------------------------------
# allow icmp traffic (ping)
nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
# List rules for a secgroup
nova secgroup-list-rules $SECGROUP
# allocate a floating ip
nova floating-ip-create
# store floating address
FLOATING_IP=`nova floating-ip-list | grep None | head -1 | cut -d '|' -f2 | sed 's/ //g'`
# add floating ip to our server
nova add-floating-ip $NAME $FLOATING_IP
# test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
echo "Couldn't ping server with floating ip"
exit 1
fi
# pause the VM and verify we can't ping it anymore
nova pause $NAME
sleep 2
if ( ping -c1 -w1 $IP); then
echo "Pause failure - ping shouldn't work"
exit 1
fi
if ( ping -c1 -w1 $FLOATING_IP); then
echo "Pause failure - ping floating ips shouldn't work"
exit 1
fi
# unpause the VM and verify we can ping it again
nova unpause $NAME
sleep 2
ping -c1 -w1 $IP
# dis-allow icmp traffic (ping)
nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0
# FIXME (anthony): make xs support security groups
if [ "$VIRT_DRIVER" != "xenserver" ]; then
# test that we aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
echo "Security group failure - ping should not be allowed!"
exit 1
fi
fi
# de-allocate the floating ip
nova floating-ip-delete $FLOATING_IP
# shutdown the server
nova delete $NAME
# Delete a secgroup
nova secgroup-delete $SECGROUP
# FIXME: validate shutdown within 5 seconds
# (nova show $NAME returns 1 or status != ACTIVE)?
# Testing Euca2ools
# ==================
# make sure that we can describe instances
euca-describe-instances
if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
# Testing Swift
# =============
# Check that we can access swift via keystone
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD stat
# We start by creating a test container
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD post testcontainer
# add some files into it.
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue
# list them
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD list testcontainer
# And we may want to delete them now that we have tested that
# everything works.
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD delete testcontainer
fi
# Run everything in the exercises/ directory that isn't explicitly disabled
# comma-separated list of script basenames to skip
# to refrain from exercising euca.sh use SKIP_EXERCISES=euca
SKIP_EXERCISES=${SKIP_EXERCISES:-""}
# Locate the scripts we should run
EXERCISE_DIR=$(dirname "$0")/exercises
basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done)
# Track the state of each script
passes=""
failures=""
skips=""
# Loop over each possible script (by basename)
for script in $basenames; do
if [[ "$SKIP_EXERCISES" =~ $script ]] ; then
skips="$skips $script"
else
echo =========================
echo Running $script
echo =========================
$EXERCISE_DIR/$script.sh
if [[ $? -ne 0 ]] ; then
failures="$failures $script"
else
passes="$passes $script"
fi
fi
done
# output status of exercise run
echo =========================
echo =========================
for script in $skips; do
echo SKIP $script
done
for script in $passes; do
echo PASS $script
done
for script in $failures; do
echo FAILED $script
done

@@ -0,0 +1,36 @@
#!/usr/bin/env bash
# we will use the ``euca2ools`` cli tool that wraps the python boto
# library to test ec2 compatibility
#
# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit
# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following along as the install occurs.
set -o xtrace
# Settings
# ========
# Use openrc + stackrc + localrc for settings
pushd $(cd $(dirname "$0")/.. && pwd)
source ./openrc
popd
# find a machine image to boot
IMAGE=`euca-describe-images | grep machine | cut -f2`
# launch it
INSTANCE=`euca-run-instances $IMAGE | grep INSTANCE | cut -f2`
# ensure it has booted within a reasonable time
if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then
echo "server didn't become active within $RUNNING_TIMEOUT seconds"
exit 1
fi
euca-terminate-instances $INSTANCE

@@ -0,0 +1,190 @@
#!/usr/bin/env bash
# **exercise.sh** - using the cloud can be fun
# we will use the ``nova`` cli tool provided by the ``python-novaclient``
# package
#
# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit
# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following along as the install occurs.
set -o xtrace
# Settings
# ========
# Use openrc + stackrc + localrc for settings
pushd $(cd $(dirname "$0")/.. && pwd)
source ./openrc
popd
# Get a token for clients that don't support service catalog
# ==========================================================
# manually create a token by querying keystone (sending JSON data). Keystone
# returns a token and catalog of endpoints. We use python to parse the token
# and save it.
TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_API_KEY\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"`
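# For reference, keystone's v2.0 response looks roughly like this (abridged):
#   {"access": {"token": {"id": "aaa-bbb-ccc", ...}, "serviceCatalog": [...]}}
# so the python one-liner above just pulls out access.token.id.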
# Launching a server
# ==================
# List servers for tenant:
nova list
# Images
# ------
# Nova has a **deprecated** way of listing images.
nova image-list
# But we recommend using glance directly
glance -A $TOKEN index
# Let's grab the id of the first AMI image to launch
IMAGE=`glance -A $TOKEN index | egrep ami | cut -d" " -f1`
# Security Groups
# ---------------
SECGROUP=test_secgroup
# List of secgroups:
nova secgroup-list
# Create a secgroup
nova secgroup-create $SECGROUP "test_secgroup description"
# determine flavor
# ----------------
# List of flavors:
nova flavor-list
# and grab the first flavor in the list to launch
FLAVOR=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2`
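# (flavor-list prints an ASCII table: lines 1-3 are the border and header, so
# line 4 is the first flavor row, and field 2 of that row is its id)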
NAME="myserver"
nova boot --flavor $FLAVOR --image $IMAGE $NAME --security_groups=$SECGROUP
# Testing
# =======
# First check if it spins up (becomes active and responds to ping on
# internal ip). If you run this script from a nova node, you should
# bypass security groups and have direct access to the server.
# Waiting for boot
# ----------------
# Max time to wait while vm goes from build to active state
ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10}
# Max time till the vm is bootable
BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}
# Max time to wait for proper association and dis-association.
ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}
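# The checks below share one polling idiom:
#   timeout N sh -c "while ! <check>; do sleep 1; done"
# which retries the check once a second and exits non-zero if N seconds
# elapse before the check succeeds.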
# check that the status is active within ACTIVE_TIMEOUT seconds
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $NAME | grep status | grep -q ACTIVE; do sleep 1; done"; then
echo "server didn't become active!"
exit 1
fi
# get the IP of the server
IP=`nova show $NAME | grep "private network" | cut -d"|" -f3`
# for single node deployments, we can ping private ips
MULTI_HOST=${MULTI_HOST:-0}
if [ "$MULTI_HOST" = "0" ]; then
# sometimes the first ping fails (10 seconds isn't enough time for the VM's
# network to respond?), so let's ping for a default of 15 seconds with a
# timeout of a second for each ping.
if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $IP; do sleep 1; done"; then
echo "Couldn't ping server"
exit 1
fi
else
# On a multi-host system, without vm net access, do a sleep to wait for the boot
sleep $BOOT_TIMEOUT
fi
# Security Groups & Floating IPs
# ------------------------------
# allow icmp traffic (ping)
nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
# List rules for a secgroup
nova secgroup-list-rules $SECGROUP
# allocate a floating ip
nova floating-ip-create
# store floating address
FLOATING_IP=`nova floating-ip-list | grep None | head -1 | cut -d '|' -f2 | sed 's/ //g'`
# add floating ip to our server
nova add-floating-ip $NAME $FLOATING_IP
# test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
echo "Couldn't ping server with floating ip"
exit 1
fi
# pause the VM and verify we can't ping it anymore
nova pause $NAME
sleep 2
if ( ping -c1 -w1 $IP); then
echo "Pause failure - ping shouldn't work"
exit 1
fi
if ( ping -c1 -w1 $FLOATING_IP); then
echo "Pause failure - ping floating ips shouldn't work"
exit 1
fi
# unpause the VM and verify we can ping it again
nova unpause $NAME
sleep 2
ping -c1 -w1 $IP
# dis-allow icmp traffic (ping)
nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0
# FIXME (anthony): make xs support security groups
if [ "$VIRT_DRIVER" != "xenserver" ]; then
# test that we aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
echo "Security group failure - ping should not be allowed!"
exit 1
fi
fi
# de-allocate the floating ip
nova floating-ip-delete $FLOATING_IP
# shutdown the server
nova delete $NAME
# Delete a secgroup
nova secgroup-delete $SECGROUP
# FIXME: validate shutdown within 5 seconds
# (nova show $NAME returns 1 or status != ACTIVE)?

@@ -0,0 +1,40 @@
#!/usr/bin/env bash
# Test swift via the command line tools that ship with it.
# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit
# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following along as the install occurs.
set -o xtrace
# Settings
# ========
# Use openrc + stackrc + localrc for settings
pushd $(cd $(dirname "$0")/.. && pwd)
source ./openrc
popd
# Testing Swift
# =============
# Check that we can access swift via keystone
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD stat
# We start by creating a test container
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD post testcontainer
# add some files into it.
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue
# list them
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD list testcontainer
# And we may want to delete them now that we have tested that
# everything works.
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD delete testcontainer

@@ -1,4 +1,5 @@
dnsmasq-base
dnsmasq-utils # for dhcp_release
kpartx
parted
arping # used for send_arp_for_ha option in nova-network

@@ -1,18 +0,0 @@
# a collection of packages that speed up installation as they are dependencies
# of packages we can't install during bootstrapping (rabbitmq-server,
# mysql-server, libvirt-bin)
#
# NOTE: only add packages to this file that aren't needed directly
mysql-common
mysql-client-5.1
erlang-base
erlang-ssl
erlang-nox
erlang-inets
erlang-mnesia
libhtml-template-perl
gettext-base
libavahi-client3
libxml2-utils
libpciaccess0
libparted0debian1

@@ -24,7 +24,7 @@ registry_port = 9191
# Log to this file. Make sure you do not set the same log
# file for both the API and registry servers!
log_file = %DEST%/glance/api.log
#log_file = %DEST%/glance/api.log
# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
use_syslog = %SYSLOG%

@@ -13,7 +13,7 @@ bind_port = 9191
# Log to this file. Make sure you do not set the same log
# file for both the API and registry servers!
log_file = %DEST%/glance/registry.log
#log_file = %DEST%/glance/registry.log
# Where to store images
filesystem_store_datadir = %DEST%/glance/images

@@ -0,0 +1,127 @@
#######
# EC2 #
#######
[composite:ec2]
use = egg:Paste#urlmap
/: ec2versions
/services/Cloud: ec2cloud
/services/Admin: ec2admin
/latest: ec2metadata
/2007-01-19: ec2metadata
/2007-03-01: ec2metadata
/2007-08-29: ec2metadata
/2007-10-10: ec2metadata
/2007-12-15: ec2metadata
/2008-02-01: ec2metadata
/2008-09-01: ec2metadata
/2009-04-04: ec2metadata
/1.0: ec2metadata
[pipeline:ec2cloud]
pipeline = logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor
[pipeline:ec2admin]
pipeline = logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor
[pipeline:ec2metadata]
pipeline = logrequest ec2md
[pipeline:ec2versions]
pipeline = logrequest ec2ver
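# (a paste "pipeline" chains the named filters left to right, ending in an
# app; each request passes through every filter before reaching the app)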
[filter:logrequest]
paste.filter_factory = nova.api.ec2:RequestLogging.factory
[filter:ec2lockout]
paste.filter_factory = nova.api.ec2:Lockout.factory
[filter:totoken]
paste.filter_factory = keystone.middleware.ec2_token:EC2Token.factory
[filter:ec2noauth]
paste.filter_factory = nova.api.ec2:NoAuth.factory
[filter:authenticate]
paste.filter_factory = nova.api.ec2:Authenticate.factory
[filter:cloudrequest]
controller = nova.api.ec2.cloud.CloudController
paste.filter_factory = nova.api.ec2:Requestify.factory
[filter:adminrequest]
controller = nova.api.ec2.admin.AdminController
paste.filter_factory = nova.api.ec2:Requestify.factory
[filter:authorizer]
paste.filter_factory = nova.api.ec2:Authorizer.factory
[app:ec2executor]
paste.app_factory = nova.api.ec2:Executor.factory
[app:ec2ver]
paste.app_factory = nova.api.ec2:Versions.factory
[app:ec2md]
paste.app_factory = nova.api.ec2.metadatarequesthandler:MetadataRequestHandler.factory
#############
# Openstack #
#############
[composite:osapi]
use = egg:Paste#urlmap
/: osversions
/v1.0: openstackapi10
/v1.1: openstackapi11
[pipeline:openstackapi10]
pipeline = faultwrap authtoken keystonecontext ratelimit osapiapp10
[pipeline:openstackapi11]
pipeline = faultwrap authtoken keystonecontext ratelimit extensions osapiapp11
[filter:faultwrap]
paste.filter_factory = nova.api.openstack:FaultWrapper.factory
[filter:auth]
paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory
[filter:noauth]
paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
[filter:ratelimit]
paste.filter_factory = nova.api.openstack.limits:RateLimitingMiddleware.factory
[filter:extensions]
paste.filter_factory = nova.api.openstack.extensions:ExtensionMiddleware.factory
[app:osapiapp10]
paste.app_factory = nova.api.openstack:APIRouterV10.factory
[app:osapiapp11]
paste.app_factory = nova.api.openstack:APIRouterV11.factory
[pipeline:osversions]
pipeline = faultwrap osversionapp
[app:osversionapp]
paste.app_factory = nova.api.openstack.versions:Versions.factory
##########
# Shared #
##########
[filter:keystonecontext]
paste.filter_factory = keystone.middleware.nova_keystone_context:NovaKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
service_protocol = http
service_host = 127.0.0.1
service_port = 5000
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
auth_uri = http://127.0.0.1:5000/
admin_token = %SERVICE_TOKEN%
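# %SERVICE_TOKEN% is a placeholder - stack.sh substitutes the real service
# token with sed when it copies this file into place (see the stack.sh hunk
# below).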

@@ -49,3 +49,14 @@ export EC2_SECRET_KEY=${ADMIN_PASSWORD:-secrete}
# set log level to DEBUG (helps debug issues)
# export NOVACLIENT_DEBUG=1
# Max time till the vm is bootable
export BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}
# Max time to wait while vm goes from build to active state
export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10}
# Max time from run instance command until it is running
export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))}
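# (with the defaults above this works out to $((15 + 10)) = 25 seconds)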
# Max time to wait for proper IP association and dis-association.
export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}

@@ -103,8 +103,7 @@ if [[ $EUID -eq 0 ]]; then
# since this script runs as a normal user, we need to give that user
# the ability to run sudo
apt_get update
apt_get install sudo
dpkg -l sudo || apt_get update && apt_get install sudo
if ! getent passwd stack >/dev/null; then
echo "Creating a user called stack"
@@ -121,7 +120,7 @@ if [[ $EUID -eq 0 ]]; then
echo "Copying files to stack user"
STACK_DIR="$DEST/${PWD##*/}"
cp -r -f "$PWD" "$STACK_DIR"
chown -R $USER "$STACK_DIR"
chown -R stack "$STACK_DIR"
if [[ "$SHELL_AFTER_RUN" != "no" ]]; then
exec su -c "set -e; cd $STACK_DIR; bash stack.sh; bash" stack
else
@@ -233,7 +232,7 @@ VLAN_INTERFACE=${VLAN_INTERFACE:-$PUBLIC_INTERFACE}
# Multi-host is a mode where each compute node runs its own network node. This
# allows network operations and routing for a VM to occur on the server that is
# running the VM - removing a SPOF and bandwidth bottleneck.
MULTI_HOST=${MULTI_HOST:-0}
MULTI_HOST=${MULTI_HOST:-False}
# If you are using FlatDHCP on multiple hosts, set the ``FLAT_INTERFACE``
# variable but make sure that the interface doesn't already have an
@@ -326,7 +325,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
# can never change.
read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
fi
# Keystone
# --------
@@ -591,13 +590,12 @@ fi
# ----
if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
# We are going to use the sample http middleware configuration from the
# keystone project to launch nova. This paste config adds the configuration
# required for nova to validate keystone tokens - except we need to switch
# the config to use our service token instead (instead of the invalid token
# 999888777666).
cp $KEYSTONE_DIR/examples/paste/nova-api-paste.ini $NOVA_DIR/bin
sed -e "s,999888777666,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
# We are going to use a sample http middleware configuration based on the
# one from the keystone project to launch nova. This paste config adds
# the configuration required for nova to validate keystone tokens. We add
# our own service token to the configuration.
cp $FILES/nova-api-paste.ini $NOVA_DIR/bin
sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
fi
if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then
@@ -679,13 +677,13 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
USER_GROUP=$(id -g)
sudo mkdir -p ${SWIFT_DATA_LOCATION}/drives
sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_LOCATION}/drives
# We then create a loopback disk and format it to XFS.
if [[ ! -e ${SWIFT_DATA_LOCATION}/drives/images/swift.img ]];then
mkdir -p ${SWIFT_DATA_LOCATION}/drives/images
sudo touch ${SWIFT_DATA_LOCATION}/drives/images/swift.img
sudo chown $USER: ${SWIFT_DATA_LOCATION}/drives/images/swift.img
dd if=/dev/zero of=${SWIFT_DATA_LOCATION}/drives/images/swift.img \
bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
mkfs.xfs -f -i size=1024 ${SWIFT_DATA_LOCATION}/drives/images/swift.img
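# (the dd above uses count=0 with seek, which only sets the file size: the
# loopback disk starts sparse and consumes real space only as swift writes)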
@@ -702,9 +700,9 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
# We then create a link to that mounted location so swift knows
# where to go.
for x in {1..4}; do sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x; done
# We now have to emulate a few different servers into one, so we
# create all the directories needed for swift
tmpd=""
for d in ${SWIFT_DATA_LOCATION}/drives/sdb1/{1..4} \
${SWIFT_CONFIG_LOCATION}/{object,container,account}-server \
@@ -720,7 +718,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
# swift-init has a bug using /etc/swift; until bug #885595 is fixed
# we have to create a link
sudo ln -s ${SWIFT_CONFIG_LOCATION} /etc/swift
# Swift uses rsync to synchronize between all the different
# partitions (which makes more sense when you have a multi-node
# setup); we configure it with our version of rsync.
@@ -756,7 +754,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
local bind_port=$2
local log_facility=$3
local node_number
for node_number in {1..4}; do
node_path=${SWIFT_DATA_LOCATION}/${node_number}
sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \
@@ -783,14 +781,14 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
# We can then start rsync.
sudo /etc/init.d/rsync restart || :
# Create our ring for the object/container/account.
/usr/local/bin/swift-remakerings
# And now we launch swift-startmain to get our cluster running
# ready to be tested.
/usr/local/bin/swift-startmain || :
unset s swift_hash swift_auth_server tmpd
fi
@@ -858,15 +856,16 @@ add_nova_flag "--ec2_dmz_host=$EC2_DMZ_HOST"
add_nova_flag "--rabbit_host=$RABBIT_HOST"
add_nova_flag "--rabbit_password=$RABBIT_PASSWORD"
add_nova_flag "--glance_api_servers=$GLANCE_HOSTPORT"
add_nova_flag "--force_dhcp_release"
if [ -n "$INSTANCES_PATH" ]; then
add_nova_flag "--instances_path=$INSTANCES_PATH"
fi
if [ -n "$MULTI_HOST" ]; then
add_nova_flag "--multi_host=$MULTI_HOST"
add_nova_flag "--send_arp_for_ha=1"
if [ "$MULTI_HOST" != "False" ]; then
add_nova_flag "--multi_host"
add_nova_flag "--send_arp_for_ha"
fi
if [ "$SYSLOG" != "False" ]; then
add_nova_flag "--use_syslog=1"
add_nova_flag "--use_syslog"
fi
# XenServer
@@ -942,6 +941,10 @@ function screen_it {
NL=`echo -ne '\015'`
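# ('\015' is a carriage return - stuffing it after the command is the "enter"
# keypress that makes the shell in the new window actually run it)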
if [[ "$ENABLED_SERVICES" =~ "$1" ]]; then
screen -S stack -X screen -t $1
# sleep to allow bash to be ready to be sent the command - we are
# creating a new window in screen and then sending characters, so if
# bash isn't running by the time we send the command, nothing happens
sleep 1
screen -S stack -p $1 -X stuff "$2$NL"
fi
}
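# A hypothetical call like: screen_it n-api "$NOVA_DIR/bin/nova-api"
# creates a window named n-api and types that command into it.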

@@ -0,0 +1,248 @@
#!/usr/bin/env bash
# Make sure that we have the proper version of ubuntu (only works on oneiric)
if ! egrep -q "oneiric" /etc/lsb-release; then
echo "This script only works with ubuntu oneiric."
exit 1
fi
# Keep track of the current directory
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=`cd $TOOLS_DIR/..; pwd`
cd $TOP_DIR
# Source params
source ./stackrc
# Ubuntu distro to install
DIST_NAME=${DIST_NAME:-oneiric}
# Configure how large the VM should be
GUEST_SIZE=${GUEST_SIZE:-10G}
# exit on error to stop unexpected errors
set -o errexit
set -o xtrace
# Abort if localrc is not set
if [ ! -e $TOP_DIR/localrc ]; then
echo "You must have a localrc with ALL necessary passwords defined before proceeding."
echo "See stack.sh for required passwords."
exit 1
fi
# Install deps if needed
DEPS="kvm libvirt-bin kpartx cloud-utils"
dpkg -l $DEPS || apt-get install -y --force-yes $DEPS
# Where to store files and instances
WORK_DIR=${WORK_DIR:-/opt/kvmstack}
# Where to store images
image_dir=$WORK_DIR/images/$DIST_NAME
mkdir -p $image_dir
# Original version of built image
uec_url=http://uec-images.ubuntu.com/$DIST_NAME/current/$DIST_NAME-server-cloudimg-amd64.tar.gz
tarball=$image_dir/$(basename $uec_url)
# download the base uec image if we haven't already
if [ ! -f $tarball ]; then
curl $uec_url -o $tarball
(cd $image_dir && tar -Sxvzf $tarball)
resize-part-image $image_dir/*.img $GUEST_SIZE $image_dir/disk
cp $image_dir/*-vmlinuz-virtual $image_dir/kernel
fi
# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD``
ROOT_PASSWORD=${ADMIN_PASSWORD:-password}
# Name of our instance, used by libvirt
GUEST_NAME=${GUEST_NAME:-devstack}
# Mop up after previous runs
virsh destroy $GUEST_NAME || true
# Where this vm is stored
vm_dir=$WORK_DIR/instances/$GUEST_NAME
# Create vm dir and remove old disk
mkdir -p $vm_dir
rm -f $vm_dir/disk
# Create a copy of the base image
qemu-img create -f qcow2 -b $image_dir/disk $vm_dir/disk
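# (-b makes the qcow2 image a copy-on-write overlay, so each vm gets a cheap
# private disk while the base image stays untouched)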
# Back to devstack
cd $TOP_DIR
GUEST_NETWORK=${GUEST_NETWORK:-1}
GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes}
GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50}
GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24}
GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0}
GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.$GUEST_NETWORK.1}
GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $GUEST_NETWORK`"}
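# (e.g. GUEST_NETWORK=1 yields 02:16:3e:07:69:01; the 02: prefix marks the
# address as locally administered)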
GUEST_RAM=${GUEST_RAM:-1524288}
GUEST_CORES=${GUEST_CORES:-1}
# net.xml network configuration
NET_XML=$vm_dir/net.xml
cat > $NET_XML <<EOF
<network>
<name>devstack-$GUEST_NETWORK</name>
<bridge name="stackbr%d" />
<forward/>
<ip address="$GUEST_GATEWAY" netmask="$GUEST_NETMASK">
<dhcp>
<range start='192.168.$GUEST_NETWORK.2' end='192.168.$GUEST_NETWORK.127' />
</dhcp>
</ip>
</network>
EOF
if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then
virsh net-destroy devstack-$GUEST_NETWORK || true
# destroying the network isn't enough to delete the leases
rm -f /var/lib/libvirt/dnsmasq/devstack-$GUEST_NETWORK.leases
virsh net-create $vm_dir/net.xml
fi
# libvirt.xml configuration
LIBVIRT_XML=$vm_dir/libvirt.xml
cat > $LIBVIRT_XML <<EOF
<domain type='kvm'>
<name>$GUEST_NAME</name>
<memory>$GUEST_RAM</memory>
<os>
<type>hvm</type>
<kernel>$image_dir/kernel</kernel>
<cmdline>root=/dev/vda ro console=ttyS0 init=/usr/lib/cloud-init/uncloud-init ds=nocloud-net;s=http://192.168.$GUEST_NETWORK.1:4567/ ubuntu-pass=ubuntu</cmdline>
</os>
<features>
<acpi/>
</features>
<clock offset='utc'/>
<vcpu>$GUEST_CORES</vcpu>
<devices>
<disk type='file'>
<driver type='qcow2'/>
<source file='$vm_dir/disk'/>
<target dev='vda' bus='virtio'/>
</disk>
<interface type='network'>
<source network='devstack-$GUEST_NETWORK'/>
</interface>
<!-- The order is significant here. File must be defined first -->
<serial type="file">
<source path='$vm_dir/console.log'/>
<target port='1'/>
</serial>
<console type='pty' tty='/dev/pts/2'>
<source path='/dev/pts/2'/>
<target port='0'/>
</console>
<serial type='pty'>
<source path='/dev/pts/2'/>
<target port='0'/>
</serial>
<graphics type='vnc' port='-1' autoport='yes' keymap='en-us' listen='0.0.0.0'/>
</devices>
</domain>
EOF
rm -rf $vm_dir/uec
cp -r $TOOLS_DIR/uec $vm_dir/uec
# set metadata
cat > $vm_dir/uec/meta-data<<EOF
hostname: $GUEST_NAME
instance-id: i-hop
instance-type: m1.ignore
local-hostname: $GUEST_NAME.local
EOF
# set user-data
cat > $vm_dir/uec/user-data<<EOF
#!/bin/bash
# hostname needs to resolve for rabbit
sed -i "s/127.0.0.1/127.0.0.1 \`hostname\`/" /etc/hosts
apt-get update
apt-get install git sudo -y
git clone https://github.com/cloudbuilders/devstack.git
cd devstack
git remote set-url origin `cd $TOP_DIR; git remote show origin | grep Fetch | awk '{print $3}'`
git fetch
git checkout `git rev-parse HEAD`
cat > localrc <<LOCAL_EOF
ROOTSLEEP=0
`cat $TOP_DIR/localrc`
LOCAL_EOF
./stack.sh
EOF
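# Note: the user-data heredoc delimiter is unquoted, so the unescaped
# backticks and variables expand on the host as the file is written - the
# guest ends up cloning the same remote and commit as this working tree.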
# (re)start a metadata service
(
pid=`lsof -iTCP@192.168.$GUEST_NETWORK.1:4567 -n | awk '{print $2}' | tail -1`
[ -z "$pid" ] || kill -9 $pid
)
cd $vm_dir/uec
python meta.py 192.168.$GUEST_NETWORK.1:4567 &
# Create the instance
virsh create $vm_dir/libvirt.xml
# Tail the console log till we are done
WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1}
if [ "$WAIT_TILL_LAUNCH" = "1" ]; then
set +o xtrace
# Done creating the vm, let's tail the log
echo
echo "============================================================="
echo " -- YAY! --"
echo "============================================================="
echo
echo "We're done launching the vm, about to start tailing the"
echo "stack.sh log. It will take a second or two to start."
echo
echo "Just CTRL-C at any time to stop tailing."
while [ ! -e "$vm_dir/console.log" ]; do
sleep 1
done
tail -F $vm_dir/console.log &
TAIL_PID=$!
function kill_tail() {
kill $TAIL_PID
exit 1
}
# Let Ctrl-c kill tail and exit
trap kill_tail SIGINT
echo "Waiting stack.sh to finish..."
while ! egrep -q '^stack.sh (completed|failed)' $vm_dir/console.log ; do
sleep 1
done
set -o xtrace
kill $TAIL_PID
if ! grep -q "^stack.sh completed in" $vm_dir/console.log; then
exit 1
fi
echo ""
echo "Finished - Zip-a-dee Doo-dah!"
fi

@@ -0,0 +1,74 @@
#!/usr/bin/env bash
# Echo commands
set -o xtrace
# Exit on error to stop unexpected errors
set -o errexit
# Keep track of the current directory
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=`cd $TOOLS_DIR/..; pwd`
# Change dir to top of devstack
cd $TOP_DIR
# Echo usage
usage() {
echo "Add stack user and keys"
echo ""
echo "Usage: $0 [full path to raw uec base image]"
}
# Make sure this is a raw image
if ! qemu-img info $1 | grep -q "file format: raw"; then
usage
exit 1
fi
# Mount the image
DEST=/opt/stack
STAGING_DIR=/tmp/`echo $1 | sed "s/\//_/g"`.stage.user
mkdir -p $STAGING_DIR
umount $STAGING_DIR || true
sleep 1
mount -t ext4 -o loop $1 $STAGING_DIR
mkdir -p $STAGING_DIR/$DEST
# Create a stack user that is a member of the libvirtd group so that stack
# is able to interact with libvirt.
chroot $STAGING_DIR groupadd libvirtd || true
chroot $STAGING_DIR useradd stack -s /bin/bash -d $DEST -G libvirtd || true
# Add a simple password - pass
echo stack:pass | chroot $STAGING_DIR chpasswd
# Configure sudo
grep -q "^#includedir.*/etc/sudoers.d" $STAGING_DIR/etc/sudoers ||
echo "#includedir /etc/sudoers.d" | sudo tee -a $STAGING_DIR/etc/sudoers
cp $TOP_DIR/files/sudo/* $STAGING_DIR/etc/sudoers.d/
sed -e "s,%USER%,$USER,g" -i $STAGING_DIR/etc/sudoers.d/*
# and has sudo ability (in the future this should be limited to only what
# stack requires)
echo "stack ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers
# Gracefully cp only if source file/dir exists
function cp_it {
if [ -e $1 ] || [ -d $1 ]; then
cp -pRL $1 $2
fi
}
# Copy over your ssh keys and env if desired
cp_it ~/.ssh $STAGING_DIR/$DEST/.ssh
cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/$DEST/.ssh/authorized_keys
cp_it ~/.gitconfig $STAGING_DIR/$DEST/.gitconfig
cp_it ~/.vimrc $STAGING_DIR/$DEST/.vimrc
cp_it ~/.bashrc $STAGING_DIR/$DEST/.bashrc
# Give stack ownership over $DEST so it may do the work needed
chroot $STAGING_DIR chown -R stack $DEST
# Unmount
umount $STAGING_DIR

@@ -0,0 +1,29 @@
import sys
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SimpleHTTPServer import SimpleHTTPRequestHandler
def main(host, port, HandlerClass = SimpleHTTPRequestHandler,
ServerClass = HTTPServer, protocol="HTTP/1.0"):
"""simple http server that listens on a give address:port"""
server_address = (host, port)
HandlerClass.protocol_version = protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
if __name__ == '__main__':
if sys.argv[1:]:
address = sys.argv[1]
else:
address = '0.0.0.0'
if ':' in address:
host, port = address.split(':')
else:
host = address
port = 8080
main(host, int(port))
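# The build script above uses this by cd'ing into the directory that holds
# meta-data/user-data and running e.g. "python meta.py 192.168.1.1:4567";
# SimpleHTTPRequestHandler serves files out of the current working directory.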

@@ -0,0 +1,53 @@
#!/usr/bin/env bash
# Echo commands
set -o xtrace
# Exit on error to stop unexpected errors
set -o errexit
# Keep track of the current directory
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=`cd $TOOLS_DIR/..; pwd`
# Change dir to top of devstack
cd $TOP_DIR
# Echo usage
usage() {
echo "Cache OpenStack dependencies on a uec image to speed up performance."
echo ""
echo "Usage: $0 [full path to raw uec base image]"
}
# Make sure this is a raw image
if ! qemu-img info $1 | grep -q "file format: raw"; then
usage
exit 1
fi
# Make sure we are in the correct dir
if [ ! -d files/apts ]; then
echo "Please run this script from devstack/tools/"
exit 1
fi
# Mount the image
STAGING_DIR=/tmp/`echo $1 | sed "s/\//_/g"`.stage
mkdir -p $STAGING_DIR
umount $STAGING_DIR || true
sleep 1
mount -t ext4 -o loop $1 $STAGING_DIR
# Make sure that base requirements are installed
cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf
# Perform caching on the base image to speed up subsequent runs
chroot $STAGING_DIR apt-get update
chroot $STAGING_DIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1`
chroot $STAGING_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` || true
mkdir -p $STAGING_DIR/var/cache/pip
PIP_DOWNLOAD_CACHE=/var/cache/pip chroot $STAGING_DIR pip install `cat files/pips/*` || true
# Unmount
umount $STAGING_DIR