Merge branch 'master' into stable-diablo
commit e46bec6e3f
15 changed files with 475 additions and 239 deletions

exercise.sh (244 changed lines)
@@ -1,214 +1,46 @@
#!/usr/bin/env bash

# **exercise.sh** - using the cloud can be fun
# Run everything in the exercises/ directory that isn't explicitly disabled

# we will use the ``nova`` cli tool provided by the ``python-novaclient``
# package
#
# comma separated list of script basenames to skip
# to refrain from exercising euca.sh use SKIP_EXERCISES=euca
SKIP_EXERCISES=${SKIP_EXERCISES:-""}
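# (Editor's note: illustrative usage, not part of the diff.) For example, to
# skip both the euca and swift exercises one might run:
#
#   SKIP_EXERCISES=euca,swift ./exercise.sh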

# Locate the scripts we should run
EXERCISE_DIR=$(dirname "$0")/exercises
basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done)

# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit
# Track the state of each script
passes=""
failures=""
skips=""

# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following along as the install occurs.
set -o xtrace


# Settings
# ========

# Use openrc + stackrc + localrc for settings
source ./openrc

# Get a token for clients that don't support service catalog
# ==========================================================

# manually create a token by querying keystone (sending JSON data). Keystone
# returns a token and catalog of endpoints. We use python to parse the token
# and save it.

TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_API_KEY\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"`
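# (Editor's note: illustrative sketch, not part of the diff.) The one-liner
# above POSTs password credentials to keystone's v2.0 tokens endpoint and pulls
# access.token.id out of the JSON reply. Written out readably, with the same
# $NOVA_USERNAME/$NOVA_API_KEY/$HOST_IP variables, it is equivalent to:
#
#   curl -s \
#     -H "Content-type: application/json" \
#     -d "{\"auth\": {\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_API_KEY\"}}}" \
#     http://$HOST_IP:5000/v2.0/tokens \
#     | python -c "import sys, json; print json.load(sys.stdin)['access']['token']['id']"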

# Launching a server
# ==================

# List servers for tenant:
nova list

# Images
# ------

# Nova has a **deprecated** way of listing images.
nova image-list

# But we recommend using glance directly
glance -A $TOKEN index

# Let's grab the id of the first AMI image to launch
IMAGE=`glance -A $TOKEN index | egrep ami | cut -d" " -f1`

# Security Groups
# ---------------
SECGROUP=test_secgroup

# List of secgroups:
nova secgroup-list

# Create a secgroup
nova secgroup-create $SECGROUP "test_secgroup description"

# determine flavor
# ----------------

# List of flavors:
nova flavor-list

# and grab the first flavor in the list to launch
FLAVOR=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2`

NAME="myserver"

nova boot --flavor $FLAVOR --image $IMAGE $NAME --security_groups=$SECGROUP

# Testing
# =======

# First check if it spins up (becomes active and responds to ping on
# internal ip). If you run this script from a nova node, you should
# bypass security groups and have direct access to the server.

# Waiting for boot
# ----------------

# Max time to wait while vm goes from build to active state
ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10}

# Max time till the vm is bootable
BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}

# Max time to wait for proper association and dis-association.
ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}

# check that the status is active within ACTIVE_TIMEOUT seconds
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $NAME | grep status | grep -q ACTIVE; do sleep 1; done"; then
    echo "server didn't become active!"
    exit 1
fi
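# (Editor's note: illustrative sketch, not part of the diff.) The
# "timeout N sh -c 'while ! <check>; do sleep 1; done'" idiom above is reused
# throughout these scripts; a hypothetical helper wrapping it might look like:
#
#   function wait_for {
#       local secs=$1; shift
#       timeout $secs sh -c "while ! $*; do sleep 1; done"
#   }
#
#   # e.g. wait_for $ACTIVE_TIMEOUT "nova show $NAME | grep status | grep -q ACTIVE"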

# get the IP of the server
IP=`nova show $NAME | grep "private network" | cut -d"|" -f3`

# for single node deployments, we can ping private ips
MULTI_HOST=${MULTI_HOST:-0}
if [ "$MULTI_HOST" = "0" ]; then
    # sometimes the first ping fails (10 seconds isn't enough time for the VM's
    # network to respond?), so let's ping for a default of 15 seconds with a
    # timeout of a second for each ping.
    if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $IP; do sleep 1; done"; then
        echo "Couldn't ping server"
        exit 1
# Loop over each possible script (by basename)
for script in $basenames; do
    if [[ "$SKIP_EXERCISES" =~ $script ]] ; then
        skips="$skips $script"
    else
        echo =========================
        echo Running $script
        echo =========================
        $EXERCISE_DIR/$script.sh
        if [[ $? -ne 0 ]] ; then
            failures="$failures $script"
        else
            passes="$passes $script"
        fi
    fi
else
    # On a multi-host system, without vm net access, do a sleep to wait for the boot
    sleep $BOOT_TIMEOUT
fi
done

# Security Groups & Floating IPs
# ------------------------------

# allow icmp traffic (ping)
nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0

# List rules for a secgroup
nova secgroup-list-rules $SECGROUP

# allocate a floating ip
nova floating-ip-create

# store floating address
FLOATING_IP=`nova floating-ip-list | grep None | head -1 | cut -d '|' -f2 | sed 's/ //g'`

# add floating ip to our server
nova add-floating-ip $NAME $FLOATING_IP

# test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
    echo "Couldn't ping server with floating ip"
    exit 1
fi

# pause the VM and verify we can't ping it anymore
nova pause $NAME

sleep 2

if ( ping -c1 -w1 $IP ); then
    echo "Pause failure - ping shouldn't work"
    exit 1
fi

if ( ping -c1 -w1 $FLOATING_IP ); then
    echo "Pause failure - ping floating ips shouldn't work"
    exit 1
fi

# unpause the VM and verify we can ping it again
nova unpause $NAME

sleep 2

ping -c1 -w1 $IP

# dis-allow icmp traffic (ping)
nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0

# FIXME (anthony): make xs support security groups
if [ "$VIRT_DRIVER" != "xenserver" ]; then
    # test that we aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
    if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
        echo "Security group failure - ping should not be allowed!"
        echo "Couldn't ping server with floating ip"
        exit 1
    fi
fi

# de-allocate the floating ip
nova floating-ip-delete $FLOATING_IP

# shutdown the server
nova delete $NAME

# Delete a secgroup
nova secgroup-delete $SECGROUP

# FIXME: validate shutdown within 5 seconds
# (nova show $NAME returns 1 or status != ACTIVE)?

# Testing Euca2ools
# ==================

# make sure that we can describe instances
euca-describe-instances

if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
    # Testing Swift
    # =============

    # Check that we can talk to swift via keystone
    swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD stat

    # We start by creating a test container
    swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD post testcontainer

    # add some files into it.
    swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue

    # list them
    swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD list testcontainer

    # And we may want to delete them now that we have tested that
    # everything works.
    swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD delete testcontainer
fi
# output status of exercise run
echo =========================
echo =========================
for script in $skips; do
    echo SKIP $script
done
for script in $passes; do
    echo PASS $script
done
for script in $failures; do
    echo FAILED $script
done
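# (Editor's note: hypothetical sample run, not part of the diff.) With no
# exercises skipped and only the euca exercise failing, the summary printed by
# the loops above would look something like:
#
#   =========================
#   =========================
#   PASS floating_ips
#   PASS swift
#   FAILED euca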

exercises/euca.sh (new executable file, 36 lines)
@@ -0,0 +1,36 @@
#!/usr/bin/env bash

# we will use the ``euca2ools`` cli tool that wraps the python boto
# library to test ec2 compatibility
#

# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit

# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following along as the install occurs.
set -o xtrace


# Settings
# ========

# Use openrc + stackrc + localrc for settings
pushd $(cd $(dirname "$0")/.. && pwd)
source ./openrc
popd

# find a machine image to boot
IMAGE=`euca-describe-images | grep machine | cut -f2`

# launch it
INSTANCE=`euca-run-instances $IMAGE | grep INSTANCE | cut -f2`

# ensure it has booted within a reasonable time
if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then
    echo "server didn't become active within $RUNNING_TIMEOUT seconds"
    exit 1
fi

euca-terminate-instances $INSTANCE
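# (Editor's note: illustrative usage, not part of the diff.) RUNNING_TIMEOUT is
# exported by openrc (see the openrc hunk below); to give a slower machine more
# time one might run the exercise as:
#
#   RUNNING_TIMEOUT=60 ./exercises/euca.sh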

exercises/floating_ips.sh (new executable file, 190 lines)
@@ -0,0 +1,190 @@
#!/usr/bin/env bash

# **exercise.sh** - using the cloud can be fun

# we will use the ``nova`` cli tool provided by the ``python-novaclient``
# package
#

# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit

# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following along as the install occurs.
set -o xtrace


# Settings
# ========

# Use openrc + stackrc + localrc for settings
pushd $(cd $(dirname "$0")/.. && pwd)
source ./openrc
popd

# Get a token for clients that don't support service catalog
# ==========================================================

# manually create a token by querying keystone (sending JSON data). Keystone
# returns a token and catalog of endpoints. We use python to parse the token
# and save it.

TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_API_KEY\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"`

# Launching a server
# ==================

# List servers for tenant:
nova list

# Images
# ------

# Nova has a **deprecated** way of listing images.
nova image-list

# But we recommend using glance directly
glance -A $TOKEN index

# Let's grab the id of the first AMI image to launch
IMAGE=`glance -A $TOKEN index | egrep ami | cut -d" " -f1`

# Security Groups
# ---------------
SECGROUP=test_secgroup

# List of secgroups:
nova secgroup-list

# Create a secgroup
nova secgroup-create $SECGROUP "test_secgroup description"

# determine flavor
# ----------------

# List of flavors:
nova flavor-list

# and grab the first flavor in the list to launch
FLAVOR=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2`

NAME="myserver"

nova boot --flavor $FLAVOR --image $IMAGE $NAME --security_groups=$SECGROUP

# Testing
# =======

# First check if it spins up (becomes active and responds to ping on
# internal ip). If you run this script from a nova node, you should
# bypass security groups and have direct access to the server.

# Waiting for boot
# ----------------

# Max time to wait while vm goes from build to active state
ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10}

# Max time till the vm is bootable
BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}

# Max time to wait for proper association and dis-association.
ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}

# check that the status is active within ACTIVE_TIMEOUT seconds
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $NAME | grep status | grep -q ACTIVE; do sleep 1; done"; then
    echo "server didn't become active!"
    exit 1
fi

# get the IP of the server
IP=`nova show $NAME | grep "private network" | cut -d"|" -f3`

# for single node deployments, we can ping private ips
MULTI_HOST=${MULTI_HOST:-0}
if [ "$MULTI_HOST" = "0" ]; then
    # sometimes the first ping fails (10 seconds isn't enough time for the VM's
    # network to respond?), so let's ping for a default of 15 seconds with a
    # timeout of a second for each ping.
    if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $IP; do sleep 1; done"; then
        echo "Couldn't ping server"
        exit 1
    fi
else
    # On a multi-host system, without vm net access, do a sleep to wait for the boot
    sleep $BOOT_TIMEOUT
fi

# Security Groups & Floating IPs
# ------------------------------

# allow icmp traffic (ping)
nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0

# List rules for a secgroup
nova secgroup-list-rules $SECGROUP

# allocate a floating ip
nova floating-ip-create

# store floating address
FLOATING_IP=`nova floating-ip-list | grep None | head -1 | cut -d '|' -f2 | sed 's/ //g'`

# add floating ip to our server
nova add-floating-ip $NAME $FLOATING_IP

# test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
    echo "Couldn't ping server with floating ip"
    exit 1
fi

# pause the VM and verify we can't ping it anymore
nova pause $NAME

sleep 2

if ( ping -c1 -w1 $IP ); then
    echo "Pause failure - ping shouldn't work"
    exit 1
fi

if ( ping -c1 -w1 $FLOATING_IP ); then
    echo "Pause failure - ping floating ips shouldn't work"
    exit 1
fi

# unpause the VM and verify we can ping it again
nova unpause $NAME

sleep 2

ping -c1 -w1 $IP

# dis-allow icmp traffic (ping)
nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0

# FIXME (anthony): make xs support security groups
if [ "$VIRT_DRIVER" != "xenserver" ]; then
    # test that we aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
    if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
        echo "Security group failure - ping should not be allowed!"
        echo "Couldn't ping server with floating ip"
        exit 1
    fi
fi

# de-allocate the floating ip
nova floating-ip-delete $FLOATING_IP

# shutdown the server
nova delete $NAME

# Delete a secgroup
nova secgroup-delete $SECGROUP

# FIXME: validate shutdown within 5 seconds
# (nova show $NAME returns 1 or status != ACTIVE)?

exercises/swift.sh (new executable file, 40 lines)
@@ -0,0 +1,40 @@
#!/usr/bin/env bash

# Test swift via the command line tools that ship with it.

# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit

# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following along as the install occurs.
set -o xtrace


# Settings
# ========

# Use openrc + stackrc + localrc for settings
pushd $(cd $(dirname "$0")/.. && pwd)
source ./openrc
popd


# Testing Swift
# =============

# Check that we can talk to swift via keystone
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD stat

# We start by creating a test container
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD post testcontainer

# add some files into it.
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue

# list them
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD list testcontainer

# And we may want to delete them now that we have tested that
# everything works.
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD delete testcontainer
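# (Editor's note: illustrative sketch, not part of the diff.) A download step
# could be added between the upload and the delete to confirm the object
# actually landed in the cluster; the object name below assumes swift's usual
# behaviour of stripping the leading slash from uploaded paths:
#
#   swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD download testcontainer etc/issue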

@@ -1,4 +1,5 @@
dnsmasq-base
dnsmasq-utils # for dhcp_release
kpartx
parted
arping # used for send_arp_for_ha option in nova-network

@@ -38,4 +39,5 @@ python-boto

# Stuff for diablo volumes
iscsitarget
iscsitarget-dkms
lvm2

@@ -1,18 +0,0 @@
# a collection of packages that speed up installation as they are dependencies
# of packages we can't install during bootstrapping (rabbitmq-server,
# mysql-server, libvirt-bin)
#
# NOTE: only add packages to this file that aren't needed directly
mysql-common
mysql-client-5.1
erlang-base
erlang-ssl
erlang-nox
erlang-inets
erlang-mnesia
libhtml-template-perl
gettext-base
libavahi-client3
libxml2-utils
libpciaccess0
libparted0debian1

@@ -24,7 +24,7 @@ registry_port = 9191

# Log to this file. Make sure you do not set the same log
# file for both the API and registry servers!
log_file = %DEST%/glance/api.log
#log_file = %DEST%/glance/api.log

# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
use_syslog = %SYSLOG%

@@ -13,7 +13,7 @@ bind_port = 9191

# Log to this file. Make sure you do not set the same log
# file for both the API and registry servers!
log_file = %DEST%/glance/registry.log
#log_file = %DEST%/glance/registry.log

# Where to store images
filesystem_store_datadir = %DEST%/glance/images

@@ -16,6 +16,7 @@ account_autocreate = true
use = egg:swiftkeystone2#keystone2
keystone_admin_token = %SERVICE_TOKEN%
keystone_url = http://localhost:35357/v2.0
keystone_admin_group = Member

[filter:tempauth]
use = egg:swift#tempauth

openrc (11 changed lines)
@@ -49,3 +49,14 @@ export EC2_SECRET_KEY=${ADMIN_PASSWORD:-secrete}
# set log level to DEBUG (helps debug issues)
# export NOVACLIENT_DEBUG=1

# Max time till the vm is bootable
export BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}

# Max time to wait while vm goes from build to active state
export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10}

# Max time from run instance command until it is running
export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))}
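# (Editor's note: worked example, not part of the diff.) With the defaults
# above, BOOT_TIMEOUT=15 and ACTIVE_TIMEOUT=10, so RUNNING_TIMEOUT resolves to
# $((15 + 10)) = 25 seconds unless it is overridden in the environment.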

# Max time to wait for proper IP association and dis-association.
export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}

stack.sh (22 changed lines)
@@ -159,6 +159,9 @@ Q_PLUGIN=${Q_PLUGIN:-openvswitch}
# Specify which services to launch. These generally correspond to screen tabs
ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit}

# Name of the lvm volume group to use/create for iscsi volumes
VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes}

# Nova hypervisor configuration. We default to libvirt with **kvm** but will
# drop back to **qemu** if we are unable to load the kvm module. Stack.sh can
# also install an **LXC** based system.

@@ -368,7 +371,7 @@ apt_get update
apt_get install `cat $FILES/apts/* | cut -d\# -f1 | grep -Ev "mysql-server|rabbitmq-server|memcached"`

# install python requirements
sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install `cat $FILES/pips/*`
sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install --use-mirrors `cat $FILES/pips/*`

# git clone only if directory doesn't exist already. Since ``DEST`` might not
# be owned by the installation user, we create the directory and change the

@@ -394,7 +397,7 @@ function git_clone {
# remove the existing ignored files (like pyc) as they cause breakage
# (due to the py files having older timestamps than our pyc, so python
# thinks the pyc files are correct and uses them)
sudo git clean -f -d
find $GIT_DEST -name '*.pyc' -delete
git checkout -f origin/$GIT_BRANCH
# a local branch might not exist
git branch -D $GIT_BRANCH || true

@@ -691,7 +694,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then

    # swift-init has a bug using /etc/swift until bug #885595 is fixed,
    # so we have to create a link
    sudo ln -s ${SWIFT_CONFIG_LOCATION} /etc/swift
    sudo ln -sf ${SWIFT_CONFIG_LOCATION} /etc/swift

    # Swift uses rsync to synchronize between all the different
    # partitions (which makes more sense when you have a multi-node

@@ -704,6 +707,11 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
    # configured keystone it will checkout the directory.
    if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
        swift_auth_server=keystone

        # We install the memcached server as it will be used by the
        # middleware to cache the auth tokens for as long as needed.
        apt_get install memcached

        # We need a special version of bin/swift that understands the
        # OpenStack api 2.0; we download it until this gets
        # integrated in swift.

@@ -778,12 +786,12 @@ if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then
    #
    # By default, the backing file is 2G in size, and is stored in /opt/stack.
    #
    if ! sudo vgdisplay | grep -q nova-volumes; then
    if ! sudo vgdisplay | grep -q $VOLUME_GROUP; then
        VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DEST/nova-volumes-backing-file}
        VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M}
        truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE
        DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE`
        sudo vgcreate nova-volumes $DEV
        sudo vgcreate $VOLUME_GROUP $DEV
    fi
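# (Editor's note: illustrative sketch, not part of the diff.) The block above
# creates a sparse backing file, attaches it to a free loop device, and builds
# the volume group on top of it; after stack.sh runs, the result can be
# inspected with standard tools, e.g.:
#
#   sudo losetup -a                 # shows the loop device backed by the file
#   sudo vgdisplay $VOLUME_GROUP    # shows the volume group created on it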

    # Configure iscsitarget

@@ -812,6 +820,9 @@ if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
else
    add_nova_flag "--network_manager=nova.network.manager.$NET_MAN"
fi
if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then
    add_nova_flag "--volume_group=$VOLUME_GROUP"
fi
add_nova_flag "--my_ip=$HOST_IP"
add_nova_flag "--public_interface=$PUBLIC_INTERFACE"
add_nova_flag "--vlan_interface=$VLAN_INTERFACE"

@@ -826,6 +837,7 @@ add_nova_flag "--ec2_dmz_host=$EC2_DMZ_HOST"
add_nova_flag "--rabbit_host=$RABBIT_HOST"
add_nova_flag "--rabbit_password=$RABBIT_PASSWORD"
add_nova_flag "--glance_api_servers=$GLANCE_HOSTPORT"
add_nova_flag "--force_dhcp_release"
if [ -n "$INSTANCES_PATH" ]; then
    add_nova_flag "--instances_path=$INSTANCES_PATH"
fi

@@ -1,11 +1,14 @@
#!/bin/bash -e
# build_pxe_boot.sh - Create a PXE boot environment
# build_pxe_env.sh - Create a PXE boot environment
#
# build_pxe_boot.sh destdir
# build_pxe_env.sh destdir
#
# Requires Ubuntu Oneiric
#
# Assumes syslinux is installed
# Only needs to run as root if the destdir permissions require it

dpkg -l syslinux || apt-get install -y syslinux

DEST_DIR=${1:-/tmp}/tftpboot
PXEDIR=${PXEDIR:-/var/cache/devstack/pxe}
OPWD=`pwd`

@@ -1,8 +1,8 @@
#!/usr/bin/env bash

# Make sure that we have the proper version of ubuntu (only works on natty/oneiric)
if ! egrep -q "oneiric|natty" /etc/lsb-release; then
    echo "This script only works with ubuntu oneiric and natty"
# Make sure that we have the proper version of ubuntu (only works on oneiric)
if ! egrep -q "oneiric" /etc/lsb-release; then
    echo "This script only works with ubuntu oneiric."
    exit 1
fi

@@ -33,8 +33,8 @@ if [ ! -e $TOP_DIR/localrc ]; then
fi

# Install deps if needed
DEPS="kvm libvirt-bin kpartx"
dpkg -l $DEPS || apt-get install -y --force-yes $DEPS
DEPS="kvm libvirt-bin kpartx cloud-utils curl"
apt-get install -y --force-yes $DEPS

# Where to store files and instances
WORK_DIR=${WORK_DIR:-/opt/kvmstack}

tools/setup_stack_user.sh (new executable file, 74 lines)
@@ -0,0 +1,74 @@
#!/usr/bin/env bash

# Echo commands
set -o xtrace

# Exit on error to stop unexpected errors
set -o errexit

# Keep track of the current directory
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=`cd $TOOLS_DIR/..; pwd`

# Change dir to top of devstack
cd $TOP_DIR

# Echo usage
usage() {
    echo "Add stack user and keys"
    echo ""
    echo "Usage: $0 [full path to raw uec base image]"
}

# Make sure this is a raw image
if ! qemu-img info $1 | grep -q "file format: raw"; then
    usage
    exit 1
fi

# Mount the image
DEST=/opt/stack
STAGING_DIR=/tmp/`echo $1 | sed "s/\//_/g"`.stage.user
mkdir -p $STAGING_DIR
umount $STAGING_DIR || true
sleep 1
mount -t ext4 -o loop $1 $STAGING_DIR
mkdir -p $STAGING_DIR/$DEST

# Create a stack user that is a member of the libvirtd group so that stack
# is able to interact with libvirt.
chroot $STAGING_DIR groupadd libvirtd || true
chroot $STAGING_DIR useradd stack -s /bin/bash -d $DEST -G libvirtd || true

# Add a simple password - pass
echo stack:pass | chroot $STAGING_DIR chpasswd

# Configure sudo
grep -q "^#includedir.*/etc/sudoers.d" $STAGING_DIR/etc/sudoers ||
    echo "#includedir /etc/sudoers.d" | sudo tee -a $STAGING_DIR/etc/sudoers
cp $TOP_DIR/files/sudo/* $STAGING_DIR/etc/sudoers.d/
sed -e "s,%USER%,$USER,g" -i $STAGING_DIR/etc/sudoers.d/*

# and has sudo ability (in the future this should be limited to only what
# stack requires)
echo "stack ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers

# Gracefully cp only if source file/dir exists
function cp_it {
    if [ -e $1 ] || [ -d $1 ]; then
        cp -pRL $1 $2
    fi
}

# Copy over your ssh keys and env if desired
cp_it ~/.ssh $STAGING_DIR/$DEST/.ssh
cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/$DEST/.ssh/authorized_keys
cp_it ~/.gitconfig $STAGING_DIR/$DEST/.gitconfig
cp_it ~/.vimrc $STAGING_DIR/$DEST/.vimrc
cp_it ~/.bashrc $STAGING_DIR/$DEST/.bashrc

# Give stack ownership over $DEST so it may do the work needed
chroot $STAGING_DIR chown -R stack $DEST

# Unmount
umount $STAGING_DIR
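# (Editor's note: illustrative usage, not part of the diff.) Per the usage()
# message above, the script takes the full path to a raw uec base image and
# typically needs root for the mount/chroot steps, e.g. (hypothetical path):
#
#   sudo ./tools/setup_stack_user.sh /opt/kvmstack/images/uec-base-image.raw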

tools/warm_apts_and_pips.sh (new executable file, 53 lines)
@@ -0,0 +1,53 @@
#!/usr/bin/env bash

# Echo commands
set -o xtrace

# Exit on error to stop unexpected errors
set -o errexit

# Keep track of the current directory
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=`cd $TOOLS_DIR/..; pwd`

# Change dir to top of devstack
cd $TOP_DIR

# Echo usage
usage() {
    echo "Cache OpenStack dependencies on a uec image to speed up performance."
    echo ""
    echo "Usage: $0 [full path to raw uec base image]"
}

# Make sure this is a raw image
if ! qemu-img info $1 | grep -q "file format: raw"; then
    usage
    exit 1
fi

# Make sure we are in the correct dir
if [ ! -d files/apts ]; then
    echo "Please run this script from devstack/tools/"
    exit 1
fi

# Mount the image
STAGING_DIR=/tmp/`echo $1 | sed "s/\//_/g"`.stage
mkdir -p $STAGING_DIR
umount $STAGING_DIR || true
sleep 1
mount -t ext4 -o loop $1 $STAGING_DIR

# Make sure name resolution works inside the chroot so the base
# requirements can be installed
cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf

# Perform caching on the base image to speed up subsequent runs
chroot $STAGING_DIR apt-get update
chroot $STAGING_DIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1`
chroot $STAGING_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` || true
mkdir -p $STAGING_DIR/var/cache/pip
PIP_DOWNLOAD_CACHE=/var/cache/pip chroot $STAGING_DIR pip install `cat files/pips/*` || true

# Unmount
umount $STAGING_DIR
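# (Editor's note: illustrative usage, not part of the diff.) Like
# setup_stack_user.sh, this script takes the full path to a raw uec base image
# and needs root for the mount/chroot steps, e.g. (hypothetical path):
#
#   sudo ./tools/warm_apts_and_pips.sh /opt/kvmstack/images/uec-base-image.raw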