Merge branch 'master' of https://github.com/cloudbuilders/devstack into test
commit
1ec915e6b5
@ -1,214 +1,46 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
# **exercise.sh** - using the cloud can be fun
|
# Run everything in the exercises/ directory that isn't explicitly disabled
|
||||||
|
|
||||||
# we will use the ``nova`` cli tool provided by the ``python-novaclient``
|
# comma separated list of script basenames to skip
|
||||||
# package
|
# to refrain from exercising euca.sh use SKIP_EXERCISES=euca
|
||||||
#
|
SKIP_EXERCISES=${SKIP_EXERCISES:-""}
|
||||||
|
|
||||||
|
# Locate the scripts we should run
|
||||||
# This script exits on an error so that errors don't compound and you see
|
EXERCISE_DIR=$(dirname "$0")/exercises
|
||||||
# only the first error that occured.
|
basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done)
|
||||||
set -o errexit
|
|
||||||
|
# Track the state of each script
|
||||||
# Print the commands being run so that we can see the command that triggers
|
passes=""
|
||||||
# an error. It is also useful for following allowing as the install occurs.
|
failures=""
|
||||||
set -o xtrace
|
skips=""
|
||||||
|
|
||||||
|
# Loop over each possible script (by basename)
|
||||||
# Settings
|
for script in $basenames; do
|
||||||
# ========
|
if [[ "$SKIP_EXERCISES" =~ $script ]] ; then
|
||||||
|
skips="$skips $script"
|
||||||
# Use openrc + stackrc + localrc for settings
|
else
|
||||||
source ./openrc
|
echo =========================
|
||||||
|
echo Running $script
|
||||||
# Get a token for clients that don't support service catalog
|
echo =========================
|
||||||
# ==========================================================
|
$EXERCISE_DIR/$script.sh
|
||||||
|
if [[ $? -ne 0 ]] ; then
|
||||||
# manually create a token by querying keystone (sending JSON data). Keystone
|
failures="$failures $script"
|
||||||
# returns a token and catalog of endpoints. We use python to parse the token
|
else
|
||||||
# and save it.
|
passes="$passes $script"
|
||||||
|
fi
|
||||||
TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_API_KEY\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"`
|
|
||||||
|
|
||||||
# Launching a server
|
|
||||||
# ==================
|
|
||||||
|
|
||||||
# List servers for tenant:
|
|
||||||
nova list
|
|
||||||
|
|
||||||
# Images
|
|
||||||
# ------
|
|
||||||
|
|
||||||
# Nova has a **deprecated** way of listing images.
|
|
||||||
nova image-list
|
|
||||||
|
|
||||||
# But we recommend using glance directly
|
|
||||||
glance -A $TOKEN index
|
|
||||||
|
|
||||||
# Let's grab the id of the first AMI image to launch
|
|
||||||
IMAGE=`glance -A $TOKEN index | egrep ami | cut -d" " -f1`
|
|
||||||
|
|
||||||
# Security Groups
|
|
||||||
# ---------------
|
|
||||||
SECGROUP=test_secgroup
|
|
||||||
|
|
||||||
# List of secgroups:
|
|
||||||
nova secgroup-list
|
|
||||||
|
|
||||||
# Create a secgroup
|
|
||||||
nova secgroup-create $SECGROUP "test_secgroup description"
|
|
||||||
|
|
||||||
# determine flavor
|
|
||||||
# ----------------
|
|
||||||
|
|
||||||
# List of flavors:
|
|
||||||
nova flavor-list
|
|
||||||
|
|
||||||
# and grab the first flavor in the list to launch
|
|
||||||
FLAVOR=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2`
|
|
||||||
|
|
||||||
NAME="myserver"
|
|
||||||
|
|
||||||
nova boot --flavor $FLAVOR --image $IMAGE $NAME --security_groups=$SECGROUP
|
|
||||||
|
|
||||||
# Testing
|
|
||||||
# =======
|
|
||||||
|
|
||||||
# First check if it spins up (becomes active and responds to ping on
|
|
||||||
# internal ip). If you run this script from a nova node, you should
|
|
||||||
# bypass security groups and have direct access to the server.
|
|
||||||
|
|
||||||
# Waiting for boot
|
|
||||||
# ----------------
|
|
||||||
|
|
||||||
# Max time to wait while vm goes from build to active state
|
|
||||||
ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10}
|
|
||||||
|
|
||||||
# Max time till the vm is bootable
|
|
||||||
BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}
|
|
||||||
|
|
||||||
# Max time to wait for proper association and dis-association.
|
|
||||||
ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}
|
|
||||||
|
|
||||||
# check that the status is active within ACTIVE_TIMEOUT seconds
|
|
||||||
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $NAME | grep status | grep -q ACTIVE; do sleep 1; done"; then
|
|
||||||
echo "server didn't become active!"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# get the IP of the server
|
|
||||||
IP=`nova show $NAME | grep "private network" | cut -d"|" -f3`
|
|
||||||
|
|
||||||
# for single node deployments, we can ping private ips
|
|
||||||
MULTI_HOST=${MULTI_HOST:-0}
|
|
||||||
if [ "$MULTI_HOST" = "0" ]; then
|
|
||||||
# sometimes the first ping fails (10 seconds isn't enough time for the VM's
|
|
||||||
# network to respond?), so let's ping for a default of 15 seconds with a
|
|
||||||
# timeout of a second for each ping.
|
|
||||||
if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $IP; do sleep 1; done"; then
|
|
||||||
echo "Couldn't ping server"
|
|
||||||
exit 1
|
|
||||||
fi
|
fi
|
||||||
else
|
done
|
||||||
# On a multi-host system, without vm net access, do a sleep to wait for the boot
|
|
||||||
sleep $BOOT_TIMEOUT
|
# output status of exercise run
|
||||||
fi
|
echo =========================
|
||||||
|
echo =========================
|
||||||
# Security Groups & Floating IPs
|
for script in $skips; do
|
||||||
# ------------------------------
|
echo SKIP $script
|
||||||
|
done
|
||||||
# allow icmp traffic (ping)
|
for script in $passes; do
|
||||||
nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
|
echo PASS $script
|
||||||
|
done
|
||||||
# List rules for a secgroup
|
for script in $failures; do
|
||||||
nova secgroup-list-rules $SECGROUP
|
echo FAILED $script
|
||||||
|
done
|
||||||
# allocate a floating ip
|
|
||||||
nova floating-ip-create
|
|
||||||
|
|
||||||
# store floating address
|
|
||||||
FLOATING_IP=`nova floating-ip-list | grep None | head -1 | cut -d '|' -f2 | sed 's/ //g'`
|
|
||||||
|
|
||||||
# add floating ip to our server
|
|
||||||
nova add-floating-ip $NAME $FLOATING_IP
|
|
||||||
|
|
||||||
# test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
|
|
||||||
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
|
|
||||||
echo "Couldn't ping server with floating ip"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# pause the VM and verify we can't ping it anymore
|
|
||||||
nova pause $NAME
|
|
||||||
|
|
||||||
sleep 2
|
|
||||||
|
|
||||||
if ( ping -c1 -w1 $IP); then
|
|
||||||
echo "Pause failure - ping shouldn't work"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if ( ping -c1 -w1 $FLOATING_IP); then
|
|
||||||
echo "Pause failure - ping floating ips shouldn't work"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# unpause the VM and verify we can ping it again
|
|
||||||
nova unpause $NAME
|
|
||||||
|
|
||||||
sleep 2
|
|
||||||
|
|
||||||
ping -c1 -w1 $IP
|
|
||||||
|
|
||||||
# dis-allow icmp traffic (ping)
|
|
||||||
nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0
|
|
||||||
|
|
||||||
# FIXME (anthony): make xs support security groups
|
|
||||||
if [ "$VIRT_DRIVER" != "xenserver" ]; then
|
|
||||||
# test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
|
|
||||||
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
|
|
||||||
print "Security group failure - ping should not be allowed!"
|
|
||||||
echo "Couldn't ping server with floating ip"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
# de-allocate the floating ip
|
|
||||||
nova floating-ip-delete $FLOATING_IP
|
|
||||||
|
|
||||||
# shutdown the server
|
|
||||||
nova delete $NAME
|
|
||||||
|
|
||||||
# Delete a secgroup
|
|
||||||
nova secgroup-delete $SECGROUP
|
|
||||||
|
|
||||||
# FIXME: validate shutdown within 5 seconds
|
|
||||||
# (nova show $NAME returns 1 or status != ACTIVE)?
|
|
||||||
|
|
||||||
# Testing Euca2ools
|
|
||||||
# ==================
|
|
||||||
|
|
||||||
# make sure that we can describe instances
|
|
||||||
euca-describe-instances
|
|
||||||
|
|
||||||
if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
|
|
||||||
# Testing Swift
|
|
||||||
# =============
|
|
||||||
|
|
||||||
# Check if we have to swift via keystone
|
|
||||||
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD stat
|
|
||||||
|
|
||||||
# We start by creating a test container
|
|
||||||
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD post testcontainer
|
|
||||||
|
|
||||||
# add some files into it.
|
|
||||||
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue
|
|
||||||
|
|
||||||
# list them
|
|
||||||
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD list testcontainer
|
|
||||||
|
|
||||||
# And we may want to delete them now that we have tested that
|
|
||||||
# everything works.
|
|
||||||
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD delete testcontainer
|
|
||||||
fi
|
|
||||||
|
@ -0,0 +1,36 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
# we will use the ``euca2ools`` cli tool that wraps the python boto
|
||||||
|
# library to test ec2 compatibility
|
||||||
|
#
|
||||||
|
|
||||||
|
# This script exits on an error so that errors don't compound and you see
|
||||||
|
# only the first error that occured.
|
||||||
|
set -o errexit
|
||||||
|
|
||||||
|
# Print the commands being run so that we can see the command that triggers
|
||||||
|
# an error. It is also useful for following allowing as the install occurs.
|
||||||
|
set -o xtrace
|
||||||
|
|
||||||
|
|
||||||
|
# Settings
|
||||||
|
# ========
|
||||||
|
|
||||||
|
# Use openrc + stackrc + localrc for settings
|
||||||
|
pushd $(cd $(dirname "$0")/.. && pwd)
|
||||||
|
source ./openrc
|
||||||
|
popd
|
||||||
|
|
||||||
|
# find a machine image to boot
|
||||||
|
IMAGE=`euca-describe-images | grep machine | cut -f2`
|
||||||
|
|
||||||
|
# launch it
|
||||||
|
INSTANCE=`euca-run-instances $IMAGE | grep INSTANCE | cut -f2`
|
||||||
|
|
||||||
|
# assure it has booted within a reasonable time
|
||||||
|
if ! timeout $RUNNING_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then
|
||||||
|
echo "server didn't become active within $RUNNING_TIMEOUT seconds"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
euca-terminate-instances $INSTANCE
|
@ -0,0 +1,190 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
# **exercise.sh** - using the cloud can be fun
|
||||||
|
|
||||||
|
# we will use the ``nova`` cli tool provided by the ``python-novaclient``
|
||||||
|
# package
|
||||||
|
#
|
||||||
|
|
||||||
|
|
||||||
|
# This script exits on an error so that errors don't compound and you see
|
||||||
|
# only the first error that occured.
|
||||||
|
set -o errexit
|
||||||
|
|
||||||
|
# Print the commands being run so that we can see the command that triggers
|
||||||
|
# an error. It is also useful for following allowing as the install occurs.
|
||||||
|
set -o xtrace
|
||||||
|
|
||||||
|
|
||||||
|
# Settings
|
||||||
|
# ========
|
||||||
|
|
||||||
|
# Use openrc + stackrc + localrc for settings
|
||||||
|
pushd $(cd $(dirname "$0")/.. && pwd)
|
||||||
|
source ./openrc
|
||||||
|
popd
|
||||||
|
|
||||||
|
# Get a token for clients that don't support service catalog
|
||||||
|
# ==========================================================
|
||||||
|
|
||||||
|
# manually create a token by querying keystone (sending JSON data). Keystone
|
||||||
|
# returns a token and catalog of endpoints. We use python to parse the token
|
||||||
|
# and save it.
|
||||||
|
|
||||||
|
TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_API_KEY\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"`
|
||||||
|
|
||||||
|
# Launching a server
|
||||||
|
# ==================
|
||||||
|
|
||||||
|
# List servers for tenant:
|
||||||
|
nova list
|
||||||
|
|
||||||
|
# Images
|
||||||
|
# ------
|
||||||
|
|
||||||
|
# Nova has a **deprecated** way of listing images.
|
||||||
|
nova image-list
|
||||||
|
|
||||||
|
# But we recommend using glance directly
|
||||||
|
glance -A $TOKEN index
|
||||||
|
|
||||||
|
# Let's grab the id of the first AMI image to launch
|
||||||
|
IMAGE=`glance -A $TOKEN index | egrep ami | cut -d" " -f1`
|
||||||
|
|
||||||
|
# Security Groups
|
||||||
|
# ---------------
|
||||||
|
SECGROUP=test_secgroup
|
||||||
|
|
||||||
|
# List of secgroups:
|
||||||
|
nova secgroup-list
|
||||||
|
|
||||||
|
# Create a secgroup
|
||||||
|
nova secgroup-create $SECGROUP "test_secgroup description"
|
||||||
|
|
||||||
|
# determine flavor
|
||||||
|
# ----------------
|
||||||
|
|
||||||
|
# List of flavors:
|
||||||
|
nova flavor-list
|
||||||
|
|
||||||
|
# and grab the first flavor in the list to launch
|
||||||
|
FLAVOR=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2`
|
||||||
|
|
||||||
|
NAME="myserver"
|
||||||
|
|
||||||
|
nova boot --flavor $FLAVOR --image $IMAGE $NAME --security_groups=$SECGROUP
|
||||||
|
|
||||||
|
# Testing
|
||||||
|
# =======
|
||||||
|
|
||||||
|
# First check if it spins up (becomes active and responds to ping on
|
||||||
|
# internal ip). If you run this script from a nova node, you should
|
||||||
|
# bypass security groups and have direct access to the server.
|
||||||
|
|
||||||
|
# Waiting for boot
|
||||||
|
# ----------------
|
||||||
|
|
||||||
|
# Max time to wait while vm goes from build to active state
|
||||||
|
ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10}
|
||||||
|
|
||||||
|
# Max time till the vm is bootable
|
||||||
|
BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}
|
||||||
|
|
||||||
|
# Max time to wait for proper association and dis-association.
|
||||||
|
ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}
|
||||||
|
|
||||||
|
# check that the status is active within ACTIVE_TIMEOUT seconds
|
||||||
|
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $NAME | grep status | grep -q ACTIVE; do sleep 1; done"; then
|
||||||
|
echo "server didn't become active!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# get the IP of the server
|
||||||
|
IP=`nova show $NAME | grep "private network" | cut -d"|" -f3`
|
||||||
|
|
||||||
|
# for single node deployments, we can ping private ips
|
||||||
|
MULTI_HOST=${MULTI_HOST:-0}
|
||||||
|
if [ "$MULTI_HOST" = "0" ]; then
|
||||||
|
# sometimes the first ping fails (10 seconds isn't enough time for the VM's
|
||||||
|
# network to respond?), so let's ping for a default of 15 seconds with a
|
||||||
|
# timeout of a second for each ping.
|
||||||
|
if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $IP; do sleep 1; done"; then
|
||||||
|
echo "Couldn't ping server"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
# On a multi-host system, without vm net access, do a sleep to wait for the boot
|
||||||
|
sleep $BOOT_TIMEOUT
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Security Groups & Floating IPs
|
||||||
|
# ------------------------------
|
||||||
|
|
||||||
|
# allow icmp traffic (ping)
|
||||||
|
nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
|
||||||
|
|
||||||
|
# List rules for a secgroup
|
||||||
|
nova secgroup-list-rules $SECGROUP
|
||||||
|
|
||||||
|
# allocate a floating ip
|
||||||
|
nova floating-ip-create
|
||||||
|
|
||||||
|
# store floating address
|
||||||
|
FLOATING_IP=`nova floating-ip-list | grep None | head -1 | cut -d '|' -f2 | sed 's/ //g'`
|
||||||
|
|
||||||
|
# add floating ip to our server
|
||||||
|
nova add-floating-ip $NAME $FLOATING_IP
|
||||||
|
|
||||||
|
# test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
|
||||||
|
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
|
||||||
|
echo "Couldn't ping server with floating ip"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# pause the VM and verify we can't ping it anymore
|
||||||
|
nova pause $NAME
|
||||||
|
|
||||||
|
sleep 2
|
||||||
|
|
||||||
|
if ( ping -c1 -w1 $IP); then
|
||||||
|
echo "Pause failure - ping shouldn't work"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ( ping -c1 -w1 $FLOATING_IP); then
|
||||||
|
echo "Pause failure - ping floating ips shouldn't work"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# unpause the VM and verify we can ping it again
|
||||||
|
nova unpause $NAME
|
||||||
|
|
||||||
|
sleep 2
|
||||||
|
|
||||||
|
ping -c1 -w1 $IP
|
||||||
|
|
||||||
|
# dis-allow icmp traffic (ping)
|
||||||
|
nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0
|
||||||
|
|
||||||
|
# FIXME (anthony): make xs support security groups
|
||||||
|
if [ "$VIRT_DRIVER" != "xenserver" ]; then
|
||||||
|
# test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
|
||||||
|
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
|
||||||
|
print "Security group failure - ping should not be allowed!"
|
||||||
|
echo "Couldn't ping server with floating ip"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# de-allocate the floating ip
|
||||||
|
nova floating-ip-delete $FLOATING_IP
|
||||||
|
|
||||||
|
# shutdown the server
|
||||||
|
nova delete $NAME
|
||||||
|
|
||||||
|
# Delete a secgroup
|
||||||
|
nova secgroup-delete $SECGROUP
|
||||||
|
|
||||||
|
# FIXME: validate shutdown within 5 seconds
|
||||||
|
# (nova show $NAME returns 1 or status != ACTIVE)?
|
||||||
|
|
@ -0,0 +1,40 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
# Test swift via the command line tools that ship with it.
|
||||||
|
|
||||||
|
# This script exits on an error so that errors don't compound and you see
|
||||||
|
# only the first error that occured.
|
||||||
|
set -o errexit
|
||||||
|
|
||||||
|
# Print the commands being run so that we can see the command that triggers
|
||||||
|
# an error. It is also useful for following allowing as the install occurs.
|
||||||
|
set -o xtrace
|
||||||
|
|
||||||
|
|
||||||
|
# Settings
|
||||||
|
# ========
|
||||||
|
|
||||||
|
# Use openrc + stackrc + localrc for settings
|
||||||
|
pushd $(cd $(dirname "$0")/.. && pwd)
|
||||||
|
source ./openrc
|
||||||
|
popd
|
||||||
|
|
||||||
|
|
||||||
|
# Testing Swift
|
||||||
|
# =============
|
||||||
|
|
||||||
|
# Check if we have to swift via keystone
|
||||||
|
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD stat
|
||||||
|
|
||||||
|
# We start by creating a test container
|
||||||
|
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD post testcontainer
|
||||||
|
|
||||||
|
# add some files into it.
|
||||||
|
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue
|
||||||
|
|
||||||
|
# list them
|
||||||
|
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD list testcontainer
|
||||||
|
|
||||||
|
# And we may want to delete them now that we have tested that
|
||||||
|
# everything works.
|
||||||
|
swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD delete testcontainer
|
@ -1,18 +0,0 @@
|
|||||||
# a collection of packages that speed up installation as they are dependencies
|
|
||||||
# of packages we can't install during bootstraping (rabbitmq-server,
|
|
||||||
# mysql-server, libvirt-bin)
|
|
||||||
#
|
|
||||||
# NOTE: only add packages to this file that aren't needed directly
|
|
||||||
mysql-common
|
|
||||||
mysql-client-5.1
|
|
||||||
erlang-base
|
|
||||||
erlang-ssl
|
|
||||||
erlang-nox
|
|
||||||
erlang-inets
|
|
||||||
erlang-mnesia
|
|
||||||
libhtml-template-perl
|
|
||||||
gettext-base
|
|
||||||
libavahi-client3
|
|
||||||
libxml2-utils
|
|
||||||
libpciaccess0
|
|
||||||
libparted0debian1
|
|
@ -0,0 +1,127 @@
|
|||||||
|
#######
|
||||||
|
# EC2 #
|
||||||
|
#######
|
||||||
|
|
||||||
|
[composite:ec2]
|
||||||
|
use = egg:Paste#urlmap
|
||||||
|
/: ec2versions
|
||||||
|
/services/Cloud: ec2cloud
|
||||||
|
/services/Admin: ec2admin
|
||||||
|
/latest: ec2metadata
|
||||||
|
/2007-01-19: ec2metadata
|
||||||
|
/2007-03-01: ec2metadata
|
||||||
|
/2007-08-29: ec2metadata
|
||||||
|
/2007-10-10: ec2metadata
|
||||||
|
/2007-12-15: ec2metadata
|
||||||
|
/2008-02-01: ec2metadata
|
||||||
|
/2008-09-01: ec2metadata
|
||||||
|
/2009-04-04: ec2metadata
|
||||||
|
/1.0: ec2metadata
|
||||||
|
|
||||||
|
[pipeline:ec2cloud]
|
||||||
|
pipeline = logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor
|
||||||
|
|
||||||
|
[pipeline:ec2admin]
|
||||||
|
pipeline = logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor
|
||||||
|
|
||||||
|
[pipeline:ec2metadata]
|
||||||
|
pipeline = logrequest ec2md
|
||||||
|
|
||||||
|
[pipeline:ec2versions]
|
||||||
|
pipeline = logrequest ec2ver
|
||||||
|
|
||||||
|
[filter:logrequest]
|
||||||
|
paste.filter_factory = nova.api.ec2:RequestLogging.factory
|
||||||
|
|
||||||
|
[filter:ec2lockout]
|
||||||
|
paste.filter_factory = nova.api.ec2:Lockout.factory
|
||||||
|
|
||||||
|
[filter:totoken]
|
||||||
|
paste.filter_factory = keystone.middleware.ec2_token:EC2Token.factory
|
||||||
|
|
||||||
|
[filter:ec2noauth]
|
||||||
|
paste.filter_factory = nova.api.ec2:NoAuth.factory
|
||||||
|
|
||||||
|
[filter:authenticate]
|
||||||
|
paste.filter_factory = nova.api.ec2:Authenticate.factory
|
||||||
|
|
||||||
|
[filter:cloudrequest]
|
||||||
|
controller = nova.api.ec2.cloud.CloudController
|
||||||
|
paste.filter_factory = nova.api.ec2:Requestify.factory
|
||||||
|
|
||||||
|
[filter:adminrequest]
|
||||||
|
controller = nova.api.ec2.admin.AdminController
|
||||||
|
paste.filter_factory = nova.api.ec2:Requestify.factory
|
||||||
|
|
||||||
|
[filter:authorizer]
|
||||||
|
paste.filter_factory = nova.api.ec2:Authorizer.factory
|
||||||
|
|
||||||
|
[app:ec2executor]
|
||||||
|
paste.app_factory = nova.api.ec2:Executor.factory
|
||||||
|
|
||||||
|
[app:ec2ver]
|
||||||
|
paste.app_factory = nova.api.ec2:Versions.factory
|
||||||
|
|
||||||
|
[app:ec2md]
|
||||||
|
paste.app_factory = nova.api.ec2.metadatarequesthandler:MetadataRequestHandler.factory
|
||||||
|
|
||||||
|
#############
|
||||||
|
# Openstack #
|
||||||
|
#############
|
||||||
|
|
||||||
|
[composite:osapi]
|
||||||
|
use = egg:Paste#urlmap
|
||||||
|
/: osversions
|
||||||
|
/v1.0: openstackapi10
|
||||||
|
/v1.1: openstackapi11
|
||||||
|
|
||||||
|
[pipeline:openstackapi10]
|
||||||
|
pipeline = faultwrap authtoken keystonecontext ratelimit osapiapp10
|
||||||
|
|
||||||
|
[pipeline:openstackapi11]
|
||||||
|
pipeline = faultwrap authtoken keystonecontext ratelimit extensions osapiapp11
|
||||||
|
|
||||||
|
[filter:faultwrap]
|
||||||
|
paste.filter_factory = nova.api.openstack:FaultWrapper.factory
|
||||||
|
|
||||||
|
[filter:auth]
|
||||||
|
paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory
|
||||||
|
|
||||||
|
[filter:noauth]
|
||||||
|
paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
|
||||||
|
|
||||||
|
[filter:ratelimit]
|
||||||
|
paste.filter_factory = nova.api.openstack.limits:RateLimitingMiddleware.factory
|
||||||
|
|
||||||
|
[filter:extensions]
|
||||||
|
paste.filter_factory = nova.api.openstack.extensions:ExtensionMiddleware.factory
|
||||||
|
|
||||||
|
[app:osapiapp10]
|
||||||
|
paste.app_factory = nova.api.openstack:APIRouterV10.factory
|
||||||
|
|
||||||
|
[app:osapiapp11]
|
||||||
|
paste.app_factory = nova.api.openstack:APIRouterV11.factory
|
||||||
|
|
||||||
|
[pipeline:osversions]
|
||||||
|
pipeline = faultwrap osversionapp
|
||||||
|
|
||||||
|
[app:osversionapp]
|
||||||
|
paste.app_factory = nova.api.openstack.versions:Versions.factory
|
||||||
|
|
||||||
|
##########
|
||||||
|
# Shared #
|
||||||
|
##########
|
||||||
|
|
||||||
|
[filter:keystonecontext]
|
||||||
|
paste.filter_factory = keystone.middleware.nova_keystone_context:NovaKeystoneContext.factory
|
||||||
|
|
||||||
|
[filter:authtoken]
|
||||||
|
paste.filter_factory = keystone.middleware.auth_token:filter_factory
|
||||||
|
service_protocol = http
|
||||||
|
service_host = 127.0.0.1
|
||||||
|
service_port = 5000
|
||||||
|
auth_host = 127.0.0.1
|
||||||
|
auth_port = 35357
|
||||||
|
auth_protocol = http
|
||||||
|
auth_uri = http://127.0.0.1:5000/
|
||||||
|
admin_token = %SERVICE_TOKEN%
|
@ -0,0 +1,248 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
# Make sure that we have the proper version of ubuntu (only works on oneiric)
|
||||||
|
if ! egrep -q "oneiric" /etc/lsb-release; then
|
||||||
|
echo "This script only works with ubuntu oneiric."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Keep track of the current directory
|
||||||
|
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
|
||||||
|
TOP_DIR=`cd $TOOLS_DIR/..; pwd`
|
||||||
|
|
||||||
|
cd $TOP_DIR
|
||||||
|
|
||||||
|
# Source params
|
||||||
|
source ./stackrc
|
||||||
|
|
||||||
|
# Ubuntu distro to install
|
||||||
|
DIST_NAME=${DIST_NAME:-oneiric}
|
||||||
|
|
||||||
|
# Configure how large the VM should be
|
||||||
|
GUEST_SIZE=${GUEST_SIZE:-10G}
|
||||||
|
|
||||||
|
# exit on error to stop unexpected errors
|
||||||
|
set -o errexit
|
||||||
|
set -o xtrace
|
||||||
|
|
||||||
|
# Abort if localrc is not set
|
||||||
|
if [ ! -e $TOP_DIR/localrc ]; then
|
||||||
|
echo "You must have a localrc with ALL necessary passwords defined before proceeding."
|
||||||
|
echo "See stack.sh for required passwords."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Install deps if needed
|
||||||
|
DEPS="kvm libvirt-bin kpartx cloud-utils"
|
||||||
|
dpkg -l $DEPS || apt-get install -y --force-yes $DEPS
|
||||||
|
|
||||||
|
# Where to store files and instances
|
||||||
|
WORK_DIR=${WORK_DIR:-/opt/kvmstack}
|
||||||
|
|
||||||
|
# Where to store images
|
||||||
|
image_dir=$WORK_DIR/images/$DIST_NAME
|
||||||
|
mkdir -p $image_dir
|
||||||
|
|
||||||
|
# Original version of built image
|
||||||
|
uec_url=http://uec-images.ubuntu.com/$DIST_NAME/current/$DIST_NAME-server-cloudimg-amd64.tar.gz
|
||||||
|
tarball=$image_dir/$(basename $uec_url)
|
||||||
|
|
||||||
|
# download the base uec image if we haven't already
|
||||||
|
if [ ! -f $tarball ]; then
|
||||||
|
curl $uec_url -o $tarball
|
||||||
|
(cd $image_dir && tar -Sxvzf $tarball)
|
||||||
|
resize-part-image $image_dir/*.img $GUEST_SIZE $image_dir/disk
|
||||||
|
cp $image_dir/*-vmlinuz-virtual $image_dir/kernel
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD``
|
||||||
|
ROOT_PASSWORD=${ADMIN_PASSWORD:-password}
|
||||||
|
|
||||||
|
# Name of our instance, used by libvirt
|
||||||
|
GUEST_NAME=${GUEST_NAME:-devstack}
|
||||||
|
|
||||||
|
# Mop up after previous runs
|
||||||
|
virsh destroy $GUEST_NAME || true
|
||||||
|
|
||||||
|
# Where this vm is stored
|
||||||
|
vm_dir=$WORK_DIR/instances/$GUEST_NAME
|
||||||
|
|
||||||
|
# Create vm dir and remove old disk
|
||||||
|
mkdir -p $vm_dir
|
||||||
|
rm -f $vm_dir/disk
|
||||||
|
|
||||||
|
# Create a copy of the base image
|
||||||
|
qemu-img create -f qcow2 -b $image_dir/disk $vm_dir/disk
|
||||||
|
|
||||||
|
# Back to devstack
|
||||||
|
cd $TOP_DIR
|
||||||
|
|
||||||
|
GUEST_NETWORK=${GUEST_NETWORK:-1}
|
||||||
|
GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes}
|
||||||
|
GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50}
|
||||||
|
GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24}
|
||||||
|
GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0}
|
||||||
|
GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.$GUEST_NETWORK.1}
|
||||||
|
GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $GUEST_NETWORK`"}
|
||||||
|
GUEST_RAM=${GUEST_RAM:-1524288}
|
||||||
|
GUEST_CORES=${GUEST_CORES:-1}
|
||||||
|
|
||||||
|
# libvirt.xml configuration
|
||||||
|
NET_XML=$vm_dir/net.xml
|
||||||
|
cat > $NET_XML <<EOF
|
||||||
|
<network>
|
||||||
|
<name>devstack-$GUEST_NETWORK</name>
|
||||||
|
<bridge name="stackbr%d" />
|
||||||
|
<forward/>
|
||||||
|
<ip address="$GUEST_GATEWAY" netmask="$GUEST_NETMASK">
|
||||||
|
<dhcp>
|
||||||
|
<range start='192.168.$GUEST_NETWORK.2' end='192.168.$GUEST_NETWORK.127' />
|
||||||
|
</dhcp>
|
||||||
|
</ip>
|
||||||
|
</network>
|
||||||
|
EOF
|
||||||
|
|
||||||
|
if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then
|
||||||
|
virsh net-destroy devstack-$GUEST_NETWORK || true
|
||||||
|
# destroying the network isn't enough to delete the leases
|
||||||
|
rm -f /var/lib/libvirt/dnsmasq/devstack-$GUEST_NETWORK.leases
|
||||||
|
virsh net-create $vm_dir/net.xml
|
||||||
|
fi
|
||||||
|
|
||||||
|
# libvirt.xml configuration
|
||||||
|
LIBVIRT_XML=$vm_dir/libvirt.xml
|
||||||
|
cat > $LIBVIRT_XML <<EOF
|
||||||
|
<domain type='kvm'>
|
||||||
|
<name>$GUEST_NAME</name>
|
||||||
|
<memory>$GUEST_RAM</memory>
|
||||||
|
<os>
|
||||||
|
<type>hvm</type>
|
||||||
|
<kernel>$image_dir/kernel</kernel>
|
||||||
|
<cmdline>root=/dev/vda ro console=ttyS0 init=/usr/lib/cloud-init/uncloud-init ds=nocloud-net;s=http://192.168.$GUEST_NETWORK.1:4567/ ubuntu-pass=ubuntu</cmdline>
|
||||||
|
</os>
|
||||||
|
<features>
|
||||||
|
<acpi/>
|
||||||
|
</features>
|
||||||
|
<clock offset='utc'/>
|
||||||
|
<vcpu>$GUEST_CORES</vcpu>
|
||||||
|
<devices>
|
||||||
|
<disk type='file'>
|
||||||
|
<driver type='qcow2'/>
|
||||||
|
<source file='$vm_dir/disk'/>
|
||||||
|
<target dev='vda' bus='virtio'/>
|
||||||
|
</disk>
|
||||||
|
|
||||||
|
<interface type='network'>
|
||||||
|
<source network='devstack-$GUEST_NETWORK'/>
|
||||||
|
</interface>
|
||||||
|
|
||||||
|
<!-- The order is significant here. File must be defined first -->
|
||||||
|
<serial type="file">
|
||||||
|
<source path='$vm_dir/console.log'/>
|
||||||
|
<target port='1'/>
|
||||||
|
</serial>
|
||||||
|
|
||||||
|
<console type='pty' tty='/dev/pts/2'>
|
||||||
|
<source path='/dev/pts/2'/>
|
||||||
|
<target port='0'/>
|
||||||
|
</console>
|
||||||
|
|
||||||
|
<serial type='pty'>
|
||||||
|
<source path='/dev/pts/2'/>
|
||||||
|
<target port='0'/>
|
||||||
|
</serial>
|
||||||
|
|
||||||
|
<graphics type='vnc' port='-1' autoport='yes' keymap='en-us' listen='0.0.0.0'/>
|
||||||
|
</devices>
|
||||||
|
</domain>
|
||||||
|
EOF
|
||||||
|
|
||||||
|
|
||||||
|
rm -rf $vm_dir/uec
|
||||||
|
cp -r $TOOLS_DIR/uec $vm_dir/uec
|
||||||
|
|
||||||
|
# set metadata
|
||||||
|
cat > $vm_dir/uec/meta-data<<EOF
|
||||||
|
hostname: $GUEST_NAME
|
||||||
|
instance-id: i-hop
|
||||||
|
instance-type: m1.ignore
|
||||||
|
local-hostname: $GUEST_NAME.local
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# set metadata
# Build the cloud-init user-data script the guest runs on first boot.
# NOTE: inside this unquoted heredoc, the UNescaped backtick substitutions
# (origin URL, current commit, localrc contents) run on the HOST while the
# file is written; the escaped \`hostname\` runs in the GUEST at boot.
cat > $vm_dir/uec/user-data<<EOF
#!/bin/bash
# hostname needs to resolve for rabbit
sed -i "s/127.0.0.1/127.0.0.1 \`hostname\`/" /etc/hosts
apt-get update
apt-get install git sudo -y
git clone https://github.com/cloudbuilders/devstack.git
cd devstack
git remote set-url origin `cd $TOP_DIR; git remote show origin | grep Fetch | awk '{print $3}'`
git fetch
git checkout `git rev-parse HEAD`
cat > localrc <<LOCAL_EOF
ROOTSLEEP=0
`cat $TOP_DIR/localrc`
LOCAL_EOF
./stack.sh
EOF

# (re)start a metadata service
# Kill anything already bound to the metadata address:port, then serve
# $vm_dir/uec in the background via meta.py.
(
  pid=`lsof -iTCP@192.168.$GUEST_NETWORK.1:4567 -n | awk '{print $2}' | tail -1`
  [ -z "$pid" ] || kill -9 $pid
)
cd $vm_dir/uec
python meta.py 192.168.$GUEST_NETWORK.1:4567 &

# Create the instance
virsh create $vm_dir/libvirt.xml

# Tail the console log till we are done
# WAIT_TILL_LAUNCH=1 (the default) blocks here until stack.sh inside the
# guest reports completion or failure on the console log.
WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1}
if [ "$WAIT_TILL_LAUNCH" = "1" ]; then
  # Turn off command tracing so the tailed log stays readable.
  set +o xtrace
  # Done creating the container, let's tail the log
  echo
  echo "============================================================="
  echo " -- YAY! --"
  echo "============================================================="
  echo
  echo "We're done launching the vm, about to start tailing the"
  echo "stack.sh log. It will take a second or two to start."
  echo
  echo "Just CTRL-C at any time to stop tailing."

  # Wait for the guest to create its console log before tailing it.
  while [ ! -e "$vm_dir/console.log" ]; do
    sleep 1
  done

  tail -F $vm_dir/console.log &

  TAIL_PID=$!

  # Stop the background tail and abort the script.
  function kill_tail() {
    kill $TAIL_PID
    exit 1
  }

  # Let Ctrl-c kill tail and exit
  trap kill_tail SIGINT

  echo "Waiting stack.sh to finish..."
  # Poll until the guest's stack.sh prints its final status line.
  while ! egrep -q '^stack.sh (completed|failed)' $vm_dir/console.log ; do
    sleep 1
  done

  set -o xtrace

  kill $TAIL_PID

  # Propagate a stack.sh failure inside the guest as our exit status.
  if ! grep -q "^stack.sh completed in" $vm_dir/console.log; then
    exit 1
  fi
  echo ""
  echo "Finished - Zip-a-dee Doo-dah!"
fi
|
@ -0,0 +1,74 @@
|
|||||||
|
#!/usr/bin/env bash

# Add a ``stack`` user (plus keys and sudo rights) to a raw uec base image.

# Echo commands
set -o xtrace

# Exit on error to stop unexpected errors
set -o errexit

# Keep track of the current directory
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=$(cd $TOOLS_DIR/..; pwd)

# Change dir to top of devstack
cd $TOP_DIR

# Print how this script is meant to be invoked.
usage() {
    echo "Add stack user and keys"
    echo ""
    echo "Usage: $0 [full path to raw uec base image]"
}

# Require an image argument and make sure it really is a raw image;
# qemu-img reports the detected format for us.  Checking for an empty
# argument explicitly gives a clean usage message instead of a qemu-img
# error when no image was passed.
if [ -z "$1" ] || ! qemu-img info "$1" | grep -q "file format: raw"; then
    usage
    exit 1
fi
|
||||||
|
|
||||||
|
# Mount the image
DEST=/opt/stack
# Staging mountpoint is derived from the image path (slashes -> underscores).
# NOTE(review): predictable name under /tmp -- acceptable on a trusted dev box.
STAGING_DIR=/tmp/$(echo "$1" | sed "s/\//_/g").stage.user
mkdir -p "$STAGING_DIR"
# Unmount any leftover mount from a previous (possibly aborted) run.
umount "$STAGING_DIR" || true
sleep 1
mount -t ext4 -o loop "$1" "$STAGING_DIR"
mkdir -p "$STAGING_DIR/$DEST"

# Create a stack user that is a member of the libvirtd group so that stack
# is able to interact with libvirt.  "|| true" keeps reruns idempotent when
# the group/user already exist.
chroot "$STAGING_DIR" groupadd libvirtd || true
chroot "$STAGING_DIR" useradd stack -s /bin/bash -d $DEST -G libvirtd || true

# Add a simple password - pass
echo stack:pass | chroot "$STAGING_DIR" chpasswd

# Configure sudo: ensure /etc/sudoers.d is included, then install our
# sudoers snippets with %USER% replaced by the invoking user.
grep -q "^#includedir.*/etc/sudoers.d" "$STAGING_DIR/etc/sudoers" ||
    echo "#includedir /etc/sudoers.d" | sudo tee -a "$STAGING_DIR/etc/sudoers"
cp $TOP_DIR/files/sudo/* "$STAGING_DIR/etc/sudoers.d/"
sed -e "s,%USER%,$USER,g" -i "$STAGING_DIR"/etc/sudoers.d/*

# and has sudo ability (in the future this should be limited to only what
# stack requires)
echo "stack ALL=(ALL) NOPASSWD: ALL" >> "$STAGING_DIR/etc/sudoers"
|
# Gracefully cp only if source file/dir exists
|
||||||
|
# Gracefully cp only if source file/dir exists
# cp_it SRC DST -- copy SRC to DST, preserving mode/timestamps (-p),
# recursing into directories (-R) and following symlinks (-L); silently
# do nothing when SRC does not exist.  Quoting fixes breakage on paths
# containing spaces.
function cp_it {
    if [ -e "$1" ] || [ -d "$1" ]; then
        cp -pRL "$1" "$2"
    fi
}
|
||||||
|
|
||||||
|
# Copy over your ssh keys and env if desired (cp_it skips anything that
# does not exist on the build host).
cp_it ~/.ssh "$STAGING_DIR/$DEST/.ssh"
cp_it ~/.ssh/id_rsa.pub "$STAGING_DIR/$DEST/.ssh/authorized_keys"
cp_it ~/.gitconfig "$STAGING_DIR/$DEST/.gitconfig"
cp_it ~/.vimrc "$STAGING_DIR/$DEST/.vimrc"
cp_it ~/.bashrc "$STAGING_DIR/$DEST/.bashrc"

# Give stack ownership over $DEST so it may do the work needed
chroot "$STAGING_DIR" chown -R stack $DEST

# Unmount
umount "$STAGING_DIR"
|
@ -0,0 +1,29 @@
|
|||||||
|
# Minimal Python 2 HTTP server used as the VM "metadata" service.
# SimpleHTTPRequestHandler serves files from the current working directory
# (the caller cd's into $vm_dir/uec before launching us).
import sys
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SimpleHTTPServer import SimpleHTTPRequestHandler


def main(host, port, HandlerClass = SimpleHTTPRequestHandler,
         ServerClass = HTTPServer, protocol="HTTP/1.0"):
    """simple http server that listens on a give address:port"""

    server_address = (host, port)

    HandlerClass.protocol_version = protocol
    httpd = ServerClass(server_address, HandlerClass)

    # Report the actual bound address (useful when port 0 is requested).
    sa = httpd.socket.getsockname()
    print "Serving HTTP on", sa[0], "port", sa[1], "..."
    # Blocks forever; terminate with a signal.
    httpd.serve_forever()


if __name__ == '__main__':
    # Optional single argument: "host:port", bare "host" (port defaults to
    # 8080), or nothing at all (bind 0.0.0.0:8080).
    if sys.argv[1:]:
        address = sys.argv[1]
    else:
        address = '0.0.0.0'
    if ':' in address:
        host, port = address.split(':')
    else:
        host = address
        port = 8080

    main(host, int(port))
|
@ -0,0 +1,53 @@
|
|||||||
|
#!/usr/bin/env bash

# Prime a raw uec image's apt and pip caches so later stack.sh runs are fast.

# Echo commands
set -o xtrace

# Exit on error to stop unexpected errors
set -o errexit

# Keep track of the current directory
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=$(cd $TOOLS_DIR/..; pwd)

# Change dir to top of devstack
cd $TOP_DIR

# Print how this script is meant to be invoked.
usage() {
    echo "Cache OpenStack dependencies on a uec image to speed up performance."
    echo ""
    echo "Usage: $0 [full path to raw uec base image]"
}

# Require an image argument and make sure it really is a raw image;
# checking for an empty argument explicitly gives a clean usage message
# instead of a qemu-img error when no image was passed.
if [ -z "$1" ] || ! qemu-img info "$1" | grep -q "file format: raw"; then
    usage
    exit 1
fi

# Make sure we are in the correct dir
# NOTE(review): we already cd'd to $TOP_DIR above, so this actually checks
# that the devstack root contains files/apts (the message mentions tools/
# because that is where the script lives).
if [ ! -d files/apts ]; then
    echo "Please run this script from devstack/tools/"
    exit 1
fi
|
||||||
|
|
||||||
|
# Mount the image
# Staging mountpoint is derived from the image path (slashes -> underscores).
STAGING_DIR=/tmp/$(echo "$1" | sed "s/\//_/g").stage
mkdir -p "$STAGING_DIR"
# Unmount any leftover mount from a previous (possibly aborted) run.
umount "$STAGING_DIR" || true
sleep 1
mount -t ext4 -o loop "$1" "$STAGING_DIR"

# Make sure that base requirements are installed
# Give the chroot working DNS by borrowing the host's resolver config.
cp /etc/resolv.conf "$STAGING_DIR/etc/resolv.conf"

# Perform caching on the base image to speed up subsequent runs
chroot "$STAGING_DIR" apt-get update
# Packages tagged NOPRIME are only downloaded into the apt cache; everything
# else is installed outright.  "cut -d\# -f1" strips trailing # comments from
# the package lists.  "|| true" keeps a single bad package from aborting the
# whole priming pass.
chroot "$STAGING_DIR" apt-get install -y --download-only $(cat files/apts/* | grep NOPRIME | cut -d\# -f1)
chroot "$STAGING_DIR" apt-get install -y --force-yes $(cat files/apts/* | grep -v NOPRIME | cut -d\# -f1) || true
mkdir -p "$STAGING_DIR/var/cache/pip"
PIP_DOWNLOAD_CACHE=/var/cache/pip chroot "$STAGING_DIR" pip install $(cat files/pips/*) || true

# Unmount
umount "$STAGING_DIR"
|
Loading…
Reference in New Issue