Merge remote-tracking branch 'origin/master' into jenkins

commit b996b2b855
9 changed files with 506 additions and 39 deletions
@@ -165,7 +165,7 @@ ping -c1 -w1 $IP
nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0

# FIXME (anthony): make xs support security groups
-if [ "$VIRT_DRIVER" != "xenserver"]; then
+if [ "$VIRT_DRIVER" != "xenserver" ]; then
    # test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
    if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
        print "Security group failure - ping should not be allowed!"
files/nova-api-paste.ini (new file, 127 lines)
@@ -0,0 +1,127 @@
#######
# EC2 #
#######

[composite:ec2]
use = egg:Paste#urlmap
/: ec2versions
/services/Cloud: ec2cloud
/services/Admin: ec2admin
/latest: ec2metadata
/2007-01-19: ec2metadata
/2007-03-01: ec2metadata
/2007-08-29: ec2metadata
/2007-10-10: ec2metadata
/2007-12-15: ec2metadata
/2008-02-01: ec2metadata
/2008-09-01: ec2metadata
/2009-04-04: ec2metadata
/1.0: ec2metadata

[pipeline:ec2cloud]
pipeline = logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor

[pipeline:ec2admin]
pipeline = logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor

[pipeline:ec2metadata]
pipeline = logrequest ec2md

[pipeline:ec2versions]
pipeline = logrequest ec2ver

[filter:logrequest]
paste.filter_factory = nova.api.ec2:RequestLogging.factory

[filter:ec2lockout]
paste.filter_factory = nova.api.ec2:Lockout.factory

[filter:totoken]
paste.filter_factory = keystone.middleware.ec2_token:EC2Token.factory

[filter:ec2noauth]
paste.filter_factory = nova.api.ec2:NoAuth.factory

[filter:authenticate]
paste.filter_factory = nova.api.ec2:Authenticate.factory

[filter:cloudrequest]
controller = nova.api.ec2.cloud.CloudController
paste.filter_factory = nova.api.ec2:Requestify.factory

[filter:adminrequest]
controller = nova.api.ec2.admin.AdminController
paste.filter_factory = nova.api.ec2:Requestify.factory

[filter:authorizer]
paste.filter_factory = nova.api.ec2:Authorizer.factory

[app:ec2executor]
paste.app_factory = nova.api.ec2:Executor.factory

[app:ec2ver]
paste.app_factory = nova.api.ec2:Versions.factory

[app:ec2md]
paste.app_factory = nova.api.ec2.metadatarequesthandler:MetadataRequestHandler.factory

#############
# Openstack #
#############

[composite:osapi]
use = egg:Paste#urlmap
/: osversions
/v1.0: openstackapi10
/v1.1: openstackapi11

[pipeline:openstackapi10]
pipeline = faultwrap authtoken keystonecontext ratelimit osapiapp10

[pipeline:openstackapi11]
pipeline = faultwrap authtoken keystonecontext ratelimit extensions osapiapp11

[filter:faultwrap]
paste.filter_factory = nova.api.openstack:FaultWrapper.factory

[filter:auth]
paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory

[filter:noauth]
paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory

[filter:ratelimit]
paste.filter_factory = nova.api.openstack.limits:RateLimitingMiddleware.factory

[filter:extensions]
paste.filter_factory = nova.api.openstack.extensions:ExtensionMiddleware.factory

[app:osapiapp10]
paste.app_factory = nova.api.openstack:APIRouterV10.factory

[app:osapiapp11]
paste.app_factory = nova.api.openstack:APIRouterV11.factory

[pipeline:osversions]
pipeline = faultwrap osversionapp

[app:osversionapp]
paste.app_factory = nova.api.openstack.versions:Versions.factory

##########
# Shared #
##########

[filter:keystonecontext]
paste.filter_factory = keystone.middleware.nova_keystone_context:NovaKeystoneContext.factory

[filter:authtoken]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
service_protocol = http
service_host = 127.0.0.1
service_port = 5000
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
auth_uri = http://127.0.0.1:5000/
admin_token = %SERVICE_TOKEN%
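The only templated value in this file is admin_token. A quick way to sanity-check the substitution that stack.sh performs further down (the token and the temporary path here are made up for illustration):

    SERVICE_TOKEN=112233445566
    cp files/nova-api-paste.ini /tmp/nova-api-paste.ini
    sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i /tmp/nova-api-paste.ini
    grep admin_token /tmp/nova-api-paste.ini    # should show the real token, not %SERVICE_TOKEN%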
@@ -41,7 +41,7 @@ Cmnd_Alias NOVACMDS = /bin/chmod /var/lib/nova/tmp/*/root/.ssh, \
                      /usr/bin/socat, \
                      /sbin/parted, \
                      /usr/sbin/dnsmasq, \
-                     /usr/bin/arping
+                     /usr/sbin/arping

%USER% ALL = (root) NOPASSWD: SETENV: NOVACMDS
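Before editing a sudoers alias like this it is worth confirming where the binary actually lives on the target release; something along these lines works (illustrative check, not part of the commit):

    type -p arping                  # print the path the shell would use
    dpkg -S "$(type -p arping)"     # show which package owns it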
stack.sh (59 lines changed)
@@ -103,8 +103,7 @@ if [[ $EUID -eq 0 ]]; then

    # since this script runs as a normal user, we need to give that user
    # ability to run sudo
-   apt_get update
-   apt_get install sudo
+   dpkg -l sudo || apt_get update && apt_get install sudo

    if ! getent passwd stack >/dev/null; then
        echo "Creating a user called stack"
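The replacement line relies on shell grouping: a || b && c parses as (a || b) && c, so the package index is refreshed only when the dpkg check fails, and the install runs in either case (a near no-op when sudo is already present). The same pattern in isolation, with a hypothetical have_pkg helper:

    have_pkg() { dpkg -l "$1" >/dev/null 2>&1; }
    # equivalent to: (have_pkg sudo || apt-get update) && apt-get install -y sudo
    have_pkg sudo || apt-get update && apt-get install -y sudo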
@@ -121,7 +120,7 @@ if [[ $EUID -eq 0 ]]; then
    echo "Copying files to stack user"
    STACK_DIR="$DEST/${PWD##*/}"
    cp -r -f "$PWD" "$STACK_DIR"
-   chown -R $USER "$STACK_DIR"
+   chown -R stack "$STACK_DIR"
    if [[ "$SHELL_AFTER_RUN" != "no" ]]; then
        exec su -c "set -e; cd $STACK_DIR; bash stack.sh; bash" stack
    else
@@ -175,6 +174,9 @@ if [ ! -n "$HOST_IP" ]; then
    HOST_IP=`LC_ALL=C /sbin/ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'`
fi

+# Service startup timeout
+SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
+
# Generic helper to configure passwords
function read_password {
    set +o xtrace
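SERVICE_TIMEOUT replaces the hard-coded 60-second waits used further down for glance, keystone and nova-api. The pattern those checks share could be factored into a helper along these lines (hypothetical wait_for_url, shown only to illustrate how the variable is consumed):

    # Poll a URL until it answers, giving up after SERVICE_TIMEOUT seconds
    function wait_for_url {
        local url=$1
        if ! timeout ${SERVICE_TIMEOUT:-60} sh -c "while ! wget -q -O- $url >/dev/null; do sleep 1; done"; then
            echo "$url did not start"
            return 1
        fi
    }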
@@ -230,7 +232,7 @@ VLAN_INTERFACE=${VLAN_INTERFACE:-$PUBLIC_INTERFACE}
# Multi-host is a mode where each compute node runs its own network node. This
# allows network operations and routing for a VM to occur on the server that is
# running the VM - removing a SPOF and bandwidth bottleneck.
-MULTI_HOST=${MULTI_HOST:-0}
+MULTI_HOST=${MULTI_HOST:-False}

# If you are using FlatDHCP on multiple hosts, set the ``FLAT_INTERFACE``
# variable but make sure that the interface doesn't already have an
@@ -323,7 +325,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
    # can never change.
    read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
fi


# Keystone
# --------

@@ -562,13 +564,12 @@ fi
# ----

if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
-    # We are going to use the sample http middleware configuration from the
-    # keystone project to launch nova. This paste config adds the configuration
-    # required for nova to validate keystone tokens - except we need to switch
-    # the config to use our service token instead (instead of the invalid token
-    # 999888777666).
-    cp $KEYSTONE_DIR/examples/paste/nova-api-paste.ini $NOVA_DIR/bin
-    sed -e "s,999888777666,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
+    # We are going to use a sample http middleware configuration based on the
+    # one from the keystone project to launch nova. This paste config adds
+    # the configuration required for nova to validate keystone tokens. We add
+    # our own service token to the configuration.
+    cp $FILES/nova-api-paste.ini $NOVA_DIR/bin
+    sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
fi

if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then
@@ -650,13 +651,13 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
    USER_GROUP=$(id -g)
    sudo mkdir -p ${SWIFT_DATA_LOCATION}/drives
    sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_LOCATION}/drives


    # We then create a loopback disk and format it to XFS.
    if [[ ! -e ${SWIFT_DATA_LOCATION}/drives/images/swift.img ]];then
        mkdir -p ${SWIFT_DATA_LOCATION}/drives/images
        sudo touch ${SWIFT_DATA_LOCATION}/drives/images/swift.img
        sudo chown $USER: ${SWIFT_DATA_LOCATION}/drives/images/swift.img


        dd if=/dev/zero of=${SWIFT_DATA_LOCATION}/drives/images/swift.img \
            bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
        mkfs.xfs -f -i size=1024 ${SWIFT_DATA_LOCATION}/drives/images/swift.img
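A dd with count=0 and seek=N as above only allocates a sparse file of N KiB; the hunk stops before the image is attached anywhere. The follow-on step would normally loop-mount the formatted image where the swift nodes expect a drive, roughly like this (illustrative, not shown in this hunk):

    sudo mount -t xfs -o loop,noatime ${SWIFT_DATA_LOCATION}/drives/images/swift.img \
        ${SWIFT_DATA_LOCATION}/drives/sdb1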
@@ -673,9 +674,9 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
    # We then create link to that mounted location so swift would know
    # where to go.
    for x in {1..4}; do sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x; done


    # We now have to emulate a few different servers into one we
    # create all the directories needed for swift
    # create all the directories needed for swift
    tmpd=""
    for d in ${SWIFT_DATA_LOCATION}/drives/sdb1/{1..4} \
        ${SWIFT_CONFIG_LOCATION}/{object,container,account}-server \
@@ -691,7 +692,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
    # swift-init has a bug using /etc/swift until bug #885595 is fixed
    # we have to create a link
    sudo ln -s ${SWIFT_CONFIG_LOCATION} /etc/swift


    # Swift use rsync to syncronize between all the different
    # partitions (which make more sense when you have a multi-node
    # setup) we configure it with our version of rsync.
@@ -727,7 +728,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
        local bind_port=$2
        local log_facility=$3
        local node_number


        for node_number in {1..4};do
            node_path=${SWIFT_DATA_LOCATION}/${node_number}
            sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \
@@ -754,14 +755,14 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then

    # We then can start rsync.
    sudo /etc/init.d/rsync restart || :


    # Create our ring for the object/container/account.
    /usr/local/bin/swift-remakerings

    # And now we launch swift-startmain to get our cluster running
    # ready to be tested.
    /usr/local/bin/swift-startmain || :


    unset s swift_hash swift_auth_server tmpd
fi

@@ -828,12 +829,12 @@ add_nova_flag "--glance_api_servers=$GLANCE_HOSTPORT"
if [ -n "$INSTANCES_PATH" ]; then
    add_nova_flag "--instances_path=$INSTANCES_PATH"
fi
-if [ -n "$MULTI_HOST" ]; then
-    add_nova_flag "--multi_host=$MULTI_HOST"
-    add_nova_flag "--send_arp_for_ha=1"
+if [ "$MULTI_HOST" != "False" ]; then
+    add_nova_flag "--multi_host"
+    add_nova_flag "--send_arp_for_ha"
fi
if [ "$SYSLOG" != "False" ]; then
-    add_nova_flag "--use_syslog=1"
+    add_nova_flag "--use_syslog"
fi

# XenServer
@@ -909,6 +910,10 @@ function screen_it {
    NL=`echo -ne '\015'`
    if [[ "$ENABLED_SERVICES" =~ "$1" ]]; then
        screen -S stack -X screen -t $1
+        # sleep to allow bash to be ready to be send the command - we are
+        # creating a new window in screen and then sends characters, so if
+        # bash isn't running by the time we send the command, nothing happens
+        sleep 1
        screen -S stack -p $1 -X stuff "$2$NL"
    fi
}
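Each enabled service ends up in its own window of the shared 'stack' screen session, so the usual way to watch one interactively is plain screen usage (not part of the diff):

    screen -x stack    # attach to the running session
    # Ctrl-a "  lists the windows (g-api, key, n-api, ...)
    # Ctrl-a d  detaches again and leaves everything running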
@@ -926,7 +931,7 @@ fi
if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
    screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=etc/glance-api.conf"
    echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
-    if ! timeout 60 sh -c "while ! wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
        echo "g-api did not start"
        exit 1
    fi
@@ -936,7 +941,7 @@ fi
if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
    screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF -d"
    echo "Waiting for keystone to start..."
-    if ! timeout 60 sh -c "while ! wget -q -O- http://127.0.0.1:5000; do sleep 1; done"; then
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:5000; do sleep 1; done"; then
        echo "keystone did not start"
        exit 1
    fi
@@ -946,7 +951,7 @@ fi
if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
    screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api"
    echo "Waiting for nova-api to start..."
-    if ! timeout 60 sh -c "while ! wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then
        echo "nova-api did not start"
        exit 1
    fi
tools/build_uec.sh (new executable file, 248 lines)
@@ -0,0 +1,248 @@
#!/usr/bin/env bash

# Make sure that we have the proper version of ubuntu (only works on natty/oneiric)
if ! egrep -q "oneiric|natty" /etc/lsb-release; then
    echo "This script only works with ubuntu oneiric and natty"
    exit 1
fi

# Keep track of the current directory
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=`cd $TOOLS_DIR/..; pwd`

cd $TOP_DIR

# Source params
source ./stackrc

# Ubuntu distro to install
DIST_NAME=${DIST_NAME:-oneiric}

# Configure how large the VM should be
GUEST_SIZE=${GUEST_SIZE:-10G}

# exit on error to stop unexpected errors
set -o errexit
set -o xtrace

# Abort if localrc is not set
if [ ! -e $TOP_DIR/localrc ]; then
    echo "You must have a localrc with ALL necessary passwords defined before proceeding."
    echo "See stack.sh for required passwords."
    exit 1
fi

# Install deps if needed
DEPS="kvm libvirt-bin kpartx"
dpkg -l $DEPS || apt-get install -y --force-yes $DEPS

# Where to store files and instances
WORK_DIR=${WORK_DIR:-/opt/kvmstack}

# Where to store images
image_dir=$WORK_DIR/images/$DIST_NAME
mkdir -p $image_dir

# Original version of built image
uec_url=http://uec-images.ubuntu.com/$DIST_NAME/current/$DIST_NAME-server-cloudimg-amd64.tar.gz
tarball=$image_dir/$(basename $uec_url)

# download the base uec image if we haven't already
if [ ! -f $tarball ]; then
    curl $uec_url -o $tarball
    (cd $image_dir && tar -Sxvzf $tarball)
    resize-part-image $image_dir/*.img $GUEST_SIZE $image_dir/disk
    cp $image_dir/*-vmlinuz-virtual $image_dir/kernel
fi


# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD``
ROOT_PASSWORD=${ADMIN_PASSWORD:-password}

# Name of our instance, used by libvirt
GUEST_NAME=${GUEST_NAME:-devstack}

# Mop up after previous runs
virsh destroy $GUEST_NAME || true

# Where this vm is stored
vm_dir=$WORK_DIR/instances/$GUEST_NAME

# Create vm dir and remove old disk
mkdir -p $vm_dir
rm -f $vm_dir/disk

# Create a copy of the base image
qemu-img create -f qcow2 -b $image_dir/disk $vm_dir/disk

# Back to devstack
cd $TOP_DIR

GUEST_NETWORK=${GUEST_NETWORK:-1}
GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes}
GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50}
GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24}
GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0}
GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.$GUEST_NETWORK.1}
GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $GUEST_NETWORK`"}
GUEST_RAM=${GUEST_RAM:-1524288}
GUEST_CORES=${GUEST_CORES:-1}

# libvirt.xml configuration
NET_XML=$vm_dir/net.xml
cat > $NET_XML <<EOF
<network>
  <name>devstack-$GUEST_NETWORK</name>
  <bridge name="stackbr%d" />
  <forward/>
  <ip address="$GUEST_GATEWAY" netmask="$GUEST_NETMASK">
    <dhcp>
      <range start='192.168.$GUEST_NETWORK.2' end='192.168.$GUEST_NETWORK.127' />
    </dhcp>
  </ip>
</network>
EOF

if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then
    virsh net-destroy devstack-$GUEST_NETWORK || true
    # destroying the network isn't enough to delete the leases
    rm -f /var/lib/libvirt/dnsmasq/devstack-$GUEST_NETWORK.leases
    virsh net-create $vm_dir/net.xml
fi

# libvirt.xml configuration
LIBVIRT_XML=$vm_dir/libvirt.xml
cat > $LIBVIRT_XML <<EOF
<domain type='kvm'>
  <name>$GUEST_NAME</name>
  <memory>$GUEST_RAM</memory>
  <os>
    <type>hvm</type>
    <kernel>$image_dir/kernel</kernel>
    <cmdline>root=/dev/vda ro console=ttyS0 init=/usr/lib/cloud-init/uncloud-init ds=nocloud-net;s=http://192.168.$GUEST_NETWORK.1:4567/ ubuntu-pass=ubuntu</cmdline>
  </os>
  <features>
    <acpi/>
  </features>
  <clock offset='utc'/>
  <vcpu>$GUEST_CORES</vcpu>
  <devices>
    <disk type='file'>
      <driver type='qcow2'/>
      <source file='$vm_dir/disk'/>
      <target dev='vda' bus='virtio'/>
    </disk>

    <interface type='network'>
      <source network='devstack-$GUEST_NETWORK'/>
    </interface>

    <!-- The order is significant here. File must be defined first -->
    <serial type="file">
      <source path='$vm_dir/console.log'/>
      <target port='1'/>
    </serial>

    <console type='pty' tty='/dev/pts/2'>
      <source path='/dev/pts/2'/>
      <target port='0'/>
    </console>

    <serial type='pty'>
      <source path='/dev/pts/2'/>
      <target port='0'/>
    </serial>

    <graphics type='vnc' port='-1' autoport='yes' keymap='en-us' listen='0.0.0.0'/>
  </devices>
</domain>
EOF


rm -rf $vm_dir/uec
cp -r $TOOLS_DIR/uec $vm_dir/uec

# set metadata
cat > $vm_dir/uec/meta-data<<EOF
hostname: $GUEST_NAME
instance-id: i-hop
instance-type: m1.ignore
local-hostname: $GUEST_NAME.local
EOF

# set metadata
cat > $vm_dir/uec/user-data<<EOF
#!/bin/bash
# hostname needs to resolve for rabbit
sed -i "s/127.0.0.1/127.0.0.1 \`hostname\`/" /etc/hosts
apt-get update
apt-get install git sudo -y
git clone https://github.com/cloudbuilders/devstack.git
cd devstack
git remote set-url origin `cd $TOP_DIR; git remote show origin | grep Fetch | awk '{print $3}'`
git fetch
git checkout `git rev-parse HEAD`
cat > localrc <<LOCAL_EOF
ROOTSLEEP=0
`cat $TOP_DIR/localrc`
LOCAL_EOF
./stack.sh
EOF

# (re)start a metadata service
(
    pid=`lsof -iTCP@192.168.$GUEST_NETWORK.1:4567 -n | awk '{print $2}' | tail -1`
    [ -z "$pid" ] || kill -9 $pid
)
cd $vm_dir/uec
python meta.py 192.168.$GUEST_NETWORK.1:4567 &

# Create the instance
virsh create $vm_dir/libvirt.xml

# Tail the console log till we are done
WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1}
if [ "$WAIT_TILL_LAUNCH" = "1" ]; then
    set +o xtrace
    # Done creating the container, let's tail the log
    echo
    echo "============================================================="
    echo "              -- YAY! --"
    echo "============================================================="
    echo
    echo "We're done launching the vm, about to start tailing the"
    echo "stack.sh log. It will take a second or two to start."
    echo
    echo "Just CTRL-C at any time to stop tailing."

    while [ ! -e "$vm_dir/console.log" ]; do
        sleep 1
    done

    tail -F $vm_dir/console.log &

    TAIL_PID=$!

    function kill_tail() {
        kill $TAIL_PID
        exit 1
    }

    # Let Ctrl-c kill tail and exit
    trap kill_tail SIGINT

    echo "Waiting stack.sh to finish..."
    while ! egrep -q '^stack.sh (completed|failed)' $vm_dir/console.log ; do
        sleep 1
    done

    set -o xtrace

    kill $TAIL_PID

    if ! grep -q "^stack.sh completed in" $vm_dir/console.log; then
        exit 1
    fi
    echo ""
    echo "Finished - Zip-a-dee Doo-dah!"
fi
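While stack.sh runs inside the guest, the paths and names derived above give a few obvious places to watch from the host (illustrative commands, assuming the defaults WORK_DIR=/opt/kvmstack and GUEST_NAME=devstack):

    tail -f /opt/kvmstack/instances/devstack/console.log    # same file the script tails
    virsh console devstack                                  # serial console; detach with Ctrl+]
    virsh list --all                                        # confirm the domain is running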
@@ -14,6 +14,9 @@ MIN_PKGS=${MIN_PKGS:-"apt-utils gpgv openssh-server"}
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=`cd $TOOLS_DIR/..; pwd`

+# exit on error to stop unexpected errors
+set -o errexit
+
usage() {
    echo "Usage: $0 - Prepare Ubuntu images"
    echo ""
@@ -44,6 +47,14 @@ cleanup() {
    trap 2; kill -2 $$
}

+# apt-get wrapper to just get arguments set correctly
+function apt_get() {
+    local sudo="sudo"
+    [ "$(id -u)" = "0" ] && sudo="env"
+    $sudo DEBIAN_FRONTEND=noninteractive apt-get \
+        --option "Dpkg::Options::=--force-confold" --assume-yes "$@"
+}
+
while getopts f:hmr: c; do
    case $c in
        f) FORMAT=$OPTARG
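With the wrapper in place, later installs stay one-liners that behave the same whether the script runs as root (sudo is swapped for a plain env) or as a normal user, and apt never prompts thanks to the noninteractive frontend and --force-confold. The next hunk uses it exactly like this:

    apt_get install qemu-kvm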
@@ -107,7 +118,14 @@ case $DIST_NAME in
        ;;
    esac

-trap cleanup SIGHUP SIGINT SIGTERM
+trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT

+# Check for dependencies
+
+if [ ! -x "`which qemu-img`" -o ! -x "`which qemu-nbd`" ]; then
+    # Missing KVM?
+    apt_get install qemu-kvm
+fi
+
# Prepare the base image

tools/uec/meta.py (new file, 29 lines)
@@ -0,0 +1,29 @@
import sys
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SimpleHTTPServer import SimpleHTTPRequestHandler

def main(host, port, HandlerClass = SimpleHTTPRequestHandler,
         ServerClass = HTTPServer, protocol="HTTP/1.0"):
    """simple http server that listens on a give address:port"""

    server_address = (host, port)

    HandlerClass.protocol_version = protocol
    httpd = ServerClass(server_address, HandlerClass)

    sa = httpd.socket.getsockname()
    print "Serving HTTP on", sa[0], "port", sa[1], "..."
    httpd.serve_forever()

if __name__ == '__main__':
    if sys.argv[1:]:
        address = sys.argv[1]
    else:
        address = '0.0.0.0'
    if ':' in address:
        host, port = address.split(':')
    else:
        host = address
        port = 8080

    main(host, int(port))
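build_uec.sh starts this server from $vm_dir/uec, so it simply serves the meta-data and user-data files written earlier to the guest's cloud-init (nocloud-net). With the default GUEST_NETWORK=1 the same requests can be made by hand (illustrative):

    curl http://192.168.1.1:4567/meta-data
    curl http://192.168.1.1:4567/user-data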
@@ -226,16 +226,21 @@ mkdir -p /boot/guest
SR_UUID=`xe sr-list --minimal name-label="Local storage"`
xe sr-param-set uuid=$SR_UUID other-config:i18n-key=local-storage

-# Uninstall previous runs
-xe vm-list --minimal name-label="$LABEL" | xargs ./scripts/uninstall-os-vpx.sh
-
-# Destroy any instances that were launched
-for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do
-    echo "Shutting down nova instance $uuid"
-    xe vm-unpause uuid=$uuid || true
-    xe vm-shutdown uuid=$uuid
-    xe vm-destroy uuid=$uuid
-done
+# Shutdown previous runs
+DO_SHUTDOWN=${DO_SHUTDOWN:-1}
+if [ "$DO_SHUTDOWN" = "1" ]; then
+    # Shutdown all domU's that created previously
+    xe vm-list --minimal name-label="$LABEL" | xargs ./scripts/uninstall-os-vpx.sh
+
+    # Destroy any instances that were launched
+    for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do
+        echo "Shutting down nova instance $uuid"
+        xe vm-unpause uuid=$uuid || true
+        xe vm-shutdown uuid=$uuid
+        xe vm-destroy uuid=$uuid
+    done
+fi

# Path to head xva. By default keep overwriting the same one to save space
USE_SEPARATE_XVAS=${USE_SEPARATE_XVAS:-0}
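Guarding the teardown behind DO_SHUTDOWN makes incremental runs possible; build_domU_multi.sh below passes it as a positional argument, and a manual run could skip the cleanup the same way (illustrative, assuming this hunk belongs to the build_domU.sh that the multi-node script invokes):

    DO_SHUTDOWN=0 ./build_domU.sh    # keep previously created domU's and instances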
tools/xen/build_domU_multi.sh (new executable file, 35 lines)
@@ -0,0 +1,35 @@
#!/usr/bin/env bash

# Echo commands
set -o xtrace

# Head node host, which runs glance, api, keystone
HEAD_PUB_IP=${HEAD_PUB_IP:-192.168.1.57}
HEAD_MGT_IP=${HEAD_MGT_IP:-172.16.100.57}

COMPUTE_PUB_IP=${COMPUTE_PUB_IP:-192.168.1.58}
COMPUTE_MGT_IP=${COMPUTE_MGT_IP:-172.16.100.58}

# Networking params
FLOATING_RANGE=${FLOATING_RANGE:-192.168.1.196/30}

# Variables common amongst all hosts in the cluster
COMMON_VARS="$STACKSH_PARAMS MYSQL_HOST=$HEAD_MGT_IP RABBIT_HOST=$HEAD_MGT_IP GLANCE_HOSTPORT=$HEAD_MGT_IP:9292 FLOATING_RANGE=$FLOATING_RANGE"

# Helper to launch containers
function build_domU {
    GUEST_NAME=$1 PUB_IP=$2 MGT_IP=$3 DO_SHUTDOWN=$4 TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $5" ./build_domU.sh
}

# Launch the head node - headnode uses a non-ip domain name,
# because rabbit won't launch with an ip addr hostname :(
build_domU HEADNODE $HEAD_PUB_IP $HEAD_MGT_IP 1 "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit"

# Wait till the head node is up
while ! curl -L http://$HEAD_PUB_IP | grep -q username; do
    echo "Waiting for head node ($HEAD_PUB_IP) to start..."
    sleep 5
done

# Build the HA compute host
build_domU $COMPUTE_PUB_IP $COMPUTE_PUB_IP $COMPUTE_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api"
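The same helper scales to additional compute hosts by repeating the last call with different addresses (hypothetical values shown):

    COMPUTE2_PUB_IP=192.168.1.59
    COMPUTE2_MGT_IP=172.16.100.59
    build_domU $COMPUTE2_PUB_IP $COMPUTE2_PUB_IP $COMPUTE2_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api"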