Merge branch 'master' into syslog

This commit is contained in:
James E. Blair 2011-10-26 15:45:52 -04:00
commit a0988480dd
13 changed files with 428 additions and 152 deletions

View file

@ -19,44 +19,8 @@ set -o xtrace
# Settings
# ========
# Use stackrc and localrc for settings
source ./stackrc
HOST=${HOST:-localhost}
# Nova original used project_id as the *account* that owned resources (servers,
# ip address, ...) With the addition of Keystone we have standardized on the
# term **tenant** as the entity that owns the resources. **novaclient** still
# uses the old deprecated terms project_id. Note that this field should now be
# set to tenant_name, not tenant_id.
export NOVA_PROJECT_ID=${TENANT:-demo}
# In addition to the owning entity (tenant), nova stores the entity performing
# the action as the **user**.
export NOVA_USERNAME=${USERNAME:-demo}
# With Keystone you pass the keystone password instead of an api key.
export NOVA_API_KEY=${ADMIN_PASSWORD:-secrete}
# With the addition of Keystone, to use an openstack cloud you should
# authenticate against keystone, which returns a **Token** and **Service
# Catalog**. The catalog contains the endpoint for all services the user/tenant
# has access to - including nova, glance, keystone, swift, ... We currently
# recommend using the 2.0 *auth api*.
#
# *NOTE*: Using the 2.0 *auth api* does mean that compute api is 2.0. We will
# use the 1.1 *compute api*
export NOVA_URL=${NOVA_URL:-http://$HOST:5000/v2.0/}
# Currently novaclient needs you to specify the *compute api* version. This
# needs to match the config of your catalog returned by Keystone.
export NOVA_VERSION=1.1
# FIXME - why does this need to be specified?
export NOVA_REGION_NAME=RegionOne
# set log level to DEBUG (helps debug issues)
export NOVACLIENT_DEBUG=1
# Use openrc + stackrc + localrc for settings
source ./openrc
# Get a token for clients that don't support service catalog
# ==========================================================
@ -65,7 +29,7 @@ export NOVACLIENT_DEBUG=1
# returns a token and catalog of endpoints. We use python to parse the token
# and save it.
TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_API_KEY\"}}}" -H "Content-type: application/json" http://$HOST:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"`
TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_API_KEY\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"`
# Launching a server
# ==================
@ -95,8 +59,8 @@ nova secgroup-list
# Create a secgroup
nova secgroup-create $SECGROUP "test_secgroup description"
# Flavors
# -------
# determine flavor
# ----------------
# List of flavors:
nova flavor-list
@ -108,6 +72,16 @@ NAME="myserver"
nova boot --flavor $FLAVOR --image $IMAGE $NAME --security_groups=$SECGROUP
# Testing
# =======
# First check if it spins up (becomes active and responds to ping on
# internal ip). If you run this script from a nova node, you should
# bypass security groups and have direct access to the server.
# Waiting for boot
# ----------------
# let's give it 10 seconds to launch
sleep 10
@ -117,6 +91,9 @@ nova show $NAME | grep status | grep -q ACTIVE
# get the IP of the server
IP=`nova show $NAME | grep "private network" | cut -d"|" -f3`
# for single node deployments, we can ping private ips
MULTI_HOST=${MULTI_HOST:-0}
if [ "$MULTI_HOST" = "0" ]; then
# ping it once (timeout of a second)
ping -c1 -w1 $IP || true
@ -125,7 +102,12 @@ ping -c1 -w1 $IP || true
sleep 5
ping -c1 -w1 $IP
# allow icmp traffic
fi
# Security Groups & Floating IPs
# ------------------------------
# allow icmp traffic (ping)
nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
# List rules for a secgroup
@ -135,31 +117,31 @@ nova secgroup-list-rules $SECGROUP
nova floating-ip-create
# store floating address
FIP=`nova floating-ip-list | grep None | head -1 | cut -d '|' -f2 | sed 's/ //g'`
FLOATING_IP=`nova floating-ip-list | grep None | head -1 | cut -d '|' -f2 | sed 's/ //g'`
# add floating ip to our server
nova add-floating-ip $NAME $FIP
nova add-floating-ip $NAME $FLOATING_IP
# sleep for a smidge
sleep 1
sleep 5
# ping our fip
ping -c1 -w1 $FIP
# ping our floating ip
ping -c1 -w1 $FLOATING_IP
# dis-allow icmp traffic
# dis-allow icmp traffic (ping)
nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0
# sleep for a smidge
sleep 1
sleep 5
# ping our fip
if ( ping -c1 -w1 $FIP); then
# ping our floating ip
if ( ping -c1 -w1 $FLOATING_IP ); then
print "Security group failure - ping should not be allowed!"
exit 1
fi
# de-allocate the floating ip
nova floating-ip-delete $FIP
nova floating-ip-delete $FLOATING_IP
# shutdown the server
nova delete $NAME
@ -169,3 +151,9 @@ nova secgroup-delete $SECGROUP
# FIXME: validate shutdown within 5 seconds
# (nova show $NAME returns 1 or status != ACTIVE)?
# Testing Euca2ools
# ==================
# make sure that we can describe instances
euca-describe-instances

View file

@ -16,3 +16,4 @@ iputils-ping
wget
curl
tcpdump
euca2ools # only for testing client

View file

@ -33,3 +33,7 @@ python-suds
python-lockfile
python-m2crypto
python-boto
# Stuff for diablo volumes
iscsitarget
lvm2

View file

@ -36,8 +36,8 @@ $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone http://%HOS
# Tokens
$BIN_DIR/keystone-manage $* token add %SERVICE_TOKEN% admin admin 2015-02-05T00:00
# EC2 related creds - note we are setting the token to user_password
# EC2 related creds - note we are setting the secret key to ADMIN_PASSWORD
# but keystone doesn't parse them - it is just a blob from keystone's
# point of view
$BIN_DIR/keystone-manage $* credentials add admin EC2 'admin_%ADMIN_PASSWORD%' admin admin || echo "no support for adding credentials"
$BIN_DIR/keystone-manage $* credentials add demo EC2 'demo_%ADMIN_PASSWORD%' demo demo || echo "no support for adding credentials"
$BIN_DIR/keystone-manage $* credentials add admin EC2 'admin' '%ADMIN_PASSWORD%' admin || echo "no support for adding credentials"
$BIN_DIR/keystone-manage $* credentials add demo EC2 'demo' '%ADMIN_PASSWORD%' demo || echo "no support for adding credentials"

51
openrc Normal file
View file

@ -0,0 +1,51 @@
#!/usr/bin/env bash

# openrc - client-side environment for talking to an OpenStack cloud.
# Meant to be sourced (``source ./openrc``), not executed.

# Pull in local configuration first so its values win over our defaults.
source ./stackrc

# Host serving the API endpoints
HOST_IP=${HOST_IP:-127.0.0.1}

# Nova historically called the *account* owning resources (servers, ip
# addresses, ...) a project_id.  With Keystone the standardized term is
# **tenant**.  novaclient still reads the deprecated project_id variable,
# which must now carry the tenant *name*, not the tenant id.
export NOVA_PROJECT_ID=${TENANT:-demo}

# Separately from the owning entity (tenant), nova records the entity
# performing the action as the **user**.
export NOVA_USERNAME=${USERNAME:-demo}

# With Keystone, the keystone password takes the place of an api key.
export NOVA_API_KEY=${ADMIN_PASSWORD:-secrete}

# Authentication goes through Keystone, which returns a **Token** and a
# **Service Catalog**.  The catalog lists the endpoints for every service
# the user/tenant can reach - nova, glance, keystone, swift, ...  We
# currently recommend the 2.0 *auth api*.
#
# *NOTE*: Using the 2.0 *auth api* does not mean that compute api is 2.0.  We
# will use the 1.1 *compute api*
export NOVA_URL=${NOVA_URL:-http://$HOST_IP:5000/v2.0/}

# novaclient requires an explicit *compute api* version; it has to agree
# with the catalog returned by Keystone.
export NOVA_VERSION=${NOVA_VERSION:-1.1}

# FIXME - why does this need to be specified?
export NOVA_REGION_NAME=${NOVA_REGION_NAME:-RegionOne}

# Point euca2ools at the EC2-compatible endpoint.
export EC2_URL=${EC2_URL:-http://$HOST_IP:8773/services/Cloud}

# The initial keystone data sets the EC2 access key to the username ...
export EC2_ACCESS_KEY=${USERNAME:-demo}

# ... and the EC2 secret key to the admin password.
export EC2_SECRET_KEY=${ADMIN_PASSWORD:-secrete}

# Uncomment for verbose novaclient output when debugging issues.
# export NOVACLIENT_DEBUG=1

View file

@ -305,20 +305,31 @@ sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install `cat $FILES/pips/*`
# be owned by the installation user, we create the directory and change the
# ownership to the proper user.
function git_clone {
# if there is an existing checkout, move it out of the way
if [[ "$RECLONE" == "yes" ]]; then
# FIXME(ja): if we were smarter we could speed up RECLONE by
# using the old git repo as the basis of our new clone...
if [ -d $2 ]; then
mv $2 /tmp/stack.`date +%s`
fi
fi
if [ ! -d $2 ]; then
git clone $1 $2
GIT_REMOTE=$1
GIT_DEST=$2
GIT_BRANCH=$3
# do a full clone only if the directory doesn't exist
if [ ! -d $GIT_DEST ]; then
git clone $GIT_REMOTE $GIT_DEST
cd $2
# This checkout syntax works for both branches and tags
git checkout $3
git checkout $GIT_BRANCH
elif [[ "$RECLONE" == "yes" ]]; then
# if it does exist then simulate what clone does if asked to RECLONE
cd $GIT_DEST
# set the url to pull from and fetch
git remote set-url origin $GIT_REMOTE
git fetch origin
# remove the existing ignored files (like pyc) as they cause breakage
# (due to the py files having older timestamps than our pyc, so python
# thinks the pyc files are correct using them)
sudo git clean -f -d
git checkout -f origin/$GIT_BRANCH
# a local branch might not exist
git branch -D $GIT_BRANCH || true
git checkout -b $GIT_BRANCH
fi
}
@ -475,11 +486,15 @@ fi
# Nova
# ----
# We are going to use the sample http middleware configuration from the keystone
# project to launch nova. This paste config adds the configuration required
# for nova to validate keystone tokens - except we need to switch the config
# to use our service token instead (instead of the invalid token 999888777666).
sudo sed -e "s,999888777666,$SERVICE_TOKEN,g" -i $KEYSTONE_DIR/examples/paste/nova-api-paste.ini
if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
# We are going to use the sample http middleware configuration from the
# keystone project to launch nova. This paste config adds the configuration
# required for nova to validate keystone tokens - except we need to switch
# the config to use our service token instead (instead of the invalid token
# 999888777666).
cp $KEYSTONE_DIR/examples/paste/nova-api-paste.ini $NOVA_DIR/bin
sed -e "s,999888777666,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
fi
if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then
@ -552,6 +567,31 @@ if [[ "$ENABLED_SERVICES" =~ "n-net" ]]; then
mkdir -p $NOVA_DIR/networks
fi
# Volume Service
# --------------
if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then
#
# Configure a default volume group called 'nova-volumes' for the nova-volume
# service if it does not yet exist. If you don't wish to use a file backed
# volume group, create your own volume group called 'nova-volumes' before
# invoking stack.sh.
#
# By default, the backing file is 2G in size, and is stored in /opt/stack.
#
if ! sudo vgdisplay | grep -q nova-volumes; then
VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-/opt/stack/nova-volumes-backing-file}
VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M}
truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE
DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE`
sudo vgcreate nova-volumes $DEV
fi
# Configure iscsitarget
sudo sed 's/ISCSITARGET_ENABLE=false/ISCSITARGET_ENABLE=true/' -i /etc/default/iscsitarget
sudo /etc/init.d/iscsitarget restart
fi
function add_nova_flag {
echo "$1" >> $NOVA_DIR/bin/nova.conf
}
@ -571,7 +611,7 @@ add_nova_flag "--libvirt_type=$LIBVIRT_TYPE"
add_nova_flag "--osapi_extensions_path=$OPENSTACKX_DIR/extensions"
add_nova_flag "--vncproxy_url=http://$HOST_IP:6080"
add_nova_flag "--vncproxy_wwwroot=$NOVNC_DIR/"
add_nova_flag "--api_paste_config=$KEYSTONE_DIR/examples/paste/nova-api-paste.ini"
add_nova_flag "--api_paste_config=$NOVA_DIR/bin/nova-api-paste.ini"
add_nova_flag "--image_service=nova.image.glance.GlanceImageService"
add_nova_flag "--ec2_dmz_host=$EC2_DMZ_HOST"
add_nova_flag "--rabbit_host=$RABBIT_HOST"
@ -583,6 +623,7 @@ if [ -n "$FLAT_INTERFACE" ]; then
fi
if [ -n "$MULTI_HOST" ]; then
add_nova_flag "--multi_host=$MULTI_HOST"
add_nova_flag "--send_arp_for_ha=1"
fi
if [ "$SYSLOG" != "False" ]; then
add_nova_flag "--use_syslog=1"
@ -695,6 +736,7 @@ fi
# within the context of our original shell (so our groups won't be updated).
# Use 'sg' to execute nova-compute as a member of the libvirtd group.
screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_DIR/bin/nova-compute"
screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume"
screen_it n-net "cd $NOVA_DIR && $NOVA_DIR/bin/nova-network"
screen_it n-sch "cd $NOVA_DIR && $NOVA_DIR/bin/nova-scheduler"
screen_it n-vnc "cd $NOVNC_DIR && ./utils/nova-wsproxy.py 6080 --web . --flagfile=../nova/bin/nova.conf"
@ -722,8 +764,8 @@ if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then
for image_url in ${IMAGE_URLS//,/ }; do
# Downloads the image (uec ami+aki style), then extracts it.
IMAGE_FNAME=`echo "$image_url" | python -c "import sys; print sys.stdin.read().split('/')[-1]"`
IMAGE_NAME=`echo "$IMAGE_FNAME" | python -c "import sys; print sys.stdin.read().split('.tar.gz')[0].split('.tgz')[0]"`
IMAGE_FNAME=`basename "$image_url"`
IMAGE_NAME=`basename "$IMAGE_FNAME" .tar.gz`
if [ ! -f $FILES/$IMAGE_FNAME ]; then
wget -c $image_url -O $FILES/$IMAGE_FNAME
fi

View file

@ -12,14 +12,14 @@ KEYSTONE_BRANCH=diablo
# a websockets/html5 or flash powered VNC console for vm instances
NOVNC_REPO=https://github.com/cloudbuilders/noVNC.git
NOVNC_BRANCH=master
NOVNC_BRANCH=diablo
# django powered web control panel for openstack
DASH_REPO=https://github.com/cloudbuilders/openstack-dashboard.git
DASH_BRANCH=diablo
# python client library to nova that dashboard (and others) use
NOVACLIENT_REPO=https://github.com/cloudbuilders/python-novaclient.git
NOVACLIENT_REPO=https://github.com/rackspace/python-novaclient.git
NOVACLIENT_BRANCH=master
# openstackx is a collection of extensions to openstack.compute & nova

View file

@ -1,5 +1,8 @@
#!/usr/bin/env bash
# exit on error to stop unexpected errors
set -o errexit
# Make sure that we have the proper version of ubuntu
UBUNTU_VERSION=`cat /etc/lsb-release | grep CODENAME | sed 's/.*=//g'`
if [ ! "oneiric" = "$UBUNTU_VERSION" ]; then
@ -16,9 +19,6 @@ set -o xtrace
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=$TOOLS_DIR/..
# Configure the root password of the vm
ROOT_PASSWORD=${ROOT_PASSWORD:password}
# Where to store files and instances
KVMSTACK_DIR=${KVMSTACK_DIR:-/opt/kvmstack}
@ -41,24 +41,28 @@ fi
# Source params
source ./stackrc
# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD``
ROOT_PASSWORD=${ADMIN_PASSWORD:-password}
# Base image (natty by default)
DIST_NAME=${DIST_NAME:-natty}
IMAGE_FNAME=$DIST_NAME.raw
# Name of our instance, used by libvirt
GUEST_NAME=${GUEST_NAME:-kvmstack}
# Original version of built image
BASE_IMAGE=$KVMSTACK_DIR/images/natty.raw
BASE_IMAGE=$KVMSTACK_DIR/images/$DIST_NAME.raw
# Copy of base image, which we pre-install with tasty treats
BASE_IMAGE_COPY=$IMAGES_DIR/$DIST_NAME.raw.copy
# Name of our instance, used by libvirt
CONTAINER_NAME=${CONTAINER_NAME:-kvmstack}
VM_IMAGE=$IMAGES_DIR/$DIST_NAME.$GUEST_NAME.raw
# Mop up after previous runs
virsh destroy $CONTAINER_NAME
virsh destroy $GUEST_NAME || true
# Where this vm is stored
VM_DIR=$KVMSTACK_DIR/instances/$CONTAINER_NAME
VM_DIR=$KVMSTACK_DIR/instances/$GUEST_NAME
# Create vm dir
mkdir -p $VM_DIR
@ -70,14 +74,14 @@ mkdir -p $COPY_DIR
# Create the base image if it does not yet exist
if [ ! -e $IMAGES_DIR/$IMAGE_FNAME ]; then
cd $TOOLS_DIR
./make_image.sh -m -r 5000 natty raw
mv natty.raw $BASE_IMAGE
./make_image.sh -m -r 5000 $DIST_NAME raw
mv $DIST_NAME.raw $BASE_IMAGE
cd $TOP_DIR
fi
# Create a copy of the base image
if [ ! -e $BASE_IMAGE_COPY ]; then
cp -p $BASE_IMAGE $BASE_IMAGE_COPY
if [ ! -e $VM_IMAGE ]; then
cp -p $BASE_IMAGE $VM_IMAGE
fi
# Unmount the copied base image
@ -98,8 +102,8 @@ function kill_unmount() {
exit 1
}
# Install deps
apt-get install -y --force-yes kvm libvirt-bin kpartx
# Install deps if needed
dpkg -l kvm libvirt-bin kpartx || apt-get install -y --force-yes kvm libvirt-bin kpartx
# Let Ctrl-c kill tail and exit
trap kill_unmount SIGINT
@ -108,7 +112,7 @@ trap kill_unmount SIGINT
DEST=${DEST:-/opt/stack}
# Mount the file system
mount -o loop,offset=32256 $BASE_IMAGE_COPY $COPY_DIR
mount -o loop,offset=32256 $VM_IMAGE $COPY_DIR
# git clone only if directory doesn't exist already. Since ``DEST`` might not
# be owned by the installation user, we create the directory and change the
@ -148,29 +152,28 @@ git_clone $OPENSTACKX_REPO $COPY_DIR/$DEST/openstackx $OPENSTACKX_BRANCH
git_clone $KEYSTONE_REPO $COPY_DIR/$DEST/keystone $KEYSTONE_BRANCH
git_clone $NOVNC_REPO $COPY_DIR/$DEST/noVNC $NOVNC_BRANCH
# Unmount the filesystems
unmount_images
# Back to devstack
cd $TOP_DIR
# Unmount the filesystems
unmount_images
# Network configuration variables
BRIDGE=${BRIDGE:-br0}
CONTAINER=${CONTAINER:-STACK}
CONTAINER_IP=${CONTAINER_IP:-192.168.1.50}
CONTAINER_CIDR=${CONTAINER_CIDR:-$CONTAINER_IP/24}
CONTAINER_NETMASK=${CONTAINER_NETMASK:-255.255.255.0}
CONTAINER_GATEWAY=${CONTAINER_GATEWAY:-192.168.1.1}
CONTAINER_MAC=${CONTAINER_MAC:-"02:16:3e:07:69:`printf '%02X' $(echo $CONTAINER_IP | sed "s/.*\.//")`"}
CONTAINER_RAM=${CONTAINER_RAM:-1524288}
CONTAINER_CORES=${CONTAINER_CORES:-1}
GUEST_IP=${GUEST_IP:-192.168.1.50}
GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24}
GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0}
GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.1.1}
GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $(echo $GUEST_IP | sed "s/.*\.//")`"}
GUEST_RAM=${GUEST_RAM:-1524288}
GUEST_CORES=${GUEST_CORES:-1}
# libvirt.xml configuration
LIBVIRT_XML=libvirt.xml
LIBVIRT_XML=$VM_DIR/libvirt.xml
cat > $LIBVIRT_XML <<EOF
<domain type='kvm'>
<name>$CONTAINER_NAME</name>
<memory>$CONTAINER_RAM</memory>
<name>$GUEST_NAME</name>
<memory>$GUEST_RAM</memory>
<os>
<type>hvm</type>
<bootmenu enable='yes'/>
@ -178,7 +181,7 @@ cat > $LIBVIRT_XML <<EOF
<features>
<acpi/>
</features>
<vcpu>$CONTAINER_CORES</vcpu>
<vcpu>$GUEST_CORES</vcpu>
<devices>
<disk type='file'>
<driver type='qcow2'/>
@ -188,7 +191,7 @@ cat > $LIBVIRT_XML <<EOF
<interface type='bridge'>
<source bridge='$BRIDGE'/>
<mac address='$CONTAINER_MAC'/>
<mac address='$GUEST_MAC'/>
</interface>
<!-- The order is significant here. File must be defined first -->
@ -231,13 +234,15 @@ cd $VM_DIR
rm -f $VM_DIR/disk
# Create our instance fs
qemu-img create -f qcow2 -b $BASE_IMAGE_COPY disk
sleep 5
qemu-img create -f qcow2 -b $VM_IMAGE disk
# Connect our nbd and wait till it is mountable
qemu-nbd -c $NBD disk
sleep 5
NBD_DEV=`basename $NBD`
if ! timeout 60 sh -c "while ! [ -e /sys/block/$NBD_DEV/pid ]; do sleep 1; done"; then
echo "Couldn't connect $NBD"
exit 1
fi
# Mount the instance
mount $NBD $ROOTFS -o offset=32256 -t ext4
@ -250,13 +255,13 @@ iface lo inet loopback
auto eth0
iface eth0 inet static
address $CONTAINER_IP
netmask $CONTAINER_NETMASK
gateway $CONTAINER_GATEWAY
address $GUEST_IP
netmask $GUEST_NETMASK
gateway $GUEST_GATEWAY
EOF
# User configuration for the instance
chroot $ROOTFS groupadd libvirtd
chroot $ROOTFS groupadd libvirtd || true
chroot $ROOTFS useradd stack -s /bin/bash -d $DEST -G libvirtd
cp -pr $TOOLS_DIR/.. $ROOTFS/$DEST/devstack
echo "root:$ROOT_PASSWORD" | chroot $ROOTFS chpasswd
@ -280,6 +285,15 @@ if [ "$COPYENV" = "1" ]; then
cp_it ~/.bashrc $ROOTFS/$DEST/.bashrc
fi
# pre-cache uec images
for image_url in ${IMAGE_URLS//,/ }; do
IMAGE_FNAME=`basename "$image_url"`
if [ ! -f $IMAGES_DIR/$IMAGE_FNAME ]; then
wget -c $image_url -O $IMAGES_DIR/$IMAGE_FNAME
fi
cp $IMAGES_DIR/$IMAGE_FNAME $ROOTFS/$DEST/devstack/files
done
# Configure the runner
RUN_SH=$ROOTFS/$DEST/run.sh
cat > $RUN_SH <<EOF
@ -306,7 +320,7 @@ chmod 755 $RUN_SH
RC_LOCAL=$ROOTFS/etc/init.d/local
cat > $RC_LOCAL <<EOF
#!/bin/sh -e
# Reboot if this is our first run to enable console log on natty :(
# Reboot if this is our first run to enable console log on $DIST_NAME :(
if [ ! -e /root/firstlaunch ]; then
touch /root/firstlaunch
reboot -f
@ -372,6 +386,10 @@ if [ "$WAIT_TILL_LAUNCH" = "1" ]; then
done
kill $TAIL_PID
if grep -q "stack.sh failed" $VM_DIR/console.log; then
exit 1
fi
echo ""
echo "Finished - Zip-a-dee Doo-dah!"
fi

View file

@ -27,18 +27,19 @@ CWD=`pwd`
# Configurable params
BRIDGE=${BRIDGE:-br0}
CONTAINER=${CONTAINER:-STACK}
CONTAINER_IP=${CONTAINER_IP:-192.168.1.50}
CONTAINER_CIDR=${CONTAINER_CIDR:-$CONTAINER_IP/24}
CONTAINER_NETMASK=${CONTAINER_NETMASK:-255.255.255.0}
CONTAINER_GATEWAY=${CONTAINER_GATEWAY:-192.168.1.1}
GUEST_NAME=${GUEST_NAME:-STACK}
GUEST_IP=${GUEST_IP:-192.168.1.50}
GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24}
GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0}
GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.1.1}
NAMESERVER=${NAMESERVER:-`cat /etc/resolv.conf | grep nameserver | head -1 | cut -d " " -f2`}
COPYENV=${COPYENV:-1}
DEST=${DEST:-/opt/stack}
WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1}
# Param string to pass to stack.sh. Like "EC2_DMZ_HOST=192.168.1.1 MYSQL_USER=nova"
STACKSH_PARAMS=${STACKSH_PARAMS:-}
# By default, n-vol is disabled for lxc, as iscsitarget doesn't work properly in lxc
STACKSH_PARAMS=${STACKSH_PARAMS:-"ENABLED_SERVICES=g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,dash,mysql,rabbit"}
# Option to use the version of devstack on which we are currently working
USE_CURRENT_DEVSTACK=${USE_CURRENT_DEVSTACK:-1}
@ -59,22 +60,22 @@ if ! which cgdelete | grep -q cgdelete; then
fi
# Create lxc configuration
LXC_CONF=/tmp/$CONTAINER.conf
LXC_CONF=/tmp/$GUEST_NAME.conf
cat > $LXC_CONF <<EOF
lxc.network.type = veth
lxc.network.link = $BRIDGE
lxc.network.flags = up
lxc.network.ipv4 = $CONTAINER_CIDR
lxc.network.ipv4 = $GUEST_CIDR
# allow tap/tun devices
lxc.cgroup.devices.allow = c 10:200 rwm
EOF
# Shutdown any existing container
lxc-stop -n $CONTAINER
lxc-stop -n $GUEST_NAME
# This kills zombie containers
if [ -d /cgroup/$CONTAINER ]; then
cgdelete -r cpu,net_cls:$CONTAINER
if [ -d /cgroup/$GUEST_NAME ]; then
cgdelete -r cpu,net_cls:$GUEST_NAME
fi
# git clone only if directory doesn't exist already. Since ``DEST`` might not
@ -94,9 +95,9 @@ function git_clone {
# Helper to create the container
function create_lxc {
if [ "natty" = "$UBUNTU_VERSION" ]; then
lxc-create -n $CONTAINER -t natty -f $LXC_CONF
lxc-create -n $GUEST_NAME -t natty -f $LXC_CONF
else
lxc-create -n $CONTAINER -t ubuntu -f $LXC_CONF
lxc-create -n $GUEST_NAME -t ubuntu -f $LXC_CONF
fi
}
@ -116,7 +117,7 @@ fi
if [ ! -f $CACHEDIR/bootstrapped ]; then
# by deleting the container, we force lxc-create to re-bootstrap (lxc is
# lazy and doesn't do anything if a container already exists)
lxc-destroy -n $CONTAINER
lxc-destroy -n $GUEST_NAME
# trigger the initial debootstrap
create_lxc
touch $CACHEDIR/bootstrapped
@ -152,7 +153,7 @@ if [ "$USE_CURRENT_DEVSTACK" = "1" ]; then
fi
# Destroy the old container
lxc-destroy -n $CONTAINER
lxc-destroy -n $GUEST_NAME
# If this call is to TERMINATE the container then exit
if [ "$TERMINATE" = "1" ]; then
@ -163,7 +164,7 @@ fi
create_lxc
# Specify where our container rootfs lives
ROOTFS=/var/lib/lxc/$CONTAINER/rootfs/
ROOTFS=/var/lib/lxc/$GUEST_NAME/rootfs/
# Create a stack user that is a member of the libvirtd group so that stack
# is able to interact with libvirt.
@ -213,9 +214,9 @@ iface lo inet loopback
auto eth0
iface eth0 inet static
address $CONTAINER_IP
netmask $CONTAINER_NETMASK
gateway $CONTAINER_GATEWAY
address $GUEST_IP
netmask $GUEST_NETMASK
gateway $GUEST_GATEWAY
EOF
# Configure the runner
@ -226,7 +227,7 @@ cat > $RUN_SH <<EOF
echo "nameserver $NAMESERVER" | sudo resolvconf -a eth0
# Make there is a default route - needed for natty
if ! route | grep -q default; then
sudo ip route add default via $CONTAINER_GATEWAY
sudo ip route add default via $GUEST_GATEWAY
fi
sleep 1
@ -264,7 +265,7 @@ if ! mount | grep -q cgroup; then
fi
# Start our container
lxc-start -d -n $CONTAINER
lxc-start -d -n $GUEST_NAME
if [ "$WAIT_TILL_LAUNCH" = "1" ]; then
# Done creating the container, let's tail the log
@ -300,6 +301,11 @@ if [ "$WAIT_TILL_LAUNCH" = "1" ]; then
done
kill $TAIL_PID
if grep -q "stack.sh failed" $ROOTFS/$DEST/run.sh.log; then
exit 1
fi
echo ""
echo "Finished - Zip-a-dee Doo-dah!"
fi

View file

@ -18,7 +18,7 @@ COMMON_VARS="MYSQL_HOST=$HEAD_HOST RABBIT_HOST=$HEAD_HOST GLANCE_HOSTPORT=$HEAD_
# Helper to launch containers
function run_lxc {
# For some reason container names with periods can cause issues :/
CONTAINER=$1 CONTAINER_IP=$2 CONTAINER_NETMASK=$NETMASK CONTAINER_GATEWAY=$GATEWAY NAMESERVER=$NAMESERVER TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $3" ./build_lxc.sh
GUEST_NAME=$1 GUEST_IP=$2 GUEST_NETMASK=$NETMASK GUEST_GATEWAY=$GATEWAY NAMESERVER=$NAMESERVER TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $3" ./build_lxc.sh
}
# Launch the head node - headnode uses a non-ip domain name,

View file

@ -99,11 +99,10 @@ git_clone $DASH_REPO $DEST/dash $DASH_BRANCH
git_clone $NOVACLIENT_REPO $DEST/python-novaclient $NOVACLIENT_BRANCH
git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH
# Use this version of devstack?
if [ "$USE_CURRENT_DEVSTACK" = "1" ]; then
# Use this version of devstack
rm -rf $CHROOTCACHE/natty-stack/$DEST/devstack
cp -pr $CWD $CHROOTCACHE/natty-stack/$DEST/devstack
fi
chroot $CHROOTCACHE/natty-stack chown -R stack $DEST/devstack
# Configure host network for DHCP
mkdir -p $CHROOTCACHE/natty-stack/etc/network

159
tools/get_uec_image.sh Executable file
View file

@ -0,0 +1,159 @@
#!/bin/bash
# get_uec_image.sh - Prepare Ubuntu images in various formats
#
# Supported formats: qcow (kvm), vmdk (vmserver), vdi (vbox), vhd (vpc), raw
#
# Required to run as root

# Tunables - all overridable from the environment
CACHEDIR=${CACHEDIR:-/var/cache/devstack}   # downloaded UEC images cached here
FORMAT=${FORMAT:-qcow2}                     # output image format
ROOTSIZE=${ROOTSIZE:-2000}                  # root filesystem size in MB
MIN_PKGS=${MIN_PKGS:-"apt-utils gpgv openssh-server"}  # packages installed into the image

# Print usage information and exit with status 1.
usage() {
    echo "Usage: $0 - Prepare Ubuntu images"
    echo ""
    echo "$0 [-f format] [-m] [-r rootsize] release imagefile"
    echo ""
    echo "-f format - image format: qcow2 (default), vmdk, vdi, vhd, xen, raw, fs"
    echo "-m        - install only the minimal package set"
    echo "-r size   - root fs size in MB (min 2000MB)"
    echo "release   - Ubuntu release: jaunty - oneiric"
    echo "imagefile - output image file"
    exit 1
}
# Process command-line options (see usage() for meanings).
while getopts f:hmr: c; do
    case $c in
        f)  FORMAT=$OPTARG
            ;;
        h)  usage
            ;;
        m)  MINIMAL=1
            ;;
        r)  ROOTSIZE=$OPTARG
            # BUG FIX: the original used ``if $(( ROOTSIZE < 2000 ))`` which
            # expands to the arithmetic *result* (0 or 1) and tries to run it
            # as a command, so the size check never worked.  Use (( )) as a
            # test instead.
            if (( ROOTSIZE < 2000 )); then
                echo "root size must be at least 2000MB" >&2
                exit 1
            fi
            ;;
    esac
done
shift $((OPTIND - 1))

# Exactly two positional arguments are required: release and output file.
if [ "$#" -ne 2 ]; then
    usage
fi

# Default args
DIST_NAME=$1
IMG_FILE=$2

# Map the user-visible format name onto the qemu-img output format.
case $FORMAT in
    kvm|qcow2)      FORMAT=qcow2
                    QFORMAT=qcow2
                    ;;
    vmserver|vmdk)  FORMAT=vmdk
                    QFORMAT=vmdk
                    ;;
    vbox|vdi)       FORMAT=vdi
                    QFORMAT=vdi
                    ;;
    vhd|vpc)        FORMAT=vhd
                    QFORMAT=vpc
                    ;;
    xen)            FORMAT=raw
                    QFORMAT=raw
                    ;;
    raw)            FORMAT=raw
                    QFORMAT=raw
                    ;;
    *)              echo "Unknown format: $FORMAT" >&2
                    usage
                    ;;
esac

# Only known Ubuntu releases are accepted.
case $DIST_NAME in
    oneiric|natty|maverick|lucid|karmic|jaunty)
        ;;
    *)  echo "Unknown release: $DIST_NAME" >&2
        usage
        ;;
esac

# Set up the network block device used to mount the image for modification.
modprobe nbd max_part=63
NBD=${NBD:-/dev/nbd9}
NBD_DEV=$(basename "$NBD")

# Prepare the base image
# ----------------------
# Download the UEC image once into the cache and pre-install our required
# packages into it via chroot.
UEC_NAME=$DIST_NAME-server-cloudimg-amd64
if [ ! -e "$CACHEDIR/$UEC_NAME-disk1.img" ]; then
    (cd "$CACHEDIR" && wget -N http://uec-images.ubuntu.com/$DIST_NAME/current/$UEC_NAME-disk1.img)

    # Connect to nbd and wait till it is ready
    qemu-nbd -d "$NBD"
    qemu-nbd -c "$NBD" "$CACHEDIR/$UEC_NAME-disk1.img"
    if ! timeout 60 sh -c "while ! [ -e /sys/block/$NBD_DEV/pid ]; do sleep 1; done"; then
        echo "Couldn't connect $NBD" >&2
        exit 1
    fi
    MNTDIR=$(mktemp -d mntXXXXXXXX)
    mount -t ext4 "${NBD}p1" "$MNTDIR"

    # Install our required packages inside the image; resolv.conf is copied
    # in temporarily so apt can resolve hostnames from within the chroot.
    cp -p files/sources.list "$MNTDIR/etc/apt/sources.list"
    cp -p /etc/resolv.conf "$MNTDIR/etc/resolv.conf"
    chroot "$MNTDIR" apt-get update
    chroot "$MNTDIR" apt-get install -y $MIN_PKGS
    rm -f "$MNTDIR/etc/resolv.conf"

    umount "$MNTDIR"
    rmdir "$MNTDIR"
    qemu-nbd -d "$NBD"
fi

# Produce the output image in the requested format.
if [ "$FORMAT" = "qcow2" ]; then
    # The cached UEC image is already qcow2; just copy it
    cp -p "$CACHEDIR/$UEC_NAME-disk1.img" "$IMG_FILE"
else
    # Convert image
    qemu-img convert -O "$QFORMAT" "$CACHEDIR/$UEC_NAME-disk1.img" "$IMG_FILE"
fi

# Resize the image if necessary (stock root is 2000MB)
if [ "$ROOTSIZE" -gt 2000 ]; then
    # Grow the container file first ...
    qemu-img resize "$IMG_FILE" +$((ROOTSIZE - 2000))M

    # ... then connect to nbd and wait till it is ready
    qemu-nbd -c "$NBD" "$IMG_FILE"
    if ! timeout 60 sh -c "while ! [ -e /sys/block/$NBD_DEV/pid ]; do sleep 1; done"; then
        echo "Couldn't connect $NBD" >&2
        exit 1
    fi

    # Rewrite partition 1 to span the full disk: delete (d), recreate as
    # primary partition 1 (n p 1 2), set type 83/Linux (t 83), mark it
    # bootable (a 1) and write (w).  The answers are fed on stdin.
    echo "d
n
p
1
2
t
83
a
1
w
" | fdisk "$NBD"

    # Check, then grow, the filesystem to fill the enlarged partition
    fsck -t ext4 -f "${NBD}p1"
    resize2fs "${NBD}p1"
    qemu-nbd -d "$NBD"
fi

View file

@ -65,6 +65,13 @@ if [ -n "$IMAGEONLY" ]; then
RELEASE="pass"
fi
# Make sure that we have the proper version of ubuntu
UBUNTU_VERSION=`cat /etc/lsb-release | grep CODENAME | sed 's/.*=//g'`
if [ "$UBUNTU_VERSION" = "natty" -a "$RELEASE" = "oneiric" ]; then
echo "natty installs can't build oneiric images"
exit 1
fi
case $FORMAT in
kvm|qcow2) FORMAT=qcow2
QFORMAT=qcow2
@ -97,6 +104,7 @@ case $FORMAT in
esac
case $RELEASE in
oneiric) ;;
natty) ;;
maverick) ;;
lucid) ;;