diff --git a/exercise.sh b/exercise.sh index 1a812f4..3f2c94e 100755 --- a/exercise.sh +++ b/exercise.sh @@ -19,44 +19,8 @@ set -o xtrace # Settings # ======== -# Use stackrc and localrc for settings -source ./stackrc - -HOST=${HOST:-localhost} - -# Nova original used project_id as the *account* that owned resources (servers, -# ip address, ...) With the addition of Keystone we have standardized on the -# term **tenant** as the entity that owns the resources. **novaclient** still -# uses the old deprecated terms project_id. Note that this field should now be -# set to tenant_name, not tenant_id. -export NOVA_PROJECT_ID=${TENANT:-demo} - -# In addition to the owning entity (tenant), nova stores the entity performing -# the action as the **user**. -export NOVA_USERNAME=${USERNAME:-demo} - -# With Keystone you pass the keystone password instead of an api key. -export NOVA_API_KEY=${ADMIN_PASSWORD:-secrete} - -# With the addition of Keystone, to use an openstack cloud you should -# authenticate against keystone, which returns a **Token** and **Service -# Catalog**. The catalog contains the endpoint for all services the user/tenant -# has access to - including nova, glance, keystone, swift, ... We currently -# recommend using the 2.0 *auth api*. -# -# *NOTE*: Using the 2.0 *auth api* does mean that compute api is 2.0. We will -# use the 1.1 *compute api* -export NOVA_URL=${NOVA_URL:-http://$HOST:5000/v2.0/} - -# Currently novaclient needs you to specify the *compute api* version. This -# needs to match the config of your catalog returned by Keystone. -export NOVA_VERSION=1.1 - -# FIXME - why does this need to be specified? 
-export NOVA_REGION_NAME=RegionOne - -# set log level to DEBUG (helps debug issues) -export NOVACLIENT_DEBUG=1 +# Use openrc + stackrc + localrc for settings +source ./openrc # Get a token for clients that don't support service catalog # ========================================================== @@ -65,7 +29,7 @@ export NOVACLIENT_DEBUG=1 # returns a token and catalog of endpoints. We use python to parse the token # and save it. -TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_API_KEY\"}}}" -H "Content-type: application/json" http://$HOST:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"` +TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_API_KEY\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"` # Launching a server # ================== @@ -95,8 +59,8 @@ nova secgroup-list # Create a secgroup nova secgroup-create $SECGROUP "test_secgroup description" -# Flavors -# ------- +# determine flavor +# ---------------- # List of flavors: nova flavor-list @@ -108,6 +72,16 @@ NAME="myserver" nova boot --flavor $FLAVOR --image $IMAGE $NAME --security_groups=$SECGROUP +# Testing +# ======= + +# First check if it spins up (becomes active and responds to ping on +# internal ip). If you run this script from a nova node, you should +# bypass security groups and have direct access to the server. 
+ +# Waiting for boot +# ---------------- + # let's give it 10 seconds to launch sleep 10 @@ -117,15 +91,23 @@ nova show $NAME | grep status | grep -q ACTIVE # get the IP of the server IP=`nova show $NAME | grep "private network" | cut -d"|" -f3` -# ping it once (timeout of a second) -ping -c1 -w1 $IP || true +# for single node deployments, we can ping private ips +MULTI_HOST=${MULTI_HOST:-0} +if [ "$MULTI_HOST" = "0" ]; then + # ping it once (timeout of a second) + ping -c1 -w1 $IP || true -# sometimes the first ping fails (10 seconds isn't enough time for the VM's -# network to respond?), so let's wait 5 seconds and really test ping -sleep 5 + # sometimes the first ping fails (10 seconds isn't enough time for the VM's + # network to respond?), so let's wait 5 seconds and really test ping + sleep 5 -ping -c1 -w1 $IP -# allow icmp traffic + ping -c1 -w1 $IP +fi + +# Security Groups & Floating IPs +# ------------------------------ + +# allow icmp traffic (ping) nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 # List rules for a secgroup @@ -135,31 +117,31 @@ nova secgroup-list-rules $SECGROUP nova floating-ip-create # store floating address -FIP=`nova floating-ip-list | grep None | head -1 | cut -d '|' -f2 | sed 's/ //g'` +FLOATING_IP=`nova floating-ip-list | grep None | head -1 | cut -d '|' -f2 | sed 's/ //g'` # add floating ip to our server -nova add-floating-ip $NAME $FIP +nova add-floating-ip $NAME $FLOATING_IP # sleep for a smidge -sleep 1 +sleep 5 -# ping our fip -ping -c1 -w1 $FIP +# ping our floating ip +ping -c1 -w1 $FLOATING_IP -# dis-allow icmp traffic +# dis-allow icmp traffic (ping) nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 # sleep for a smidge -sleep 1 +sleep 5 -# ping our fip -if ( ping -c1 -w1 $FIP); then +# ping our floating ip +if ( ping -c1 -w1 $FLOATING_IP ); then print "Security group failure - ping should not be allowed!" 
exit 1 fi # de-allocate the floating ip -nova floating-ip-delete $FIP +nova floating-ip-delete $FLOATING_IP # shutdown the server nova delete $NAME @@ -169,3 +151,9 @@ nova secgroup-delete $SECGROUP # FIXME: validate shutdown within 5 seconds # (nova show $NAME returns 1 or status != ACTIVE)? + +# Testing Euca2ools +# ================== + +# make sure that we can describe instances +euca-describe-instances diff --git a/files/apts/general b/files/apts/general index b47a60d..31fa752 100644 --- a/files/apts/general +++ b/files/apts/general @@ -16,3 +16,4 @@ iputils-ping wget curl tcpdump +euca2ools # only for testing client diff --git a/files/apts/nova b/files/apts/nova index eb85e26..594f2da 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -33,3 +33,7 @@ python-suds python-lockfile python-m2crypto python-boto + +# Stuff for diablo volumes +iscsitarget +lvm2 diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 2cca345..5349311 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -36,8 +36,8 @@ $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone http://%HOS # Tokens $BIN_DIR/keystone-manage $* token add %SERVICE_TOKEN% admin admin 2015-02-05T00:00 -# EC2 related creds - note we are setting the token to user_password +# EC2 related creds - note we are setting the secret key to ADMIN_PASSWORD # but keystone doesn't parse them - it is just a blob from keystone's # point of view -$BIN_DIR/keystone-manage $* credentials add admin EC2 'admin_%ADMIN_PASSWORD%' admin admin || echo "no support for adding credentials" -$BIN_DIR/keystone-manage $* credentials add demo EC2 'demo_%ADMIN_PASSWORD%' demo demo || echo "no support for adding credentials" +$BIN_DIR/keystone-manage $* credentials add admin EC2 'admin' '%ADMIN_PASSWORD%' admin || echo "no support for adding credentials" +$BIN_DIR/keystone-manage $* credentials add demo EC2 'demo' '%ADMIN_PASSWORD%' demo || echo "no support for adding credentials" diff --git a/openrc 
b/openrc new file mode 100644 index 0000000..324780b --- /dev/null +++ b/openrc @@ -0,0 +1,51 @@ +#!/usr/bin/env bash + +# Load local configuration +source ./stackrc + +# Set api host endpoint +HOST_IP=${HOST_IP:-127.0.0.1} + +# Nova originally used project_id as the *account* that owned resources (servers, +# ip address, ...) With the addition of Keystone we have standardized on the +# term **tenant** as the entity that owns the resources. **novaclient** still +# uses the old deprecated term project_id. Note that this field should now be +# set to tenant_name, not tenant_id. +export NOVA_PROJECT_ID=${TENANT:-demo} + +# In addition to the owning entity (tenant), nova stores the entity performing +# the action as the **user**. +export NOVA_USERNAME=${USERNAME:-demo} + +# With Keystone you pass the keystone password instead of an api key. +export NOVA_API_KEY=${ADMIN_PASSWORD:-secrete} + +# With the addition of Keystone, to use an openstack cloud you should +# authenticate against keystone, which returns a **Token** and **Service +# Catalog**. The catalog contains the endpoint for all services the user/tenant +# has access to - including nova, glance, keystone, swift, ... We currently +# recommend using the 2.0 *auth api*. +# +# *NOTE*: Using the 2.0 *auth api* does not mean that compute api is 2.0. We +# will use the 1.1 *compute api* +export NOVA_URL=${NOVA_URL:-http://$HOST_IP:5000/v2.0/} +# Currently novaclient needs you to specify the *compute api* version. This +# needs to match the config of your catalog returned by Keystone. +export NOVA_VERSION=${NOVA_VERSION:-1.1} + +# FIXME - why does this need to be specified? 
+export NOVA_REGION_NAME=${NOVA_REGION_NAME:-RegionOne} + +# Set the ec2 url so euca2ools works +export EC2_URL=${EC2_URL:-http://$HOST_IP:8773/services/Cloud} + +# Access key is set in the initial keystone data to be the same as username +export EC2_ACCESS_KEY=${USERNAME:-demo} + +# Secret key is set in the initial keystone data to the admin password +export EC2_SECRET_KEY=${ADMIN_PASSWORD:-secrete} + +# set log level to DEBUG (helps debug issues) +# export NOVACLIENT_DEBUG=1 + diff --git a/stack.sh b/stack.sh index 02267d2..add76bd 100755 --- a/stack.sh +++ b/stack.sh @@ -305,20 +305,31 @@ sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install `cat $FILES/pips/*` # be owned by the installation user, we create the directory and change the # ownership to the proper user. function git_clone { - # if there is an existing checkout, move it out of the way - if [[ "$RECLONE" == "yes" ]]; then - # FIXME(ja): if we were smarter we could speed up RECLONE by - # using the old git repo as the basis of our new clone... - if [ -d $2 ]; then - mv $2 /tmp/stack.`date +%s` - fi - fi - if [ ! -d $2 ]; then - git clone $1 $2 + GIT_REMOTE=$1 + GIT_DEST=$2 + GIT_BRANCH=$3 + + # do a full clone only if the directory doesn't exist + if [ ! 
-d $GIT_DEST ]; then + git clone $GIT_REMOTE $GIT_DEST cd $2 # This checkout syntax works for both branches and tags - git checkout $3 + git checkout $GIT_BRANCH + elif [[ "$RECLONE" == "yes" ]]; then + # if it does exist then simulate what clone does if asked to RECLONE + cd $GIT_DEST + # set the url to pull from and fetch + git remote set-url origin $GIT_REMOTE + git fetch origin + # remove the existing ignored files (like pyc) as they cause breakage + # (due to the py files having older timestamps than our pyc, so python + # thinks the pyc files are correct using them) + sudo git clean -f -d + git checkout -f origin/$GIT_BRANCH + # a local branch might not exist + git branch -D $GIT_BRANCH || true + git checkout -b $GIT_BRANCH fi } @@ -475,11 +486,15 @@ fi # Nova # ---- -# We are going to use the sample http middleware configuration from the keystone -# project to launch nova. This paste config adds the configuration required -# for nova to validate keystone tokens - except we need to switch the config -# to use our service token instead (instead of the invalid token 999888777666). -sudo sed -e "s,999888777666,$SERVICE_TOKEN,g" -i $KEYSTONE_DIR/examples/paste/nova-api-paste.ini +if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then + # We are going to use the sample http middleware configuration from the + # keystone project to launch nova. This paste config adds the configuration + # required for nova to validate keystone tokens - except we need to switch + # the config to use our service token instead (instead of the invalid token + # 999888777666). 
+ cp $KEYSTONE_DIR/examples/paste/nova-api-paste.ini $NOVA_DIR/bin + sed -e "s,999888777666,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini +fi if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then @@ -552,6 +567,31 @@ if [[ "$ENABLED_SERVICES" =~ "n-net" ]]; then mkdir -p $NOVA_DIR/networks fi +# Volume Service +# -------------- + +if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then + # + # Configure a default volume group called 'nova-volumes' for the nova-volume + # service if it does not yet exist. If you don't wish to use a file backed + # volume group, create your own volume group called 'nova-volumes' before + # invoking stack.sh. + # + # By default, the backing file is 2G in size, and is stored in /opt/stack. + # + if ! sudo vgdisplay | grep -q nova-volumes; then + VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-/opt/stack/nova-volumes-backing-file} + VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M} + truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE + DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` + sudo vgcreate nova-volumes $DEV + fi + + # Configure iscsitarget + sudo sed 's/ISCSITARGET_ENABLE=false/ISCSITARGET_ENABLE=true/' -i /etc/default/iscsitarget + sudo /etc/init.d/iscsitarget restart +fi + function add_nova_flag { echo "$1" >> $NOVA_DIR/bin/nova.conf } @@ -571,7 +611,7 @@ add_nova_flag "--libvirt_type=$LIBVIRT_TYPE" add_nova_flag "--osapi_extensions_path=$OPENSTACKX_DIR/extensions" add_nova_flag "--vncproxy_url=http://$HOST_IP:6080" add_nova_flag "--vncproxy_wwwroot=$NOVNC_DIR/" -add_nova_flag "--api_paste_config=$KEYSTONE_DIR/examples/paste/nova-api-paste.ini" +add_nova_flag "--api_paste_config=$NOVA_DIR/bin/nova-api-paste.ini" add_nova_flag "--image_service=nova.image.glance.GlanceImageService" add_nova_flag "--ec2_dmz_host=$EC2_DMZ_HOST" add_nova_flag "--rabbit_host=$RABBIT_HOST" @@ -583,6 +623,7 @@ if [ -n "$FLAT_INTERFACE" ]; then fi if [ -n "$MULTI_HOST" ]; then add_nova_flag "--multi_host=$MULTI_HOST" + add_nova_flag 
"--send_arp_for_ha=1" fi if [ "$SYSLOG" != "False" ]; then add_nova_flag "--use_syslog=1" @@ -695,6 +736,7 @@ fi # within the context of our original shell (so our groups won't be updated). # Use 'sg' to execute nova-compute as a member of the libvirtd group. screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_DIR/bin/nova-compute" +screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume" screen_it n-net "cd $NOVA_DIR && $NOVA_DIR/bin/nova-network" screen_it n-sch "cd $NOVA_DIR && $NOVA_DIR/bin/nova-scheduler" screen_it n-vnc "cd $NOVNC_DIR && ./utils/nova-wsproxy.py 6080 --web . --flagfile=../nova/bin/nova.conf" @@ -722,8 +764,8 @@ if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then for image_url in ${IMAGE_URLS//,/ }; do # Downloads the image (uec ami+aki style), then extracts it. - IMAGE_FNAME=`echo "$image_url" | python -c "import sys; print sys.stdin.read().split('/')[-1]"` - IMAGE_NAME=`echo "$IMAGE_FNAME" | python -c "import sys; print sys.stdin.read().split('.tar.gz')[0].split('.tgz')[0]"` + IMAGE_FNAME=`basename "$image_url"` + IMAGE_NAME=`basename "$IMAGE_FNAME" .tar.gz` if [ ! 
-f $FILES/$IMAGE_FNAME ]; then wget -c $image_url -O $FILES/$IMAGE_FNAME fi diff --git a/stackrc b/stackrc index aaee6ec..9ba1043 100644 --- a/stackrc +++ b/stackrc @@ -12,14 +12,14 @@ KEYSTONE_BRANCH=diablo # a websockets/html5 or flash powered VNC console for vm instances NOVNC_REPO=https://github.com/cloudbuilders/noVNC.git -NOVNC_BRANCH=master +NOVNC_BRANCH=diablo # django powered web control panel for openstack DASH_REPO=https://github.com/cloudbuilders/openstack-dashboard.git DASH_BRANCH=diablo # python client library to nova that dashboard (and others) use -NOVACLIENT_REPO=https://github.com/cloudbuilders/python-novaclient.git +NOVACLIENT_REPO=https://github.com/rackspace/python-novaclient.git NOVACLIENT_BRANCH=master # openstackx is a collection of extensions to openstack.compute & nova diff --git a/tools/build_kvm.sh b/tools/build_kvm.sh index 207f86b..36457d5 100755 --- a/tools/build_kvm.sh +++ b/tools/build_kvm.sh @@ -1,5 +1,8 @@ #!/usr/bin/env bash +# exit on error to stop unexpected errors +set -o errexit + # Make sure that we have the proper version of ubuntu UBUNTU_VERSION=`cat /etc/lsb-release | grep CODENAME | sed 's/.*=//g'` if [ ! "oneiric" = "$UBUNTU_VERSION" ]; then @@ -16,9 +19,6 @@ set -o xtrace TOOLS_DIR=$(cd $(dirname "$0") && pwd) TOP_DIR=$TOOLS_DIR/.. 
-# Configure the root password of the vm -ROOT_PASSWORD=${ROOT_PASSWORD:password} - # Where to store files and instances KVMSTACK_DIR=${KVMSTACK_DIR:-/opt/kvmstack} @@ -41,24 +41,28 @@ fi # Source params source ./stackrc +# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD`` +ROOT_PASSWORD=${ADMIN_PASSWORD:-password} + + # Base image (natty by default) DIST_NAME=${DIST_NAME:-natty} IMAGE_FNAME=$DIST_NAME.raw +# Name of our instance, used by libvirt +GUEST_NAME=${GUEST_NAME:-kvmstack} + # Original version of built image -BASE_IMAGE=$KVMSTACK_DIR/images/natty.raw +BASE_IMAGE=$KVMSTACK_DIR/images/$DIST_NAME.raw # Copy of base image, which we pre-install with tasty treats -BASE_IMAGE_COPY=$IMAGES_DIR/$DIST_NAME.raw.copy - -# Name of our instance, used by libvirt -CONTAINER_NAME=${CONTAINER_NAME:-kvmstack} +VM_IMAGE=$IMAGES_DIR/$DIST_NAME.$GUEST_NAME.raw # Mop up after previous runs -virsh destroy $CONTAINER_NAME +virsh destroy $GUEST_NAME || true # Where this vm is stored -VM_DIR=$KVMSTACK_DIR/instances/$CONTAINER_NAME +VM_DIR=$KVMSTACK_DIR/instances/$GUEST_NAME # Create vm dir mkdir -p $VM_DIR @@ -70,14 +74,14 @@ mkdir -p $COPY_DIR # Create the base image if it does not yet exist if [ ! -e $IMAGES_DIR/$IMAGE_FNAME ]; then cd $TOOLS_DIR - ./make_image.sh -m -r 5000 natty raw - mv natty.raw $BASE_IMAGE + ./make_image.sh -m -r 5000 $DIST_NAME raw + mv $DIST_NAME.raw $BASE_IMAGE cd $TOP_DIR fi # Create a copy of the base image -if [ ! -e $BASE_IMAGE_COPY ]; then - cp -p $BASE_IMAGE $BASE_IMAGE_COPY +if [ ! 
-e $VM_IMAGE ]; then + cp -p $BASE_IMAGE $VM_IMAGE fi # Unmount the copied base image @@ -98,8 +102,8 @@ function kill_unmount() { exit 1 } -# Install deps -apt-get install -y --force-yes kvm libvirt-bin kpartx +# Install deps if needed +dpkg -l kvm libvirt-bin kpartx || apt-get install -y --force-yes kvm libvirt-bin kpartx # Let Ctrl-c kill tail and exit trap kill_unmount SIGINT @@ -108,7 +112,7 @@ trap kill_unmount SIGINT DEST=${DEST:-/opt/stack} # Mount the file system -mount -o loop,offset=32256 $BASE_IMAGE_COPY $COPY_DIR +mount -o loop,offset=32256 $VM_IMAGE $COPY_DIR # git clone only if directory doesn't exist already. Since ``DEST`` might not # be owned by the installation user, we create the directory and change the @@ -148,29 +152,28 @@ git_clone $OPENSTACKX_REPO $COPY_DIR/$DEST/openstackx $OPENSTACKX_BRANCH git_clone $KEYSTONE_REPO $COPY_DIR/$DEST/keystone $KEYSTONE_BRANCH git_clone $NOVNC_REPO $COPY_DIR/$DEST/noVNC $NOVNC_BRANCH -# Unmount the filesystems -unmount_images - # Back to devstack cd $TOP_DIR +# Unmount the filesystems +unmount_images + # Network configuration variables BRIDGE=${BRIDGE:-br0} -CONTAINER=${CONTAINER:-STACK} -CONTAINER_IP=${CONTAINER_IP:-192.168.1.50} -CONTAINER_CIDR=${CONTAINER_CIDR:-$CONTAINER_IP/24} -CONTAINER_NETMASK=${CONTAINER_NETMASK:-255.255.255.0} -CONTAINER_GATEWAY=${CONTAINER_GATEWAY:-192.168.1.1} -CONTAINER_MAC=${CONTAINER_MAC:-"02:16:3e:07:69:`printf '%02X' $(echo $CONTAINER_IP | sed "s/.*\.//")`"} -CONTAINER_RAM=${CONTAINER_RAM:-1524288} -CONTAINER_CORES=${CONTAINER_CORES:-1} +GUEST_IP=${GUEST_IP:-192.168.1.50} +GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24} +GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0} +GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.1.1} +GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $(echo $GUEST_IP | sed "s/.*\.//")`"} +GUEST_RAM=${GUEST_RAM:-1524288} +GUEST_CORES=${GUEST_CORES:-1} # libvirt.xml configuration -LIBVIRT_XML=libvirt.xml +LIBVIRT_XML=$VM_DIR/libvirt.xml cat > $LIBVIRT_XML < - 
$CONTAINER_NAME - $CONTAINER_RAM + $GUEST_NAME + $GUEST_RAM hvm @@ -178,7 +181,7 @@ cat > $LIBVIRT_XML < - $CONTAINER_CORES + $GUEST_CORES @@ -188,7 +191,7 @@ cat > $LIBVIRT_XML < - + @@ -231,13 +234,15 @@ cd $VM_DIR rm -f $VM_DIR/disk # Create our instance fs -qemu-img create -f qcow2 -b $BASE_IMAGE_COPY disk - -sleep 5 +qemu-img create -f qcow2 -b $VM_IMAGE disk +# Connect our nbd and wait till it is mountable qemu-nbd -c $NBD disk - -sleep 5 +NBD_DEV=`basename $NBD` +if ! timeout 60 sh -c "while ! [ -e /sys/block/$NBD_DEV/pid ]; do sleep 1; done"; then + echo "Couldn't connect $NBD" + exit 1 +fi # Mount the instance mount $NBD $ROOTFS -o offset=32256 -t ext4 @@ -250,13 +255,13 @@ iface lo inet loopback auto eth0 iface eth0 inet static - address $CONTAINER_IP - netmask $CONTAINER_NETMASK - gateway $CONTAINER_GATEWAY + address $GUEST_IP + netmask $GUEST_NETMASK + gateway $GUEST_GATEWAY EOF # User configuration for the instance -chroot $ROOTFS groupadd libvirtd +chroot $ROOTFS groupadd libvirtd || true chroot $ROOTFS useradd stack -s /bin/bash -d $DEST -G libvirtd cp -pr $TOOLS_DIR/.. $ROOTFS/$DEST/devstack echo "root:$ROOT_PASSWORD" | chroot $ROOTFS chpasswd @@ -280,6 +285,15 @@ if [ "$COPYENV" = "1" ]; then cp_it ~/.bashrc $ROOTFS/$DEST/.bashrc fi +# pre-cache uec images +for image_url in ${IMAGE_URLS//,/ }; do + IMAGE_FNAME=`basename "$image_url"` + if [ ! -f $IMAGES_DIR/$IMAGE_FNAME ]; then + wget -c $image_url -O $IMAGES_DIR/$IMAGE_FNAME + fi + cp $IMAGES_DIR/$IMAGE_FNAME $ROOTFS/$DEST/devstack/files +done + # Configure the runner RUN_SH=$ROOTFS/$DEST/run.sh cat > $RUN_SH < $RC_LOCAL < $LXC_CONF < $RUN_SH <