merge origin/master
commit
16b6efab4c
@ -1,3 +1,5 @@
|
||||
proto
|
||||
*~
|
||||
*.log
|
||||
src
|
||||
localrc
|
||||
|
@ -1,18 +0,0 @@
|
||||
# a collection of packages that speed up installation as they are dependencies
|
||||
# of packages we can't install during bootstraping (rabbitmq-server,
|
||||
# mysql-server, libvirt-bin)
|
||||
#
|
||||
# NOTE: only add packages to this file that aren't needed directly
|
||||
mysql-common
|
||||
mysql-client-5.1
|
||||
erlang-base
|
||||
erlang-ssl
|
||||
erlang-nox
|
||||
erlang-inets
|
||||
erlang-mnesia
|
||||
libhtml-template-perl
|
||||
gettext-base
|
||||
libavahi-client3
|
||||
libxml2-utils
|
||||
libpciaccess0
|
||||
libparted0debian1
|
@ -0,0 +1,17 @@
|
||||
curl
|
||||
gcc
|
||||
memcached # NOPRIME
|
||||
python-configobj
|
||||
python-coverage
|
||||
python-dev
|
||||
python-eventlet
|
||||
python-greenlet
|
||||
python-netifaces
|
||||
python-nose
|
||||
python-pastedeploy
|
||||
python-setuptools
|
||||
python-simplejson
|
||||
python-webob
|
||||
python-xattr
|
||||
sqlite3
|
||||
xfsprogs
|
@ -0,0 +1,127 @@
|
||||
#######
|
||||
# EC2 #
|
||||
#######
|
||||
|
||||
[composite:ec2]
|
||||
use = egg:Paste#urlmap
|
||||
/: ec2versions
|
||||
/services/Cloud: ec2cloud
|
||||
/services/Admin: ec2admin
|
||||
/latest: ec2metadata
|
||||
/2007-01-19: ec2metadata
|
||||
/2007-03-01: ec2metadata
|
||||
/2007-08-29: ec2metadata
|
||||
/2007-10-10: ec2metadata
|
||||
/2007-12-15: ec2metadata
|
||||
/2008-02-01: ec2metadata
|
||||
/2008-09-01: ec2metadata
|
||||
/2009-04-04: ec2metadata
|
||||
/1.0: ec2metadata
|
||||
|
||||
[pipeline:ec2cloud]
|
||||
pipeline = logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor
|
||||
|
||||
[pipeline:ec2admin]
|
||||
pipeline = logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor
|
||||
|
||||
[pipeline:ec2metadata]
|
||||
pipeline = logrequest ec2md
|
||||
|
||||
[pipeline:ec2versions]
|
||||
pipeline = logrequest ec2ver
|
||||
|
||||
[filter:logrequest]
|
||||
paste.filter_factory = nova.api.ec2:RequestLogging.factory
|
||||
|
||||
[filter:ec2lockout]
|
||||
paste.filter_factory = nova.api.ec2:Lockout.factory
|
||||
|
||||
[filter:totoken]
|
||||
paste.filter_factory = keystone.middleware.ec2_token:EC2Token.factory
|
||||
|
||||
[filter:ec2noauth]
|
||||
paste.filter_factory = nova.api.ec2:NoAuth.factory
|
||||
|
||||
[filter:authenticate]
|
||||
paste.filter_factory = nova.api.ec2:Authenticate.factory
|
||||
|
||||
[filter:cloudrequest]
|
||||
controller = nova.api.ec2.cloud.CloudController
|
||||
paste.filter_factory = nova.api.ec2:Requestify.factory
|
||||
|
||||
[filter:adminrequest]
|
||||
controller = nova.api.ec2.admin.AdminController
|
||||
paste.filter_factory = nova.api.ec2:Requestify.factory
|
||||
|
||||
[filter:authorizer]
|
||||
paste.filter_factory = nova.api.ec2:Authorizer.factory
|
||||
|
||||
[app:ec2executor]
|
||||
paste.app_factory = nova.api.ec2:Executor.factory
|
||||
|
||||
[app:ec2ver]
|
||||
paste.app_factory = nova.api.ec2:Versions.factory
|
||||
|
||||
[app:ec2md]
|
||||
paste.app_factory = nova.api.ec2.metadatarequesthandler:MetadataRequestHandler.factory
|
||||
|
||||
#############
|
||||
# Openstack #
|
||||
#############
|
||||
|
||||
[composite:osapi]
|
||||
use = egg:Paste#urlmap
|
||||
/: osversions
|
||||
/v1.0: openstackapi10
|
||||
/v1.1: openstackapi11
|
||||
|
||||
[pipeline:openstackapi10]
|
||||
pipeline = faultwrap authtoken keystonecontext ratelimit osapiapp10
|
||||
|
||||
[pipeline:openstackapi11]
|
||||
pipeline = faultwrap authtoken keystonecontext ratelimit extensions osapiapp11
|
||||
|
||||
[filter:faultwrap]
|
||||
paste.filter_factory = nova.api.openstack:FaultWrapper.factory
|
||||
|
||||
[filter:auth]
|
||||
paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory
|
||||
|
||||
[filter:noauth]
|
||||
paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
|
||||
|
||||
[filter:ratelimit]
|
||||
paste.filter_factory = nova.api.openstack.limits:RateLimitingMiddleware.factory
|
||||
|
||||
[filter:extensions]
|
||||
paste.filter_factory = nova.api.openstack.extensions:ExtensionMiddleware.factory
|
||||
|
||||
[app:osapiapp10]
|
||||
paste.app_factory = nova.api.openstack:APIRouterV10.factory
|
||||
|
||||
[app:osapiapp11]
|
||||
paste.app_factory = nova.api.openstack:APIRouterV11.factory
|
||||
|
||||
[pipeline:osversions]
|
||||
pipeline = faultwrap osversionapp
|
||||
|
||||
[app:osversionapp]
|
||||
paste.app_factory = nova.api.openstack.versions:Versions.factory
|
||||
|
||||
##########
|
||||
# Shared #
|
||||
##########
|
||||
|
||||
[filter:keystonecontext]
|
||||
paste.filter_factory = keystone.middleware.nova_keystone_context:NovaKeystoneContext.factory
|
||||
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystone.middleware.auth_token:filter_factory
|
||||
service_protocol = http
|
||||
service_host = 127.0.0.1
|
||||
service_port = 5000
|
||||
auth_host = 127.0.0.1
|
||||
auth_port = 35357
|
||||
auth_protocol = http
|
||||
auth_uri = http://127.0.0.1:5000/
|
||||
admin_token = %SERVICE_TOKEN%
|
@ -0,0 +1,20 @@
|
||||
[DEFAULT]
|
||||
devices = %NODE_PATH%/node
|
||||
mount_check = false
|
||||
bind_port = %BIND_PORT%
|
||||
user = %USER%
|
||||
log_facility = LOG_LOCAL%LOG_FACILITY%
|
||||
swift_dir = %SWIFT_CONFIG_LOCATION%
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = account-server
|
||||
|
||||
[app:account-server]
|
||||
use = egg:swift#account
|
||||
|
||||
[account-replicator]
|
||||
vm_test_mode = yes
|
||||
|
||||
[account-auditor]
|
||||
|
||||
[account-reaper]
|
@ -0,0 +1,22 @@
|
||||
[DEFAULT]
|
||||
devices = %NODE_PATH%/node
|
||||
mount_check = false
|
||||
bind_port = %BIND_PORT%
|
||||
user = %USER%
|
||||
log_facility = LOG_LOCAL%LOG_FACILITY%
|
||||
swift_dir = %SWIFT_CONFIG_LOCATION%
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = container-server
|
||||
|
||||
[app:container-server]
|
||||
use = egg:swift#container
|
||||
|
||||
[container-replicator]
|
||||
vm_test_mode = yes
|
||||
|
||||
[container-updater]
|
||||
|
||||
[container-auditor]
|
||||
|
||||
[container-sync]
|
@ -0,0 +1,20 @@
|
||||
[DEFAULT]
|
||||
devices = %NODE_PATH%/node
|
||||
mount_check = false
|
||||
bind_port = %BIND_PORT%
|
||||
user = %USER%
|
||||
log_facility = LOG_LOCAL%LOG_FACILITY%
|
||||
swift_dir = %SWIFT_CONFIG_LOCATION%
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = object-server
|
||||
|
||||
[app:object-server]
|
||||
use = egg:swift#object
|
||||
|
||||
[object-replicator]
|
||||
vm_test_mode = yes
|
||||
|
||||
[object-updater]
|
||||
|
||||
[object-auditor]
|
@ -0,0 +1,32 @@
|
||||
[DEFAULT]
|
||||
bind_port = 8080
|
||||
user = %USER%
|
||||
log_facility = LOG_LOCAL1
|
||||
swift_dir = %SWIFT_CONFIG_LOCATION%
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = healthcheck cache %AUTH_SERVER% proxy-server
|
||||
|
||||
[app:proxy-server]
|
||||
use = egg:swift#proxy
|
||||
allow_account_management = true
|
||||
account_autocreate = true
|
||||
|
||||
[filter:keystone]
|
||||
use = egg:swiftkeystone2#keystone2
|
||||
keystone_admin_token = %SERVICE_TOKEN%
|
||||
keystone_url = http://localhost:35357/v2.0
|
||||
|
||||
[filter:tempauth]
|
||||
use = egg:swift#tempauth
|
||||
user_admin_admin = admin .admin .reseller_admin
|
||||
user_test_tester = testing .admin
|
||||
user_test2_tester2 = testing2 .admin
|
||||
user_test_tester3 = testing3
|
||||
bind_ip = 0.0.0.0
|
||||
|
||||
[filter:healthcheck]
|
||||
use = egg:swift#healthcheck
|
||||
|
||||
[filter:cache]
|
||||
use = egg:swift#memcache
|
@ -0,0 +1,79 @@
|
||||
uid = %USER%
|
||||
gid = %GROUP%
|
||||
log file = /var/log/rsyncd.log
|
||||
pid file = /var/run/rsyncd.pid
|
||||
address = 127.0.0.1
|
||||
|
||||
[account6012]
|
||||
max connections = 25
|
||||
path = %SWIFT_DATA_LOCATION%/1/node/
|
||||
read only = false
|
||||
lock file = /var/lock/account6012.lock
|
||||
|
||||
[account6022]
|
||||
max connections = 25
|
||||
path = %SWIFT_DATA_LOCATION%/2/node/
|
||||
read only = false
|
||||
lock file = /var/lock/account6022.lock
|
||||
|
||||
[account6032]
|
||||
max connections = 25
|
||||
path = %SWIFT_DATA_LOCATION%/3/node/
|
||||
read only = false
|
||||
lock file = /var/lock/account6032.lock
|
||||
|
||||
[account6042]
|
||||
max connections = 25
|
||||
path = %SWIFT_DATA_LOCATION%/4/node/
|
||||
read only = false
|
||||
lock file = /var/lock/account6042.lock
|
||||
|
||||
|
||||
[container6011]
|
||||
max connections = 25
|
||||
path = %SWIFT_DATA_LOCATION%/1/node/
|
||||
read only = false
|
||||
lock file = /var/lock/container6011.lock
|
||||
|
||||
[container6021]
|
||||
max connections = 25
|
||||
path = %SWIFT_DATA_LOCATION%/2/node/
|
||||
read only = false
|
||||
lock file = /var/lock/container6021.lock
|
||||
|
||||
[container6031]
|
||||
max connections = 25
|
||||
path = %SWIFT_DATA_LOCATION%/3/node/
|
||||
read only = false
|
||||
lock file = /var/lock/container6031.lock
|
||||
|
||||
[container6041]
|
||||
max connections = 25
|
||||
path = %SWIFT_DATA_LOCATION%/4/node/
|
||||
read only = false
|
||||
lock file = /var/lock/container6041.lock
|
||||
|
||||
|
||||
[object6010]
|
||||
max connections = 25
|
||||
path = %SWIFT_DATA_LOCATION%/1/node/
|
||||
read only = false
|
||||
lock file = /var/lock/object6010.lock
|
||||
|
||||
[object6020]
|
||||
max connections = 25
|
||||
path = %SWIFT_DATA_LOCATION%/2/node/
|
||||
read only = false
|
||||
lock file = /var/lock/object6020.lock
|
||||
|
||||
[object6030]
|
||||
max connections = 25
|
||||
path = %SWIFT_DATA_LOCATION%/3/node/
|
||||
read only = false
|
||||
lock file = /var/lock/object6030.lock
|
||||
|
||||
[object6040]
|
||||
max connections = 25
|
||||
path = %SWIFT_DATA_LOCATION%/4/node/
|
||||
read only = false
|
||||
lock file = /var/lock/object6040.lock
|
@ -0,0 +1,26 @@
|
||||
#!/bin/bash
|
||||
|
||||
cd %SWIFT_CONFIG_LOCATION%
|
||||
|
||||
rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
|
||||
|
||||
swift-ring-builder object.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1
|
||||
swift-ring-builder object.builder add z1-127.0.0.1:6010/sdb1 1
|
||||
swift-ring-builder object.builder add z2-127.0.0.1:6020/sdb2 1
|
||||
swift-ring-builder object.builder add z3-127.0.0.1:6030/sdb3 1
|
||||
swift-ring-builder object.builder add z4-127.0.0.1:6040/sdb4 1
|
||||
swift-ring-builder object.builder rebalance
|
||||
|
||||
swift-ring-builder container.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1
|
||||
swift-ring-builder container.builder add z1-127.0.0.1:6011/sdb1 1
|
||||
swift-ring-builder container.builder add z2-127.0.0.1:6021/sdb2 1
|
||||
swift-ring-builder container.builder add z3-127.0.0.1:6031/sdb3 1
|
||||
swift-ring-builder container.builder add z4-127.0.0.1:6041/sdb4 1
|
||||
swift-ring-builder container.builder rebalance
|
||||
|
||||
swift-ring-builder account.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1
|
||||
swift-ring-builder account.builder add z1-127.0.0.1:6012/sdb1 1
|
||||
swift-ring-builder account.builder add z2-127.0.0.1:6022/sdb2 1
|
||||
swift-ring-builder account.builder add z3-127.0.0.1:6032/sdb3 1
|
||||
swift-ring-builder account.builder add z4-127.0.0.1:6042/sdb4 1
|
||||
swift-ring-builder account.builder rebalance
|
@ -0,0 +1,3 @@
|
||||
#!/bin/bash
|
||||
|
||||
swift-init all restart
|
@ -0,0 +1,3 @@
|
||||
[swift-hash]
|
||||
# random unique string that can never change (DO NOT LOSE)
|
||||
swift_hash_path_suffix = %SWIFT_HASH%
|
@ -1,402 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# exit on error to stop unexpected errors
|
||||
set -o errexit
|
||||
|
||||
# Make sure that we have the proper version of ubuntu
|
||||
UBUNTU_VERSION=`cat /etc/lsb-release | grep CODENAME | sed 's/.*=//g'`
|
||||
if [ ! "oneiric" = "$UBUNTU_VERSION" ]; then
|
||||
if [ ! "natty" = "$UBUNTU_VERSION" ]; then
|
||||
echo "This script only works with oneiric and natty"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Echo commands
|
||||
set -o xtrace
|
||||
|
||||
# Keep track of the current directory
|
||||
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
|
||||
TOP_DIR=$TOOLS_DIR/..
|
||||
|
||||
# Where to store files and instances
|
||||
KVMSTACK_DIR=${KVMSTACK_DIR:-/opt/kvmstack}
|
||||
|
||||
# Where to store images
|
||||
IMAGES_DIR=$KVMSTACK_DIR/images
|
||||
|
||||
# Create images dir
|
||||
mkdir -p $IMAGES_DIR
|
||||
|
||||
# Move to top devstack dir
|
||||
cd $TOP_DIR
|
||||
|
||||
# Abort if localrc is not set
|
||||
if [ ! -e ./localrc ]; then
|
||||
echo "You must have a localrc with ALL necessary passwords defined before proceeding."
|
||||
echo "See stack.sh for required passwords."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Source params
|
||||
source ./stackrc
|
||||
|
||||
# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD``
|
||||
ROOT_PASSWORD=${ADMIN_PASSWORD:-password}
|
||||
|
||||
|
||||
# Base image (natty by default)
|
||||
DIST_NAME=${DIST_NAME:-natty}
|
||||
IMAGE_FNAME=$DIST_NAME.raw
|
||||
|
||||
# Name of our instance, used by libvirt
|
||||
GUEST_NAME=${GUEST_NAME:-kvmstack}
|
||||
|
||||
# Original version of built image
|
||||
BASE_IMAGE=$KVMSTACK_DIR/images/$DIST_NAME.raw
|
||||
|
||||
# Copy of base image, which we pre-install with tasty treats
|
||||
VM_IMAGE=$IMAGES_DIR/$DIST_NAME.$GUEST_NAME.raw
|
||||
|
||||
# Mop up after previous runs
|
||||
virsh destroy $GUEST_NAME || true
|
||||
|
||||
# Where this vm is stored
|
||||
VM_DIR=$KVMSTACK_DIR/instances/$GUEST_NAME
|
||||
|
||||
# Create vm dir
|
||||
mkdir -p $VM_DIR
|
||||
|
||||
# Mount point into copied base image
|
||||
COPY_DIR=$VM_DIR/copy
|
||||
mkdir -p $COPY_DIR
|
||||
|
||||
# Create the base image if it does not yet exist
|
||||
if [ ! -e $IMAGES_DIR/$IMAGE_FNAME ]; then
|
||||
cd $TOOLS_DIR
|
||||
./make_image.sh -m -r 5000 $DIST_NAME raw
|
||||
mv $DIST_NAME.raw $BASE_IMAGE
|
||||
cd $TOP_DIR
|
||||
fi
|
||||
|
||||
# Create a copy of the base image
|
||||
if [ ! -e $VM_IMAGE ]; then
|
||||
cp -p $BASE_IMAGE $VM_IMAGE
|
||||
fi
|
||||
|
||||
# Unmount the copied base image
|
||||
function unmount_images() {
|
||||
# unmount the filesystem
|
||||
while df | grep -q $COPY_DIR; do
|
||||
umount $COPY_DIR || echo 'ok'
|
||||
sleep 1
|
||||
done
|
||||
}
|
||||
|
||||
# Unmount from failed runs
|
||||
unmount_images
|
||||
|
||||
# Ctrl-c catcher
|
||||
function kill_unmount() {
|
||||
unmount_images
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Install deps if needed
|
||||
dpkg -l kvm libvirt-bin kpartx || apt-get install -y --force-yes kvm libvirt-bin kpartx
|
||||
|
||||
# Let Ctrl-c kill tail and exit
|
||||
trap kill_unmount SIGINT
|
||||
|
||||
# Where Openstack code will live in image
|
||||
DEST=${DEST:-/opt/stack}
|
||||
|
||||
# Mount the file system
|
||||
mount -o loop,offset=32256 $VM_IMAGE $COPY_DIR
|
||||
|
||||
# git clone only if directory doesn't exist already. Since ``DEST`` might not
|
||||
# be owned by the installation user, we create the directory and change the
|
||||
# ownership to the proper user.
|
||||
function git_clone {
|
||||
if [ ! -d $2 ]; then
|
||||
sudo mkdir $2
|
||||
sudo chown `whoami` $2
|
||||
git clone $1 $2
|
||||
cd $2
|
||||
# This checkout syntax works for both branches and tags
|
||||
git checkout $3
|
||||
fi
|
||||
}
|
||||
|
||||
# Make sure that base requirements are installed
|
||||
cp /etc/resolv.conf $COPY_DIR/etc/resolv.conf
|
||||
chroot $COPY_DIR apt-get update
|
||||
chroot $COPY_DIR apt-get install -y --force-yes `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server)"`
|
||||
chroot $COPY_DIR apt-get install -y --download-only rabbitmq-server libvirt-bin mysql-server
|
||||
chroot $COPY_DIR pip install `cat files/pips/*`
|
||||
|
||||
# Clean out code repos if directed to do so
|
||||
if [ "$CLEAN" = "1" ]; then
|
||||
rm -rf $COPY_DIR/$DEST
|
||||
fi
|
||||
|
||||
# Cache openstack code
|
||||
mkdir -p $COPY_DIR/$DEST
|
||||
git_clone $NOVA_REPO $COPY_DIR/$DEST/nova $NOVA_BRANCH
|
||||
git_clone $GLANCE_REPO $COPY_DIR/$DEST/glance $GLANCE_BRANCH
|
||||
git_clone $KEYSTONE_REPO $COPY_DIR/$DESTkeystone $KEYSTONE_BRANCH
|
||||
git_clone $NOVNC_REPO $COPY_DIR/$DEST/noVNC $NOVNC_BRANCH
|
||||
git_clone $HORIZON_REPO $COPY_DIR/$DEST/horizon $HORIZON_BRANCH $HORIZON_TAG
|
||||
git_clone $NOVACLIENT_REPO $COPY_DIR/$DEST/python-novaclient $NOVACLIENT_BRANCH
|
||||
git_clone $OPENSTACKX_REPO $COPY_DIR/$DEST/openstackx $OPENSTACKX_BRANCH
|
||||
git_clone $KEYSTONE_REPO $COPY_DIR/$DEST/keystone $KEYSTONE_BRANCH
|
||||
git_clone $NOVNC_REPO $COPY_DIR/$DEST/noVNC $NOVNC_BRANCH
|
||||
|
||||
# Back to devstack
|
||||
cd $TOP_DIR
|
||||
|
||||
# Unmount the filesystems
|
||||
unmount_images
|
||||
|
||||
# Network configuration variables
|
||||
BRIDGE=${BRIDGE:-br0}
|
||||
GUEST_IP=${GUEST_IP:-192.168.1.50}
|
||||
GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24}
|
||||
GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0}
|
||||
GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.1.1}
|
||||
GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $(echo $GUEST_IP | sed "s/.*\.//")`"}
|
||||
GUEST_RAM=${GUEST_RAM:-1524288}
|
||||
GUEST_CORES=${GUEST_CORES:-1}
|
||||
|
||||
# libvirt.xml configuration
|
||||
LIBVIRT_XML=$VM_DIR/libvirt.xml
|
||||
cat > $LIBVIRT_XML <<EOF
|
||||
<domain type='kvm'>
|
||||
<name>$GUEST_NAME</name>
|
||||
<memory>$GUEST_RAM</memory>
|
||||
<os>
|
||||
<type>hvm</type>
|
||||
<bootmenu enable='yes'/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<vcpu>$GUEST_CORES</vcpu>
|
||||
<devices>
|
||||
<disk type='file'>
|
||||
<driver type='qcow2'/>
|
||||
<source file='$VM_DIR/disk'/>
|
||||
<target dev='vda' bus='virtio'/>
|
||||
</disk>
|
||||
|
||||
<interface type='bridge'>
|
||||
<source bridge='$BRIDGE'/>
|
||||
<mac address='$GUEST_MAC'/>
|
||||
</interface>
|
||||
|
||||
<!-- The order is significant here. File must be defined first -->
|
||||
<serial type="file">
|
||||
<source path='$VM_DIR/console.log'/>
|
||||
<target port='1'/>
|
||||
</serial>
|
||||
|
||||
<console type='pty' tty='/dev/pts/2'>
|
||||
<source path='/dev/pts/2'/>
|
||||
<target port='0'/>
|
||||
</console>
|
||||
|
||||
<serial type='pty'>
|
||||
<source path='/dev/pts/2'/>
|
||||
<target port='0'/>
|
||||
</serial>
|
||||
|
||||
<graphics type='vnc' port='-1' autoport='yes' keymap='en-us' listen='0.0.0.0'/>
|
||||
</devices>
|
||||
</domain>
|
||||
EOF
|
||||
|
||||
# Mount point for instance fs
|
||||
ROOTFS=$VM_DIR/root
|
||||
mkdir -p $ROOTFS
|
||||
|
||||
# Make sure we have nbd-ness
|
||||
modprobe nbd max_part=63
|
||||
|
||||
# Which NBD device to use?
|
||||
NBD=${NBD:-/dev/nbd5}
|
||||
|
||||
# Clean up from previous runs
|
||||
umount $ROOTFS || echo 'ok'
|
||||
qemu-nbd -d $NBD || echo 'ok'
|
||||
|
||||
# Clean up old runs
|
||||
cd $VM_DIR
|
||||
rm -f $VM_DIR/disk
|
||||
|
||||
# Create our instance fs
|
||||
qemu-img create -f qcow2 -b $VM_IMAGE disk
|
||||
|
||||
# Connect our nbd and wait till it is mountable
|
||||
qemu-nbd -c $NBD disk
|
||||
NBD_DEV=`basename $NBD`
|
||||
if ! timeout 60 sh -c "while ! [ -e /sys/block/$NBD_DEV/pid ]; do sleep 1; done"; then
|
||||
echo "Couldn't connect $NBD"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Mount the instance
|
||||
mount $NBD $ROOTFS -o offset=32256 -t ext4
|
||||
|
||||
# Configure instance network
|
||||
INTERFACES=$ROOTFS/etc/network/interfaces
|
||||
cat > $INTERFACES <<EOF
|
||||
auto lo
|
||||
iface lo inet loopback
|
||||
|
||||
auto eth0
|
||||
iface eth0 inet static
|
||||
address $GUEST_IP
|
||||
netmask $GUEST_NETMASK
|
||||
gateway $GUEST_GATEWAY
|
||||
EOF
|
||||
|
||||
# User configuration for the instance
|
||||
chroot $ROOTFS groupadd libvirtd || true
|
||||
chroot $ROOTFS useradd stack -s /bin/bash -d $DEST -G libvirtd
|
||||
cp -pr $TOOLS_DIR/.. $ROOTFS/$DEST/devstack
|
||||
echo "root:$ROOT_PASSWORD" | chroot $ROOTFS chpasswd
|
||||
echo "stack:pass" | chroot $ROOTFS chpasswd
|
||||
echo "stack ALL=(ALL) NOPASSWD: ALL" >> $ROOTFS/etc/sudoers
|
||||
|
||||
# Gracefully cp only if source file/dir exists
|
||||
function cp_it {
|
||||
if [ -e $1 ] || [ -d $1 ]; then
|
||||
cp -pRL $1 $2
|
||||
fi
|
||||
}
|
||||
|
||||
# Copy over your ssh keys and env if desired
|
||||
COPYENV=${COPYENV:-1}
|
||||
if [ "$COPYENV" = "1" ]; then
|
||||
cp_it ~/.ssh $ROOTFS/$DEST/.ssh
|
||||
cp_it ~/.ssh/id_rsa.pub $ROOTFS/$DEST/.ssh/authorized_keys
|
||||
cp_it ~/.gitconfig $ROOTFS/$DEST/.gitconfig
|
||||
cp_it ~/.vimrc $ROOTFS/$DEST/.vimrc
|
||||
cp_it ~/.bashrc $ROOTFS/$DEST/.bashrc
|
||||
fi
|
||||
|
||||
# pre-cache uec images
|
||||
for image_url in ${IMAGE_URLS//,/ }; do
|
||||
IMAGE_FNAME=`basename "$image_url"`
|
||||
if [ ! -f $IMAGES_DIR/$IMAGE_FNAME ]; then
|
||||
wget -c $image_url -O $IMAGES_DIR/$IMAGE_FNAME
|
||||
fi
|
||||
cp $IMAGES_DIR/$IMAGE_FNAME $ROOTFS/$DEST/devstack/files
|
||||
done
|
||||
|
||||
# Configure the runner
|
||||
RUN_SH=$ROOTFS/$DEST/run.sh
|
||||
cat > $RUN_SH <<EOF
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Kill any existing screens
|
||||
killall screen
|
||||
|
||||
# Install and run stack.sh
|
||||
sudo apt-get update
|
||||
sudo apt-get -y --force-yes install git-core vim-nox sudo
|
||||
if [ ! -d "$DEST/devstack" ]; then
|
||||
git clone git://github.com/cloudbuilders/devstack.git $DEST/devstack
|
||||
fi
|
||||
cd $DEST/devstack && $STACKSH_PARAMS FORCE=yes ./stack.sh > /$DEST/run.sh.log
|
||||
echo >> /$DEST/run.sh.log
|
||||
echo >> /$DEST/run.sh.log
|
||||
echo "All done! Time to start clicking." >> /$DEST/run.sh.log
|
||||
cat $DEST/run.sh.log
|
||||
EOF
|
||||
chmod 755 $RUN_SH
|
||||
|
||||
# Make runner launch on boot
|
||||
RC_LOCAL=$ROOTFS/etc/init.d/local
|
||||
cat > $RC_LOCAL <<EOF
|
||||
#!/bin/sh -e
|
||||
# Reboot if this is our first run to enable console log on $DIST_NAME :(
|
||||
if [ ! -e /root/firstlaunch ]; then
|
||||
touch /root/firstlaunch
|
||||
reboot -f
|
||||
exit 0
|
||||
fi
|
||||
su -c "$DEST/run.sh" stack
|
||||
EOF
|
||||
chmod +x $RC_LOCAL
|
||||
chroot $ROOTFS sudo update-rc.d local defaults 80
|
||||
|
||||
# Make our ip address hostnames look nice at the command prompt
|
||||
echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $ROOTFS/$DEST/.bashrc
|
||||
echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $ROOTFS/etc/profile
|
||||
|
||||
# Give stack ownership over $DEST so it may do the work needed
|
||||
chroot $ROOTFS chown -R stack $DEST
|
||||
|
||||
# Change boot params so that we get a console log
|
||||
sudo sed -e "s/quiet splash/splash console=ttyS0 console=ttyS1,19200n8/g" -i $ROOTFS/boot/grub/menu.lst
|
||||
sudo sed -e "s/^hiddenmenu//g" -i $ROOTFS/boot/grub/menu.lst
|
||||
|
||||
# Set the hostname
|
||||
echo $GUEST_NAME > $ROOTFS/etc/hostname
|
||||
|
||||
# We need the hostname to resolve for rabbit to launch
|
||||
if ! grep -q $GUEST_NAME $ROOTFS/etc/hosts; then
|
||||
echo "$GUEST_IP $GUEST_NAME" >> $ROOTFS/etc/hosts
|
||||
fi
|
||||
|
||||
# Unmount
|
||||
umount $ROOTFS || echo 'ok'
|
||||
qemu-nbd -d $NBD
|
||||
|
||||
# Create the instance
|
||||
cd $VM_DIR && virsh create libvirt.xml
|
||||
|
||||
# Tail the console log till we are done
|
||||
WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1}
|
||||
if [ "$WAIT_TILL_LAUNCH" = "1" ]; then
|
||||
# Done creating the container, let's tail the log
|
||||
echo
|
||||
echo "============================================================="
|
||||
echo " -- YAY! --"
|
||||
echo "============================================================="
|
||||
echo
|
||||
echo "We're done launching the vm, about to start tailing the"
|
||||
echo "stack.sh log. It will take a second or two to start."
|
||||
echo
|
||||
echo "Just CTRL-C at any time to stop tailing."
|
||||
|
||||
while [ ! -e "$VM_DIR/console.log" ]; do
|
||||
sleep 1
|
||||
done
|
||||
|
||||
tail -F $VM_DIR/console.log &
|
||||
|
||||
TAIL_PID=$!
|
||||
|
||||
function kill_tail() {
|
||||
kill $TAIL_PID
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Let Ctrl-c kill tail and exit
|
||||
trap kill_tail SIGINT
|
||||
|
||||
echo "Waiting stack.sh to finish..."
|
||||
while ! cat $VM_DIR/console.log | grep -q 'All done' ; do
|
||||
sleep 5
|
||||
done
|
||||
|
||||
kill $TAIL_PID
|
||||
|
||||
if grep -q "stack.sh failed" $VM_DIR/console.log; then
|
||||
exit 1
|
||||
fi
|
||||
echo ""
|
||||
echo "Finished - Zip-a-dee Doo-dah!"
|
||||
fi
|
@ -0,0 +1,248 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Make sure that we have the proper version of ubuntu (only works on oneiric)
|
||||
if ! egrep -q "oneiric" /etc/lsb-release; then
|
||||
echo "This script only works with ubuntu oneiric."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Keep track of the current directory
|
||||
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
|
||||
TOP_DIR=`cd $TOOLS_DIR/..; pwd`
|
||||
|
||||
cd $TOP_DIR
|
||||
|
||||
# Source params
|
||||
source ./stackrc
|
||||
|
||||
# Ubuntu distro to install
|
||||
DIST_NAME=${DIST_NAME:-oneiric}
|
||||
|
||||
# Configure how large the VM should be
|
||||
GUEST_SIZE=${GUEST_SIZE:-10G}
|
||||
|
||||
# exit on error to stop unexpected errors
|
||||
set -o errexit
|
||||
set -o xtrace
|
||||
|
||||
# Abort if localrc is not set
|
||||
if [ ! -e $TOP_DIR/localrc ]; then
|
||||
echo "You must have a localrc with ALL necessary passwords defined before proceeding."
|
||||
echo "See stack.sh for required passwords."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Install deps if needed
|
||||
DEPS="kvm libvirt-bin kpartx cloud-utils"
|
||||
dpkg -l $DEPS || apt-get install -y --force-yes $DEPS
|
||||
|
||||
# Where to store files and instances
|
||||
WORK_DIR=${WORK_DIR:-/opt/kvmstack}
|
||||
|
||||
# Where to store images
|
||||
image_dir=$WORK_DIR/images/$DIST_NAME
|
||||
mkdir -p $image_dir
|
||||
|
||||
# Original version of built image
|
||||
uec_url=http://uec-images.ubuntu.com/$DIST_NAME/current/$DIST_NAME-server-cloudimg-amd64.tar.gz
|
||||
tarball=$image_dir/$(basename $uec_url)
|
||||
|
||||
# download the base uec image if we haven't already
|
||||
if [ ! -f $tarball ]; then
|
||||
curl $uec_url -o $tarball
|
||||
(cd $image_dir && tar -Sxvzf $tarball)
|
||||
resize-part-image $image_dir/*.img $GUEST_SIZE $image_dir/disk
|
||||
cp $image_dir/*-vmlinuz-virtual $image_dir/kernel
|
||||
fi
|
||||
|
||||
|
||||
# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD``
|
||||
ROOT_PASSWORD=${ADMIN_PASSWORD:-password}
|
||||
|
||||
# Name of our instance, used by libvirt
|
||||
GUEST_NAME=${GUEST_NAME:-devstack}
|
||||
|
||||
# Mop up after previous runs
|
||||
virsh destroy $GUEST_NAME || true
|
||||
|
||||
# Where this vm is stored
|
||||
vm_dir=$WORK_DIR/instances/$GUEST_NAME
|
||||
|
||||
# Create vm dir and remove old disk
|
||||
mkdir -p $vm_dir
|
||||
rm -f $vm_dir/disk
|
||||
|
||||
# Create a copy of the base image
|
||||
qemu-img create -f qcow2 -b $image_dir/disk $vm_dir/disk
|
||||
|
||||
# Back to devstack
|
||||
cd $TOP_DIR
|
||||
|
||||
GUEST_NETWORK=${GUEST_NETWORK:-1}
|
||||
GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes}
|
||||
GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50}
|
||||
GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24}
|
||||
GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0}
|
||||
GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.$GUEST_NETWORK.1}
|
||||
GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $GUEST_NETWORK`"}
|
||||
GUEST_RAM=${GUEST_RAM:-1524288}
|
||||
GUEST_CORES=${GUEST_CORES:-1}
|
||||
|
||||
# libvirt.xml configuration
|
||||
NET_XML=$vm_dir/net.xml
|
||||
cat > $NET_XML <<EOF
|
||||
<network>
|
||||
<name>devstack-$GUEST_NETWORK</name>
|
||||
<bridge name="stackbr%d" />
|
||||
<forward/>
|
||||
<ip address="$GUEST_GATEWAY" netmask="$GUEST_NETMASK">
|
||||
<dhcp>
|
||||
<range start='192.168.$GUEST_NETWORK.2' end='192.168.$GUEST_NETWORK.127' />
|
||||
</dhcp>
|
||||
</ip>
|
||||
</network>
|
||||
EOF
|
||||
|
||||
if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then
|
||||
virsh net-destroy devstack-$GUEST_NETWORK || true
|
||||
# destroying the network isn't enough to delete the leases
|
||||
rm -f /var/lib/libvirt/dnsmasq/devstack-$GUEST_NETWORK.leases
|
||||
virsh net-create $vm_dir/net.xml
|
||||
fi
|
||||
|
||||
# libvirt.xml configuration
|
||||
LIBVIRT_XML=$vm_dir/libvirt.xml
|
||||
cat > $LIBVIRT_XML <<EOF
|
||||
<domain type='kvm'>
|
||||
<name>$GUEST_NAME</name>
|
||||
<memory>$GUEST_RAM</memory>
|
||||
<os>
|
||||
<type>hvm</type>
|
||||
<kernel>$image_dir/kernel</kernel>
|
||||
<cmdline>root=/dev/vda ro console=ttyS0 init=/usr/lib/cloud-init/uncloud-init ds=nocloud-net;s=http://192.168.$GUEST_NETWORK.1:4567/ ubuntu-pass=ubuntu</cmdline>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
</features>
|
||||
<clock offset='utc'/>
|
||||
<vcpu>$GUEST_CORES</vcpu>
|
||||
<devices>
|
||||
<disk type='file'>
|
||||
<driver type='qcow2'/>
|
||||
<source file='$vm_dir/disk'/>
|
||||
<target dev='vda' bus='virtio'/>
|
||||
</disk>
|
||||
|
||||
<interface type='network'>
|
||||
<source network='devstack-$GUEST_NETWORK'/>
|
||||
</interface>
|
||||
|
||||
<!-- The order is significant here. File must be defined first -->
|
||||
<serial type="file">
|
||||
<source path='$vm_dir/console.log'/>
|
||||
<target port='1'/>
|
||||
</serial>
|
||||
|
||||
<console type='pty' tty='/dev/pts/2'>
|
||||
<source path='/dev/pts/2'/>
|
||||
<target port='0'/>
|
||||
</console>
|
||||
|
||||
<serial type='pty'>
|
||||
<source path='/dev/pts/2'/>
|
||||
<target port='0'/>
|
||||
</serial>
|
||||
|
||||
<graphics type='vnc' port='-1' autoport='yes' keymap='en-us' listen='0.0.0.0'/>
|
||||
</devices>
|
||||
</domain>
|
||||
EOF
|
||||
|
||||
|
||||
rm -rf $vm_dir/uec
|
||||
cp -r $TOOLS_DIR/uec $vm_dir/uec
|
||||
|
||||
# set metadata
|
||||
cat > $vm_dir/uec/meta-data<<EOF
|
||||
hostname: $GUEST_NAME
|
||||
instance-id: i-hop
|
||||
instance-type: m1.ignore
|
||||
local-hostname: $GUEST_NAME.local
|
||||
EOF
|
||||
|
||||
# set metadata
|
||||
cat > $vm_dir/uec/user-data<<EOF
|
||||
#!/bin/bash
|
||||
# hostname needs to resolve for rabbit
|
||||
sed -i "s/127.0.0.1/127.0.0.1 \`hostname\`/" /etc/hosts
|
||||
apt-get update
|
||||
apt-get install git sudo -y
|
||||
git clone https://github.com/cloudbuilders/devstack.git
|
||||
cd devstack
|
||||
git remote set-url origin `cd $TOP_DIR; git remote show origin | grep Fetch | awk '{print $3}'`
|
||||
git fetch
|
||||
git checkout `git rev-parse HEAD`
|
||||
cat > localrc <<LOCAL_EOF
|
||||
ROOTSLEEP=0
|
||||
`cat $TOP_DIR/localrc`
|
||||
LOCAL_EOF
|
||||
./stack.sh
|
||||
EOF
|
||||
|
||||
# (re)start a metadata service
|
||||
(
|
||||
pid=`lsof -iTCP@192.168.$GUEST_NETWORK.1:4567 -n | awk '{print $2}' | tail -1`
|
||||
[ -z "$pid" ] || kill -9 $pid
|
||||
)
|
||||
cd $vm_dir/uec
|
||||
python meta.py 192.168.$GUEST_NETWORK.1:4567 &
|
||||
|
||||
# Create the instance
|
||||
virsh create $vm_dir/libvirt.xml
|
||||
|
||||
# Tail the console log till we are done
|
||||
WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1}
|
||||
if [ "$WAIT_TILL_LAUNCH" = "1" ]; then
|
||||
set +o xtrace
|
||||
# Done creating the container, let's tail the log
|
||||
echo
|
||||
echo "============================================================="
|
||||
echo " -- YAY! --"
|
||||
echo "============================================================="
|
||||
echo
|
||||
echo "We're done launching the vm, about to start tailing the"
|
||||
echo "stack.sh log. It will take a second or two to start."
|
||||
echo
|
||||
echo "Just CTRL-C at any time to stop tailing."
|
||||
|
||||
while [ ! -e "$vm_dir/console.log" ]; do
|
||||
sleep 1
|
||||
done
|
||||
|
||||
tail -F $vm_dir/console.log &
|
||||
|
||||
TAIL_PID=$!
|
||||
|
||||
function kill_tail() {
|
||||
kill $TAIL_PID
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Let Ctrl-c kill tail and exit
|
||||
trap kill_tail SIGINT
|
||||
|
||||
echo "Waiting stack.sh to finish..."
|
||||
while ! egrep -q '^stack.sh (completed|failed)' $vm_dir/console.log ; do
|
||||
sleep 1
|
||||
done
|
||||
|
||||
set -o xtrace
|
||||
|
||||
kill $TAIL_PID
|
||||
|
||||
if ! grep -q "^stack.sh completed in" $vm_dir/console.log; then
|
||||
exit 1
|
||||
fi
|
||||
echo ""
|
||||
echo "Finished - Zip-a-dee Doo-dah!"
|
||||
fi
|
@ -0,0 +1,74 @@
|
||||
#!/usr/bin/env bash

# Configure a raw UEC image for devstack: create a "stack" user inside the
# image (member of libvirtd, passwordless sudo) and copy the invoking user's
# ssh keys and dotfiles into the stack user's home directory.
#
# Usage: $0 <full path to raw uec base image>
# Must run as root (mount, chroot).

# Echo commands
set -o xtrace

# Exit on error to stop unexpected errors
set -o errexit

# Keep track of the current directory
TOOLS_DIR=$(cd "$(dirname "$0")" && pwd)
TOP_DIR=$(cd "$TOOLS_DIR/.."; pwd)

# Change dir to top of devstack
cd "$TOP_DIR"

# Echo usage
usage() {
    echo "Add stack user and keys"
    echo ""
    echo "Usage: $0 [full path to raw uec base image]"
}

# Require an image argument and make sure it is a raw image
if [ -z "$1" ] || ! qemu-img info "$1" | grep -q "file format: raw"; then
    usage
    exit 1
fi

# Mount the image
DEST=/opt/stack
# Derive a unique staging mount point from the image path
STAGING_DIR=/tmp/$(echo "$1" | sed "s/\//_/g").stage.user
mkdir -p "$STAGING_DIR"
# Clean up any leftover mount from a previous failed run
umount "$STAGING_DIR" || true
sleep 1
mount -t ext4 -o loop "$1" "$STAGING_DIR"
mkdir -p "$STAGING_DIR/$DEST"

# Create a stack user that is a member of the libvirtd group so that stack
# is able to interact with libvirt. "|| true" keeps re-runs idempotent.
chroot "$STAGING_DIR" groupadd libvirtd || true
chroot "$STAGING_DIR" useradd stack -s /bin/bash -d "$DEST" -G libvirtd || true

# Add a simple password - pass
echo stack:pass | chroot "$STAGING_DIR" chpasswd

# Configure sudo: enable /etc/sudoers.d includes and install devstack's
# sudoers fragments, substituting in the current user name
grep -q "^#includedir.*/etc/sudoers.d" "$STAGING_DIR/etc/sudoers" ||
    echo "#includedir /etc/sudoers.d" | sudo tee -a "$STAGING_DIR/etc/sudoers"
cp "$TOP_DIR"/files/sudo/* "$STAGING_DIR/etc/sudoers.d/"
sed -e "s,%USER%,$USER,g" -i "$STAGING_DIR"/etc/sudoers.d/*

# and has sudo ability (in the future this should be limited to only what
# stack requires)
echo "stack ALL=(ALL) NOPASSWD: ALL" >> "$STAGING_DIR/etc/sudoers"

# Gracefully cp only if source file/dir exists
function cp_it {
    if [ -e "$1" ] || [ -d "$1" ]; then
        cp -pRL "$1" "$2"
    fi
}

# Copy over your ssh keys and env if desired
cp_it ~/.ssh "$STAGING_DIR/$DEST/.ssh"
cp_it ~/.ssh/id_rsa.pub "$STAGING_DIR/$DEST/.ssh/authorized_keys"
cp_it ~/.gitconfig "$STAGING_DIR/$DEST/.gitconfig"
cp_it ~/.vimrc "$STAGING_DIR/$DEST/.vimrc"
cp_it ~/.bashrc "$STAGING_DIR/$DEST/.bashrc"

# Give stack ownership over $DEST so it may do the work needed
chroot "$STAGING_DIR" chown -R stack "$DEST"

# Unmount
umount "$STAGING_DIR"
|
@ -0,0 +1,29 @@
|
||||
# meta.py: trivial HTTP "metadata" service for devstack guests.
# Serves files from the current working directory at the given address:port.
# NOTE(review): Python 2 code (print statement, BaseHTTPServer module) --
# will not run under Python 3 without porting to http.server.
import sys
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SimpleHTTPServer import SimpleHTTPRequestHandler


def main(host, port, HandlerClass = SimpleHTTPRequestHandler,
         ServerClass = HTTPServer, protocol="HTTP/1.0"):
    """Simple http server that listens on a given address:port.

    host: interface to bind to (e.g. '0.0.0.0')
    port: TCP port to listen on (int)
    Blocks forever in serve_forever(); never returns.
    """

    server_address = (host, port)

    HandlerClass.protocol_version = protocol
    httpd = ServerClass(server_address, HandlerClass)

    # Report the address actually bound before entering the serve loop
    sa = httpd.socket.getsockname()
    print "Serving HTTP on", sa[0], "port", sa[1], "..."
    httpd.serve_forever()

if __name__ == '__main__':
    # Optional single argument: "host:port" or just "host";
    # defaults are host 0.0.0.0 and port 8080.
    if sys.argv[1:]:
        address = sys.argv[1]
    else:
        address = '0.0.0.0'
    if ':' in address:
        host, port = address.split(':')
    else:
        host = address
        port = 8080

    main(host, int(port))
|
@ -0,0 +1,53 @@
|
||||
#!/usr/bin/env bash

# Pre-seed a raw UEC base image with devstack's apt and pip dependencies so
# that subsequent stack.sh runs inside the image are fast.
#
# Usage: $0 <full path to raw uec base image>
# Must run as root (mount, chroot) from the devstack tree.

# Echo commands
set -o xtrace

# Exit on error to stop unexpected errors
set -o errexit

# Keep track of the current directory
TOOLS_DIR=$(cd "$(dirname "$0")" && pwd)
TOP_DIR=$(cd "$TOOLS_DIR/.."; pwd)

# Change dir to top of devstack
cd "$TOP_DIR"

# Echo usage
usage() {
    echo "Cache OpenStack dependencies on a uec image to speed up performance."
    echo ""
    echo "Usage: $0 [full path to raw uec base image]"
}

# Require an image argument and make sure it is a raw image
if [ -z "$1" ] || ! qemu-img info "$1" | grep -q "file format: raw"; then
    usage
    exit 1
fi

# Make sure we are in the correct dir
if [ ! -d files/apts ]; then
    echo "Please run this script from devstack/tools/"
    exit 1
fi

# Mount the image
STAGING_DIR=/tmp/$(echo "$1" | sed "s/\//_/g").stage
mkdir -p "$STAGING_DIR"
# Clean up any leftover mount from a previous failed run
umount "$STAGING_DIR" || true
sleep 1
mount -t ext4 -o loop "$1" "$STAGING_DIR"

# Give the chroot a working DNS config so apt/pip can reach the network
cp /etc/resolv.conf "$STAGING_DIR/etc/resolv.conf"

# Perform caching on the base image to speed up subsequent runs:
#   - packages tagged "# NOPRIME" are only downloaded (stack.sh installs them)
#   - everything else is installed outright (best-effort, hence "|| true")
chroot "$STAGING_DIR" apt-get update
chroot "$STAGING_DIR" apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1`
chroot "$STAGING_DIR" apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` || true
mkdir -p "$STAGING_DIR/var/cache/pip"
PIP_DOWNLOAD_CACHE=/var/cache/pip chroot "$STAGING_DIR" pip install `cat files/pips/*` || true

# Unmount
umount "$STAGING_DIR"
|
@ -0,0 +1,35 @@
|
||||
#!/usr/bin/env bash

# Launch a two-node devstack cluster as domUs: a head node running the shared
# services (glance, keystone, api, mysql, rabbit, dashboard) and one compute
# node, by invoking ./build_domU.sh with per-node environment settings.

# Trace every command as it runs
set -o xtrace

# Head node host, which runs glance, api, keystone
HEAD_PUB_IP=${HEAD_PUB_IP:-192.168.1.57}
HEAD_MGT_IP=${HEAD_MGT_IP:-172.16.100.57}

# Compute node host
COMPUTE_PUB_IP=${COMPUTE_PUB_IP:-192.168.1.58}
COMPUTE_MGT_IP=${COMPUTE_MGT_IP:-172.16.100.58}

# Networking params
FLOATING_RANGE=${FLOATING_RANGE:-192.168.1.196/30}

# Variables common amongst all hosts in the cluster; every service points at
# the head node's management address
COMMON_VARS="$STACKSH_PARAMS MYSQL_HOST=$HEAD_MGT_IP RABBIT_HOST=$HEAD_MGT_IP GLANCE_HOSTPORT=$HEAD_MGT_IP:9292 FLOATING_RANGE=$FLOATING_RANGE"

# Helper to launch containers.
# Args: guest name, public ip, management ip, shutdown flag, extra stack.sh params
build_domU() {
    GUEST_NAME=$1 PUB_IP=$2 MGT_IP=$3 DO_SHUTDOWN=$4 TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $5" ./build_domU.sh
}

# Launch the head node - headnode uses a non-ip domain name,
# because rabbit won't launch with an ip addr hostname :(
build_domU HEADNODE $HEAD_PUB_IP $HEAD_MGT_IP 1 "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit"

# Poll the head node's dashboard until it responds with a login page
until curl -L http://$HEAD_PUB_IP | grep -q username; do
    echo "Waiting for head node ($HEAD_PUB_IP) to start..."
    sleep 5
done

# Build the HA compute host
build_domU $COMPUTE_PUB_IP $COMPUTE_PUB_IP $COMPUTE_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api"
|
Loading…
Reference in New Issue