Merge remote-tracking branch 'upstream/master' into rcb-master

Dean Troyer 2011-10-11 17:57:16 -05:00
commit 336e0001a6
32 changed files with 2643 additions and 0 deletions

3
.gitignore vendored Normal file

@@ -0,0 +1,3 @@
proto
*~
localrc

42
README Normal file

@@ -0,0 +1,42 @@
Tool to quickly deploy openstack dev environments.
# Goals
* To quickly build dev openstack environments on clean natty machines
* To describe working configurations of openstack (which code branches work together? what do config files look like for those branches?)
* To make it easier for developers to dive into openstack so that they can productively contribute without having to understand every part of the system at once
* To make it easy to prototype cross-project features
Be sure to carefully read these scripts before you run them as they install software and may alter your networking configuration.
# To start a dev cloud on your local machine (installing on a dedicated vm is safer!):
./stack.sh
If everything works correctly, you should be able to access openstack endpoints, like:
* Dashboard: http://myhost/
* Keystone: http://myhost:5000/v2.0/
# To start a dev cloud in an lxc container:
./build_lxc.sh
You will need to configure a bridge and network on your host machine (by default br0) before starting build_lxc.sh. A sample host-only network configuration can be found in lxc_network_hostonlyplusnat.sh.
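If you prefer to configure the bridge by hand, a static br0 stanza in /etc/network/interfaces might look like this (addresses illustrative, assuming the host reaches the LAN through eth0):
    auto br0
    iface br0 inet static
        address 192.168.1.10
        netmask 255.255.255.0
        gateway 192.168.1.1
        bridge_ports eth0
Note that bridge_ports requires the bridge-utils package.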
# Customizing
You can override the default environment variables by creating a file named 'localrc'. You will likely need to do this to adjust your networking configuration if you want to access your cloud from a different host.
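For example, a minimal localrc that pins the otherwise-generated passwords and opens the cloud up to your LAN might look like this (values illustrative; all of these variables are read by stack.sh):
    HOST_IP=192.168.1.10
    FLOATING_RANGE=192.168.1.224/27
    MYSQL_PASS=supersecret
    RABBIT_PASSWORD=supersecret
    ADMIN_PASSWORD=supersecret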
# Todo
* Add python-novaclient cli support
* syslog
* allow rabbit connection to be specified via environment variables with sensible defaults
* Add volume support
* Add quantum support
# Future
* idea: move from screen to tmux?
* idea: create a live-cd / vmware preview image using this?

251
build_lxc.sh Executable file

@@ -0,0 +1,251 @@
#!/usr/bin/env bash
# Sanity check
if [ "$EUID" -ne "0" ]; then
echo "This script must be run with root privileges."
exit 1
fi
# Warn users who aren't on natty
if ! grep -q natty /etc/lsb-release; then
echo "WARNING: this script has only been tested on natty"
fi
# Source params
source ./stackrc
# Store cwd
CWD=`pwd`
# Configurable params
BRIDGE=${BRIDGE:-br0}
CONTAINER=${CONTAINER:-STACK}
CONTAINER_IP=${CONTAINER_IP:-192.168.1.50}
CONTAINER_CIDR=${CONTAINER_CIDR:-$CONTAINER_IP/24}
CONTAINER_NETMASK=${CONTAINER_NETMASK:-255.255.255.0}
CONTAINER_GATEWAY=${CONTAINER_GATEWAY:-192.168.1.1}
NAMESERVER=${NAMESERVER:-$CONTAINER_GATEWAY}
COPYENV=${COPYENV:-1}
DEST=${DEST:-/opt/stack}
# Param string to pass to stack.sh. Like "EC2_DMZ_HOST=192.168.1.1 MYSQL_USER=nova"
STACKSH_PARAMS=${STACKSH_PARAMS:-}
# Option to use the version of devstack on which we are currently working
USE_CURRENT_DEVSTACK=${USE_CURRENT_DEVSTACK:-1}
# Install deps
apt-get install -y lxc debootstrap
# Install cgroup-bin from source, since the packaging is buggy and possibly incompatible with our setup
if ! which cgdelete >/dev/null 2>&1; then
apt-get install -y g++ bison flex libpam0g-dev make
wget http://sourceforge.net/projects/libcg/files/libcgroup/v0.37.1/libcgroup-0.37.1.tar.bz2/download -O /tmp/libcgroup-0.37.1.tar.bz2
cd /tmp && bunzip2 libcgroup-0.37.1.tar.bz2 && tar xfv libcgroup-0.37.1.tar
cd libcgroup-0.37.1
./configure
make install
ldconfig
fi
# Create lxc configuration
LXC_CONF=/tmp/$CONTAINER.conf
cat > $LXC_CONF <<EOF
lxc.network.type = veth
lxc.network.link = $BRIDGE
lxc.network.flags = up
lxc.network.ipv4 = $CONTAINER_CIDR
# allow tap/tun devices
lxc.cgroup.devices.allow = c 10:200 rwm
EOF
# Shutdown any existing container
lxc-stop -n $CONTAINER
# This kills zombie containers
if [ -d /cgroup/$CONTAINER ]; then
cgdelete -r cpu,net_cls:$CONTAINER
fi
# git clone only if directory doesn't exist already. Since ``DEST`` might not
# be owned by the installation user, we create the directory and change the
# ownership to the proper user.
function git_clone {
if [ ! -d $2 ]; then
sudo mkdir $2
sudo chown `whoami` $2
git clone $1 $2
cd $2
# This checkout syntax works for both branches and tags
git checkout $3
fi
}
# Location of the base image directory
CACHEDIR=/var/cache/lxc/natty/rootfs-amd64
# Provide option to do totally clean install
if [ "$CLEAR_LXC_CACHE" = "1" ]; then
rm -rf $CACHEDIR
fi
# Warm the base image on first install
if [ ! -f $CACHEDIR/bootstrapped ]; then
# by deleting the container, we force lxc-create to re-bootstrap (lxc is
# lazy and doesn't do anything if a container already exists)
lxc-destroy -n $CONTAINER
# trigger the initial debootstrap
lxc-create -n $CONTAINER -t natty -f $LXC_CONF
chroot $CACHEDIR apt-get update
chroot $CACHEDIR apt-get install -y --force-yes `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server)"`
chroot $CACHEDIR pip install `cat files/pips/*`
touch $CACHEDIR/bootstrapped
fi
# Clean out code repos if directed to do so
if [ "$CLEAN" = "1" ]; then
rm -rf $CACHEDIR/$DEST
fi
# Cache openstack code
mkdir -p $CACHEDIR/$DEST
git_clone $NOVA_REPO $CACHEDIR/$DEST/nova $NOVA_BRANCH
git_clone $GLANCE_REPO $CACHEDIR/$DEST/glance $GLANCE_BRANCH
git_clone $KEYSTONE_REPO $CACHEDIR/$DEST/keystone $KEYSTONE_BRANCH
git_clone $NOVNC_REPO $CACHEDIR/$DEST/novnc $NOVNC_BRANCH
git_clone $DASH_REPO $CACHEDIR/$DEST/dash $DASH_BRANCH $DASH_TAG
git_clone $NOVACLIENT_REPO $CACHEDIR/$DEST/python-novaclient $NOVACLIENT_BRANCH
git_clone $OPENSTACKX_REPO $CACHEDIR/$DEST/openstackx $OPENSTACKX_BRANCH
# Use this version of devstack?
if [ "$USE_CURRENT_DEVSTACK" = "1" ]; then
rm -rf $CACHEDIR/$DEST/devstack
cp -pr $CWD $CACHEDIR/$DEST/devstack
fi
# Destroy the old container
lxc-destroy -n $CONTAINER
# If this call is to TERMINATE the container then exit
if [ "$TERMINATE" = "1" ]; then
exit
fi
# Create the container
lxc-create -n $CONTAINER -t natty -f $LXC_CONF
# Specify where our container rootfs lives
ROOTFS=/var/lib/lxc/$CONTAINER/rootfs/
# Create a stack user that is a member of the libvirtd group so that stack
# is able to interact with libvirt.
chroot $ROOTFS groupadd libvirtd
chroot $ROOTFS useradd stack -s /bin/bash -d $DEST -G libvirtd
# a simple password - pass
echo stack:pass | chroot $ROOTFS chpasswd
# and has sudo ability (in the future this should be limited to only what
# stack requires)
echo "stack ALL=(ALL) NOPASSWD: ALL" >> $ROOTFS/etc/sudoers
# Copy kernel modules
mkdir -p $ROOTFS/lib/modules/`uname -r`/kernel
cp -p /lib/modules/`uname -r`/modules.dep $ROOTFS/lib/modules/`uname -r`/
cp -pR /lib/modules/`uname -r`/kernel/net $ROOTFS/lib/modules/`uname -r`/kernel/
# Gracefully cp only if source file/dir exists
function cp_it {
if [ -e $1 ] || [ -d $1 ]; then
cp -pRL $1 $2
fi
}
# Copy over your ssh keys and env if desired
if [ "$COPYENV" = "1" ]; then
cp_it ~/.ssh $ROOTFS/$DEST/.ssh
cp_it ~/.ssh/id_rsa.pub $ROOTFS/$DEST/.ssh/authorized_keys
cp_it ~/.gitconfig $ROOTFS/$DEST/.gitconfig
cp_it ~/.vimrc $ROOTFS/$DEST/.vimrc
cp_it ~/.bashrc $ROOTFS/$DEST/.bashrc
fi
# Make our ip address hostnames look nice at the command prompt
echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $ROOTFS/$DEST/.bashrc
echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $ROOTFS/etc/profile
# Give stack ownership over $DEST so it may do the work needed
chroot $ROOTFS chown -R stack $DEST
# Configure instance network
INTERFACES=$ROOTFS/etc/network/interfaces
cat > $INTERFACES <<EOF
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
address $CONTAINER_IP
netmask $CONTAINER_NETMASK
gateway $CONTAINER_GATEWAY
EOF
# Configure the runner
RUN_SH=$ROOTFS/$DEST/run.sh
cat > $RUN_SH <<EOF
#!/usr/bin/env bash
# Make sure dns is set up
echo "nameserver $NAMESERVER" | sudo resolvconf -a eth0
sleep 1
# Kill any existing screens
killall screen
# Install and run stack.sh
sudo apt-get update
sudo apt-get -y --force-yes install git-core vim-nox sudo
if [ ! -d "$DEST/devstack" ]; then
git clone git://github.com/cloudbuilders/devstack.git $DEST/devstack
fi
cd $DEST/devstack && $STACKSH_PARAMS ./stack.sh > /$DEST/run.sh.log
echo >> /$DEST/run.sh.log
echo >> /$DEST/run.sh.log
echo "All done! Time to start clicking." >> /$DEST/run.sh.log
EOF
# Make the run.sh executable
chmod 755 $RUN_SH
# Make runner launch on boot
RC_LOCAL=$ROOTFS/etc/rc.local
cat > $RC_LOCAL <<EOF
#!/bin/sh -e
su -c "$DEST/run.sh" stack
EOF
# Configure cgroup directory
if ! mount | grep -q cgroup; then
mkdir -p /cgroup
mount none -t cgroup /cgroup
fi
# Start our container
lxc-start -d -n $CONTAINER
# Done creating the container, let's tail the log
echo
echo "============================================================="
echo " -- YAY! --"
echo "============================================================="
echo
echo "We're done creating the container, about to start tailing the"
echo "stack.sh log. It will take a second or two to start."
echo
echo "Just CTRL-C at any time to stop tailing."
while [ ! -e "$ROOTFS/$DEST/run.sh.log" ]; do
sleep 1
done
tail -F $ROOTFS/$DEST/run.sh.log

39
build_lxc_multi.sh Executable file

@@ -0,0 +1,39 @@
#!/usr/bin/env bash
# Head node host, which runs glance, api, keystone
HEAD_HOST=${HEAD_HOST:-192.168.1.52}
COMPUTE_HOSTS=${COMPUTE_HOSTS:-192.168.1.53,192.168.1.54}
# Networking params
NAMESERVER=${NAMESERVER:-192.168.1.1}
GATEWAY=${GATEWAY:-192.168.1.1}
NETMASK=${NETMASK:-255.255.255.0}
FLOATING_RANGE=${FLOATING_RANGE:-192.168.1.196/30}
# Setting this to 1 shuts down and destroys our containers without relaunching.
TERMINATE=${TERMINATE:-0}
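# Example: launch a head node and two compute nodes (addresses illustrative):
#   HEAD_HOST=192.168.1.60 COMPUTE_HOSTS=192.168.1.61,192.168.1.62 ./build_lxc_multi.sh
# The same invocation with TERMINATE=1 tears the cluster back down:
#   TERMINATE=1 HEAD_HOST=192.168.1.60 COMPUTE_HOSTS=192.168.1.61,192.168.1.62 ./build_lxc_multi.sh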
# Variables common amongst all hosts in the cluster
COMMON_VARS="MYSQL_HOST=$HEAD_HOST RABBIT_HOST=$HEAD_HOST GLANCE_HOSTPORT=$HEAD_HOST:9292 NET_MAN=FlatDHCPManager FLAT_INTERFACE=eth0 FLOATING_RANGE=$FLOATING_RANGE MULTI_HOST=1"
# Helper to launch containers
function run_lxc {
# For some reason container names with periods can cause issues :/
CONTAINER=$1 CONTAINER_IP=$2 CONTAINER_NETMASK=$NETMASK CONTAINER_GATEWAY=$GATEWAY NAMESERVER=$NAMESERVER TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $3" ./build_lxc.sh
}
# Launch the head node - headnode uses a non-ip domain name,
# because rabbit won't launch with an ip addr hostname :(
run_lxc STACKMASTER $HEAD_HOST "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,dash,mysql,rabbit"
# Wait till the head node is up
if [ ! "$TERMINATE" = "1" ]; then
while ! wget -q -O- http://$HEAD_HOST | grep -q username; do
echo "Waiting for head node ($HEAD_HOST) to start..."
sleep 5
done
fi
# Launch the compute hosts
for compute_host in ${COMPUTE_HOSTS//,/ }; do
run_lxc $compute_host $compute_host "ENABLED_SERVICES=n-cpu,n-net,n-api"
done

117
build_nfs.sh Executable file

@@ -0,0 +1,117 @@
#!/bin/bash
PROGDIR=`dirname $0`
CHROOTCACHE=${CHROOTCACHE:-/var/cache/devstack}
# Source params
source ./stackrc
# Store cwd
CWD=`pwd`
NAME=$1
NFSDIR="/nfs/$NAME"
DEST=${DEST:-/opt/stack}
# Option to use the version of devstack on which we are currently working
USE_CURRENT_DEVSTACK=${USE_CURRENT_DEVSTACK:-1}
# remove old nfs filesystem if one exists
rm -rf $DEST
# clean install of natty
if [ ! -d $CHROOTCACHE/natty-base ]; then
$PROGDIR/make_image.sh -C natty $CHROOTCACHE/natty-base
# copy kernel modules...
# NOTE(ja): is there a better way to do this?
cp -pr /lib/modules/`uname -r` $CHROOTCACHE/natty-base/lib/modules
# a simple password - pass
echo root:pass | chroot $CHROOTCACHE/natty-base chpasswd
fi
# prime natty with as many apt/pips as we can
if [ ! -d $CHROOTCACHE/natty-dev ]; then
rsync -azH $CHROOTCACHE/natty-base/ $CHROOTCACHE/natty-dev/
chroot $CHROOTCACHE/natty-dev apt-get install -y `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server)"`
chroot $CHROOTCACHE/natty-dev pip install `cat files/pips/*`
# Create a stack user that is a member of the libvirtd group so that stack
# is able to interact with libvirt.
chroot $CHROOTCACHE/natty-dev groupadd libvirtd
chroot $CHROOTCACHE/natty-dev useradd stack -s /bin/bash -d $DEST -G libvirtd
mkdir -p $CHROOTCACHE/natty-dev/$DEST
chown stack $CHROOTCACHE/natty-dev/$DEST
# a simple password - pass
echo stack:pass | chroot $CHROOTCACHE/natty-dev chpasswd
# and has sudo ability (in the future this should be limited to only what
# stack requires)
echo "stack ALL=(ALL) NOPASSWD: ALL" >> $CHROOTCACHE/natty-dev/etc/sudoers
fi
# clone git repositories onto the system
# ======================================
if [ ! -d $CHROOTCACHE/natty-stack ]; then
rsync -azH $CHROOTCACHE/natty-dev/ $CHROOTCACHE/natty-stack/
fi
# git clone only if directory doesn't exist already. Since ``DEST`` might not
# be owned by the installation user, we create the directory and change the
# ownership to the proper user.
function git_clone {
# clone new copy or fetch latest changes
CHECKOUT=$CHROOTCACHE/natty-stack$2
if [ ! -d $CHECKOUT ]; then
mkdir -p $CHECKOUT
git clone $1 $CHECKOUT
else
pushd $CHECKOUT
git fetch
popd
fi
# FIXME(ja): checkout specified version (should work for both branches and tags)
pushd $CHECKOUT
# checkout the proper branch/tag
git checkout $3
# force our local version to be the same as the remote version
git reset --hard origin/$3
popd
# give ownership to the stack user
chroot $CHROOTCACHE/natty-stack/ chown -R stack $2
}
git_clone $NOVA_REPO $DEST/nova $NOVA_BRANCH
git_clone $GLANCE_REPO $DEST/glance $GLANCE_BRANCH
git_clone $KEYSTONE_REPO $DEST/keystone $KEYSTONE_BRANCH
git_clone $NOVNC_REPO $DEST/novnc $NOVNC_BRANCH
git_clone $DASH_REPO $DEST/dash $DASH_BRANCH $DASH_TAG
git_clone $NOVACLIENT_REPO $DEST/python-novaclient $NOVACLIENT_BRANCH
git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH
chroot $CHROOTCACHE/natty-stack mkdir -p $DEST/files
wget -c http://images.ansolabs.com/tty.tgz -O $CHROOTCACHE/natty-stack$DEST/files/tty.tgz
# Use this version of devstack?
if [ "$USE_CURRENT_DEVSTACK" = "1" ]; then
rm -rf $CHROOTCACHE/natty-stack/$DEST/devstack
cp -pr $CWD $CHROOTCACHE/natty-stack/$DEST/devstack
fi
cp -pr $CHROOTCACHE/natty-stack $NFSDIR
# set hostname
echo $NAME > $NFSDIR/etc/hostname
echo "127.0.0.1 localhost $NAME" > $NFSDIR/etc/hosts
# injecting root's public ssh key if it exists
if [ -f /root/.ssh/id_rsa.pub ]; then
mkdir $NFSDIR/root/.ssh
chmod 700 $NFSDIR/root/.ssh
cp /root/.ssh/id_rsa.pub $NFSDIR/root/.ssh/authorized_keys
fi
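# NOTE: this script only populates $NFSDIR; exporting it over NFS is left to
# the host. One possible /etc/exports entry (illustrative, adjust to your network):
#   /nfs/<name> 192.168.1.0/24(rw,no_root_squash,async,no_subtree_check)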

116
build_pxe_boot.sh Executable file

@@ -0,0 +1,116 @@
#!/bin/bash -e
# build_pxe_boot.sh - Create a PXE boot environment
#
# build_pxe_boot.sh [-k kernel-version] destdir
#
# Assumes syslinux is installed
# Assumes devstack files are in `pwd`/pxe
# Only needs to run as root if the destdir permissions require it
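# Example: build into /tmp/tftpboot using the running kernel:
#   ./build_pxe_boot.sh /tmp
# or name a kernel version explicitly (version illustrative):
#   ./build_pxe_boot.sh -k 2.6.38-11-generic /tmp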
UBUNTU_MIRROR=http://archive.ubuntu.com/ubuntu/dists/natty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64
MEMTEST_VER=4.10
MEMTEST_BIN=memtest86+-${MEMTEST_VER}.bin
MEMTEST_URL=http://www.memtest.org/download/${MEMTEST_VER}/
KVER=`uname -r`
if [ "$1" = "-k" ]; then
KVER=$2
shift;shift
fi
DEST_DIR=${1:-/tmp}/tftpboot
PXEDIR=${PXEDIR:-/var/cache/devstack/pxe}
OPWD=`pwd`
PROGDIR=`dirname $0`
mkdir -p $DEST_DIR/pxelinux.cfg
cd $DEST_DIR
for i in memdisk menu.c32 pxelinux.0; do
cp -p /usr/lib/syslinux/$i $DEST_DIR
done
DEFAULT=$DEST_DIR/pxelinux.cfg/default
cat >$DEFAULT <<EOF
default menu.c32
prompt 0
timeout 0
MENU TITLE PXE Boot Menu
EOF
# Setup devstack boot
mkdir -p $DEST_DIR/ubuntu
if [ ! -d $PXEDIR ]; then
mkdir -p $PXEDIR
fi
if [ ! -r $PXEDIR/vmlinuz-${KVER} ]; then
sudo chmod 644 /boot/vmlinuz-${KVER}
if [ ! -r /boot/vmlinuz-${KVER} ]; then
echo "No kernel found"
else
cp -p /boot/vmlinuz-${KVER} $PXEDIR
fi
fi
cp -p $PXEDIR/vmlinuz-${KVER} $DEST_DIR/ubuntu
if [ ! -r $PXEDIR/stack-initrd.gz ]; then
cd $OPWD
sudo $PROGDIR/build_pxe_ramdisk.sh $PXEDIR/stack-initrd.gz
fi
cp -p $PXEDIR/stack-initrd.gz $DEST_DIR/ubuntu
cat >>$DEFAULT <<EOF
LABEL devstack
MENU LABEL ^devstack
MENU DEFAULT
KERNEL ubuntu/vmlinuz-$KVER
APPEND initrd=ubuntu/stack-initrd.gz ramdisk_size=2109600 root=/dev/ram0
EOF
# Get Ubuntu
if [ -d $PXEDIR ]; then
cp -p $PXEDIR/natty-base-initrd.gz $DEST_DIR/ubuntu
fi
cat >>$DEFAULT <<EOF
LABEL ubuntu
MENU LABEL ^Ubuntu Natty
KERNEL ubuntu/vmlinuz-$KVER
APPEND initrd=ubuntu/natty-base-initrd.gz ramdisk_size=419600 root=/dev/ram0
EOF
# Get Memtest
cd $DEST_DIR
if [ ! -r $MEMTEST_BIN ]; then
wget -N --quiet ${MEMTEST_URL}/${MEMTEST_BIN}.gz
gunzip $MEMTEST_BIN
fi
cat >>$DEFAULT <<EOF
LABEL memtest
MENU LABEL ^Memtest86+
KERNEL $MEMTEST_BIN
EOF
# Get FreeDOS
mkdir -p $DEST_DIR/freedos
cd $DEST_DIR/freedos
wget -N --quiet http://www.fdos.org/bootdisks/autogen/FDSTD.288.gz
gunzip -f FDSTD.288.gz
cat >>$DEFAULT <<EOF
LABEL freedos
MENU LABEL ^FreeDOS bootdisk
KERNEL memdisk
APPEND initrd=freedos/FDSTD.288
EOF
# Local disk boot
cat >>$DEFAULT <<EOF
LABEL local
MENU LABEL ^Local disk
MENU DEFAULT
LOCALBOOT 0
EOF

135
build_pxe_ramdisk.sh Executable file

@@ -0,0 +1,135 @@
#!/bin/bash
if [ ! "$#" -eq "1" ]; then
echo "$0 builds a gzipped natty openstack install"
echo "usage: $0 dest"
exit 1
fi
PROGDIR=`dirname $0`
CHROOTCACHE=${CHROOTCACHE:-/var/cache/devstack}
# Source params
source ./stackrc
# Store cwd
CWD=`pwd`
DEST=${DEST:-/opt/stack}
# Option to use the version of devstack on which we are currently working
USE_CURRENT_DEVSTACK=${USE_CURRENT_DEVSTACK:-1}
# clean install of natty
if [ ! -d $CHROOTCACHE/natty-base ]; then
$PROGDIR/make_image.sh -C natty $CHROOTCACHE/natty-base
# copy kernel modules...
# NOTE(ja): is there a better way to do this?
cp -pr /lib/modules/`uname -r` $CHROOTCACHE/natty-base/lib/modules
# a simple password - pass
echo root:pass | chroot $CHROOTCACHE/natty-base chpasswd
fi
# prime natty with as many apt/pips as we can
if [ ! -d $CHROOTCACHE/natty-dev ]; then
rsync -azH $CHROOTCACHE/natty-base/ $CHROOTCACHE/natty-dev/
chroot $CHROOTCACHE/natty-dev apt-get install -y `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server)"`
chroot $CHROOTCACHE/natty-dev pip install `cat files/pips/*`
# Create a stack user that is a member of the libvirtd group so that stack
# is able to interact with libvirt.
chroot $CHROOTCACHE/natty-dev groupadd libvirtd
chroot $CHROOTCACHE/natty-dev useradd stack -s /bin/bash -d $DEST -G libvirtd
mkdir -p $CHROOTCACHE/natty-dev/$DEST
chown stack $CHROOTCACHE/natty-dev/$DEST
# a simple password - pass
echo stack:pass | chroot $CHROOTCACHE/natty-dev chpasswd
# and has sudo ability (in the future this should be limited to only what
# stack requires)
echo "stack ALL=(ALL) NOPASSWD: ALL" >> $CHROOTCACHE/natty-dev/etc/sudoers
fi
# clone git repositories onto the system
# ======================================
if [ ! -d $CHROOTCACHE/natty-stack ]; then
rsync -azH $CHROOTCACHE/natty-dev/ $CHROOTCACHE/natty-stack/
fi
# git clone only if directory doesn't exist already. Since ``DEST`` might not
# be owned by the installation user, we create the directory and change the
# ownership to the proper user.
function git_clone {
# clone new copy or fetch latest changes
CHECKOUT=$CHROOTCACHE/natty-stack$2
if [ ! -d $CHECKOUT ]; then
mkdir -p $CHECKOUT
git clone $1 $CHECKOUT
else
pushd $CHECKOUT
git fetch
popd
fi
# FIXME(ja): checkout specified version (should work for both branches and tags)
pushd $CHECKOUT
# checkout the proper branch/tag
git checkout $3
# force our local version to be the same as the remote version
git reset --hard origin/$3
popd
# give ownership to the stack user
chroot $CHROOTCACHE/natty-stack/ chown -R stack $2
}
git_clone $NOVA_REPO $DEST/nova $NOVA_BRANCH
git_clone $GLANCE_REPO $DEST/glance $GLANCE_BRANCH
git_clone $KEYSTONE_REPO $DEST/keystone $KEYSTONE_BRANCH
git_clone $NOVNC_REPO $DEST/novnc $NOVNC_BRANCH
git_clone $DASH_REPO $DEST/dash $DASH_BRANCH
git_clone $NOVACLIENT_REPO $DEST/python-novaclient $NOVACLIENT_BRANCH
git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH
# Use this version of devstack?
if [ "$USE_CURRENT_DEVSTACK" = "1" ]; then
rm -rf $CHROOTCACHE/natty-stack/$DEST/devstack
cp -pr $CWD $CHROOTCACHE/natty-stack/$DEST/devstack
fi
# Configure host network for DHCP
mkdir -p $CHROOTCACHE/natty-stack/etc/network
cat > $CHROOTCACHE/natty-stack/etc/network/interfaces <<EOF
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet dhcp
EOF
# build a new image
BASE=$CHROOTCACHE/build.$$
IMG=$BASE.img
MNT=$BASE/
# (quickly) create a 2GB sparse image file by writing a single byte at the 2GB mark
dd bs=1 count=1 seek=$((2*1024*1024*1024)) if=/dev/zero of=$IMG
# force it to be initialized as ext2
mkfs.ext2 -F $IMG
# mount blank image loopback and load it
mkdir -p $MNT
mount -o loop $IMG $MNT
rsync -azH $CHROOTCACHE/natty-stack/ $MNT
# umount and cleanup
umount $MNT
rmdir $MNT
# gzip into final location
gzip -1 $IMG -c > $1

82
exercise.sh Executable file

@@ -0,0 +1,82 @@
#!/usr/bin/env bash
# **exercise.sh** - using the cloud can be fun
# we will use the ``nova`` cli tool provided by the ``python-novaclient``
# package
#
# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit
# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following along as the install occurs.
set -o xtrace
# Settings
# ========
HOST=${HOST:-localhost}
# Nova originally used project_id as the *account* that owned resources (servers,
# ip addresses, ...). With the addition of Keystone we have standardized on the
# term **tenant** as the entity that owns the resources. **novaclient** still
# uses the old deprecated term project_id. Note that this field should now be
# set to tenant_name, not tenant_id.
export NOVA_PROJECT_ID=${TENANT:-demo}
# In addition to the owning entity (tenant), nova stores the entity performing
# the action as the **user**.
export NOVA_USERNAME=${USERNAME:-demo}
# With Keystone you pass the keystone password instead of an api key.
export NOVA_API_KEY=${PASSWORD:-secrete}
# With the addition of Keystone, to use an openstack cloud you should
# authenticate against keystone, which returns a **Token** and **Service
# Catalog**. The catalog contains the endpoint for all services the user/tenant
# has access to - including nova, glance, keystone, swift, ... We currently
# recommend using the 2.0 *auth api*.
#
# *NOTE*: Using the 2.0 *auth api* does not mean that the compute api is 2.0. We will
# use the 1.1 *compute api*
export NOVA_URL=${NOVA_URL:-http://$HOST:5000/v2.0/}
# Currently novaclient needs you to specify the *compute api* version. This
# needs to match the config of your catalog returned by Keystone.
export NOVA_VERSION=1.1
# FIXME - why does this need to be specified?
export NOVA_REGION_NAME=RegionOne
# Get a token for clients that don't support service catalog
# ==========================================================
SERVICE_TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_PROJECT_ID\", \"password\": \"$NOVA_API_KEY\"}}}" -H "Content-type: application/json" http://$HOST:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"`
# Launching a server
# ==================
# List servers for tenant:
nova list
# List of flavors:
nova flavor-list
# Images
# ------
# Nova has a **deprecated** way of listing images.
nova image-list
# But we recommend using glance directly
glance -A $SERVICE_TOKEN index
# show details of the active servers::
#
# nova show 1234
#
nova list | grep ACTIVE | cut -d \| -f2 | xargs -n1 nova show

27
files/000-default.template Normal file

@@ -0,0 +1,27 @@
<VirtualHost *:80>
WSGIScriptAlias / %DASH_DIR%/openstack-dashboard/dashboard/wsgi/django.wsgi
WSGIDaemonProcess dashboard user=%USER% group=%USER% processes=3 threads=10
SetEnv APACHE_RUN_USER %USER%
SetEnv APACHE_RUN_GROUP %USER%
WSGIProcessGroup dashboard
DocumentRoot %DASH_DIR%/.blackhole/
Alias /media %DASH_DIR%/openstack-dashboard/media
<Directory />
Options FollowSymLinks
AllowOverride None
</Directory>
<Directory %DASH_DIR%/>
Options Indexes FollowSymLinks MultiViews
AllowOverride None
Order allow,deny
allow from all
</Directory>
ErrorLog /var/log/apache2/error.log
LogLevel warn
CustomLog /var/log/apache2/access.log combined
</VirtualHost>

4
files/apts/dash Normal file

@@ -0,0 +1,4 @@
apache2
libapache2-mod-wsgi
python-dateutil
python-anyjson

14
files/apts/general Normal file

@@ -0,0 +1,14 @@
pep8
pylint
python-pip
screen
unzip
wget
psmisc
git-core
lsof # useful when debugging
openssh-server
vim-nox
locate # useful when debugging
python-virtualenv
python-unittest2

8
files/apts/glance Normal file

@@ -0,0 +1,8 @@
python-eventlet
python-routes
python-greenlet
python-argparse
python-sqlalchemy
python-wsgiref
python-pastedeploy
python-xattr

15
files/apts/keystone Normal file

@@ -0,0 +1,15 @@
python-setuptools
python-dev
python-lxml
python-pastescript
python-pastedeploy
python-paste
sqlite3
python-pysqlite2
python-sqlalchemy
python-webob
python-greenlet
python-routes
libldap2-dev
libsasl2-dev

35
files/apts/nova Normal file

@@ -0,0 +1,35 @@
dnsmasq-base
kpartx
mysql-server
python-mysqldb
kvm
gawk
iptables
ebtables
sqlite3
sudo
kvm
libvirt-bin
vlan
curl
rabbitmq-server
socat # used by ajaxterm
python-mox
python-paste
python-migrate
python-gflags
python-greenlet
python-libvirt
python-libxml2
python-routes
python-netaddr
python-pastedeploy
python-eventlet
python-cheetah
python-carrot
python-tempita
python-sqlalchemy
python-suds
python-lockfile
python-m2crypto
python-boto

1
files/apts/novnc Normal file

@@ -0,0 +1 @@
python-numpy

18
files/apts/preseed Normal file

@@ -0,0 +1,18 @@
# a collection of packages that speed up installation as they are dependencies
# of packages we can't install during bootstrapping (rabbitmq-server,
# mysql-server, libvirt-bin)
#
# NOTE: only add packages to this file that aren't needed directly
mysql-common
mysql-client-5.1
erlang-base
erlang-ssl
erlang-nox
erlang-inets
erlang-mnesia
libhtml-template-perl
gettext-base
libavahi-client3
libxml2-utils
libpciaccess0
libparted0debian1

98
files/dash_settings.py Normal file

@@ -0,0 +1,98 @@
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
PROD = False
USE_SSL = False
LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
# FIXME: We need to change this to mysql, instead of sqlite.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(LOCAL_PATH, 'dashboard_openstack.sqlite3'),
},
}
CACHE_BACKEND = 'dummy://'
# Add apps to dash installation.
INSTALLED_APPS = (
'dashboard',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_openstack',
'django_openstack.templatetags',
'mailer',
)
# Send email to the console by default
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Or send them to /dev/null
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
# django-mailer uses a different settings attribute
MAILER_EMAIL_BACKEND = EMAIL_BACKEND
# Configure these for your outgoing email host
# EMAIL_HOST = 'smtp.my-company.com'
# EMAIL_PORT = 25
# EMAIL_HOST_USER = 'djangomail'
# EMAIL_HOST_PASSWORD = 'top-secret!'
# FIXME: This needs to be changed to allow for multi-node setup.
OPENSTACK_KEYSTONE_URL = "http://localhost:5000/v2.0/"
OPENSTACK_KEYSTONE_ADMIN_URL = "http://localhost:35357/v2.0"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "Member"
# NOTE(tres): Available services should come from the service
# catalog in Keystone.
SWIFT_ENABLED = False
# Configure quantum connection details for networking
QUANTUM_ENABLED = False
QUANTUM_URL = '127.0.0.1'
QUANTUM_PORT = '9696'
QUANTUM_TENANT = '1234'
QUANTUM_CLIENT_VERSION='0.1'
# No monitoring links currently
EXTERNAL_MONITORING = []
# Uncomment the following segment to silence most logging
# django.db and boto DEBUG logging is extremely verbose.
#LOGGING = {
# 'version': 1,
# # setting this to True disables all logging except what is explicitly
# # specified; however, django.db.backends will still log even then,
# # so it is disabled explicitly in the loggers section below
# 'disable_existing_loggers': False,
# 'handlers': {
# 'null': {
# 'level': 'DEBUG',
# 'class': 'django.utils.log.NullHandler',
# },
# 'console': {
# 'level': 'DEBUG',
# 'class': 'logging.StreamHandler',
# },
# },
# 'loggers': {
# # Comment or Uncomment these to turn on/off logging output
# 'django.db.backends': {
# 'handlers': ['null'],
# 'propagate': False,
# },
# 'django_openstack': {
# 'handlers': ['null'],
# 'propagate': False,
# },
# }
#}
# How much ram on each compute host?
COMPUTE_HOST_RAM_GB = 16

178
files/glance-api.conf Normal file

@@ -0,0 +1,178 @@
[DEFAULT]
# Show more verbose log output (sets INFO log level output)
verbose = True
# Show debugging output in logs (sets DEBUG log level output)
debug = True
# Which backend store should Glance use by default if one is not specified
# in a request to add a new image to Glance? Default: 'file'
# Available choices are 'file', 'swift', and 's3'
default_store = file
# Address to bind the API server
bind_host = 0.0.0.0
# Port to bind the API server to
bind_port = 9292
# Address to find the registry server
registry_host = 0.0.0.0
# Port the registry server is listening on
registry_port = 9191
# Log to this file. Make sure you do not set the same log
# file for both the API and registry servers!
log_file = %DEST%/glance/api.log
# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
use_syslog = False
# ============ Notification System Options =====================
# Notifications can be sent when images are created, updated or deleted.
# There are three methods of sending notifications, logging (via the
# log_file directive), rabbit (via a rabbitmq queue) or noop (no
# notifications sent, the default)
notifier_strategy = noop
# Configuration options if sending notifications via rabbitmq (these are
# the defaults)
rabbit_host = localhost
rabbit_port = 5672
rabbit_use_ssl = false
rabbit_userid = guest
rabbit_password = guest
rabbit_virtual_host = /
rabbit_notification_topic = glance_notifications
# ============ Filesystem Store Options ========================
# Directory that the Filesystem backend store
# writes image data to
filesystem_store_datadir = %DEST%/glance/images/
# ============ Swift Store Options =============================
# Address where the Swift authentication service lives
swift_store_auth_address = 127.0.0.1:8080/v1.0/
# User to authenticate against the Swift authentication service
swift_store_user = jdoe
# Auth key for the user authenticating against the
# Swift authentication service
swift_store_key = a86850deb2742ec3cb41518e26aa2d89
# Container within the account that the account should use
# for storing images in Swift
swift_store_container = glance
# Do we create the container if it does not exist?
swift_store_create_container_on_put = False
# What size, in MB, should Glance start chunking image files
# and do a large object manifest in Swift? By default, this is
# the maximum object size in Swift, which is 5GB
swift_store_large_object_size = 5120
# When doing a large object manifest, what size, in MB, should
# Glance write chunks to Swift? This amount of data is written
# to a temporary disk buffer during the process of chunking
# the image file, and the default is 200MB
swift_store_large_object_chunk_size = 200
# Whether to use ServiceNET to communicate with the Swift storage servers.
# (If you aren't RACKSPACE, leave this False!)
#
# To use ServiceNET for authentication, prefix hostname of
# `swift_store_auth_address` with 'snet-'.
# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
swift_enable_snet = False
# ============ S3 Store Options =============================
# Address where the S3 authentication service lives
s3_store_host = 127.0.0.1:8080/v1.0/
# User to authenticate against the S3 authentication service
s3_store_access_key = <20-char AWS access key>
# Auth key for the user authenticating against the
# S3 authentication service
s3_store_secret_key = <40-char AWS secret key>
# Container within the account that the account should use
# for storing images in S3. Note that S3 has a flat namespace,
# so you need a unique bucket name for your glance images. An
# easy way to do this is append your AWS access key to "glance".
# S3 buckets in AWS *must* be lowercased, so remember to lowercase
# your AWS access key if you use it in your bucket name below!
s3_store_bucket = <lowercased 20-char aws access key>glance
# Do we create the bucket if it does not exist?
s3_store_create_bucket_on_put = False
# ============ Image Cache Options ========================
image_cache_enabled = False
# Directory that the Image Cache writes data to
# Make sure this is also set in glance-pruner.conf
image_cache_datadir = /var/lib/glance/image-cache/
# Number of seconds after which we should consider an incomplete image to be
# stalled and eligible for reaping
image_cache_stall_timeout = 86400
# ============ Delayed Delete Options =============================
# Turn on/off delayed delete
delayed_delete = False
# Delayed delete time in seconds
scrub_time = 43200
# Directory that the scrubber will use to remind itself of what to delete
# Make sure this is also set in glance-scrubber.conf
scrubber_datadir = /var/lib/glance/scrubber
[pipeline:glance-api]
#pipeline = versionnegotiation context apiv1app
# NOTE: use the following pipeline for keystone
pipeline = versionnegotiation authtoken context apiv1app
# To enable Image Cache Management API replace pipeline with below:
# pipeline = versionnegotiation context imagecache apiv1app
# NOTE: use the following pipeline for keystone auth (with caching)
# pipeline = versionnegotiation authtoken context imagecache apiv1app
[pipeline:versions]
pipeline = versionsapp
[app:versionsapp]
paste.app_factory = glance.api.versions:app_factory
[app:apiv1app]
paste.app_factory = glance.api.v1:app_factory
[filter:versionnegotiation]
paste.filter_factory = glance.api.middleware.version_negotiation:filter_factory
[filter:imagecache]
paste.filter_factory = glance.api.middleware.image_cache:filter_factory
[filter:context]
paste.filter_factory = glance.common.context:filter_factory
[filter:authtoken]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
service_protocol = http
service_host = 127.0.0.1
service_port = 5000
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
auth_uri = http://127.0.0.1:5000/
admin_token = %SERVICE_TOKEN%

70
files/glance-registry.conf Normal file

@@ -0,0 +1,70 @@
[DEFAULT]
# Show more verbose log output (sets INFO log level output)
verbose = True
# Show debugging output in logs (sets DEBUG log level output)
debug = True
# Address to bind the registry server
bind_host = 0.0.0.0
# Port to bind the registry server to
bind_port = 9191
# Log to this file. Make sure you do not set the same log
# file for both the API and registry servers!
log_file = %DEST%/glance/registry.log
# Where to store images
filesystem_store_datadir = %DEST%/glance/images
# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
use_syslog = False
# SQLAlchemy connection string for the reference implementation
# registry server. Any valid SQLAlchemy connection string is fine.
# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
sql_connection = %SQL_CONN%
# Period in seconds after which SQLAlchemy should reestablish its connection
# to the database.
#
# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
# notice this, you can lower this value to ensure that SQLAlchemy reconnects
# before MySQL can drop the connection.
sql_idle_timeout = 3600
# Limit the api to return `api_limit_max` items in a call to a container. If
# a larger `limit` query param is provided, it will be reduced to this value.
api_limit_max = 1000
# If a `limit` query param is not provided in an api request, it will
# default to `limit_param_default`
limit_param_default = 25
[pipeline:glance-registry]
#pipeline = context registryapp
# NOTE: use the following pipeline for keystone
pipeline = authtoken keystone_shim context registryapp
[app:registryapp]
paste.app_factory = glance.registry.server:app_factory
[filter:context]
context_class = glance.registry.context.RequestContext
paste.filter_factory = glance.common.context:filter_factory
[filter:authtoken]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
service_protocol = http
service_host = 127.0.0.1
service_port = 5000
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
auth_uri = http://127.0.0.1:5000/
admin_token = %SERVICE_TOKEN%
[filter:keystone_shim]
paste.filter_factory = keystone.middleware.glance_auth_token:filter_factory

86
files/keystone.conf Normal file

@@ -0,0 +1,86 @@
[DEFAULT]
# Show more verbose log output (sets INFO log level output)
verbose = False
# Show debugging output in logs (sets DEBUG log level output)
debug = False
# Which backend store should Keystone use by default.
# Default: 'sqlite'
# Available choices are 'sqlite' [future will include LDAP, PAM, etc]
default_store = sqlite
# Log to this file. Make sure you do not set the same log
# file for both the API and registry servers!
log_file = %DEST%/keystone/keystone.log
# List of backends to be configured
backends = keystone.backends.sqlalchemy
# For LDAP support, add: ,keystone.backends.ldap
# Dictionary that maps every service to a header. Missing services get the
# header X_(SERVICE_NAME). Key => Service Name, Value => Header Name
service-header-mappings = {
'nova' : 'X-Server-Management-Url',
'swift' : 'X-Storage-Url',
'cdn' : 'X-CDN-Management-Url'}
# Address to bind the API server
# TODO Properties defined within app not available via pipeline.
service_host = 0.0.0.0
# Port to bind the API server to
service_port = 5000
# Address to bind the Admin API server
admin_host = 0.0.0.0
# Port to bind the Admin API server to
admin_port = 35357
# Role that allows performing admin operations.
keystone-admin-role = KeystoneAdmin
# Role that allows performing service admin operations.
keystone-service-admin-role = KeystoneServiceAdmin
[keystone.backends.sqlalchemy]
# SQLAlchemy connection string for the reference implementation registry
# server. Any valid SQLAlchemy connection string is fine.
# See: http://bit.ly/ideIpI
#sql_connection = sqlite:///keystone.db
sql_connection = %SQL_CONN%
backend_entities = ['UserRoleAssociation', 'Endpoints', 'Role', 'Tenant',
'User', 'Credentials', 'EndpointTemplates', 'Token',
'Service']
# Period in seconds after which SQLAlchemy should reestablish its connection
# to the database.
sql_idle_timeout = 30
[pipeline:admin]
pipeline =
urlrewritefilter
admin_api
[pipeline:keystone-legacy-auth]
pipeline =
urlrewritefilter
legacy_auth
RAX-KEY-extension
service_api
[app:service_api]
paste.app_factory = keystone.server:service_app_factory
[app:admin_api]
paste.app_factory = keystone.server:admin_app_factory
[filter:urlrewritefilter]
paste.filter_factory = keystone.middleware.url:filter_factory
[filter:legacy_auth]
paste.filter_factory = keystone.frontends.legacy_token_auth:filter_factory
[filter:RAX-KEY-extension]
paste.filter_factory = keystone.contrib.extensions.service.raxkey.frontend:filter_factory

43
files/keystone_data.sh Executable file

@@ -0,0 +1,43 @@
#!/bin/bash
BIN_DIR=${BIN_DIR:-.}
# Tenants
$BIN_DIR/keystone-manage $* tenant add admin
$BIN_DIR/keystone-manage $* tenant add demo
$BIN_DIR/keystone-manage $* tenant add invisible_to_admin
# Users
$BIN_DIR/keystone-manage $* user add admin %ADMIN_PASSWORD%
$BIN_DIR/keystone-manage $* user add demo %ADMIN_PASSWORD%
# Roles
$BIN_DIR/keystone-manage $* role add Admin
$BIN_DIR/keystone-manage $* role add Member
$BIN_DIR/keystone-manage $* role add KeystoneAdmin
$BIN_DIR/keystone-manage $* role add KeystoneServiceAdmin
$BIN_DIR/keystone-manage $* role grant Admin admin admin
$BIN_DIR/keystone-manage $* role grant Member demo demo
$BIN_DIR/keystone-manage $* role grant Member demo invisible_to_admin
$BIN_DIR/keystone-manage $* role grant Admin admin demo
$BIN_DIR/keystone-manage $* role grant Admin admin
$BIN_DIR/keystone-manage $* role grant KeystoneAdmin admin
$BIN_DIR/keystone-manage $* role grant KeystoneServiceAdmin admin
# Services
$BIN_DIR/keystone-manage $* service add nova compute "Nova Compute Service"
$BIN_DIR/keystone-manage $* service add glance image "Glance Image Service"
$BIN_DIR/keystone-manage $* service add keystone identity "Keystone Identity Service"
#endpointTemplates
$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne nova http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% 1 1
$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne glance http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% 1 1
$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone http://%HOST_IP%:5000/v2.0 http://%HOST_IP%:35357/v2.0 http://%HOST_IP%:5000/v2.0 1 1
# $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%HOST_IP%:8080/v1/AUTH_%tenant_id% http://%HOST_IP%:8080/ http://%HOST_IP%:8080/v1/AUTH_%tenant_id% 1 1
# Tokens
$BIN_DIR/keystone-manage $* token add %SERVICE_TOKEN% admin admin 2015-02-05T00:00
# EC2 related creds - note we are setting the token to user_password
# but keystone doesn't parse them - it is just a blob from keystone's
# point of view
$BIN_DIR/keystone-manage $* credentials add admin EC2 'admin_%ADMIN_PASSWORD%' admin admin || echo "no support for adding credentials"
$BIN_DIR/keystone-manage $* credentials add demo EC2 'demo_%ADMIN_PASSWORD%' demo demo || echo "no support for adding credentials"

21
files/pips/dash Normal file

@@ -0,0 +1,21 @@
nose==1.0.0
Django==1.3
django-nose==0.1.2
django-mailer
django-registration==0.7
kombu
python-cloudfiles
python-dateutil
routes
webob
sqlalchemy
paste
PasteDeploy
sqlalchemy-migrate
eventlet
xattr
pep8
pylint
-e git+https://github.com/jacobian/openstack.compute.git#egg=openstack

1
files/pips/keystone Normal file

@@ -0,0 +1 @@
PassLib

9
files/screenrc Normal file

@@ -0,0 +1,9 @@
hardstatus on
hardstatus alwayslastline
hardstatus string "%{.bW}%-w%{.rW}%n %t%{-}%+w %=%{..G}%H %{..Y}%d/%m %c"
defscrollback 1024
vbell off
startup_message off

9
files/sources.list Normal file

@@ -0,0 +1,9 @@
deb http://us.archive.ubuntu.com/ubuntu/ natty main restricted
deb http://us.archive.ubuntu.com/ubuntu/ natty-updates main restricted
deb http://us.archive.ubuntu.com/ubuntu/ natty universe
deb http://us.archive.ubuntu.com/ubuntu/ natty-updates universe
deb http://us.archive.ubuntu.com/ubuntu/ natty multiverse
deb http://us.archive.ubuntu.com/ubuntu/ natty-updates multiverse
deb http://security.ubuntu.com/ubuntu natty-security main restricted
deb http://security.ubuntu.com/ubuntu natty-security universe
deb http://security.ubuntu.com/ubuntu natty-security multiverse

1
files/sudo/nova Normal file

@@ -0,0 +1 @@
socat

93
lxc_network_hostonlyplusnat.sh Executable file

@@ -0,0 +1,93 @@
#!/bin/bash
# Print some usage info
function usage {
echo "Usage: $0 [OPTION] [host_ip]"
echo "Set up temporary networking for LXC"
echo ""
echo " -n, --dry-run Just print the commands that would execute."
echo " -h, --help Print this usage message."
echo ""
exit
}
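# Example: preview the commands with -n, then apply them for real
# (address illustrative):
#   ./lxc_network_hostonlyplusnat.sh -n 192.168.0.10
#   sudo ./lxc_network_hostonlyplusnat.sh 192.168.0.10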
# Allow passing the ip address on the command line.
function process_option {
case "$1" in
-h|--help) usage;;
-n|--dry-run) dry_run=1;;
*) host_ip="$1"
esac
}
# Set up some defaults
host_ip=
dry_run=0
bridge=br0
DRIER=
# Process the args
for arg in "$@"; do
process_option $arg
done
if [ "$dry_run" -eq 1 ]; then
DRIER=echo
fi
if [ "$UID" -ne "0" ]; then
echo "This script must be run with root privileges."
exit 1
fi
# Check for bridge-utils.
BRCTL=`which brctl`
if [ ! -x "$BRCTL" ]; then
echo "This script requires you to install bridge-utils."
echo "Try: sudo apt-get install bridge-utils."
exit 1
fi
# Scare off the nubs.
echo "====================================================="
echo
echo "WARNING"
echo
echo "This script will modify your current network setup,"
echo "this can be a scary thing and it is recommended that"
echo "you have something equivalent to physical access to"
echo "this machine before continuing in case your network"
echo "gets all funky."
echo
echo "If you don't want to continue, hit CTRL-C now."
if [ -z "$host_ip" ];
then
echo "Otherwise, please type in your host's ip address and"
echo "hit enter."
echo
echo "====================================================="
read host_ip
else
echo "Otherwise hit enter."
echo
echo "====================================================="
read accept
fi
# Add a bridge interface, this will choke if there is already
# a bridge named $bridge
$DRIER $BRCTL addbr $bridge
$DRIER ip addr add 192.168.1.1/24 dev $bridge
if [ "$dry_run" -eq 1 ]; then
echo "echo 1 > /proc/sys/net/ipv4/ip_forward"
else
echo 1 > /proc/sys/net/ipv4/ip_forward
fi
$DRIER ifconfig $bridge up
# Set up the NAT for the instances
$DRIER iptables -t nat -A POSTROUTING -s 192.168.1.0/24 -j SNAT --to-source $host_ip
$DRIER iptables -I FORWARD -s 192.168.1.0/24 -j ACCEPT

178
make_image.sh Executable file

@@ -0,0 +1,178 @@
#!/bin/bash
# make_image.sh - Create Ubuntu images in various formats
#
# Supported formats: qcow (kvm), vmdk (vmserver), vdi (vbox), vhd (vpc), raw
#
# Requires sudo to root
ROOTSIZE=${ROOTSIZE:-8192}
SWAPSIZE=${SWAPSIZE:-1024}
MIN_PKGS=${MIN_PKGS:-"apt-utils gpgv openssh-server"}
usage() {
echo "Usage: $0 - Create Ubuntu images"
echo ""
echo "$0 [-m] [-r rootsize] [-s swapsize] release format"
echo "$0 -C [-m] release chrootdir"
echo "$0 -I [-r rootsize] [-s swapsize] chrootdir format"
echo ""
echo "-C - Create the initial chroot dir"
echo "-I - Create the final image from a chroot"
echo "-m - minimal installation"
echo "-r size - root fs size in MB"
echo "-s size - swap fs size in MB"
echo "release - Ubuntu release: jaunty - oneiric"
echo "format - image format: qcow2, vmdk, vdi, vhd, xen, raw, fs"
exit 1
}
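# Examples (illustrative): build a natty qcow2 image for kvm, or just the
# chroot for later reuse (build_nfs.sh invokes the -C form the same way):
#   sudo ./make_image.sh natty qcow2
#   sudo ./make_image.sh -C natty /var/cache/devstack/natty-base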
while getopts CIhmr:s: c; do
case $c in
C) CHROOTONLY=1
;;
I) IMAGEONLY=1
;;
h) usage
;;
m) MINIMAL=1
;;
r) ROOTSIZE=$OPTARG
;;
s) SWAPSIZE=$OPTARG
;;
esac
done
shift `expr $OPTIND - 1`
if [ ! "$#" -eq "2" -o -n "$CHROOTONLY" -a -n "$IMAGEONLY" ]; then
usage
fi
# Default args
RELEASE=$1
FORMAT=$2
CHROOTDIR=""
if [ -n "$CHROOTONLY" ]; then
RELEASE=$1
CHROOTDIR=$2
FORMAT="pass"
fi
if [ -n "$IMAGEONLY" ]; then
CHROOTDIR=$1
FORMAT=$2
RELEASE="pass"
fi
case $FORMAT in
kvm|qcow2) FORMAT=qcow2
QFORMAT=qcow2
HYPER=kvm
;;
vmserver|vmdk)
FORMAT=vmdk
QFORMAT=vmdk
HYPER=vmserver
;;
vbox|vdi) FORMAT=vdi
QFORMAT=vdi
HYPER=kvm
;;
vhd|vpc) FORMAT=vhd
QFORMAT=vpc
HYPER=kvm
;;
xen) FORMAT=raw
QFORMAT=raw
HYPER=xen
;;
raw) FORMAT=raw
QFORMAT=raw
HYPER=kvm
;;
pass) ;;
*) echo "Unknown format: $FORMAT"
usage
esac
case $RELEASE in
natty) ;;
maverick) ;;
lucid) ;;
karmic) ;;
jaunty) ;;
pass) ;;
*) echo "Unknown release: $RELEASE"
usage
;;
esac
# Install stuff if necessary
if [ -z `which vmbuilder` ]; then
sudo apt-get install ubuntu-vm-builder
fi
if [ -n "$CHROOTONLY" ]; then
# Build a chroot directory
HYPER=kvm
if [ "$MINIMAL" = 1 ]; then
ARGS="--variant=minbase"
fi
# vmbuilder's --addpkg takes a single package, so expand $MIN_PKGS one
# package at a time rather than passing the whole list as one argument
for i in $MIN_PKGS; do
ARGS="$ARGS --addpkg=$i"
done
sudo vmbuilder $HYPER ubuntu $ARGS \
--suite $RELEASE \
--only-chroot \
--chroot-dir=$CHROOTDIR \
--overwrite
sudo cp -p files/sources.list $CHROOTDIR/etc/apt/sources.list
sudo chroot $CHROOTDIR apt-get update
exit 0
fi
# Build the image
TMPDIR=tmp
TMPDISK=`mktemp imgXXXXXXXX`
SIZE=$[$ROOTSIZE+$SWAPSIZE+1]
dd if=/dev/null of=$TMPDISK bs=1M seek=$SIZE count=1
if [ -n "$IMAGEONLY" ]; then
# Build image from chroot
sudo vmbuilder $HYPER ubuntu $ARGS \
--existing-chroot=$CHROOTDIR \
--overwrite \
--rootsize=$ROOTSIZE \
--swapsize=$SWAPSIZE \
--tmpfs - \
--raw=$TMPDISK
else
# Do the whole shebang in one pass
ARGS="--variant=minbase"
for i in $MIN_PKGS; do
ARGS="$ARGS --addpkg=$i"
done
sudo vmbuilder $HYPER ubuntu $ARGS \
--suite $RELEASE \
--overwrite \
--rootsize=$ROOTSIZE \
--swapsize=$SWAPSIZE \
--tmpfs - \
--raw=$TMPDISK
fi
if [ "$FORMAT" = "raw" ]; then
# Get image
mv $TMPDISK $RELEASE.$FORMAT
else
# Convert image
qemu-img convert -O $QFORMAT $TMPDISK $RELEASE.$FORMAT
rm $TMPDISK
fi
rm -rf ubuntu-$HYPER

672
stack.sh Executable file

@@ -0,0 +1,672 @@
#!/usr/bin/env bash
# **stack.sh** is an opinionated openstack developer installation.
# This script installs and configures *nova*, *glance*, *dashboard* and *keystone*.
# It allows you to specify which git repositories to use, which services to
# enable, the network configuration and various passwords. If you are crafty
# you can run the script on multiple nodes using
# shared settings for common resources (mysql, rabbitmq) and build a multi-node
# developer install.
# To keep this script simple we assume you are running on an **Ubuntu 11.04
# Natty** machine. It should work in a VM or physical server. Additionally we
# put the list of *apt* and *pip* dependencies and other configuration files in
# this repo. So start by grabbing this script and the dependencies.
# Learn more and get the most recent version at http://devstack.org
# Sanity Check
# ============
# Record the start time. This allows us to print how long this script takes to run.
START_TIME=`python -c "import time; print time.time()"`
# Warn users who aren't on natty, but allow them to override the check and attempt
# installation with ``FORCE=yes ./stack``
if ! grep -q natty /etc/lsb-release; then
echo "WARNING: this script has only been tested on natty"
if [[ "$FORCE" != "yes" ]]; then
echo "If you wish to run this script anyway run with FORCE=yes"
exit 1
fi
fi
# stack.sh keeps the list of **apt** and **pip** dependencies in external
# files, along with config templates and other useful files. You can find these
# in the ``files`` directory (next to this script). We will reference this
# directory using the ``FILES`` variable in this script.
FILES=`pwd`/files
if [ ! -d $FILES ]; then
echo "ERROR: missing devstack/files - did you grab more than just stack.sh?"
exit 1
fi
# OpenStack is designed to be run as a regular user (Dashboard will fail to run
# as root, since apache refuses to serve content as the root user). If
# stack.sh is run as root, it automatically creates a stack user with
# sudo privileges and runs as that user.
if [[ $EUID -eq 0 ]]; then
echo "You are running this script as root."
# since this script runs as a normal user, we need to give that user
# ability to run sudo
apt-get update
apt-get install -qqy sudo
if ! getent passwd | grep -q stack; then
echo "Creating a user called stack"
useradd -U -G sudo -s /bin/bash -m stack
fi
echo "Giving stack user passwordless sudo privileges"
echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
echo "Copying files to stack user"
cp -r -f `pwd` /home/stack/
THIS_DIR=$(basename $(dirname $(readlink -f $0)))
chown -R stack /home/stack/$THIS_DIR
echo "Running the script as stack in 3 seconds..."
sleep 3
if [[ "$SHELL_AFTER_RUN" != "no" ]]; then
exec su -c "cd /home/stack/$THIS_DIR/; bash stack.sh; bash" stack
else
exec su -c "cd /home/stack/$THIS_DIR/; bash stack.sh" stack
fi
exit 0
fi
# So that errors don't compound, we exit on any error so that you see only the
# first error that occurred.
set -o errexit
# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following along as the install occurs.
set -o xtrace
# Settings
# ========
# This script is customizable through setting environment variables. If you
# want to override a setting you can either::
#
# export MYSQL_PASS=anothersecret
# ./stack.sh
#
# You can also pass options on a single line ``MYSQL_PASS=simple ./stack.sh``
#
# Additionally, you can put any local variables into a ``localrc`` file, like::
#
# MYSQL_PASS=anothersecret
# MYSQL_USER=hellaroot
#
# We try to have sensible defaults, so you should be able to run ``./stack.sh``
# in most cases.
#
# We source our settings from ``stackrc``. This file is distributed with devstack and
# contains locations for what repositories to use. If you want to use other
# repositories and branches, you can add your own settings with another file
# called ``localrc``
#
# If ``localrc`` exists, then ``stackrc`` will load those settings. This is
# useful for changing a branch or repository to test other versions. Also you
# can store your other settings like **MYSQL_PASS** or **ADMIN_PASSWORD** instead
# of letting devstack generate random ones for you.
source ./stackrc
# Destination path for installation ``DEST``
DEST=${DEST:-/opt/stack}
sudo mkdir -p $DEST
sudo chown `whoami` $DEST
# Set the destination directories for openstack projects
NOVA_DIR=$DEST/nova
DASH_DIR=$DEST/dash
GLANCE_DIR=$DEST/glance
KEYSTONE_DIR=$DEST/keystone
NOVACLIENT_DIR=$DEST/python-novaclient
OPENSTACKX_DIR=$DEST/openstackx
NOVNC_DIR=$DEST/noVNC
# Specify which services to launch. These generally correspond to screen tabs
ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,dash,mysql,rabbit}
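# For example (illustrative), the head node in build_lxc_multi.sh runs
# everything except the compute services:
#   ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,dash,mysql,rabbit ./stack.sh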
# Nova hypervisor configuration. We default to **kvm** but will drop back to
# **qemu** if we are unable to load the kvm module. Stack.sh can also install
# an **LXC** based system.
LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
# nova supports pluggable schedulers. ``SimpleScheduler`` should work in most
# cases unless you are working on multi-zone mode.
SCHEDULER=${SCHEDULER:-nova.scheduler.simple.SimpleScheduler}
# Use the first IP unless an explicit address is set via the ``HOST_IP`` environment variable
if [ ! -n "$HOST_IP" ]; then
HOST_IP=`LC_ALL=C /sbin/ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'`
fi
# Nova Network Configuration
# --------------------------
# FIXME: more documentation about why these are important flags. Also
# we should make sure we use the same variable names as the flag names.
PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-eth0}
FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.1/28}
NET_MAN=${NET_MAN:-FlatDHCPManager}
EC2_DMZ_HOST=${EC2_DMZ_HOST:-$HOST_IP}
FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-br100}
VLAN_INTERFACE=${VLAN_INTERFACE:-$PUBLIC_INTERFACE}
# Multi-host is a mode where each compute node runs its own network node. This
# allows network operations and routing for a VM to occur on the server that is
# running the VM - removing a SPOF and bandwidth bottleneck.
MULTI_HOST=${MULTI_HOST:-0}
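# For example (illustrative), a multi-host compute node runs its own network
# and api services alongside compute:
#   MULTI_HOST=1 ENABLED_SERVICES=n-cpu,n-net,n-api ./stack.sh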
# If you are using FlatDHCP on multiple hosts, set the ``FLAT_INTERFACE``
# variable but make sure that the interface doesn't already have an
# ip or you risk breaking things.
#
# **DHCP Warning**: If your flat interface device uses DHCP, there will be a
# hiccup while the network is moved from the flat interface to the flat network
# bridge. This will happen when you launch your first instance. Upon launch
# you will lose all connectivity to the node, and the vm launch will probably
# fail.
#
# If you are running on a single node and don't need to access the VMs from
# devices other than that node, you can set the flat interface to the same
# value as ``FLAT_NETWORK_BRIDGE``. This will stop the network hiccup from
# occurring.
FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}
## FIXME(ja): should/can we check that FLAT_INTERFACE is sane?
# MySQL & RabbitMQ
# ----------------
# We configure Nova, Dashboard, Glance and Keystone to use MySQL as their
# database server. While they share a single server, each has its own
# database and tables.
# By default this script will install and configure MySQL. If you want to
# use an existing server, you can pass in the user/password/host parameters.
# You will need to send the same ``MYSQL_PASS`` to every host if you are doing
# a multi-node devstack installation.
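# For example (illustrative), pointing this node at an existing shared server:
#   MYSQL_HOST=192.168.1.52 MYSQL_USER=nova MYSQL_PASS=shared-secret ./stack.sh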
MYSQL_USER=${MYSQL_USER:-root}
MYSQL_PASS=${MYSQL_PASS:-`openssl rand -hex 12`}
MYSQL_HOST=${MYSQL_HOST:-localhost}
# don't specify /db in this string, so we can use it for multiple services
BASE_SQL_CONN=${BASE_SQL_CONN:-mysql://$MYSQL_USER:$MYSQL_PASS@$MYSQL_HOST}
# Rabbit connection info
RABBIT_HOST=${RABBIT_HOST:-localhost}
RABBIT_PASSWORD=${RABBIT_PASSWORD:-`openssl rand -hex 12`}
# Glance connection info. Note the port must be specified.
GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$HOST_IP:9292}
# Keystone
# --------
# Service Token - Openstack components need to have an admin token
# to validate user tokens.
SERVICE_TOKEN=${SERVICE_TOKEN:-`openssl rand -hex 12`}
# Dash currently truncates usernames and passwords at 20 characters
# so use 10 bytes
ADMIN_PASSWORD=${ADMIN_PASSWORD:-`openssl rand -hex 10`}
# Install Packages
# ================
#
# Openstack uses a fair number of other projects.
# install apt requirements
sudo apt-get install -qqy `cat $FILES/apts/* | cut -d\# -f1 | grep -Ev "mysql-server|rabbitmq-server"`
# install python requirements
sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install `cat $FILES/pips/*`
# git clone only if directory doesn't exist already. Since ``DEST`` might not
# be owned by the installation user, we create the directory and change the
# ownership to the proper user.
function git_clone {
if [ ! -d $2 ]; then
sudo mkdir $2
sudo chown `whoami` $2
git clone $1 $2
cd $2
# This checkout syntax works for both branches and tags
git checkout $3
fi
}
# compute service
git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
# image catalog service
git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH
# unified auth system (manages accounts/tokens)
git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH
# a websockets/html5 or flash powered VNC console for vm instances
git_clone $NOVNC_REPO $NOVNC_DIR $NOVNC_BRANCH
# django powered web control panel for openstack
git_clone $DASH_REPO $DASH_DIR $DASH_BRANCH $DASH_TAG
# python client library to nova that dashboard (and others) use
git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH
# openstackx is a collection of extensions to openstack.compute & nova
# that is *deprecated*. The code is being moved into python-novaclient & nova.
git_clone $OPENSTACKX_REPO $OPENSTACKX_DIR $OPENSTACKX_BRANCH
# Initialization
# ==============
# setup our checkouts so they are installed into python path
# allowing ``import nova`` or ``import glance.client``
cd $NOVA_DIR; sudo python setup.py develop
cd $NOVACLIENT_DIR; sudo python setup.py develop
cd $KEYSTONE_DIR; sudo python setup.py develop
cd $GLANCE_DIR; sudo python setup.py develop
cd $OPENSTACKX_DIR; sudo python setup.py develop
cd $DASH_DIR/django-openstack; sudo python setup.py develop
cd $DASH_DIR/openstack-dashboard; sudo python setup.py develop
# Add a useful screenrc. This isn't required to run openstack but we do it
# since we are going to run the services in screen for simplicity.
cp $FILES/screenrc ~/.screenrc
## TODO: update current user to allow sudo for all commands in files/sudo/*
# Rabbit
# ------
if [[ "$ENABLED_SERVICES" =~ "rabbit" ]]; then
# Install and start rabbitmq-server
sudo apt-get install -y -q rabbitmq-server
# change the rabbit password since the default is "guest"
sudo rabbitmqctl change_password guest $RABBIT_PASSWORD
fi
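# A quick manual check that rabbit is up (illustrative, not run by this
# script):
#   sudo rabbitmqctl status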
# MySQL
# -----
if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then
# Seed configuration with mysql password so that apt-get install doesn't
# prompt us for a password upon install.
cat <<MYSQL_PRESEED | sudo debconf-set-selections
mysql-server-5.1 mysql-server/root_password password $MYSQL_PASS
mysql-server-5.1 mysql-server/root_password_again password $MYSQL_PASS
mysql-server-5.1 mysql-server/start_on_boot boolean true
MYSQL_PRESEED
# Install and start mysql-server
sudo apt-get -y -q install mysql-server
# Update the DB to give user $MYSQL_USER@% full control of all databases:
sudo mysql -uroot -p$MYSQL_PASS -e "GRANT ALL PRIVILEGES ON *.* TO '$MYSQL_USER'@'%' identified by '$MYSQL_PASS';"
# Edit /etc/mysql/my.cnf to change bind-address from localhost (127.0.0.1) to any (0.0.0.0) and restart the mysql service:
sudo sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf
sudo service mysql restart
fi
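# To verify the server accepts the generated credentials over TCP
# (illustrative, not run by this script):
#   mysql -u$MYSQL_USER -p$MYSQL_PASS -h127.0.0.1 -e 'SELECT 1;'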
# Dashboard
# ---------
# Setup the django dashboard application to serve via apache/wsgi
if [[ "$ENABLED_SERVICES" =~ "dash" ]]; then
# Dash currently imports quantum even if you aren't using it. Instead
# of installing quantum we can create a simple stub module that satisfies
# the initial imports
mkdir -p $DASH_DIR/openstack-dashboard/quantum || true
touch $DASH_DIR/openstack-dashboard/quantum/__init__.py
touch $DASH_DIR/openstack-dashboard/quantum/client.py
# ``local_settings.py`` is used to override dashboard default settings.
cp $FILES/dash_settings.py $DASH_DIR/openstack-dashboard/local/local_settings.py
cd $DASH_DIR/openstack-dashboard
dashboard/manage.py syncdb
# create an empty directory that apache uses as docroot
sudo mkdir -p $DASH_DIR/.blackhole
## Configure apache's 000-default to run dashboard
sudo cp $FILES/000-default.template /etc/apache2/sites-enabled/000-default
sudo sed -e "s,%USER%,$USER,g" -i /etc/apache2/sites-enabled/000-default
sudo sed -e "s,%DASH_DIR%,$DASH_DIR,g" -i /etc/apache2/sites-enabled/000-default
fi
# Glance
# ------
if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then
GLANCE_IMAGE_DIR=$DEST/glance/images
# Delete existing images
rm -rf $GLANCE_IMAGE_DIR
# Use local glance directories
mkdir -p $GLANCE_IMAGE_DIR
# (re)create glance database
mysql -u$MYSQL_USER -p$MYSQL_PASS -e 'DROP DATABASE IF EXISTS glance;'
mysql -u$MYSQL_USER -p$MYSQL_PASS -e 'CREATE DATABASE glance;'
# Copy over our glance-registry.conf
GLANCE_CONF=$GLANCE_DIR/etc/glance-registry.conf
cp $FILES/glance-registry.conf $GLANCE_CONF
sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/glance,g" -i $GLANCE_CONF
sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $GLANCE_CONF
sudo sed -e "s,%DEST%,$DEST,g" -i $GLANCE_CONF
GLANCE_API_CONF=$GLANCE_DIR/etc/glance-api.conf
cp $FILES/glance-api.conf $GLANCE_API_CONF
sudo sed -e "s,%DEST%,$DEST,g" -i $GLANCE_API_CONF
sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $GLANCE_API_CONF
fi
# Nova
# ----
# We are going to use the sample HTTP middleware configuration from the
# keystone project to launch nova. This paste config adds the configuration
# required for nova to validate keystone tokens - except we need to switch it
# to use our admin token instead of the token from their sample data.
sudo sed -e "s,999888777666,$SERVICE_TOKEN,g" -i $KEYSTONE_DIR/examples/paste/nova-api-paste.ini
if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then
# Virtualization Configuration
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# attempt to load modules: network block device - used to manage qcow images
sudo modprobe nbd || true
# Check for kvm (hardware based virtualization). If unable to load kvm,
# set the libvirt type to qemu. Note: many systems come with hardware
# virtualization disabled in BIOS.
if [[ "$LIBVIRT_TYPE" == "kvm" ]]; then
sudo modprobe kvm || true
if [ ! -e /dev/kvm ]; then
echo "WARNING: Switching to QEMU"
LIBVIRT_TYPE=qemu
fi
fi
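# (A quick way to confirm hardware virtualization support is
# ``egrep -c '(vmx|svm)' /proc/cpuinfo`` - a non-zero count means the CPU
# advertises kvm-capable extensions, though BIOS may still have them disabled.)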
# Install and configure **LXC** if specified. LXC is another approach to
# splitting a system into many smaller parts. LXC uses cgroups and chroot
# to simulate multiple systems.
if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
sudo apt-get install lxc -y
# lxc requires cgroups to be configured on /cgroup
sudo mkdir -p /cgroup
if ! grep -q cgroup /etc/fstab; then
echo none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0 | sudo tee -a /etc/fstab
fi
if ! mount -n | grep -q cgroup; then
sudo mount /cgroup
fi
fi
# User needs to be member of libvirtd group for nova-compute to use libvirt.
sudo usermod -a -G libvirtd `whoami`
# if kvm wasn't running before we need to restart libvirt to enable it
sudo /etc/init.d/libvirt-bin restart
# Instance Storage
# ~~~~~~~~~~~~~~~~
# Nova stores each instance in its own directory.
mkdir -p $NOVA_DIR/instances
# if there is a partition labeled nova-instances use it (ext filesystems
# can be labeled via e2label)
## FIXME: if already mounted this blows up...
if [ -L /dev/disk/by-label/nova-instances ]; then
sudo mount -L nova-instances $NOVA_DIR/instances
sudo chown -R `whoami` $NOVA_DIR/instances
fi
# Clean out the instances directory.
sudo rm -rf $NOVA_DIR/instances/*
fi
if [[ "$ENABLED_SERVICES" =~ "n-net" ]]; then
# delete traces of nova networks from prior runs
sudo killall dnsmasq || true
rm -rf $NOVA_DIR/networks
mkdir -p $NOVA_DIR/networks
fi
function add_nova_flag {
echo "$1" >> $NOVA_DIR/bin/nova.conf
}
# (re)create nova.conf
rm -f $NOVA_DIR/bin/nova.conf
add_nova_flag "--verbose"
add_nova_flag "--nodaemon"
add_nova_flag "--scheduler_driver=$SCHEDULER"
add_nova_flag "--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf"
add_nova_flag "--network_manager=nova.network.manager.$NET_MAN"
add_nova_flag "--my_ip=$HOST_IP"
add_nova_flag "--public_interface=$PUBLIC_INTERFACE"
add_nova_flag "--vlan_interface=$VLAN_INTERFACE"
add_nova_flag "--sql_connection=$BASE_SQL_CONN/nova"
add_nova_flag "--libvirt_type=$LIBVIRT_TYPE"
add_nova_flag "--osapi_extensions_path=$OPENSTACKX_DIR/extensions"
add_nova_flag "--vncproxy_url=http://$HOST_IP:6080"
add_nova_flag "--vncproxy_wwwroot=$NOVNC_DIR/"
add_nova_flag "--api_paste_config=$KEYSTONE_DIR/examples/paste/nova-api-paste.ini"
add_nova_flag "--image_service=nova.image.glance.GlanceImageService"
add_nova_flag "--ec2_dmz_host=$EC2_DMZ_HOST"
add_nova_flag "--rabbit_host=$RABBIT_HOST"
add_nova_flag "--rabbit_password=$RABBIT_PASSWORD"
add_nova_flag "--glance_api_servers=$GLANCE_HOSTPORT"
add_nova_flag "--flat_network_bridge=$FLAT_NETWORK_BRIDGE"
if [ -n "$FLAT_INTERFACE" ]; then
add_nova_flag "--flat_interface=$FLAT_INTERFACE"
fi
if [ -n "$MULTI_HOST" ]; then
add_nova_flag "--multi_host=$MULTI_HOST"
fi
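# With the defaults above the generated ``nova.conf`` ends up looking
# something like (values depend on your environment):
#   --verbose
#   --nodaemon
#   --network_manager=nova.network.manager.FlatDHCPManager
#   --my_ip=192.168.1.50
#   --sql_connection=mysql://root:<MYSQL_PASS>@localhost/nova
#   ...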
# Nova Database
# ~~~~~~~~~~~~~
# All nova components talk to a central database. We will need to do this step
# only once for an entire cluster.
if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then
# (re)create nova database
mysql -u$MYSQL_USER -p$MYSQL_PASS -e 'DROP DATABASE IF EXISTS nova;'
mysql -u$MYSQL_USER -p$MYSQL_PASS -e 'CREATE DATABASE nova;'
# initialize the nova database schema
$NOVA_DIR/bin/nova-manage db sync
# create a small network
$NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE
# create some floating ips
$NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE
fi
# Keystone
# --------
if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
# (re)create keystone database
mysql -u$MYSQL_USER -p$MYSQL_PASS -e 'DROP DATABASE IF EXISTS keystone;'
mysql -u$MYSQL_USER -p$MYSQL_PASS -e 'CREATE DATABASE keystone;'
# FIXME (anthony) keystone should use keystone.conf.example
KEYSTONE_CONF=$KEYSTONE_DIR/etc/keystone.conf
cp $FILES/keystone.conf $KEYSTONE_CONF
sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/keystone,g" -i $KEYSTONE_CONF
sudo sed -e "s,%DEST%,$DEST,g" -i $KEYSTONE_CONF
# keystone_data.sh creates our admin user and our ``SERVICE_TOKEN``.
KEYSTONE_DATA=$KEYSTONE_DIR/bin/keystone_data.sh
cp $FILES/keystone_data.sh $KEYSTONE_DATA
sudo sed -e "s,%HOST_IP%,$HOST_IP,g" -i $KEYSTONE_DATA
sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $KEYSTONE_DATA
sudo sed -e "s,%ADMIN_PASSWORD%,$ADMIN_PASSWORD,g" -i $KEYSTONE_DATA
# initialize keystone with default users/endpoints
BIN_DIR=$KEYSTONE_DIR/bin bash $KEYSTONE_DATA
fi
# Launch Services
# ===============
# nova api crashes if we start it with a regular screen command,
# so send the start command by forcing text into the window.
# Only run the services specified in ``ENABLED_SERVICES``
# our screen helper to launch a service in a hidden named screen
function screen_it {
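# NL is a carriage return (octal 015): screen's ``stuff`` command types the
# text into the window and the trailing CR "presses enter" so it runs.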
NL=`echo -ne '\015'`
if [[ "$ENABLED_SERVICES" =~ "$1" ]]; then
screen -S nova -X screen -t $1
screen -S nova -p $1 -X stuff "$2$NL"
fi
}
# create a new named screen to run processes in
screen -d -m -S nova -t nova
sleep 1
# launch the glance registry service
if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then
screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=etc/glance-registry.conf"
fi
# launch the glance api and wait for it to answer before continuing
if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=etc/glance-api.conf"
while ! wget -q -O- http://$GLANCE_HOSTPORT; do
echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
sleep 1
done
fi
# launch keystone and wait for it to answer before continuing
if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF -d"
while ! wget -q -O- http://127.0.0.1:5000; do
echo "Waiting for keystone to start..."
sleep 1
done
fi
# launch the nova-api and wait for it to answer before continuing
if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api"
while ! wget -q -O- http://127.0.0.1:8774; do
echo "Waiting for nova-api to start..."
sleep 1
done
fi
# Launching nova-compute should be as simple as running ``nova-compute`` but
# we have to do a little more than that in our script. Since we add the group
# ``libvirtd`` to our user in this script, nova-compute would otherwise run
# within the context of our original shell (where our groups haven't been
# updated). We pipe the nova-compute command into ``newgrp`` so it executes
# with the new group active.
screen_it n-cpu "cd $NOVA_DIR && echo $NOVA_DIR/bin/nova-compute | newgrp libvirtd"
screen_it n-net "cd $NOVA_DIR && $NOVA_DIR/bin/nova-network"
screen_it n-sch "cd $NOVA_DIR && $NOVA_DIR/bin/nova-scheduler"
screen_it n-vnc "cd $NOVNC_DIR && ./utils/nova-wsproxy.py 6080 --web . --flagfile=../nova/bin/nova.conf"
screen_it dash "cd $DASH_DIR && sudo /etc/init.d/apache2 restart; sudo tail -f /var/log/apache2/error.log"
# Install Images
# ==============
# Upload a couple images to glance. **TTY** is a simple small image that lets
# you login to it with username/password of user/password. TTY is useful for
# testing basic functionality. We also include an Ubuntu cloud build of
# **Natty**. Natty uses cloud-init, supporting login via keypair and sending
# scripts as userdata.
#
# Read more about cloud-init at https://help.ubuntu.com/community/CloudInit
if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then
# create a directory for the downloaded image tarballs.
mkdir -p $FILES/images
# Debug Image (TTY)
# -----------------
# Downloads the image (ami/aki/ari style), then extracts it. Upon extraction
# we upload to glance with the glance cli tool. TTY is a stripped down
# version of ubuntu.
if [ ! -f $FILES/tty.tgz ]; then
wget -c http://images.ansolabs.com/tty.tgz -O $FILES/tty.tgz
fi
# extract ami-tty/image, aki-tty/image & ari-tty/image
tar -zxf $FILES/tty.tgz -C $FILES/images
# Use glance client to add the kernel, ramdisk and finally the root
# filesystem. We parse the results of the uploads to get glance IDs of the
# ramdisk and kernel and use them for the root filesystem.
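# ``glance add`` prints a line like ``Added new image with ID: 42`` (exact
# wording may vary by glance version), so we split on ":" and strip spaces to
# get the ID.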
RVAL=`glance add -A $SERVICE_TOKEN name="tty-kernel" is_public=true container_format=aki disk_format=aki < $FILES/images/aki-tty/image`
KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
RVAL=`glance add -A $SERVICE_TOKEN name="tty-ramdisk" is_public=true container_format=ari disk_format=ari < $FILES/images/ari-tty/image`
RAMDISK_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
glance add -A $SERVICE_TOKEN name="tty" is_public=true container_format=ami disk_format=ami kernel_id=$KERNEL_ID ramdisk_id=$RAMDISK_ID < $FILES/images/ami-tty/image
# Ubuntu 11.04 aka Natty
# ----------------------
# Downloaded from the ubuntu enterprise cloud images site. This
# image doesn't use the ramdisk functionality.
if [ ! -f $FILES/natty.tgz ]; then
wget -c http://uec-images.ubuntu.com/natty/current/natty-server-cloudimg-amd64.tar.gz -O $FILES/natty.tgz
fi
tar -zxf $FILES/natty.tgz -C $FILES/images
RVAL=`glance add -A $SERVICE_TOKEN name="uec-natty-kernel" is_public=true container_format=aki disk_format=aki < $FILES/images/natty-server-cloudimg-amd64-vmlinuz-virtual`
KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
glance add -A $SERVICE_TOKEN name="uec-natty" is_public=true container_format=ami disk_format=ami kernel_id=$KERNEL_ID < $FILES/images/natty-server-cloudimg-amd64.img
fi
# Using the cloud
# ===============
# If you installed the dashboard on this server, then you should be able
# to access the site using your browser.
if [[ "$ENABLED_SERVICES" =~ "dash" ]]; then
echo "dashboard is now available at http://$HOST_IP/"
fi
# If keystone is present, you can point the nova cli to this server
if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
echo "keystone is serving at http://$HOST_IP:5000/v2.0/"
echo "examples on using novaclient command line is in exercise.sh"
echo "the default users are: admin and demo"
echo "the password: $ADMIN_PASSWORD"
fi
# Fin
# ===
# End our timer and give a timing summary
END_TIME=`python -c "import time; print time.time()"`
ELAPSED=`python -c "print $END_TIME - $START_TIME"`
echo "stack.sh completed in $ELAPSED seconds."

33
stackrc Normal file

@@ -0,0 +1,33 @@
# compute service
NOVA_REPO=https://github.com/cloudbuilders/nova.git
NOVA_BRANCH=diablo
# image catalog service
GLANCE_REPO=https://github.com/cloudbuilders/glance.git
GLANCE_BRANCH=diablo
# unified auth system (manages accounts/tokens)
KEYSTONE_REPO=https://github.com/cloudbuilders/keystone.git
KEYSTONE_BRANCH=diablo
# a websockets/html5 or flash powered VNC console for vm instances
NOVNC_REPO=https://github.com/cloudbuilders/noVNC.git
NOVNC_BRANCH=master
# django powered web control panel for openstack
DASH_REPO=https://github.com/cloudbuilders/openstack-dashboard.git
DASH_BRANCH=master
# python client library to nova that dashboard (and others) use
NOVACLIENT_REPO=https://github.com/cloudbuilders/python-novaclient.git
NOVACLIENT_BRANCH=master
# openstackx is a collection of extensions to openstack.compute & nova
# that is *deprecated*. The code is being moved into python-novaclient & nova.
OPENSTACKX_REPO=https://github.com/cloudbuilders/openstackx.git
OPENSTACKX_BRANCH=diablo
# allow local overrides of env variables
if [ -f ./localrc ]; then
source ./localrc
fi
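# e.g. a localrc pinning nova to your own fork might contain (illustrative
# values):
#   NOVA_REPO=https://github.com/youruser/nova.git
#   NOVA_BRANCH=my-feature-branch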

154
tools/install_openvpn.sh Normal file

@@ -0,0 +1,154 @@
#!/bin/bash
# install_openvpn.sh - Install OpenVPN and generate required certificates
#
# install_openvpn.sh --client name
# install_openvpn.sh --server [name]
#
# name is used on the CN of the generated cert, and the filename of
# the configuration, certificate and key files.
#
# --server mode configures the host with a running OpenVPN server instance
# --client mode creates a tarball of a client configuration for this server
# VPN Config
VPN_SERVER=${VPN_SERVER:-`ifconfig eth0 | awk "/inet addr:/ { print \$2 }" | cut -d: -f2`} # 50.56.12.212
VPN_PROTO=${VPN_PROTO:-tcp}
VPN_PORT=${VPN_PORT:-6081}
VPN_DEV=${VPN_DEV:-tun}
VPN_CLIENT_NET=${VPN_CLIENT_NET:-172.16.28.0}
VPN_CLIENT_MASK=${VPN_CLIENT_MASK:-255.255.255.0}
VPN_LOCAL_NET=${VPN_LOCAL_NET:-10.0.0.0}
VPN_LOCAL_MASK=${VPN_LOCAL_MASK:-255.255.0.0}
VPN_DIR=/etc/openvpn
CA_DIR=/etc/openvpn/easy-rsa
usage() {
echo "$0 - OpenVPN install and certificate generation"
echo ""
echo "$0 --client name"
echo "$0 --server [name]"
echo ""
echo " --server mode configures the host with a running OpenVPN server instance"
echo " --client mode creates a tarball of a client configuration for this server"
exit 1
}
if [ -z "$1" ]; then
usage
fi
# Install OpenVPN
if [ ! -x "`which openvpn`" ]; then
apt-get install -y openvpn bridge-utils
fi
if [ ! -d $CA_DIR ]; then
cp -pR /usr/share/doc/openvpn/examples/easy-rsa/2.0/ $CA_DIR
fi
OPWD=`pwd`
cd $CA_DIR
source ./vars
# Override the defaults
export KEY_COUNTRY="US"
export KEY_PROVINCE="TX"
export KEY_CITY="SanAntonio"
export KEY_ORG="Cloudbuilders"
export KEY_EMAIL="rcb@lists.rackspace.com"
if [ ! -r $CA_DIR/keys/dh1024.pem ]; then
# Initialize a new CA
$CA_DIR/clean-all
$CA_DIR/build-dh
$CA_DIR/pkitool --initca
openvpn --genkey --secret $CA_DIR/keys/ta.key ## Build a TLS key
fi
do_server() {
NAME=$1
# Generate server certificate
$CA_DIR/pkitool --server $NAME
(cd $CA_DIR/keys;
cp $NAME.crt $NAME.key ca.crt dh1024.pem ta.key $VPN_DIR
)
cat >$VPN_DIR/$NAME.conf <<EOF
proto $VPN_PROTO
port $VPN_PORT
dev $VPN_DEV
cert $NAME.crt
key $NAME.key # This file should be kept secret
ca ca.crt
dh dh1024.pem
duplicate-cn
server $VPN_CLIENT_NET $VPN_CLIENT_MASK
ifconfig-pool-persist ipp.txt
push "route $VPN_LOCAL_NET $VPN_LOCAL_MASK"
comp-lzo
user nobody
group nobody
persist-key
persist-tun
status openvpn-status.log
EOF
/etc/init.d/openvpn restart
}
do_client() {
NAME=$1
# Generate a client certificate
$CA_DIR/pkitool $NAME
TMP_DIR=`mktemp -d`
(cd $CA_DIR/keys;
cp -p ca.crt ta.key $NAME.key $NAME.crt $TMP_DIR
)
if [ -r $VPN_DIR/hostname ]; then
HOST=`cat $VPN_DIR/hostname`
else
HOST=`hostname`
fi
cat >$TMP_DIR/$HOST.conf <<EOF
proto $VPN_PROTO
port $VPN_PORT
dev $VPN_DEV
cert $NAME.crt
key $NAME.key # This file should be kept secret
ca ca.crt
client
remote $VPN_SERVER $VPN_PORT
resolv-retry infinite
nobind
user nobody
group nobody
persist-key
persist-tun
comp-lzo
verb 3
EOF
(cd $TMP_DIR; tar cf $OPWD/$NAME.tar *)
rm -rf $TMP_DIR
echo "Client certificate and configuration is in $OPWD/$NAME.tar"
}
# Process command line args
case $1 in
--client) if [ -z "$2" ]; then
usage
fi
do_client $2
;;
--server) if [ -z "$2" ]; then
NAME=`hostname`
else
NAME=$2
# Save for --client use
echo $NAME >$VPN_DIR/hostname
fi
do_server $NAME
;;
--clean) $CA_DIR/clean-all
;;
*) usage
esac

90
upload_image.sh Executable file

@@ -0,0 +1,90 @@
#!/bin/bash
# upload_image.sh - Upload Ubuntu images (create if necessary) in various formats
# Supported formats: qcow (kvm), vmdk (vmserver), vdi (vbox), vhd (vpc)
# Requires sudo to root
usage() {
echo "$0 - Upload images to OpenStack"
echo ""
echo "$0 [-h host] [-p port] release format"
exit 1
}
HOST=${HOST:-localhost}
PORT=${PORT:-9292}
DEST=${DEST:-/opt/stack}
while getopts h:p: c; do
case $c in
h) HOST=$OPTARG
;;
p) PORT=$OPTARG
;;
esac
done
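# drop the parsed -h/-p options so $1 and $2 are the release and format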
shift `expr $OPTIND - 1`
RELEASE=$1
FORMAT=$2
case $FORMAT in
kvm|qcow2) FORMAT=qcow2
TARGET=kvm
;;
vmserver|vmdk)
FORMAT=vmdk
TARGET=vmserver
;;
vbox|vdi) TARGET=kvm
FORMAT=vdi
;;
vhd|vpc) TARGET=kvm
FORMAT=vhd
;;
*) echo "Unknown format: $FORMAT"
usage
esac
case $RELEASE in
natty) ;;
maverick) ;;
lucid) ;;
karmic) ;;
jaunty) ;;
*) if [ ! -r $RELEASE.$FORMAT ]; then
echo "Unknown release: $RELEASE"
usage
fi
;;
esac
GLANCE=`which glance`
if [ -z "$GLANCE" ]; then
if [ -x "$DEST/glance/bin/glance" ]; then
# Look for stack.sh's install
GLANCE="$DEST/glance/bin/glance"
else
# Install Glance client in $DEST
echo "Glance not found, must install client"
OWD=`pwd`
cd $DEST
sudo apt-get install python-pip python-eventlet python-routes python-greenlet python-argparse python-sqlalchemy python-wsgiref python-pastedeploy python-xattr
sudo pip install kombu
sudo git clone https://github.com/cloudbuilders/glance.git
cd glance
sudo python setup.py develop
cd $OWD
GLANCE=`which glance`
fi
fi
# Create image if it doesn't exist
if [ ! -r $RELEASE.$FORMAT ]; then
DIR=`dirname $0`
echo "$RELEASE.$FORMAT not found, creating..."
$DIR/make_image.sh $RELEASE $FORMAT
fi
# Upload the image
echo "Uploading image $RELEASE.$FORMAT to $HOST"
$GLANCE add name=$RELEASE.$FORMAT is_public=true disk_format=$FORMAT --host $HOST --port $PORT <$RELEASE.$FORMAT
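# e.g. to upload a natty qcow2 image to a glance on another host (illustrative
# address):
#   ./upload_image.sh -h 192.168.1.10 -p 9292 natty kvm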