Merge remote-tracking branch 'upstream/master' into rcb-master
commit
336e0001a6
@ -0,0 +1,3 @@
|
||||
proto
|
||||
*~
|
||||
localrc
|
@ -0,0 +1,42 @@
|
||||
Tool to quickly deploy openstack dev environments.
|
||||
|
||||
# Goals
|
||||
|
||||
* To quickly build dev openstack environments in clean natty environments
|
||||
* To describe working configurations of openstack (which code branches work together? what do config files look like for those branches?)
|
||||
* To make it easier for developers to dive into openstack so that they can productively contribute without having to understand every part of the system at once
|
||||
* To make it easy to prototype cross-project features
|
||||
|
||||
Be sure to carefully read these scripts before you run them as they install software and may alter your networking configuration.
|
||||
|
||||
# To start a dev cloud on your local machine (installing on a dedicated vm is safer!):
|
||||
|
||||
./stack.sh
|
||||
|
||||
If working correctly, you should be able to access openstack endpoints, like:
|
||||
|
||||
* Dashboard: http://myhost/
|
||||
* Keystone: http://myhost:5000/v2.0/
|
||||
|
||||
# To start a dev cloud in an lxc container:
|
||||
|
||||
./build_lxc.sh
|
||||
|
||||
You will need to configure a bridge and network on your host machine (by default br0) before starting build_lxc.sh. A sample host-only network configuration can be found in lxc_network_hostonlyplusnat.sh.
|
||||
|
||||
# Customizing
|
||||
|
||||
You can tweak environment variables by creating a file named 'localrc' should you need to override defaults. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host.
|
||||
|
||||
# Todo
|
||||
|
||||
* Add python-novaclient cli support
|
||||
* syslog
|
||||
* allow rabbit connection to be specified via environment variables with sensible defaults
|
||||
* Add volume support
|
||||
* Add quantum support
|
||||
|
||||
# Future
|
||||
|
||||
* idea: move from screen to tmux?
|
||||
* idea: create a live-cd / vmware preview image using this?
|
@ -0,0 +1,251 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Sanity check
|
||||
if [ "$EUID" -ne "0" ]; then
|
||||
echo "This script must be run with root privileges."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Warn users who aren't on natty
|
||||
if ! grep -q natty /etc/lsb-release; then
|
||||
echo "WARNING: this script has only been tested on natty"
|
||||
fi
|
||||
|
||||
# Source params
|
||||
source ./stackrc
|
||||
|
||||
# Store cwd
|
||||
CWD=`pwd`
|
||||
|
||||
# Configurable params
|
||||
BRIDGE=${BRIDGE:-br0}
|
||||
CONTAINER=${CONTAINER:-STACK}
|
||||
CONTAINER_IP=${CONTAINER_IP:-192.168.1.50}
|
||||
CONTAINER_CIDR=${CONTAINER_CIDR:-$CONTAINER_IP/24}
|
||||
CONTAINER_NETMASK=${CONTAINER_NETMASK:-255.255.255.0}
|
||||
CONTAINER_GATEWAY=${CONTAINER_GATEWAY:-192.168.1.1}
|
||||
NAMESERVER=${NAMESERVER:-$CONTAINER_GATEWAY}
|
||||
COPYENV=${COPYENV:-1}
|
||||
DEST=${DEST:-/opt/stack}
|
||||
|
||||
# Param string to pass to stack.sh. Like "EC2_DMZ_HOST=192.168.1.1 MYSQL_USER=nova"
|
||||
STACKSH_PARAMS=${STACKSH_PARAMS:-}
|
||||
|
||||
# Option to use the version of devstack on which we are currently working
|
||||
USE_CURRENT_DEVSTACK=${USE_CURRENT_DEVSTACK:-1}
|
||||
|
||||
|
||||
# Install deps
|
||||
apt-get install -y lxc debootstrap
|
||||
|
||||
# Install cgroup-bin from source, since the packaging is buggy and possibly incompatible with our setup
|
||||
if ! which cgdelete | grep -q cgdelete; then
|
||||
apt-get install -y g++ bison flex libpam0g-dev make
|
||||
wget http://sourceforge.net/projects/libcg/files/libcgroup/v0.37.1/libcgroup-0.37.1.tar.bz2/download -O /tmp/libcgroup-0.37.1.tar.bz2
|
||||
cd /tmp && bunzip2 libcgroup-0.37.1.tar.bz2 && tar xfv libcgroup-0.37.1.tar
|
||||
cd libcgroup-0.37.1
|
||||
./configure
|
||||
make install
|
||||
ldconfig
|
||||
fi
|
||||
|
||||
# Create lxc configuration
|
||||
LXC_CONF=/tmp/$CONTAINER.conf
|
||||
cat > $LXC_CONF <<EOF
|
||||
lxc.network.type = veth
|
||||
lxc.network.link = $BRIDGE
|
||||
lxc.network.flags = up
|
||||
lxc.network.ipv4 = $CONTAINER_CIDR
|
||||
# allow tap/tun devices
|
||||
lxc.cgroup.devices.allow = c 10:200 rwm
|
||||
EOF
|
||||
|
||||
# Shutdown any existing container
|
||||
lxc-stop -n $CONTAINER
|
||||
|
||||
# This kills zombie containers
|
||||
if [ -d /cgroup/$CONTAINER ]; then
|
||||
cgdelete -r cpu,net_cls:$CONTAINER
|
||||
fi
|
||||
|
||||
# git clone only if directory doesn't exist already. Since ``DEST`` might not
|
||||
# be owned by the installation user, we create the directory and change the
|
||||
# ownership to the proper user.
|
||||
function git_clone {
|
||||
if [ ! -d $2 ]; then
|
||||
sudo mkdir $2
|
||||
sudo chown `whoami` $2
|
||||
git clone $1 $2
|
||||
cd $2
|
||||
# This checkout syntax works for both branches and tags
|
||||
git checkout $3
|
||||
fi
|
||||
}
|
||||
|
||||
# Location of the base image directory
|
||||
CACHEDIR=/var/cache/lxc/natty/rootfs-amd64
|
||||
|
||||
# Provide option to do totally clean install
|
||||
if [ "$CLEAR_LXC_CACHE" = "1" ]; then
|
||||
rm -rf $CACHEDIR
|
||||
fi
|
||||
|
||||
# Warm the base image on first install
|
||||
if [ ! -f $CACHEDIR/bootstrapped ]; then
|
||||
# by deleting the container, we force lxc-create to re-bootstrap (lxc is
|
||||
# lazy and doesn't do anything if a container already exists)
|
||||
lxc-destroy -n $CONTAINER
|
||||
# trigger the initial debootstrap
|
||||
lxc-create -n $CONTAINER -t natty -f $LXC_CONF
|
||||
chroot $CACHEDIR apt-get update
|
||||
chroot $CACHEDIR apt-get install -y --force-yes `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server)"`
|
||||
chroot $CACHEDIR pip install `cat files/pips/*`
|
||||
touch $CACHEDIR/bootstrapped
|
||||
fi
|
||||
|
||||
# Clean out code repos if directed to do so
|
||||
if [ "$CLEAN" = "1" ]; then
|
||||
rm -rf $CACHEDIR/$DEST
|
||||
fi
|
||||
|
||||
# Cache openstack code
|
||||
mkdir -p $CACHEDIR/$DEST
|
||||
git_clone $NOVA_REPO $CACHEDIR/$DEST/nova $NOVA_BRANCH
|
||||
git_clone $GLANCE_REPO $CACHEDIR/$DEST/glance $GLANCE_BRANCH
|
||||
git_clone $KEYSTONE_REPO $CACHEDIR/$DESTkeystone $KEYSTONE_BRANCH
|
||||
git_clone $NOVNC_REPO $CACHEDIR/$DEST/novnc $NOVNC_BRANCH
|
||||
git_clone $DASH_REPO $CACHEDIR/$DEST/dash $DASH_BRANCH $DASH_TAG
|
||||
git_clone $NOVACLIENT_REPO $CACHEDIR/$DEST/python-novaclient $NOVACLIENT_BRANCH
|
||||
git_clone $OPENSTACKX_REPO $CACHEDIR/$DEST/openstackx $OPENSTACKX_BRANCH
|
||||
|
||||
# Use this version of devstack?
|
||||
if [ "$USE_CURRENT_DEVSTACK" = "1" ]; then
|
||||
rm -rf $CACHEDIR/$DEST/devstack
|
||||
cp -pr $CWD $CACHEDIR/$DEST/devstack
|
||||
fi
|
||||
|
||||
# Destroy the old container
|
||||
lxc-destroy -n $CONTAINER
|
||||
|
||||
# If this call is to TERMINATE the container then exit
|
||||
if [ "$TERMINATE" = "1" ]; then
|
||||
exit
|
||||
fi
|
||||
|
||||
# Create the container
|
||||
lxc-create -n $CONTAINER -t natty -f $LXC_CONF
|
||||
|
||||
# Specify where our container rootfs lives
|
||||
ROOTFS=/var/lib/lxc/$CONTAINER/rootfs/
|
||||
|
||||
# Create a stack user that is a member of the libvirtd group so that stack
|
||||
# is able to interact with libvirt.
|
||||
chroot $ROOTFS groupadd libvirtd
|
||||
chroot $ROOTFS useradd stack -s /bin/bash -d $DEST -G libvirtd
|
||||
|
||||
# a simple password - pass
|
||||
echo stack:pass | chroot $ROOTFS chpasswd
|
||||
|
||||
# and has sudo ability (in the future this should be limited to only what
|
||||
# stack requires)
|
||||
echo "stack ALL=(ALL) NOPASSWD: ALL" >> $ROOTFS/etc/sudoers
|
||||
|
||||
# Copy kernel modules
|
||||
mkdir -p $ROOTFS/lib/modules/`uname -r`/kernel
|
||||
cp -p /lib/modules/`uname -r`/modules.dep $ROOTFS/lib/modules/`uname -r`/
|
||||
cp -pR /lib/modules/`uname -r`/kernel/net $ROOTFS/lib/modules/`uname -r`/kernel/
|
||||
|
||||
# Gracefully cp only if source file/dir exists
|
||||
function cp_it {
|
||||
if [ -e $1 ] || [ -d $1 ]; then
|
||||
cp -pRL $1 $2
|
||||
fi
|
||||
}
|
||||
|
||||
# Copy over your ssh keys and env if desired
|
||||
if [ "$COPYENV" = "1" ]; then
|
||||
cp_it ~/.ssh $ROOTFS/$DEST/.ssh
|
||||
cp_it ~/.ssh/id_rsa.pub $ROOTFS/$DEST/.ssh/authorized_keys
|
||||
cp_it ~/.gitconfig $ROOTFS/$DEST/.gitconfig
|
||||
cp_it ~/.vimrc $ROOTFS/$DEST/.vimrc
|
||||
cp_it ~/.bashrc $ROOTFS/$DEST/.bashrc
|
||||
fi
|
||||
|
||||
# Make our ip address hostnames look nice at the command prompt
|
||||
echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $ROOTFS/$DEST/.bashrc
|
||||
echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $ROOTFS/etc/profile
|
||||
|
||||
# Give stack ownership over $DEST so it may do the work needed
|
||||
chroot $ROOTFS chown -R stack $DEST
|
||||
|
||||
# Configure instance network
|
||||
INTERFACES=$ROOTFS/etc/network/interfaces
|
||||
cat > $INTERFACES <<EOF
|
||||
auto lo
|
||||
iface lo inet loopback
|
||||
|
||||
auto eth0
|
||||
iface eth0 inet static
|
||||
address $CONTAINER_IP
|
||||
netmask $CONTAINER_NETMASK
|
||||
gateway $CONTAINER_GATEWAY
|
||||
EOF
|
||||
|
||||
# Configure the runner
|
||||
RUN_SH=$ROOTFS/$DEST/run.sh
|
||||
cat > $RUN_SH <<EOF
|
||||
#!/usr/bin/env bash
|
||||
# Make sure dns is set up
|
||||
echo "nameserver $NAMESERVER" | sudo resolvconf -a eth0
|
||||
sleep 1
|
||||
|
||||
# Kill any existing screens
|
||||
killall screen
|
||||
|
||||
# Install and run stack.sh
|
||||
sudo apt-get update
|
||||
sudo apt-get -y --force-yes install git-core vim-nox sudo
|
||||
if [ ! -d "$DEST/devstack" ]; then
|
||||
git clone git://github.com/cloudbuilders/devstack.git $DEST/devstack
|
||||
fi
|
||||
cd $DEST/devstack && $STACKSH_PARAMS ./stack.sh > /$DEST/run.sh.log
|
||||
echo >> /$DEST/run.sh.log
|
||||
echo >> /$DEST/run.sh.log
|
||||
echo "All done! Time to start clicking." >> /$DEST/run.sh.log
|
||||
EOF
|
||||
|
||||
# Make the run.sh executable
|
||||
chmod 755 $RUN_SH
|
||||
|
||||
# Make runner launch on boot
|
||||
RC_LOCAL=$ROOTFS/etc/rc.local
|
||||
cat > $RC_LOCAL <<EOF
|
||||
#!/bin/sh -e
|
||||
su -c "$DEST/run.sh" stack
|
||||
EOF
|
||||
|
||||
# Configure cgroup directory
|
||||
if ! mount | grep -q cgroup; then
|
||||
mkdir -p /cgroup
|
||||
mount none -t cgroup /cgroup
|
||||
fi
|
||||
|
||||
# Start our container
|
||||
lxc-start -d -n $CONTAINER
|
||||
|
||||
# Done creating the container, let's tail the log
|
||||
echo
|
||||
echo "============================================================="
|
||||
echo " -- YAY! --"
|
||||
echo "============================================================="
|
||||
echo
|
||||
echo "We're done creating the container, about to start tailing the"
|
||||
echo "stack.sh log. It will take a second or two to start."
|
||||
echo
|
||||
echo "Just CTRL-C at any time to stop tailing."
|
||||
|
||||
while [ ! -e "$ROOTFS/$DEST/run.sh.log" ]; do
|
||||
sleep 1
|
||||
done
|
||||
|
||||
tail -F $ROOTFS/$DEST/run.sh.log
|
@ -0,0 +1,39 @@
|
||||
#!/usr/bin/env bash
|
||||
# Head node host, which runs glance, api, keystone
|
||||
HEAD_HOST=${HEAD_HOST:-192.168.1.52}
|
||||
COMPUTE_HOSTS=${COMPUTE_HOSTS:-192.168.1.53,192.168.1.54}
|
||||
|
||||
# Networking params
|
||||
NAMESERVER=${NAMESERVER:-192.168.1.1}
|
||||
GATEWAY=${GATEWAY:-192.168.1.1}
|
||||
NETMASK=${NETMASK:-255.255.255.0}
|
||||
FLOATING_RANGE=${FLOATING_RANGE:-192.168.1.196/30}
|
||||
|
||||
# Setting this to 1 shuts down and destroys our containers without relaunching.
|
||||
TERMINATE=${TERMINATE:-0}
|
||||
|
||||
# Variables common amongst all hosts in the cluster
|
||||
COMMON_VARS="MYSQL_HOST=$HEAD_HOST RABBIT_HOST=$HEAD_HOST GLANCE_HOSTPORT=$HEAD_HOST:9292 NET_MAN=FlatDHCPManager FLAT_INTERFACE=eth0 FLOATING_RANGE=$FLOATING_RANGE MULTI_HOST=1"
|
||||
|
||||
# Helper to launch containers
|
||||
function run_lxc {
|
||||
# For some reason container names with periods can cause issues :/
|
||||
CONTAINER=$1 CONTAINER_IP=$2 CONTAINER_NETMASK=$NETMASK CONTAINER_GATEWAY=$GATEWAY NAMESERVER=$NAMESERVER TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $3" ./build_lxc.sh
|
||||
}
|
||||
|
||||
# Launch the head node - headnode uses a non-ip domain name,
|
||||
# because rabbit won't launch with an ip addr hostname :(
|
||||
run_lxc STACKMASTER $HEAD_HOST "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,dash,mysql,rabbit"
|
||||
|
||||
# Wait till the head node is up
|
||||
if [ ! "$TERMINATE" = "1" ]; then
|
||||
while ! wget -q -O- http://$HEAD_HOST | grep -q username; do
|
||||
echo "Waiting for head node ($HEAD_HOST) to start..."
|
||||
sleep 5
|
||||
done
|
||||
fi
|
||||
|
||||
# Launch the compute hosts
|
||||
for compute_host in ${COMPUTE_HOSTS//,/ }; do
|
||||
run_lxc $compute_host $compute_host "ENABLED_SERVICES=n-cpu,n-net,n-api"
|
||||
done
|
@ -0,0 +1,117 @@
|
||||
#!/bin/bash
|
||||
|
||||
PROGDIR=`dirname $0`
|
||||
CHROOTCACHE=${CHROOTCACHE:-/var/cache/devstack}
|
||||
|
||||
# Source params
|
||||
source ./stackrc
|
||||
|
||||
# Store cwd
|
||||
CWD=`pwd`
|
||||
|
||||
NAME=$1
|
||||
NFSDIR="/nfs/$NAME"
|
||||
DEST=${DEST:-/opt/stack}
|
||||
|
||||
# Option to use the version of devstack on which we are currently working
|
||||
USE_CURRENT_DEVSTACK=${USE_CURRENT_DEVSTACK:-1}
|
||||
|
||||
# remove old nfs filesystem if one exists
|
||||
rm -rf $DEST
|
||||
|
||||
# clean install of natty
|
||||
if [ ! -d $CHROOTCACHE/natty-base ]; then
|
||||
$PROGDIR/make_image.sh -C natty $CHROOTCACHE/natty-base
|
||||
# copy kernel modules...
|
||||
# NOTE(ja): is there a better way to do this?
|
||||
cp -pr /lib/modules/`uname -r` $CHROOTCACHE/natty-base/lib/modules
|
||||
# a simple password - pass
|
||||
echo root:pass | chroot $CHROOTCACHE/natty-base chpasswd
|
||||
fi
|
||||
|
||||
# prime natty with as many apt/pips as we can
|
||||
if [ ! -d $CHROOTCACHE/natty-dev ]; then
|
||||
rsync -azH $CHROOTCACHE/natty-base/ $CHROOTCACHE/natty-dev/
|
||||
chroot $CHROOTCACHE/natty-dev apt-get install -y `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server)"`
|
||||
chroot $CHROOTCACHE/natty-dev pip install `cat files/pips/*`
|
||||
|
||||
# Create a stack user that is a member of the libvirtd group so that stack
|
||||
# is able to interact with libvirt.
|
||||
chroot $CHROOTCACHE/natty-dev groupadd libvirtd
|
||||
chroot $CHROOTCACHE/natty-dev useradd stack -s /bin/bash -d $DEST -G libvirtd
|
||||
mkdir -p $CHROOTCACHE/natty-dev/$DEST
|
||||
chown stack $CHROOTCACHE/natty-dev/$DEST
|
||||
|
||||
# a simple password - pass
|
||||
echo stack:pass | chroot $CHROOTCACHE/natty-dev chpasswd
|
||||
|
||||
# and has sudo ability (in the future this should be limited to only what
|
||||
# stack requires)
|
||||
echo "stack ALL=(ALL) NOPASSWD: ALL" >> $CHROOTCACHE/natty-dev/etc/sudoers
|
||||
fi
|
||||
|
||||
# clone git repositories onto the system
|
||||
# ======================================
|
||||
|
||||
if [ ! -d $CHROOTCACHE/natty-stack ]; then
|
||||
rsync -azH $CHROOTCACHE/natty-dev/ $CHROOTCACHE/natty-stack/
|
||||
fi
|
||||
|
||||
# git clone only if directory doesn't exist already. Since ``DEST`` might not
|
||||
# be owned by the installation user, we create the directory and change the
|
||||
# ownership to the proper user.
|
||||
function git_clone {
|
||||
|
||||
# clone new copy or fetch latest changes
|
||||
CHECKOUT=$CHROOTCACHE/natty-stack$2
|
||||
if [ ! -d $CHECKOUT ]; then
|
||||
mkdir -p $CHECKOUT
|
||||
git clone $1 $CHECKOUT
|
||||
else
|
||||
pushd $CHECKOUT
|
||||
git fetch
|
||||
popd
|
||||
fi
|
||||
|
||||
# FIXME(ja): checkout specified version (should works for branches and tags)
|
||||
|
||||
pushd $CHECKOUT
|
||||
# checkout the proper branch/tag
|
||||
git checkout $3
|
||||
# force our local version to be the same as the remote version
|
||||
git reset --hard origin/$3
|
||||
popd
|
||||
|
||||
# give ownership to the stack user
|
||||
chroot $CHROOTCACHE/natty-stack/ chown -R stack $2
|
||||
}
|
||||
|
||||
git_clone $NOVA_REPO $DEST/nova $NOVA_BRANCH
|
||||
git_clone $GLANCE_REPO $DEST/glance $GLANCE_BRANCH
|
||||
git_clone $KEYSTONE_REPO $DEST/keystone $KEYSTONE_BRANCH
|
||||
git_clone $NOVNC_REPO $DEST/novnc $NOVNC_BRANCH
|
||||
git_clone $DASH_REPO $DEST/dash $DASH_BRANCH $DASH_TAG
|
||||
git_clone $NOVACLIENT_REPO $DEST/python-novaclient $NOVACLIENT_BRANCH
|
||||
git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH
|
||||
|
||||
chroot $CHROOTCACHE/natty-stack mkdir -p $DEST/files
|
||||
wget -c http://images.ansolabs.com/tty.tgz -O $CHROOTCACHE/natty-stack$DEST/files/tty.tgz
|
||||
|
||||
# Use this version of devstack?
|
||||
if [ "$USE_CURRENT_DEVSTACK" = "1" ]; then
|
||||
rm -rf $CHROOTCACHE/natty-stack/$DEST/devstack
|
||||
cp -pr $CWD $CHROOTCACHE/natty-stack/$DEST/devstack
|
||||
fi
|
||||
|
||||
cp -pr $CHROOTCACHE/natty-stack $NFSDIR
|
||||
|
||||
# set hostname
|
||||
echo $NAME > $NFSDIR/etc/hostname
|
||||
echo "127.0.0.1 localhost $NAME" > $NFSDIR/etc/hosts
|
||||
|
||||
# injecting root's public ssh key if it exists
|
||||
if [ -f /root/.ssh/id_rsa.pub ]; then
|
||||
mkdir $NFSDIR/root/.ssh
|
||||
chmod 700 $NFSDIR/root/.ssh
|
||||
cp /root/.ssh/id_rsa.pub $NFSDIR/root/.ssh/authorized_keys
|
||||
fi
|
@ -0,0 +1,116 @@
|
||||
#!/bin/bash -e
|
||||
# build_pxe_boot.sh - Create a PXE boot environment
|
||||
#
|
||||
# build_pxe_boot.sh [-k kernel-version] destdir
|
||||
#
|
||||
# Assumes syslinux is installed
|
||||
# Assumes devstack files are in `pwd`/pxe
|
||||
# Only needs to run as root if the destdir permissions require it
|
||||
|
||||
UBUNTU_MIRROR=http://archive.ubuntu.com/ubuntu/dists/natty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64
|
||||
|
||||
MEMTEST_VER=4.10
|
||||
MEMTEST_BIN=memtest86+-${MEMTEST_VER}.bin
|
||||
MEMTEST_URL=http://www.memtest.org/download/${MEMTEST_VER}/
|
||||
|
||||
KVER=`uname -r`
|
||||
if [ "$1" = "-k" ]; then
|
||||
KVER=$2
|
||||
shift;shift
|
||||
fi
|
||||
|
||||
DEST_DIR=${1:-/tmp}/tftpboot
|
||||
PXEDIR=${PXEDIR:-/var/cache/devstack/pxe}
|
||||
OPWD=`pwd`
|
||||
PROGDIR=`dirname $0`
|
||||
|
||||
mkdir -p $DEST_DIR/pxelinux.cfg
|
||||
cd $DEST_DIR
|
||||
for i in memdisk menu.c32 pxelinux.0; do
|
||||
cp -p /usr/lib/syslinux/$i $DEST_DIR
|
||||
done
|
||||
|
||||
DEFAULT=$DEST_DIR/pxelinux.cfg/default
|
||||
cat >$DEFAULT <<EOF
|
||||
default menu.c32
|
||||
prompt 0
|
||||
timeout 0
|
||||
|
||||
MENU TITLE PXE Boot Menu
|
||||
|
||||
EOF
|
||||
|
||||
# Setup devstack boot
|
||||
mkdir -p $DEST_DIR/ubuntu
|
||||
if [ ! -d $PXEDIR ]; then
|
||||
mkdir -p $PXEDIR
|
||||
fi
|
||||
if [ ! -r $PXEDIR/vmlinuz-${KVER} ]; then
|
||||
sudo chmod 644 /boot/vmlinuz-${KVER}
|
||||
if [ ! -r /boot/vmlinuz-${KVER} ]; then
|
||||
echo "No kernel found"
|
||||
else
|
||||
cp -p /boot/vmlinuz-${KVER} $PXEDIR
|
||||
fi
|
||||
fi
|
||||
cp -p $PXEDIR/vmlinuz-${KVER} $DEST_DIR/ubuntu
|
||||
if [ ! -r $PXEDIR/stack-initrd.gz ]; then
|
||||
cd $OPWD
|
||||
sudo $PROGDIR/build_pxe_ramdisk.sh $PXEDIR/stack-initrd.gz
|
||||
fi
|
||||
cp -p $PXEDIR/stack-initrd.gz $DEST_DIR/ubuntu
|
||||
cat >>$DEFAULT <<EOF
|
||||
|
||||
LABEL devstack
|
||||
MENU LABEL ^devstack
|
||||
MENU DEFAULT
|
||||
KERNEL ubuntu/vmlinuz-$KVER
|
||||
APPEND initrd=ubuntu/stack-initrd.gz ramdisk_size=2109600 root=/dev/ram0
|
||||
EOF
|
||||
|
||||
# Get Ubuntu
|
||||
if [ -d $PXEDIR ]; then
|
||||
cp -p $PXEDIR/natty-base-initrd.gz $DEST_DIR/ubuntu
|
||||
fi
|
||||
cat >>$DEFAULT <<EOF
|
||||
|
||||
LABEL ubuntu
|
||||
MENU LABEL ^Ubuntu Natty
|
||||
KERNEL ubuntu/vmlinuz-$KVER
|
||||
APPEND initrd=ubuntu/natty-base-initrd.gz ramdisk_size=419600 root=/dev/ram0
|
||||
EOF
|
||||
|
||||
# Get Memtest
|
||||
cd $DEST_DIR
|
||||
if [ ! -r $MEMTEST_BIN ]; then
|
||||
wget -N --quiet ${MEMTEST_URL}/${MEMTEST_BIN}.gz
|
||||
gunzip $MEMTEST_BIN
|
||||
fi
|
||||
cat >>$DEFAULT <<EOF
|
||||
|
||||
LABEL memtest
|
||||
MENU LABEL ^Memtest86+
|
||||
KERNEL $MEMTEST_BIN
|
||||
EOF
|
||||
|
||||
# Get FreeDOS
|
||||
mkdir -p $DEST_DIR/freedos
|
||||
cd $DEST_DIR/freedos
|
||||
wget -N --quiet http://www.fdos.org/bootdisks/autogen/FDSTD.288.gz
|
||||
gunzip -f FDSTD.288.gz
|
||||
cat >>$DEFAULT <<EOF
|
||||
|
||||
LABEL freedos
|
||||
MENU LABEL ^FreeDOS bootdisk
|
||||
KERNEL memdisk
|
||||
APPEND initrd=freedos/FDSTD.288
|
||||
EOF
|
||||
|
||||
# Local disk boot
|
||||
cat >>$DEFAULT <<EOF
|
||||
|
||||
LABEL local
|
||||
MENU LABEL ^Local disk
|
||||
MENU DEFAULT
|
||||
LOCALBOOT 0
|
||||
EOF
|
@ -0,0 +1,135 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ ! "$#" -eq "1" ]; then
|
||||
echo "$0 builds a gziped natty openstack install"
|
||||
echo "usage: $0 dest"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
PROGDIR=`dirname $0`
|
||||
CHROOTCACHE=${CHROOTCACHE:-/var/cache/devstack}
|
||||
|
||||
# Source params
|
||||
source ./stackrc
|
||||
|
||||
# Store cwd
|
||||
CWD=`pwd`
|
||||
|
||||
DEST=${DEST:-/opt/stack}
|
||||
|
||||
# Option to use the version of devstack on which we are currently working
|
||||
USE_CURRENT_DEVSTACK=${USE_CURRENT_DEVSTACK:-1}
|
||||
|
||||
# clean install of natty
|
||||
if [ ! -d $CHROOTCACHE/natty-base ]; then
|
||||
$PROGDIR/make_image.sh -C natty $CHROOTCACHE/natty-base
|
||||
# copy kernel modules...
|
||||
# NOTE(ja): is there a better way to do this?
|
||||
cp -pr /lib/modules/`uname -r` $CHROOTCACHE/natty-base/lib/modules
|
||||
# a simple password - pass
|
||||
echo root:pass | chroot $CHROOTCACHE/natty-base chpasswd
|
||||
fi
|
||||
|
||||
# prime natty with as many apt/pips as we can
|
||||
if [ ! -d $CHROOTCACHE/natty-dev ]; then
|
||||
rsync -azH $CHROOTCACHE/natty-base/ $CHROOTCACHE/natty-dev/
|
||||
chroot $CHROOTCACHE/natty-dev apt-get install -y `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server)"`
|
||||
chroot $CHROOTCACHE/natty-dev pip install `cat files/pips/*`
|
||||
|
||||
# Create a stack user that is a member of the libvirtd group so that stack
|
||||
# is able to interact with libvirt.
|
||||
chroot $CHROOTCACHE/natty-dev groupadd libvirtd
|
||||
chroot $CHROOTCACHE/natty-dev useradd stack -s /bin/bash -d $DEST -G libvirtd
|
||||
mkdir -p $CHROOTCACHE/natty-dev/$DEST
|
||||
chown stack $CHROOTCACHE/natty-dev/$DEST
|
||||
|
||||
# a simple password - pass
|
||||
echo stack:pass | chroot $CHROOTCACHE/natty-dev chpasswd
|
||||
|
||||
# and has sudo ability (in the future this should be limited to only what
|
||||
# stack requires)
|
||||
echo "stack ALL=(ALL) NOPASSWD: ALL" >> $CHROOTCACHE/natty-dev/etc/sudoers
|
||||
fi
|
||||
|
||||
# clone git repositories onto the system
|
||||
# ======================================
|
||||
|
||||
if [ ! -d $CHROOTCACHE/natty-stack ]; then
|
||||
rsync -azH $CHROOTCACHE/natty-dev/ $CHROOTCACHE/natty-stack/
|
||||
fi
|
||||
|
||||
# git clone only if directory doesn't exist already. Since ``DEST`` might not
|
||||
# be owned by the installation user, we create the directory and change the
|
||||
# ownership to the proper user.
|
||||
function git_clone {
|
||||
|
||||
# clone new copy or fetch latest changes
|
||||
CHECKOUT=$CHROOTCACHE/natty-stack$2
|
||||
if [ ! -d $CHECKOUT ]; then
|
||||
mkdir -p $CHECKOUT
|
||||
git clone $1 $CHECKOUT
|
||||
else
|
||||
pushd $CHECKOUT
|
||||
git fetch
|
||||
popd
|
||||
fi
|
||||
|
||||
# FIXME(ja): checkout specified version (should works for branches and tags)
|
||||
|
||||
pushd $CHECKOUT
|
||||
# checkout the proper branch/tag
|
||||
git checkout $3
|
||||
# force our local version to be the same as the remote version
|
||||
git reset --hard origin/$3
|
||||
popd
|
||||
|
||||
# give ownership to the stack user
|
||||
chroot $CHROOTCACHE/natty-stack/ chown -R stack $2
|
||||
}
|
||||
|
||||
git_clone $NOVA_REPO $DEST/nova $NOVA_BRANCH
|
||||
git_clone $GLANCE_REPO $DEST/glance $GLANCE_BRANCH
|
||||
git_clone $KEYSTONE_REPO $DEST/keystone $KEYSTONE_BRANCH
|
||||
git_clone $NOVNC_REPO $DEST/novnc $NOVNC_BRANCH
|
||||
git_clone $DASH_REPO $DEST/dash $DASH_BRANCH
|
||||
git_clone $NOVACLIENT_REPO $DEST/python-novaclient $NOVACLIENT_BRANCH
|
||||
git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH
|
||||
|
||||
# Use this version of devstack?
|
||||
if [ "$USE_CURRENT_DEVSTACK" = "1" ]; then
|
||||
rm -rf $CHROOTCACHE/natty-stack/$DEST/devstack
|
||||
cp -pr $CWD $CHROOTCACHE/natty-stack/$DEST/devstack
|
||||
fi
|
||||
|
||||
# Configure host network for DHCP
|
||||
mkdir -p $CHROOTCACHE/natty-stack/etc/network
|
||||
cat > $CHROOTCACHE/natty-stack/etc/network/interfaces <<EOF
|
||||
auto lo
|
||||
iface lo inet loopback
|
||||
|
||||
auto eth0
|
||||
iface eth0 inet dhcp
|
||||
EOF
|
||||
|
||||
# build a new image
|
||||
BASE=$CHROOTCACHE/build.$$
|
||||
IMG=$BASE.img
|
||||
MNT=$BASE/
|
||||
|
||||
# (quickly) create a 2GB blank filesystem
|
||||
dd bs=1 count=1 seek=$((2*1024*1024*1024)) if=/dev/zero of=$IMG
|
||||
# force it to be initialized as ext2
|
||||
mkfs.ext2 -F $IMG
|
||||
|
||||
# mount blank image loopback and load it
|
||||
mkdir -p $MNT
|
||||
mount -o loop $IMG $MNT
|
||||
rsync -azH $CHROOTCACHE/natty-stack/ $MNT
|
||||
|
||||
# umount and cleanup
|
||||
umount $MNT
|
||||
rmdir $MNT
|
||||
|
||||
# gzip into final location
|
||||
gzip -1 $IMG -c > $1
|
||||
|
@ -0,0 +1,82 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# **exercise.sh** - using the cloud can be fun
|
||||
|
||||
# we will use the ``nova`` cli tool provided by the ``python-novaclient``
|
||||
# package
|
||||
#
|
||||
|
||||
|
||||
# This script exits on an error so that errors don't compound and you see
|
||||
# only the first error that occured.
|
||||
set -o errexit
|
||||
|
||||
# Print the commands being run so that we can see the command that triggers
|
||||
# an error. It is also useful for following allowing as the install occurs.
|
||||
set -o xtrace
|
||||
|
||||
|
||||
# Settings
|
||||
# ========
|
||||
|
||||
HOST=${HOST:-localhost}
|
||||
|
||||
# Nova original used project_id as the *account* that owned resources (servers,
|
||||
# ip address, ...) With the addition of Keystone we have standardized on the
|
||||
# term **tenant** as the entity that owns the resources. **novaclient** still
|
||||
# uses the old deprecated terms project_id. Note that this field should now be
|
||||
# set to tenant_name, not tenant_id.
|
||||
export NOVA_PROJECT_ID=${TENANT:-demo}
|
||||
|
||||
# In addition to the owning entity (tenant), nova stores the entity performing
|
||||
# the action as the **user**.
|
||||
export NOVA_USERNAME=${USERNAME:-demo}
|
||||
|
||||
# With Keystone you pass the keystone password instead of an api key.
|
||||
export NOVA_API_KEY=${PASSWORD:-secrete}
|
||||
|
||||
# With the addition of Keystone, to use an openstack cloud you should
|
||||
# authenticate against keystone, which returns a **Token** and **Service
|
||||
# Catalog**. The catalog contains the endpoint for all services the user/tenant
|
||||
# has access to - including nova, glance, keystone, swift, ... We currently
|
||||
# recommend using the 2.0 *auth api*.
|
||||
#
|
||||
# *NOTE*: Using the 2.0 *auth api* does mean that compute api is 2.0. We will
|
||||
# use the 1.1 *compute api*
|
||||
export NOVA_URL=${NOVA_URL:-http://$HOST:5000/v2.0/}
|
||||
|
||||
# Currently novaclient needs you to specify the *compute api* version. This
|
||||
# needs to match the config of your catalog returned by Keystone.
|
||||
export NOVA_VERSION=1.1
|
||||
|
||||
# FIXME - why does this need to be specified?
|
||||
export NOVA_REGION_NAME=RegionOne
|
||||
|
||||
|
||||
# Get a token for clients that don't support service catalog
|
||||
# ==========================================================
|
||||
SERVICE_TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_PROJECT_ID\", \"password\": \"$NOVA_API_KEY\"}}}" -H "Content-type: application/json" http://$HOST:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"`
|
||||
|
||||
# Launching a server
|
||||
# ==================
|
||||
|
||||
# List servers for tenant:
|
||||
nova list
|
||||
|
||||
# List of flavors:
|
||||
nova flavor-list
|
||||
|
||||
# Images
|
||||
# ------
|
||||
|
||||
# Nova has a **deprecated** way of listing images.
|
||||
nova image-list
|
||||
|
||||
# But we recommend using glance directly
|
||||
glance -A $SERVICE_TOKEN index
|
||||
|
||||
# show details of the active servers::
|
||||
#
|
||||
# nova show 1234
|
||||
#
|
||||
nova list | grep ACTIVE | cut -d \| -f2 | xargs -n1 nova show
|
@ -0,0 +1,27 @@
|
||||
<VirtualHost *:80>
|
||||
WSGIScriptAlias / %DASH_DIR%/openstack-dashboard/dashboard/wsgi/django.wsgi
|
||||
WSGIDaemonProcess dashboard user=%USER% group=%USER% processes=3 threads=10
|
||||
SetEnv APACHE_RUN_USER %USER%
|
||||
SetEnv APACHE_RUN_GROUP %USER%
|
||||
WSGIProcessGroup dashboard
|
||||
|
||||
DocumentRoot %DASH_DIR%/.blackhole/
|
||||
Alias /media %DASH_DIR%/openstack-dashboard/media
|
||||
|
||||
<Directory />
|
||||
Options FollowSymLinks
|
||||
AllowOverride None
|
||||
</Directory>
|
||||
|
||||
<Directory %DASH_DIR%/>
|
||||
Options Indexes FollowSymLinks MultiViews
|
||||
AllowOverride None
|
||||
Order allow,deny
|
||||
allow from all
|
||||
</Directory>
|
||||
|
||||
ErrorLog /var/log/apache2/error.log
|
||||
LogLevel warn
|
||||
CustomLog /var/log/apache2/access.log combined
|
||||
</VirtualHost>
|
||||
|
@ -0,0 +1,4 @@
|
||||
apache2
|
||||
libapache2-mod-wsgi
|
||||
python-dateutil
|
||||
python-anyjson
|
@ -0,0 +1,14 @@
|
||||
pep8
|
||||
pylint
|
||||
python-pip
|
||||
screen
|
||||
unzip
|
||||
wget
|
||||
psmisc
|
||||
git-core
|
||||
lsof # useful when debugging
|
||||
openssh-server
|
||||
vim-nox
|
||||
locate # useful when debugging
|
||||
python-virtualenv
|
||||
python-unittest2
|
@ -0,0 +1,8 @@
|
||||
python-eventlet
|
||||
python-routes
|
||||
python-greenlet
|
||||
python-argparse
|
||||
python-sqlalchemy
|
||||
python-wsgiref
|
||||
python-pastedeploy
|
||||
python-xattr
|
@ -0,0 +1,15 @@
|
||||
python-setuptools
|
||||
python-dev
|
||||
python-lxml
|
||||
python-pastescript
|
||||
python-pastedeploy
|
||||
python-paste
|
||||
sqlite3
|
||||
python-pysqlite2
|
||||
python-sqlalchemy
|
||||
python-webob
|
||||
python-greenlet
|
||||
python-routes
|
||||
libldap2-dev
|
||||
libsasl2-dev
|
||||
|
@ -0,0 +1,35 @@
|
||||
dnsmasq-base
|
||||
kpartx
|
||||
mysql-server
|
||||
python-mysqldb
|
||||
kvm
|
||||
gawk
|
||||
iptables
|
||||
ebtables
|
||||
sqlite3
|
||||
sudo
|
||||
kvm
|
||||
libvirt-bin
|
||||
vlan
|
||||
curl
|
||||
rabbitmq-server
|
||||
socat # used by ajaxterm
|
||||
python-mox
|
||||
python-paste
|
||||
python-migrate
|
||||
python-gflags
|
||||
python-greenlet
|
||||
python-libvirt
|
||||
python-libxml2
|
||||
python-routes
|
||||
python-netaddr
|
||||
python-pastedeploy
|
||||
python-eventlet
|
||||
python-cheetah
|
||||
python-carrot
|
||||
python-tempita
|
||||
python-sqlalchemy
|
||||
python-suds
|
||||
python-lockfile
|
||||
python-m2crypto
|
||||
python-boto
|
@ -0,0 +1 @@
|
||||
python-numpy
|
@ -0,0 +1,18 @@
|
||||
# a collection of packages that speed up installation as they are dependencies
|
||||
# of packages we can't install during bootstraping (rabbitmq-server,
|
||||
# mysql-server, libvirt-bin)
|
||||
#
|
||||
# NOTE: only add packages to this file that aren't needed directly
|
||||
mysql-common
|
||||
mysql-client-5.1
|
||||
erlang-base
|
||||
erlang-ssl
|
||||
erlang-nox
|
||||
erlang-inets
|
||||
erlang-mnesia
|
||||
libhtml-template-perl
|
||||
gettext-base
|
||||
libavahi-client3
|
||||
libxml2-utils
|
||||
libpciaccess0
|
||||
libparted0debian1
|
@ -0,0 +1,98 @@
|
||||
import os
|
||||
|
||||
DEBUG = True
|
||||
TEMPLATE_DEBUG = DEBUG
|
||||
PROD = False
|
||||
USE_SSL = False
|
||||
|
||||
LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
# FIXME: We need to change this to mysql, instead of sqlite.
|
||||
DATABASES = {
|
||||
'default': {
|
||||
'ENGINE': 'django.db.backends.sqlite3',
|
||||
'NAME': os.path.join(LOCAL_PATH, 'dashboard_openstack.sqlite3'),
|
||||
},
|
||||
}
|
||||
|
||||
CACHE_BACKEND = 'dummy://'
|
||||
|
||||
# Add apps to dash installation.
|
||||
INSTALLED_APPS = (
|
||||
'dashboard',
|
||||
'django.contrib.contenttypes',
|
||||
'django.contrib.sessions',
|
||||
'django.contrib.messages',
|
||||
'django.contrib.staticfiles',
|
||||
'django_openstack',
|
||||
'django_openstack.templatetags',
|
||||
'mailer',
|
||||
)
|
||||
|
||||
|
||||
# Send email to the console by default
|
||||
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
|
||||
# Or send them to /dev/null
|
||||
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
|
||||
|
||||
# django-mailer uses a different settings attribute
|
||||
MAILER_EMAIL_BACKEND = EMAIL_BACKEND
|
||||
|
||||
# Configure these for your outgoing email host
|
||||
# EMAIL_HOST = 'smtp.my-company.com'
|
||||
# EMAIL_PORT = 25
|
||||
# EMAIL_HOST_USER = 'djangomail'
|
||||
# EMAIL_HOST_PASSWORD = 'top-secret!'
|
||||
|
||||
# FIXME: This needs to be changed to allow for multi-node setup.
|
||||
OPENSTACK_KEYSTONE_URL = "http://localhost:5000/v2.0/"
|
||||
OPENSTACK_KEYSTONE_ADMIN_URL = "http://localhost:35357/v2.0"
|
||||
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "Member"
|
||||
|
||||
# NOTE(tres): Available services should come from the service
|
||||
# catalog in Keystone.
|
||||
SWIFT_ENABLED = False
|
||||
|
||||
# Configure quantum connection details for networking
|
||||
QUANTUM_ENABLED = False
|
||||
QUANTUM_URL = '127.0.0.1'
|
||||
QUANTUM_PORT = '9696'
|
||||
QUANTUM_TENANT = '1234'
|
||||
QUANTUM_CLIENT_VERSION='0.1'
|
||||
|
||||
# No monitoring links currently
|
||||
EXTERNAL_MONITORING = []
|
||||
|
||||
# Uncomment the following segment to silence most logging
|
||||
# django.db and boto DEBUG logging is extremely verbose.
|
||||
#LOGGING = {
|
||||
# 'version': 1,
|
||||
# # set to True will disable all logging except that specified, unless
|
||||
# # nothing is specified except that django.db.backends will still log,
|
||||
# # even when set to True, so disable explicitly
|
||||
# 'disable_existing_loggers': False,
|
||||
# 'handlers': {
|
||||
# 'null': {
|
||||
# 'level': 'DEBUG',
|
||||
# 'class': 'django.utils.log.NullHandler',
|
||||
# },
|
||||
# 'console': {
|
||||
# 'level': 'DEBUG',
|
||||
# 'class': 'logging.StreamHandler',
|
||||
# },
|
||||
# },
|
||||
# 'loggers': {
|
||||
# # Comment or Uncomment these to turn on/off logging output
|
||||
# 'django.db.backends': {
|
||||
# 'handlers': ['null'],
|
||||
# 'propagate': False,
|
||||
# },
|
||||
# 'django_openstack': {
|
||||
# 'handlers': ['null'],
|
||||
# 'propagate': False,
|
||||
# },
|
||||
# }
|
||||
#}
|
||||
|
||||
# How much ram on each compute host?
|
||||
COMPUTE_HOST_RAM_GB = 16
|
@ -0,0 +1,178 @@
|
||||
[DEFAULT]
|
||||
# Show more verbose log output (sets INFO log level output)
|
||||
verbose = True
|
||||
|
||||
# Show debugging output in logs (sets DEBUG log level output)
|
||||
debug = True
|
||||
|
||||
# Which backend store should Glance use by default if one is not specified
# in a request to add a new image to Glance? Default: 'file'
|
||||
# Available choices are 'file', 'swift', and 's3'
|
||||
default_store = file
|
||||
|
||||
# Address to bind the API server
|
||||
bind_host = 0.0.0.0
|
||||
|
||||
# Port to bind the API server to
|
||||
bind_port = 9292
|
||||
|
||||
# Address to find the registry server
|
||||
registry_host = 0.0.0.0
|
||||
|
||||
# Port the registry server is listening on
|
||||
registry_port = 9191
|
||||
|
||||
# Log to this file. Make sure you do not set the same log
|
||||
# file for both the API and registry servers!
|
||||
log_file = %DEST%/glance/api.log
|
||||
|
||||
# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
|
||||
use_syslog = False
|
||||
|
||||
# ============ Notification System Options =====================
|
||||
|
||||
# Notifications can be sent when images are create, updated or deleted.
|
||||
# There are three methods of sending notifications, logging (via the
|
||||
# log_file directive), rabbit (via a rabbitmq queue) or noop (no
|
||||
# notifications sent, the default)
|
||||
notifier_strategy = noop
|
||||
|
||||
# Configuration options if sending notifications via rabbitmq (these are
|
||||
# the defaults)
|
||||
rabbit_host = localhost
|
||||
rabbit_port = 5672
|
||||
rabbit_use_ssl = false
|
||||
rabbit_userid = guest
|
||||
rabbit_password = guest
|
||||
rabbit_virtual_host = /
|
||||
rabbit_notification_topic = glance_notifications
|
||||
|
||||
# ============ Filesystem Store Options ========================
|
||||
|
||||
# Directory that the Filesystem backend store
|
||||
# writes image data to
|
||||
filesystem_store_datadir = %DEST%/glance/images/
|
||||
|
||||
# ============ Swift Store Options =============================
|
||||
|
||||
# Address where the Swift authentication service lives
|
||||
swift_store_auth_address = 127.0.0.1:8080/v1.0/
|
||||
|
||||
# User to authenticate against the Swift authentication service
|
||||
swift_store_user = jdoe
|
||||
|
||||
# Auth key for the user authenticating against the
|
||||
# Swift authentication service
|
||||
swift_store_key = a86850deb2742ec3cb41518e26aa2d89
|
||||
|
||||
# Container within the account that the account should use
|
||||
# for storing images in Swift
|
||||
swift_store_container = glance
|
||||
|
||||
# Do we create the container if it does not exist?
|
||||
swift_store_create_container_on_put = False
|
||||
|
||||
# What size, in MB, should Glance start chunking image files
|
||||
# and do a large object manifest in Swift? By default, this is
|
||||
# the maximum object size in Swift, which is 5GB
|
||||
swift_store_large_object_size = 5120
|
||||
|
||||
# When doing a large object manifest, what size, in MB, should
|
||||
# Glance write chunks to Swift? This amount of data is written
|
||||
# to a temporary disk buffer during the process of chunking
|
||||
# the image file, and the default is 200MB
|
||||
swift_store_large_object_chunk_size = 200
|
||||
|
||||
# Whether to use ServiceNET to communicate with the Swift storage servers.
|
||||
# (If you aren't RACKSPACE, leave this False!)
|
||||
#
|
||||
# To use ServiceNET for authentication, prefix hostname of
|
||||
# `swift_store_auth_address` with 'snet-'.
|
||||
# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
|
||||
swift_enable_snet = False
|
||||
|
||||
# ============ S3 Store Options =============================
|
||||
|
||||
# Address where the S3 authentication service lives
|
||||
s3_store_host = 127.0.0.1:8080/v1.0/
|
||||
|
||||
# User to authenticate against the S3 authentication service
|
||||
s3_store_access_key = <20-char AWS access key>
|
||||
|
||||
# Auth key for the user authenticating against the
|
||||
# S3 authentication service
|
||||
s3_store_secret_key = <40-char AWS secret key>
|
||||
|
||||
# Container within the account that the account should use
|
||||
# for storing images in S3. Note that S3 has a flat namespace,
|
||||
# so you need a unique bucket name for your glance images. An
|
||||
# easy way to do this is append your AWS access key to "glance".
|
||||
# S3 buckets in AWS *must* be lowercased, so remember to lowercase
|
||||
# your AWS access key if you use it in your bucket name below!
|
||||
s3_store_bucket = <lowercased 20-char aws access key>glance
|
||||
|
||||
# Do we create the bucket if it does not exist?
|
||||
s3_store_create_bucket_on_put = False
|
||||
|
||||
# ============ Image Cache Options ========================
|
||||
|
||||
image_cache_enabled = False
|
||||
|
||||
# Directory that the Image Cache writes data to
|
||||
# Make sure this is also set in glance-pruner.conf
|
||||
image_cache_datadir = /var/lib/glance/image-cache/
|
||||
|
||||
# Number of seconds after which we should consider an incomplete image to be
|
||||
# stalled and eligible for reaping
|
||||
image_cache_stall_timeout = 86400
|
||||
|
||||
# ============ Delayed Delete Options =============================
|
||||
|
||||
# Turn on/off delayed delete
|
||||
delayed_delete = False
|
||||
|
||||
# Delayed delete time in seconds
|
||||
scrub_time = 43200
|
||||
|
||||
# Directory that the scrubber will use to remind itself of what to delete
|
||||
# Make sure this is also set in glance-scrubber.conf
|
||||
scrubber_datadir = /var/lib/glance/scrubber
|
||||
|
||||
[pipeline:glance-api]
|
||||
#pipeline = versionnegotiation context apiv1app
|
||||
# NOTE: use the following pipeline for keystone
|
||||
pipeline = versionnegotiation authtoken context apiv1app
|
||||
|
||||
# To enable Image Cache Management API replace pipeline with below:
|
||||
# pipeline = versionnegotiation context imagecache apiv1app
|
||||
# NOTE: use the following pipeline for keystone auth (with caching)
|
||||
# pipeline = versionnegotiation authtoken context imagecache apiv1app
|
||||
|
||||
[pipeline:versions]
|
||||
pipeline = versionsapp
|
||||
|
||||
[app:versionsapp]
|
||||
paste.app_factory = glance.api.versions:app_factory
|
||||
|
||||
[app:apiv1app]
|
||||
paste.app_factory = glance.api.v1:app_factory
|
||||
|
||||
[filter:versionnegotiation]
|
||||
paste.filter_factory = glance.api.middleware.version_negotiation:filter_factory
|
||||
|
||||
[filter:imagecache]
|
||||
paste.filter_factory = glance.api.middleware.image_cache:filter_factory
|
||||
|
||||
[filter:context]
|
||||
paste.filter_factory = glance.common.context:filter_factory
|
||||
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystone.middleware.auth_token:filter_factory
|
||||
service_protocol = http
|
||||
service_host = 127.0.0.1
|
||||
service_port = 5000
|
||||
auth_host = 127.0.0.1
|
||||
auth_port = 35357
|
||||
auth_protocol = http
|
||||
auth_uri = http://127.0.0.1:5000/
|
||||
admin_token = %SERVICE_TOKEN%
|
@ -0,0 +1,70 @@
|
||||
[DEFAULT]
|
||||
# Show more verbose log output (sets INFO log level output)
|
||||
verbose = True
|
||||
|
||||
# Show debugging output in logs (sets DEBUG log level output)
|
||||
debug = True
|
||||
|
||||
# Address to bind the registry server
|
||||
bind_host = 0.0.0.0
|
||||
|
||||
# Port to bind the registry server to
|
||||
bind_port = 9191
|
||||
|
||||
# Log to this file. Make sure you do not set the same log
|
||||
# file for both the API and registry servers!
|
||||
log_file = %DEST%/glance/registry.log
|
||||
|
||||
# Where to store images
|
||||
filesystem_store_datadir = %DEST%/glance/images
|
||||
|
||||
# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
|
||||
use_syslog = False
|
||||
|
||||
# SQLAlchemy connection string for the reference implementation
|
||||
# registry server. Any valid SQLAlchemy connection string is fine.
|
||||
# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
|
||||
sql_connection = %SQL_CONN%
|
||||
|
||||
# Period in seconds after which SQLAlchemy should reestablish its connection
|
||||
# to the database.
|
||||
#
|
||||
# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
|
||||
# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
|
||||
# notice this, you can lower this value to ensure that SQLAlchemy reconnects
|
||||
# before MySQL can drop the connection.
|
||||
sql_idle_timeout = 3600
|
||||
|
||||
# Limit the api to return `param_limit_max` items in a call to a container. If
|
||||
# a larger `limit` query param is provided, it will be reduced to this value.
|
||||
api_limit_max = 1000
|
||||
|
||||
# If a `limit` query param is not provided in an api request, it will
|
||||
# default to `limit_param_default`
|
||||
limit_param_default = 25
|
||||
|
||||
[pipeline:glance-registry]
|
||||
#pipeline = context registryapp
|
||||
# NOTE: use the following pipeline for keystone
|
||||
pipeline = authtoken keystone_shim context registryapp
|
||||
|
||||
[app:registryapp]
|
||||
paste.app_factory = glance.registry.server:app_factory
|
||||
|
||||
[filter:context]
|
||||
context_class = glance.registry.context.RequestContext
|
||||
paste.filter_factory = glance.common.context:filter_factory
|
||||
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystone.middleware.auth_token:filter_factory
|
||||
service_protocol = http
|
||||
service_host = 127.0.0.1
|
||||
service_port = 5000
|
||||
auth_host = 127.0.0.1
|
||||
auth_port = 35357
|
||||
auth_protocol = http
|
||||
auth_uri = http://127.0.0.1:5000/
|
||||
admin_token = %SERVICE_TOKEN%
|
||||
|
||||
[filter:keystone_shim]
|
||||
paste.filter_factory = keystone.middleware.glance_auth_token:filter_factory
|
@ -0,0 +1,86 @@
|
||||
[DEFAULT]
|
||||
# Show more verbose log output (sets INFO log level output)
|
||||
verbose = False
|
||||
|
||||
# Show debugging output in logs (sets DEBUG log level output)
|
||||
debug = False
|
||||
|
||||
# Which backend store should Keystone use by default.
|
||||
# Default: 'sqlite'
|
||||
# Available choices are 'sqlite' [future will include LDAP, PAM, etc]
|
||||
default_store = sqlite
|
||||
|
||||
# Log to this file. Make sure you do not set the same log
|
||||
# file for both the API and registry servers!
|
||||
log_file = %DEST%/keystone/keystone.log
|
||||
|
||||
# List of backends to be configured
|
||||
backends = keystone.backends.sqlalchemy
|
||||
#For LDAP support, add: ,keystone.backends.ldap
|
||||
|
||||
# Dictionary mapping every service to a header. Missing services get the header
|
||||
# X_(SERVICE_NAME) Key => Service Name, Value => Header Name
|
||||
service-header-mappings = {
|
||||
'nova' : 'X-Server-Management-Url',
|
||||
'swift' : 'X-Storage-Url',
|
||||
'cdn' : 'X-CDN-Management-Url'}
|
||||
|
||||
# Address to bind the API server
|
||||
# TODO Properties defined within app not available via pipeline.
|
||||
service_host = 0.0.0.0
|
||||
|
||||
# Port to bind the API server to
|
||||
service_port = 5000
|
||||
|
||||
# Address to bind the Admin API server
|
||||
admin_host = 0.0.0.0
|
||||
|
||||
# Port to bind the Admin API server to
|
||||
admin_port = 35357
|
||||
|
||||
#Role that allows to perform admin operations.
|
||||
keystone-admin-role = KeystoneAdmin
|
||||
|
||||
#Role that allows to perform service admin operations.
|
||||
keystone-service-admin-role = KeystoneServiceAdmin
|
||||
|
||||
[keystone.backends.sqlalchemy]
|
||||
# SQLAlchemy connection string for the reference implementation registry
|
||||
# server. Any valid SQLAlchemy connection string is fine.
|
||||
# See: http://bit.ly/ideIpI
|
||||
#sql_connection = sqlite:///keystone.db
|
||||
sql_connection = %SQL_CONN%
|
||||
backend_entities = ['UserRoleAssociation', 'Endpoints', 'Role', 'Tenant',
|
||||
'User', 'Credentials', 'EndpointTemplates', 'Token',
|
||||
'Service']
|
||||
|
||||
# Period in seconds after which SQLAlchemy should reestablish its connection
|
||||
# to the database.
|
||||
sql_idle_timeout = 30
|
||||
|
||||
[pipeline:admin]
|
||||
pipeline =
|
||||
urlrewritefilter
|
||||
admin_api
|
||||
|
||||
[pipeline:keystone-legacy-auth]
|
||||
pipeline =
|
||||
urlrewritefilter
|
||||
legacy_auth
|
||||
RAX-KEY-extension
|
||||
service_api
|
||||
|
||||
[app:service_api]
|
||||
paste.app_factory = keystone.server:service_app_factory
|
||||
|
||||
[app:admin_api]
|
||||
paste.app_factory = keystone.server:admin_app_factory
|
||||
|
||||
[filter:urlrewritefilter]
|
||||
paste.filter_factory = keystone.middleware.url:filter_factory
|
||||
|
||||
[filter:legacy_auth]
|
||||
paste.filter_factory = keystone.frontends.legacy_token_auth:filter_factory
|
||||
|
||||
[filter:RAX-KEY-extension]
|
||||
paste.filter_factory = keystone.contrib.extensions.service.raxkey.frontend:filter_factory
|
@ -0,0 +1,43 @@
|
||||
#!/bin/bash
|
||||
BIN_DIR=${BIN_DIR:-.}
|
||||
# Tenants
|
||||
$BIN_DIR/keystone-manage $* tenant add admin
|
||||
$BIN_DIR/keystone-manage $* tenant add demo
|
||||
$BIN_DIR/keystone-manage $* tenant add invisible_to_admin
|
||||
|
||||
# Users
|
||||
$BIN_DIR/keystone-manage $* user add admin %ADMIN_PASSWORD%
|
||||
$BIN_DIR/keystone-manage $* user add demo %ADMIN_PASSWORD%
|
||||
|
||||
# Roles
|
||||
$BIN_DIR/keystone-manage $* role add Admin
|
||||
$BIN_DIR/keystone-manage $* role add Member
|
||||
$BIN_DIR/keystone-manage $* role add KeystoneAdmin
|
||||
$BIN_DIR/keystone-manage $* role add KeystoneServiceAdmin
|
||||
$BIN_DIR/keystone-manage $* role grant Admin admin admin
|
||||
$BIN_DIR/keystone-manage $* role grant Member demo demo
|
||||
$BIN_DIR/keystone-manage $* role grant Member demo invisible_to_admin
|
||||
$BIN_DIR/keystone-manage $* role grant Admin admin demo
|
||||
$BIN_DIR/keystone-manage $* role grant Admin admin
|
||||
$BIN_DIR/keystone-manage $* role grant KeystoneAdmin admin
|
||||
$BIN_DIR/keystone-manage $* role grant KeystoneServiceAdmin admin
|
||||
|
||||
# Services
|
||||
$BIN_DIR/keystone-manage $* service add nova compute "Nova Compute Service"
|
||||
$BIN_DIR/keystone-manage $* service add glance image "Glance Image Service"
|
||||
$BIN_DIR/keystone-manage $* service add keystone identity "Keystone Identity Service"
|
||||
|
||||
#endpointTemplates
|
||||
$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne nova http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% 1 1
|
||||
$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne glance http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% 1 1
|
||||
$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone http://%HOST_IP%:5000/v2.0 http://%HOST_IP%:35357/v2.0 http://%HOST_IP%:5000/v2.0 1 1
|
||||
# $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%HOST_IP%:8080/v1/AUTH_%tenant_id% http://%HOST_IP%:8080/ http://%HOST_IP%:8080/v1/AUTH_%tenant_id% 1 1
|
||||
|
||||
# Tokens
|
||||
$BIN_DIR/keystone-manage $* token add %SERVICE_TOKEN% admin admin 2015-02-05T00:00
|
||||
|
||||
# EC2 related creds - note we are setting the token to user_password
|
||||
# but keystone doesn't parse them - it is just a blob from keystone's
|
||||
# point of view
|
||||
$BIN_DIR/keystone-manage $* credentials add admin EC2 'admin_%ADMIN_PASSWORD%' admin admin || echo "no support for adding credentials"
|
||||
$BIN_DIR/keystone-manage $* credentials add demo EC2 'demo_%ADMIN_PASSWORD%' demo demo || echo "no support for adding credentials"
|
@ -0,0 +1,21 @@
|
||||
nose==1.0.0
|
||||
Django==1.3
|
||||
django-nose==0.1.2
|
||||
django-mailer
|
||||
django-registration==0.7
|
||||
kombu
|
||||
python-cloudfiles
|
||||
python-dateutil
|
||||
routes
|
||||
webob
|
||||
sqlalchemy
|
||||
paste
|
||||
PasteDeploy
|
||||
sqlalchemy-migrate
|
||||
eventlet
|
||||
xattr
|
||||
pep8
|
||||
pylint
|
||||
|
||||
-e git+https://github.com/jacobian/openstack.compute.git#egg=openstack
|
||||
|
@ -0,0 +1 @@
|
||||
PassLib
|
@ -0,0 +1,9 @@
|
||||
hardstatus on
|
||||
hardstatus alwayslastline
|
||||
hardstatus string "%{.bW}%-w%{.rW}%n %t%{-}%+w %=%{..G}%H %{..Y}%d/%m %c"
|
||||
|
||||
defscrollback 1024
|
||||
|
||||
vbell off
|
||||
startup_message off
|
||||
|
@ -0,0 +1,9 @@
|
||||
deb http://us.archive.ubuntu.com/ubuntu/ natty main restricted
|
||||
deb http://us.archive.ubuntu.com/ubuntu/ natty-updates main restricted
|
||||
deb http://us.archive.ubuntu.com/ubuntu/ natty universe
|
||||
deb http://us.archive.ubuntu.com/ubuntu/ natty-updates universe
|
||||
deb http://us.archive.ubuntu.com/ubuntu/ natty multiverse
|
||||
deb http://us.archive.ubuntu.com/ubuntu/ natty-updates multiverse
|
||||
deb http://security.ubuntu.com/ubuntu natty-security main restricted
|
||||
deb http://security.ubuntu.com/ubuntu natty-security universe
|
||||
deb http://security.ubuntu.com/ubuntu natty-security multiverse
|
@ -0,0 +1 @@
|
||||
socat
|
@ -0,0 +1,93 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Print some usage info
|
||||
function usage {
|
||||
echo "Usage: $0 [OPTION] [host_ip]"
|
||||
echo "Set up temporary networking for LXC"
|
||||
echo ""
|
||||
echo " -n, --dry-run Just print the commands that would execute."
|
||||
echo " -h, --help Print this usage message."
|
||||
echo ""
|
||||
exit
|
||||
}
|
||||
|
||||
# Allow passing the ip address on the command line.
|
||||
function process_option {
|
||||
case "$1" in
|
||||
-h|--help) usage;;
|
||||
-n|--dry-run) dry_run=1;;
|
||||
*) host_ip="$1"
|
||||
esac
|
||||
}
|
||||
|
||||
# Set up some defaults
|
||||
host_ip=
|
||||
dry_run=0
|
||||
bridge=br0
|
||||
DRIER=
|
||||
|
||||
# Process the args
|
||||
for arg in "$@"; do
|
||||
process_option $arg
|
||||
done
|
||||
|
||||
if [ $dry_run ]; then
|
||||
DRIER=echo
|
||||
fi
|
||||
|
||||
if [ "$UID" -ne "0" ]; then
|
||||
echo "This script must be run with root privileges."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check for bridge-utils.
|
||||
BRCTL=`which brctl`
|
||||
if [ ! -x "$BRCTL" ]; then
|
||||
echo "This script requires you to install bridge-utils."
|
||||
echo "Try: sudo apt-get install bridge-utils."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Scare off the nubs.
|
||||
echo "====================================================="
|
||||
echo
|
||||
echo "WARNING"
|
||||
echo
|
||||
echo "This script will modify your current network setup,"
|
||||
echo "this can be a scary thing and it is recommended that"
|
||||
echo "you have something equivalent to physical access to"
|
||||
echo "this machine before continuing in case your network"
|
||||
echo "gets all funky."
|
||||
echo
|
||||
echo "If you don't want to continue, hit CTRL-C now."
|
||||
|
||||
if [ -z "$host_ip" ];
|
||||
then
|
||||
echo "Otherwise, please type in your host's ip address and"
|
||||
echo "hit enter."
|
||||
echo
|
||||
echo "====================================================="
|
||||
read host_ip
|
||||
else
|
||||
echo "Otherwise hit enter."
|
||||
echo
|
||||
echo "====================================================="
|
||||
read accept
|
||||
fi
|
||||
|
||||
|
||||
# Add a bridge interface, this will choke if there is already
|
||||
# a bridge named $bridge
|
||||
$DRIER $BRCTL addbr $bridge
|
||||
$DRIER ip addr add 192.168.1.1/24 dev $bridge
|
||||
if [ $dry_run ]; then
|
||||
echo "echo 1 > /proc/sys/net/ipv4/ip_forward"
|
||||
else
|
||||
echo 1 > /proc/sys/net/ipv4/ip_forward
|
||||
fi
|
||||
$DRIER ifconfig $bridge up
|
||||
|
||||
# Set up the NAT for the instances
|
||||
$DRIER iptables -t nat -A POSTROUTING -s 192.168.1.0/24 -j SNAT --to-source $host_ip
|
||||
$DRIER iptables -I FORWARD -s 192.168.1.0/24 -j ACCEPT
|
||||
|
@ -0,0 +1,178 @@
|
||||
#!/bin/bash
|
||||
# make_image.sh - Create Ubuntu images in various formats
|
||||
#
|
||||
# Supported formats: qcow (kvm), vmdk (vmserver), vdi (vbox), vhd (vpc), raw
|
||||
#
|
||||
# Requires sudo to root
|
||||
|
||||
ROOTSIZE=${ROOTSIZE:-8192}
|
||||
SWAPSIZE=${SWAPSIZE:-1024}
|
||||
MIN_PKGS=${MIN_PKGS:-"apt-utils gpgv openssh-server"}
|
||||
|
||||
usage() {
|
||||
echo "Usage: $0 - Create Ubuntu images"
|
||||
echo ""
|
||||
echo "$0 [-m] [-r rootsize] [-s swapsize] release format"
|
||||
echo "$0 -C [-m] release chrootdir"
|
||||
echo "$0 -I [-r rootsize] [-s swapsize] chrootdir format"
|
||||
echo ""
|
||||
echo "-C - Create the initial chroot dir"
|
||||
echo "-I - Create the final image from a chroot"
|
||||
echo "-m - minimal installation"
|
||||
echo "-r size - root fs size in MB"
|
||||
echo "-s size - swap fs size in MB"
|
||||
echo "release - Ubuntu release: jaunty - oneric"
|
||||
echo "format - image format: qcow2, vmdk, vdi, vhd, xen, raw, fs"
|
||||
exit 1
|
||||
}
|
||||
|
||||
while getopts CIhmr:s: c; do
|
||||
case $c in
|
||||
C) CHROOTONLY=1
|
||||
;;
|
||||
I) IMAGEONLY=1
|
||||
;;
|
||||
h) usage
|
||||
;;
|
||||
m) MINIMAL=1
|
||||
;;
|
||||
r) ROOTSIZE=$OPTARG
|
||||
;;
|
||||
s) SWAPSIZE=$OPTARG
|
||||
;;
|
||||
esac
|
||||
done
|
||||
shift `expr $OPTIND - 1`
|
||||
|
||||
if [ ! "$#" -eq "2" -o -n "$CHROOTONLY" -a -n "$IMAGEONLY" ]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
# Default args
|
||||
RELEASE=$1
|
||||
FORMAT=$2
|
||||
CHROOTDIR=""
|
||||
|
||||
if [ -n "$CHROOTONLY" ]; then
|
||||
RELEASE=$1
|
||||
CHROOTDIR=$2
|
||||
FORMAT="pass"
|
||||
fi
|
||||
|
||||
if [ -n "$IMAGEONLY" ]; then
|
||||
CHROOTDIR=$1
|
||||
FORMAT=$2
|
||||
RELEASE="pass"
|
||||
fi
|
||||
|
||||
case $FORMAT in
|
||||
kvm|qcow2) FORMAT=qcow2
|
||||
QFORMAT=qcow2
|
||||
HYPER=kvm
|
||||
;;
|
||||
vmserver|vmdk)
|
||||
FORMAT=vmdk
|
||||
QFORMAT=vmdk
|
||||
HYPER=vmserver
|
||||
;;
|
||||
vbox|vdi) FORMAT=vdi
|
||||
QFORMAT=vdi
|
||||
HYPER=kvm
|
||||
;;
|
||||
vhd|vpc) FORMAT=vhd
|
||||
QFORMAT=vpc
|
||||
HYPER=kvm
|
||||
;;
|
||||
xen) FORMAT=raw
|
||||
QFORMAT=raw
|
||||
HYPER=xen
|
||||
;;
|
||||
raw) FORMAT=raw
|
||||
QFORMAT=raw
|
||||
HYPER=kvm
|
||||
;;
|
||||
pass) ;;
|
||||
*) echo "Unknown format: $FORMAT"
|
||||
usage
|
||||
esac
|
||||
|
||||
case $RELEASE in
|
||||
natty) ;;
|
||||
maverick) ;;
|
||||
lucid) ;;
|
||||
karmic) ;;
|
||||
jaunty) ;;
|
||||
pass) ;;
|
||||
*) echo "Unknown release: $RELEASE"
|
||||
usage
|
||||
;;
|
||||
esac
|
||||
|
||||
# Install stuff if necessary
|
||||
if [ -z `which vmbuilder` ]; then
|
||||
sudo apt-get install ubuntu-vm-builder
|
||||
fi
|
||||
|
||||
if [ -n "$CHROOTONLY" ]; then
|
||||
# Build a chroot directory
|
||||
HYPER=kvm
|
||||
if [ "$MINIMAL" = 1 ]; then
|
||||
ARGS="--variant=minbase"
|
||||
for i in $MIN_PKGS; do
|
||||
ARGS="$ARGS --addpkg=$i"
|
||||
done
|
||||
fi
|
||||
sudo vmbuilder $HYPER ubuntu $ARGS \
|
||||
--suite $RELEASE \
|
||||
--only-chroot \
|
||||
--chroot-dir=$CHROOTDIR \
|
||||
--overwrite \
|
||||
--addpkg=$MIN_PKGS \
|
||||
|
||||
sudo cp -p files/sources.list $CHROOTDIR/etc/apt/sources.list
|
||||
sudo chroot $CHROOTDIR apt-get update
|
||||
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Build the image
|
||||
TMPDIR=tmp
|
||||
TMPDISK=`mktemp imgXXXXXXXX`
|
||||
SIZE=$[$ROOTSIZE+$SWAPSIZE+1]
|
||||
dd if=/dev/null of=$TMPDISK bs=1M seek=$SIZE count=1
|
||||
|
||||
if [ -n "$IMAGEONLY" ]; then
|
||||
# Build image from chroot
|
||||
sudo vmbuilder $HYPER ubuntu $ARGS \
|
||||
--existing-chroot=$CHROOTDIR \
|
||||
--overwrite \
|
||||
--rootsize=$ROOTSIZE \
|
||||
--swapsize=$SWAPSIZE \
|
||||
--tmpfs - \
|
||||
--raw=$TMPDISK \
|
||||
|
||||
else
|
||||
# Do the whole shebang in one pass
|
||||
ARGS="--variant=minbase"
|
||||
for i in $MIN_PKGS; do
|
||||
ARGS="$ARGS --addpkg=$i"
|
||||
done
|
||||
sudo vmbuilder $HYPER ubuntu $ARGS \
|
||||
--suite $RELEASE \
|
||||
--overwrite \
|
||||
--rootsize=$ROOTSIZE \
|
||||
--swapsize=$SWAPSIZE \
|
||||
--tmpfs - \
|
||||
--raw=$TMPDISK \
|
||||
|
||||
fi
|
||||
|
||||
if [ "$FORMAT" = "raw" ]; then
|
||||
# Get image
|
||||
mv $TMPDISK $RELEASE.$FORMAT
|
||||
else
|
||||
# Convert image
|
||||
qemu-img convert -O $QFORMAT $TMPDISK $RELEASE.$FORMAT
|
||||
rm $TMPDISK
|
||||
fi
|
||||
rm -rf ubuntu-$HYPER
|
@ -0,0 +1,33 @@
|
||||
# compute service
|
||||
NOVA_REPO=https://github.com/cloudbuilders/nova.git
|
||||
NOVA_BRANCH=diablo
|
||||
|
||||
# image catalog service
|
||||
GLANCE_REPO=https://github.com/cloudbuilders/glance.git
|
||||
GLANCE_BRANCH=diablo
|
||||
|
||||
# unified auth system (manages accounts/tokens)
|
||||
KEYSTONE_REPO=https://github.com/cloudbuilders/keystone.git
|
||||
KEYSTONE_BRANCH=diablo
|
||||
|
||||
# a websockets/html5 or flash powered VNC console for vm instances
|
||||
NOVNC_REPO=https://github.com/cloudbuilders/noVNC.git
|
||||
NOVNC_BRANCH=master
|
||||
|
||||
# django powered web control panel for openstack
|
||||
DASH_REPO=https://github.com/cloudbuilders/openstack-dashboard.git
|
||||
DASH_BRANCH=master
|
||||
|
||||
# python client library to nova that dashboard (and others) use
|
||||
NOVACLIENT_REPO=https://github.com/cloudbuilders/python-novaclient.git
|
||||
NOVACLIENT_BRANCH=master
|
||||
|
||||
# openstackx is a collection of extensions to openstack.compute & nova
|
||||
# that is *deprecated*. The code is being moved into python-novaclient & nova.
|
||||
OPENSTACKX_REPO=https://github.com/cloudbuilders/openstackx.git
|
||||
OPENSTACKX_BRANCH=diablo
|
||||
|
||||
# allow local overrides of env variables
|
||||
if [ -f ./localrc ]; then
|
||||
source ./localrc
|
||||
fi
|
@ -0,0 +1,154 @@
|
||||
#!/bin/bash
|
||||
# install_openvpn.sh - Install OpenVPN and generate required certificates
|
||||
#
|
||||
# install_openvpn.sh --client name
|
||||
# install_openvpn.sh --server [name]
|
||||
#
|
||||
# name is used on the CN of the generated cert, and the filename of
|
||||
# the configuration, certificate and key files.
|
||||
#
|
||||
# --server mode configures the host with a running OpenVPN server instance
|
||||
# --client mode creates a tarball of a client configuration for this server
|
||||
|
||||
# VPN Config
|
||||
VPN_SERVER=${VPN_SERVER:-`ifconfig eth0 | awk "/inet addr:/ { print \$2 }" | cut -d: -f2`} # 50.56.12.212
|
||||
VPN_PROTO=${VPN_PROTO:-tcp}
|
||||
VPN_PORT=${VPN_PORT:-6081}
|
||||
VPN_DEV=${VPN_DEV:-tun}
|
||||
VPN_CLIENT_NET=${VPN_CLIENT_NET:-172.16.28.0}
|
||||
VPN_CLIENT_MASK=${VPN_CLIENT_MASK:-255.255.255.0}
|
||||
VPN_LOCAL_NET=${VPN_LOCAL_NET:-10.0.0.0}
|
||||
VPN_LOCAL_MASK=${VPN_LOCAL_MASK:-255.255.0.0}
|
||||
|
||||
VPN_DIR=/etc/openvpn
|
||||
CA_DIR=/etc/openvpn/easy-rsa
|
||||
|
||||
usage() {
|
||||
echo "$0 - OpenVPN install and certificate generation"
|
||||
echo ""
|
||||
echo "$0 --client name"
|
||||
echo "$0 --server [name]"
|
||||
echo ""
|
||||
echo " --server mode configures the host with a running OpenVPN server instance"
|
||||
echo " --client mode creates a tarball of a client configuration for this server"
|
||||
exit 1
|
||||
}
|
||||
|
||||
if [ -z $1 ]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
# Install OpenVPN
|
||||
if [ ! -x `which openvpn` ]; then
|
||||
apt-get install -y openvpn bridge-utils
|
||||
fi
|
||||
if [ ! -d $CA_DIR ]; then
|
||||
cp -pR /usr/share/doc/openvpn/examples/easy-rsa/2.0/ $CA_DIR
|
||||
fi
|
||||
|
||||
OPWD=`pwd`
|
||||
cd $CA_DIR
|
||||
source ./vars
|
||||
|
||||
# Override the defaults
|
||||
export KEY_COUNTRY="US"
|
||||
export KEY_PROVINCE="TX"
|
||||
export KEY_CITY="SanAntonio"
|
||||
export KEY_ORG="Cloudbuilders"
|
||||
export KEY_EMAIL="rcb@lists.rackspace.com"
|
||||
|
||||
if [ ! -r $CA_DIR/keys/dh1024.pem ]; then
|
||||
# Initialize a new CA
|
||||
$CA_DIR/clean-all
|
||||
$CA_DIR/build-dh
|
||||
$CA_DIR/pkitool --initca
|
||||
openvpn --genkey --secret $CA_DIR/keys/ta.key ## Build a TLS key
|
||||
fi
|
||||
|
||||
do_server() {
|
||||
NAME=$1
|
||||
# Generate server certificate
|
||||
$CA_DIR/pkitool --server $NAME
|
||||
|
||||
(cd $CA_DIR/keys;
|
||||
cp $NAME.crt $NAME.key ca.crt dh1024.pem ta.key $VPN_DIR
|
||||
)
|
||||
cat >$VPN_DIR/$NAME.conf <<EOF
|
||||
proto $VPN_PROTO
|
||||
port $VPN_PORT
|
||||
dev $VPN_DEV
|
||||
cert $NAME.crt
|
||||
key $NAME.key # This file should be kept secret
|
||||
ca ca.crt
|
||||
dh dh1024.pem
|
||||
duplicate-cn
|
||||
server $VPN_CLIENT_NET $VPN_CLIENT_MASK
|
||||
ifconfig-pool-persist ipp.txt
|
||||
push "route $VPN_LOCAL_NET $VPN_LOCAL_MASK"
|
||||
comp-lzo
|
||||
user nobody
|
||||
group nobody
|
||||
persist-key
|
||||
persist-tun
|
||||
status openvpn-status.log
|
||||
EOF
|
||||
/etc/init.d/openvpn restart
|
||||
}
|
||||
|
||||
do_client() {
|
||||
NAME=$1
|
||||
# Generate a client certificate
|
||||
$CA_DIR/pkitool $NAME
|
||||
|
||||
TMP_DIR=`mktemp -d`
|
||||
(cd $CA_DIR/keys;
|
||||
cp -p ca.crt ta.key $NAME.key $NAME.crt $TMP_DIR
|
||||
)
|
||||
if [ -r $VPN_DIR/hostname ]; then
|
||||
HOST=`cat $VPN_DIR/hostname`
|
||||
else
|
||||
HOST=`hostname`
|
||||
fi
|
||||
cat >$TMP_DIR/$HOST.conf <<EOF
|
||||
proto $VPN_PROTO
|
||||
port $VPN_PORT
|
||||
dev $VPN_DEV
|
||||
cert $NAME.crt
|
||||
key $NAME.key # This file should be kept secret
|
||||
ca ca.crt
|
||||
client
|
||||
remote $VPN_SERVER $VPN_PORT
|
||||
resolv-retry infinite
|
||||
nobind
|
||||
user nobody
|
||||
group nobody
|
||||
persist-key
|
||||
persist-tun
|
||||
comp-lzo
|
||||
verb 3
|
||||
EOF
|
||||
(cd $TMP_DIR; tar cf $OPWD/$NAME.tar *)
|
||||
rm -rf $TMP_DIR
|
||||
echo "Client certificate and configuration is in $OPWD/$NAME.tar"
|
||||
}
|
||||
|
||||
# Process command line args
|
||||
case $1 in
|
||||
--client) if [ -z $2 ]; then
|
||||
usage
|
||||
fi
|
||||
do_client $2
|
||||
;;
|
||||
--server) if [ -z $2 ]; then
|
||||
NAME=`hostname`
|
||||
else
|
||||
NAME=$2
|
||||
# Save for --client use
|
||||
echo $NAME >$VPN_DIR/hostname
|
||||
fi
|
||||
do_server $NAME
|
||||
;;
|
||||
--clean) $CA_DIR/clean-all
|
||||
;;
|
||||
*) usage
|
||||
esac
|
@ -0,0 +1,90 @@
|
||||
#!/bin/bash
|
||||
# upload_image.sh - Upload Ubuntu images (create if necessary) in various formats
|
||||
# Supported formats: qcow (kvm), vmdk (vmserver), vdi (vbox), vhd (vpc)
|
||||
# Requires sudo to root
|
||||
|
||||
# Print a short usage summary to stdout and terminate with status 1.
usage() {
    printf '%s - Upload images to OpenStack\n' "$0"
    printf '\n'
    printf '%s [-h host] [-p port] release format\n' "$0"
    exit 1
}
|
||||
|
||||
# Defaults; each may be overridden from the environment.
HOST=${HOST:-localhost}
PORT=${PORT:-9292}
DEST=${DEST:-/opt/stack}

# Parse -h host / -p port; any other flag is an error.
while getopts h:p: c; do
    case $c in
        h) HOST=$OPTARG
           ;;
        p) PORT=$OPTARG
           ;;
        *) usage
           ;;
    esac
done
shift $((OPTIND - 1))

RELEASE=$1
FORMAT=$2

# Map the requested format alias to a Glance disk format and a
# hypervisor target.  TARGET is exported state for callers/sourcing
# scripts; it is not used below.
case $FORMAT in
    kvm|qcow2)  FORMAT=qcow2
                TARGET=kvm
                ;;
    vmserver|vmdk)
                FORMAT=vmdk
                TARGET=vmserver
                ;;
    vbox|vdi)   TARGET=kvm
                FORMAT=vdi
                ;;
    vhd|vpc)    TARGET=kvm
                FORMAT=vhd
                ;;
    *)          echo "Unknown format: $FORMAT"
                usage
                ;;
esac

# Accept a known Ubuntu release, or any name for which a prebuilt
# image file already exists in the current directory.
case $RELEASE in
    natty|maverick|lucid|karmic|jaunty)
                ;;
    *)          if [ ! -r "$RELEASE.$FORMAT" ]; then
                    echo "Unknown release: $RELEASE"
                    usage
                fi
                ;;
esac

# Locate the glance client: PATH first, then stack.sh's install,
# otherwise install the client from source into $DEST.
GLANCE=$(command -v glance || true)
if [ -z "$GLANCE" ]; then
    if [ -x "$DEST/glance/bin/glance" ]; then
        # Look for stack.sh's install
        GLANCE="$DEST/glance/bin/glance"
    else
        # Install Glance client in $DEST
        echo "Glance not found, must install client"
        OWD=$(pwd)
        cd "$DEST" || { echo "Cannot cd to $DEST" >&2; exit 1; }
        sudo apt-get install python-pip python-eventlet python-routes python-greenlet python-argparse python-sqlalchemy python-wsgiref python-pastedeploy python-xattr
        sudo pip install kombu
        # NOTE(review): sudo clone leaves a root-owned tree; kept as-is
        # since $DEST may not be writable by the invoking user.
        sudo git clone https://github.com/cloudbuilders/glance.git
        cd glance || { echo "git clone of glance failed" >&2; exit 1; }
        sudo python setup.py develop
        cd "$OWD"
        GLANCE=$(command -v glance)
    fi
fi

# Create image if it doesn't exist
if [ ! -r "$RELEASE.$FORMAT" ]; then
    DIR=$(dirname "$0")
    echo "$RELEASE.$FORMAT not found, creating..."
    "$DIR/make_image.sh" "$RELEASE" "$FORMAT"
fi

# Upload the image
echo "Uploading image $RELEASE.$FORMAT to $HOST"
"$GLANCE" add "name=$RELEASE.$FORMAT" is_public=true "disk_format=$FORMAT" --host "$HOST" --port "$PORT" <"$RELEASE.$FORMAT"
|
Loading…
Reference in New Issue