#!/usr/bin/env bash

# **stack.sh** is an opinionated openstack developer installation.

# This script installs and configures *nova*, *glance*, *horizon* and *keystone*

# This script allows you to specify configuration options of what git
# repositories to use, enabled services, network configuration and various
# passwords.  If you are crafty you can run the script on multiple nodes using
# shared settings for common resources (mysql, rabbitmq) and build a multi-node
# developer install.

# To keep this script simple we assume you are running on an **Ubuntu 11.04
# Natty** machine.  It should work in a VM or physical server.  Additionally we
# put the list of *apt* and *pip* dependencies and other configuration files in
# this repo.  So start by grabbing this script and the dependencies.

# Learn more and get the most recent version at http://devstack.org

# Sanity Check
# ============

# Warn users who aren't on natty or oneiric, but allow them to override the
# check and attempt installation with ``FORCE=yes ./stack``
if ! grep -Eq 'natty|oneiric' /etc/lsb-release; then
    echo "WARNING: this script has only been tested on natty and oneiric"
    if [[ "$FORCE" != "yes" ]]; then
        echo "If you wish to run this script anyway run with FORCE=yes"
        exit 1
    fi
fi
# Keep track of the current devstack directory.
TOP_DIR=$(cd $(dirname "$0") && pwd)

# stack.sh keeps the list of **apt** and **pip** dependencies in external
# files, along with config templates and other useful files.  You can find
# these in the ``files`` directory (next to this script).  We will reference
# this directory using the ``FILES`` variable in this script.
FILES=$TOP_DIR/files
if [ ! -d $FILES ]; then
    echo "ERROR: missing devstack/files - did you grab more than just stack.sh?"
    exit 1
fi
# Settings
# ========

# This script is customizable through setting environment variables.  If you
# want to override a setting you can either::
#
#     export MYSQL_PASSWORD=anothersecret
#     ./stack.sh
#
# You can also pass options on a single line ``MYSQL_PASSWORD=simple ./stack.sh``
#
# Additionally, you can put any local variables into a ``localrc`` file, like::
#
#     MYSQL_PASSWORD=anothersecret
#     MYSQL_USER=hellaroot
#
# We try to have sensible defaults, so you should be able to run ``./stack.sh``
# in most cases.
#
# We read our settings from ``stackrc``.  This file is distributed with
# devstack and contains locations for what repositories to use.  If you want
# to use other repositories and branches, you can add your own settings with
# another file called ``localrc``
#
# If ``localrc`` exists, then ``stackrc`` will load those settings.  This is
# useful for changing a branch or repository to test other versions.  Also you
# can store your other settings like **MYSQL_PASSWORD** or **ADMIN_PASSWORD**
# instead of letting devstack generate random ones for you.
source ./stackrc

# Destination path for installation ``DEST``
DEST=${DEST:-/opt/stack}

# Configure services to syslog instead of writing to individual log files
SYSLOG=${SYSLOG:-False}
# apt-get wrapper that sets the arguments needed for a fully non-interactive
# install: no debconf prompts, keep existing config files, assume yes.
# When already running as root we substitute ``env`` for ``sudo`` so the
# DEBIAN_FRONTEND variable is still passed through to apt-get.
function apt_get() {
    local sudo="sudo"
    [ "$(id -u)" = "0" ] && sudo="env"
    $sudo DEBIAN_FRONTEND=noninteractive apt-get \
        --option "Dpkg::Options::=--force-confold" --assume-yes "$@"
}
# OpenStack is designed to be run as a regular user (Horizon will fail to run
# as root, since apache refuses to serve content from the root user).  If
# stack.sh is run as root, it automatically creates a stack user with
# sudo privileges and re-executes itself as that user.

if [[ $EUID -eq 0 ]]; then
    ROOTSLEEP=${ROOTSLEEP:-10}
    echo "You are running this script as root."
    echo "In $ROOTSLEEP seconds, we will create a user 'stack' and run as that user"
    sleep $ROOTSLEEP

    # since this script runs as a normal user, we need to give that user
    # ability to run sudo
    dpkg -l sudo || apt_get update && apt_get install sudo

    if ! getent passwd stack >/dev/null; then
        echo "Creating a user called stack"
        useradd -U -G sudo -s /bin/bash -d $DEST -m stack
    fi

    echo "Giving stack user passwordless sudo privileges"
    # natty uec images sudoers does not have a '#includedir'. add one.
    grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers ||
        echo "#includedir /etc/sudoers.d" >> /etc/sudoers
    # umask 226 yields mode 0440, which sudo requires for sudoers.d files
    ( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \
        > /etc/sudoers.d/50_stack_sh )

    echo "Copying files to stack user"
    STACK_DIR="$DEST/${PWD##*/}"
    cp -r -f "$PWD" "$STACK_DIR"
    chown -R stack "$STACK_DIR"
    # Re-run stack.sh as the stack user; optionally drop into a shell after
    # the run so failures can be inspected (default behavior).
    if [[ "$SHELL_AFTER_RUN" != "no" ]]; then
        exec su -c "set -e; cd $STACK_DIR; bash stack.sh; bash" stack
    else
        exec su -c "set -e; cd $STACK_DIR; bash stack.sh" stack
    fi
    exit 1
else
    # Our user needs passwordless privileges for certain commands which nova
    # uses internally.
    # Natty uec images sudoers does not have a '#includedir'. add one.
    sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers ||
        echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers

    # Stage the nova sudoers fragment in a temp file, substitute in the
    # current user, then install it with root ownership and 0440 mode.
    TEMPFILE=`mktemp`
    cat $FILES/sudo/nova > $TEMPFILE
    sed -e "s,%USER%,$USER,g" -i $TEMPFILE
    chmod 0440 $TEMPFILE
    sudo chown root:root $TEMPFILE
    sudo mv $TEMPFILE /etc/sudoers.d/stack_sh_nova
fi
# Set the destination directories for openstack projects
NOVA_DIR=$DEST/nova
HORIZON_DIR=$DEST/horizon
GLANCE_DIR=$DEST/glance
KEYSTONE_DIR=$DEST/keystone
NOVACLIENT_DIR=$DEST/python-novaclient
OPENSTACKX_DIR=$DEST/openstackx
NOVNC_DIR=$DEST/noVNC
SWIFT_DIR=$DEST/swift
SWIFT_KEYSTONE_DIR=$DEST/swift-keystone2
QUANTUM_DIR=$DEST/quantum

# Default Quantum Plugin
Q_PLUGIN=${Q_PLUGIN:-openvswitch}

# Specify which services to launch.  These generally correspond to screen tabs
ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit}

# Nova hypervisor configuration.  We default to libvirt with **kvm** but will
# drop back to **qemu** if we are unable to load the kvm module.  Stack.sh can
# also install an **LXC** based system.
VIRT_DRIVER=${VIRT_DRIVER:-libvirt}
LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}

# nova supports pluggable schedulers.  ``SimpleScheduler`` should work in most
# cases unless you are working on multi-zone mode.
SCHEDULER=${SCHEDULER:-nova.scheduler.simple.SimpleScheduler}

# Use the first IP unless an explicit one is set by the ``HOST_IP``
# environment variable
if [ ! -n "$HOST_IP" ]; then
    HOST_IP=`LC_ALL=C /sbin/ifconfig | grep -m 1 'inet addr:' | cut -d: -f2 | awk '{print $1}'`
fi

# Service startup timeout
SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
# Generic helper to configure passwords.
#
# Arguments: $1 - name of the variable to set, $2 - prompt message.
# If the named variable is already set (e.g. from localrc or the
# environment) this is a no-op.  Otherwise the user is prompted; an empty
# answer falls back to a random value from openssl.  The result is assigned
# to the named variable and appended to ``localrc`` so subsequent runs do
# not prompt again.  xtrace is suspended so passwords are not echoed.
function read_password {
    set +o xtrace
    var=$1; msg=$2
    pw=${!var}

    localrc=$TOP_DIR/localrc

    # If the password is not defined yet, proceed to prompt user for a password.
    if [ ! $pw ]; then
        # If there is no localrc file, create one
        if [ ! -e $localrc ]; then
            touch $localrc
        fi

        # Presumably if we got this far it can only be that our localrc is
        # missing the required password.  Prompt user for a password and write
        # it to localrc.
        echo ''
        echo '################################################################################'
        echo $msg
        echo '################################################################################'
        echo "This value will be written to your localrc file so you don't have to enter it again."
        echo "It is probably best to avoid spaces and weird characters."
        echo "If you leave this blank, a random default value will be used."
        echo "Enter a password now:"
        read $var
        pw=${!var}
        if [ ! $pw ]; then
            pw=`openssl rand -hex 10`
        fi
        eval "$var=$pw"
        echo "$var=$pw" >> $localrc
    fi
    set -o xtrace
}
# Nova Network Configuration
# --------------------------

# FIXME: more documentation about why these are important flags.  Also
# we should make sure we use the same variable names as the flag names.

PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-eth0}
FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28}
NET_MAN=${NET_MAN:-FlatDHCPManager}
EC2_DMZ_HOST=${EC2_DMZ_HOST:-$HOST_IP}
FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-br100}
VLAN_INTERFACE=${VLAN_INTERFACE:-$PUBLIC_INTERFACE}

# Multi-host is a mode where each compute node runs its own network node.  This
# allows network operations and routing for a VM to occur on the server that is
# running the VM - removing a SPOF and bandwidth bottleneck.
MULTI_HOST=${MULTI_HOST:-0}

# If you are using FlatDHCP on multiple hosts, set the ``FLAT_INTERFACE``
# variable but make sure that the interface doesn't already have an
# ip or you risk breaking things.
#
# **DHCP Warning**: If your flat interface device uses DHCP, there will be a
# hiccup while the network is moved from the flat interface to the flat network
# bridge.  This will happen when you launch your first instance.  Upon launch
# you will lose all connectivity to the node, and the vm launch will probably
# fail.
#
# If you are running on a single node and don't need to access the VMs from
# devices other than that node, you can set the flat interface to the same
# value as ``FLAT_NETWORK_BRIDGE``.  This will stop the network hiccup from
# occurring.
FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}

## FIXME(ja): should/can we check that FLAT_INTERFACE is sane?

# Using Quantum networking:
#
# Make sure that q-svc is enabled in ENABLED_SERVICES.  If it is the network
# manager will be set to the QuantumManager.
#
# If you're planning to use the Quantum openvswitch plugin, set Q_PLUGIN to
# "openvswitch" and make sure the q-agt service is enabled in
# ENABLED_SERVICES.
#
# With Quantum networking the NET_MAN variable is ignored.
# MySQL & RabbitMQ
# ----------------

# We configure Nova, Horizon, Glance and Keystone to use MySQL as their
# database server.  While they share a single server, each has their own
# database and tables.

# By default this script will install and configure MySQL.  If you want to
# use an existing server, you can pass in the user/password/host parameters.
# You will need to send the same ``MYSQL_PASSWORD`` to every host if you are
# doing a multi-node devstack installation.
MYSQL_HOST=${MYSQL_HOST:-localhost}
MYSQL_USER=${MYSQL_USER:-root}
read_password MYSQL_PASSWORD "ENTER A PASSWORD TO USE FOR MYSQL."

# don't specify /db in this string, so we can use it for multiple services
BASE_SQL_CONN=${BASE_SQL_CONN:-mysql://$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST}

# Rabbit connection info
RABBIT_HOST=${RABBIT_HOST:-localhost}
read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT."

# Glance connection info.  Note the port must be specified.
GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$HOST_IP:9292}
# SWIFT
# -----
# TODO: implement glance support
# TODO: add logging to different location.

# By default the location of swift drives and objects is located inside the
# swift source directory.  The SWIFT_DATA_LOCATION variable allows you to
# redefine this.
SWIFT_DATA_LOCATION=${SWIFT_DATA_LOCATION:-${SWIFT_DIR}/data}

# We are going to have the configuration files inside the source
# directory, change SWIFT_CONFIG_LOCATION if you want to adjust that.
SWIFT_CONFIG_LOCATION=${SWIFT_CONFIG_LOCATION:-${SWIFT_DIR}/config}

# devstack will create a loop-back disk formatted as XFS to store the
# swift data.  By default the disk size is 1 gigabyte.  The variable
# SWIFT_LOOPBACK_DISK_SIZE, specified in bytes, allows you to change
# that.
SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000}

# The ring uses a configurable number of bits from a path's MD5 hash as
# a partition index that designates a device.  The number of bits kept
# from the hash is known as the partition power, and 2 to the partition
# power indicates the partition count.  Partitioning the full MD5 hash
# ring allows other parts of the cluster to work in batches of items at
# once which ends up either more efficient or at least less complex than
# working with each item separately or the entire cluster all at once.
# By default we define 9 for the partition count (which mean 512).
SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9}

# We only ask for Swift Hash if we have enabled swift service.
if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
    # SWIFT_HASH is a random unique string for a swift cluster that
    # can never change.
    read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
fi
# Keystone
# --------

# Service Token - Openstack components need to have an admin token
# to validate user tokens.
read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN."
# Horizon currently truncates usernames and passwords at 20 characters
read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)."
# Log the remainder of the run; the subshell opened here is closed at the end
# of the script where its output is teed into ``LOGFILE``.
LOGFILE=${LOGFILE:-"$PWD/stack.sh.$$.log"}
(
# So that errors don't compound we exit on any errors so you see only the
# first error that occurred.
trap failed ERR
failed() {
    local r=$?
    set +o xtrace
    [ -n "$LOGFILE" ] && echo "${0##*/} failed: full log in $LOGFILE"
    exit $r
}

# Print the commands being run so that we can see the command that triggers
# an error.  It is also useful for following along as the install occurs.
set -o xtrace

# create the destination directory and ensure it is writable by the user
sudo mkdir -p $DEST
if [ ! -w $DEST ]; then
    sudo chown `whoami` $DEST
fi
# Install Packages
# ================
#
# Openstack uses a fair number of other projects.

# install apt requirements; the cut strips inline '#' comments from the apts
# files, and mysql/rabbit/memcached are excluded here because they are
# installed later only when their service is enabled.
apt_get update
apt_get install `cat $FILES/apts/* | cut -d\# -f1 | grep -Ev "mysql-server|rabbitmq-server|memcached"`

# install python requirements
sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install `cat $FILES/pips/*`
# git clone only if directory doesn't exist already.  Since ``DEST`` might not
# be owned by the installation user, we create the directory and change the
# ownership to the proper user.
#
# Arguments: $1 - remote URL, $2 - destination directory, $3 - branch or tag.
function git_clone {
    GIT_REMOTE=$1
    GIT_DEST=$2
    GIT_BRANCH=$3

    # do a full clone only if the directory doesn't exist
    if [ ! -d $GIT_DEST ]; then
        git clone $GIT_REMOTE $GIT_DEST
        cd $GIT_DEST
        # This checkout syntax works for both branches and tags
        git checkout $GIT_BRANCH
    elif [[ "$RECLONE" == "yes" ]]; then
        # if it does exist then simulate what clone does if asked to RECLONE
        cd $GIT_DEST
        # set the url to pull from and fetch
        git remote set-url origin $GIT_REMOTE
        git fetch origin
        # remove the existing ignored files (like pyc) as they cause breakage
        # (due to the py files having older timestamps than our pyc, so python
        # thinks the pyc files are correct using them)
        sudo git clean -f -d
        git checkout -f origin/$GIT_BRANCH
        # a local branch might not exist
        git branch -D $GIT_BRANCH || true
        git checkout -b $GIT_BRANCH
    fi
}
# compute service
git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
# storage service
git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
# swift + keystone middleware
git_clone $SWIFT_KEYSTONE_REPO $SWIFT_KEYSTONE_DIR $SWIFT_KEYSTONE_BRANCH
# image catalog service
git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH
# unified auth system (manages accounts/tokens)
git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH
# a websockets/html5 or flash powered VNC console for vm instances
git_clone $NOVNC_REPO $NOVNC_DIR $NOVNC_BRANCH
# django powered web control panel for openstack
# NOTE(review): git_clone only consumes three arguments, so the trailing
# $HORIZON_TAG is currently ignored -- confirm whether tag pinning is intended.
git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG
# python client library to nova that horizon (and others) use
git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH
# openstackx is a collection of extensions to openstack.compute & nova
# that is *deprecated*.  The code is being moved into python-novaclient & nova.
git_clone $OPENSTACKX_REPO $OPENSTACKX_DIR $OPENSTACKX_BRANCH
# quantum
git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH
# Initialization
# ==============

# setup our checkouts so they are installed into python path
# allowing ``import nova`` or ``import glance.client``
cd $KEYSTONE_DIR; sudo python setup.py develop
cd $SWIFT_DIR; sudo python setup.py develop
cd $SWIFT_KEYSTONE_DIR; sudo python setup.py develop
cd $GLANCE_DIR; sudo python setup.py develop
cd $NOVACLIENT_DIR; sudo python setup.py develop
cd $NOVA_DIR; sudo python setup.py develop
cd $OPENSTACKX_DIR; sudo python setup.py develop
cd $HORIZON_DIR/django-openstack; sudo python setup.py develop
cd $HORIZON_DIR/openstack-dashboard; sudo python setup.py develop
cd $QUANTUM_DIR; sudo python setup.py develop

# Add a useful screenrc.  This isn't required to run openstack but we do it
# since we are going to run the services in screen for simplicity.
cp $FILES/screenrc ~/.screenrc
# Rabbit
# ---------

if [[ "$ENABLED_SERVICES" =~ "rabbit" ]]; then
    # Install and start rabbitmq-server
    # the temp file is necessary due to LP: #878600
    tfile=$(mktemp)
    apt_get install rabbitmq-server > "$tfile" 2>&1
    cat "$tfile"
    rm -f "$tfile"
    # change the rabbit password since the default is "guest"
    sudo rabbitmqctl change_password guest $RABBIT_PASSWORD
fi
# Mysql
# ---------

if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then

    # Seed configuration with mysql password so that apt-get install doesn't
    # prompt us for a password upon install.
    cat <<MYSQL_PRESEED | sudo debconf-set-selections
mysql-server-5.1 mysql-server/root_password password $MYSQL_PASSWORD
mysql-server-5.1 mysql-server/root_password_again password $MYSQL_PASSWORD
mysql-server-5.1 mysql-server/start_on_boot boolean true
MYSQL_PRESEED

    # while ``.my.cnf`` is not needed for openstack to function, it is useful
    # as it allows you to access the mysql databases via ``mysql nova`` instead
    # of having to specify the username/password each time.
    if [[ ! -e $HOME/.my.cnf ]]; then
        cat <<EOF >$HOME/.my.cnf
[client]
user=$MYSQL_USER
password=$MYSQL_PASSWORD
host=$MYSQL_HOST
EOF
        chmod 0600 $HOME/.my.cnf
    fi

    # Install and start mysql-server
    apt_get install mysql-server

    # Update the DB to give user '$MYSQL_USER'@'%' full control of the all databases:
    sudo mysql -uroot -p$MYSQL_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$MYSQL_USER'@'%' identified by '$MYSQL_PASSWORD';"

    # Edit /etc/mysql/my.cnf to change 'bind-address' from localhost
    # (127.0.0.1) to any (0.0.0.0) and restart the mysql service:
    sudo sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf
    sudo service mysql restart
fi
# Horizon
# ---------

# Setup the django horizon application to serve via apache/wsgi

if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then

    # Horizon currently imports quantum even if you aren't using it.  Instead
    # of installing quantum we can create a simple module that will pass the
    # initial imports
    mkdir -p $HORIZON_DIR/openstack-dashboard/quantum || true
    touch $HORIZON_DIR/openstack-dashboard/quantum/__init__.py
    touch $HORIZON_DIR/openstack-dashboard/quantum/client.py

    # ``local_settings.py`` is used to override horizon default settings.
    cp $FILES/horizon_settings.py $HORIZON_DIR/openstack-dashboard/local/local_settings.py

    # Initialize the horizon database (it stores sessions and notices shown to
    # users).  The user system is external (keystone).
    cd $HORIZON_DIR/openstack-dashboard
    dashboard/manage.py syncdb

    # create an empty directory that apache uses as docroot
    sudo mkdir -p $HORIZON_DIR/.blackhole

    ## Configure apache's 000-default to run horizon
    sudo cp $FILES/000-default.template /etc/apache2/sites-enabled/000-default
    sudo sed -e "s,%USER%,$USER,g" -i /etc/apache2/sites-enabled/000-default
    sudo sed -e "s,%HORIZON_DIR%,$HORIZON_DIR,g" -i /etc/apache2/sites-enabled/000-default
    sudo service apache2 restart
fi
# Glance
# ------

if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then
    GLANCE_IMAGE_DIR=$DEST/glance/images
    # Delete existing images
    rm -rf $GLANCE_IMAGE_DIR

    # Use local glance directories
    mkdir -p $GLANCE_IMAGE_DIR

    # (re)create glance database
    mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS glance;'
    mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE glance;'

    # Copy over our glance configurations and update them
    GLANCE_CONF=$GLANCE_DIR/etc/glance-registry.conf
    cp $FILES/glance-registry.conf $GLANCE_CONF
    sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/glance,g" -i $GLANCE_CONF
    sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $GLANCE_CONF
    sudo sed -e "s,%DEST%,$DEST,g" -i $GLANCE_CONF
    sudo sed -e "s,%SYSLOG%,$SYSLOG,g" -i $GLANCE_CONF

    GLANCE_API_CONF=$GLANCE_DIR/etc/glance-api.conf
    cp $FILES/glance-api.conf $GLANCE_API_CONF
    sudo sed -e "s,%DEST%,$DEST,g" -i $GLANCE_API_CONF
    sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $GLANCE_API_CONF
    sudo sed -e "s,%SYSLOG%,$SYSLOG,g" -i $GLANCE_API_CONF
fi
# Nova
# ----

if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
    # We are going to use the sample http middleware configuration from the
    # keystone project to launch nova.  This paste config adds the configuration
    # required for nova to validate keystone tokens - except we need to switch
    # the config to use our service token instead (instead of the invalid token
    # 999888777666).
    cp $KEYSTONE_DIR/examples/paste/nova-api-paste.ini $NOVA_DIR/bin
    sed -e "s,999888777666,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
fi
2011-09-13 00:09:08 +00:00
2011-09-15 23:52:43 +00:00
if [ [ " $ENABLED_SERVICES " = ~ "n-cpu" ] ] ; then
2011-09-13 20:17:22 +00:00
2011-10-02 21:47:32 +00:00
# Virtualization Configuration
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# attempt to load modules: network block device - used to manage qcow images
2011-09-15 23:52:43 +00:00
sudo modprobe nbd || true
2011-10-02 17:11:28 +00:00
2011-10-20 17:07:10 +00:00
# Check for kvm (hardware based virtualization). If unable to initialize
# kvm, we drop back to the slower emulation mode (qemu). Note: many systems
2011-10-19 16:24:17 +00:00
# come with hardware virtualization disabled in BIOS.
2011-10-04 02:48:30 +00:00
if [ [ " $LIBVIRT_TYPE " = = "kvm" ] ] ; then
2011-10-02 21:47:32 +00:00
sudo modprobe kvm || true
2011-10-02 17:11:28 +00:00
if [ ! -e /dev/kvm ] ; then
2011-10-02 21:47:32 +00:00
echo "WARNING: Switching to QEMU"
2011-10-02 17:11:28 +00:00
LIBVIRT_TYPE = qemu
fi
fi
2011-10-02 21:47:32 +00:00
# Install and configure **LXC** if specified. LXC is another approach to
# splitting a system into many smaller parts. LXC uses cgroups and chroot
# to simulate multiple systems.
2011-10-04 02:48:30 +00:00
if [ [ " $LIBVIRT_TYPE " = = "lxc" ] ] ; then
2011-10-20 17:09:11 +00:00
apt_get install lxc
2011-10-19 16:24:17 +00:00
# lxc uses cgroups (a kernel interface via virtual filesystem) configured
# and mounted to ``/cgroup``
2011-10-02 17:11:28 +00:00
sudo mkdir -p /cgroup
if ! grep -q cgroup /etc/fstab; then
2011-10-02 17:25:33 +00:00
echo none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0 | sudo tee -a /etc/fstab
2011-10-02 17:11:28 +00:00
fi
2011-10-07 14:34:32 +00:00
if ! mount -n | grep -q cgroup; then
sudo mount /cgroup
fi
2011-09-16 23:28:13 +00:00
fi
# The user that nova runs as needs to be member of libvirtd group otherwise
# nova-compute will be unable to use libvirt.
sudo usermod -a -G libvirtd "$(whoami)"

# libvirt detects various settings on startup, as we potentially changed
# the system configuration (modules, filesystems), we need to restart
# libvirt to detect those changes.
sudo /etc/init.d/libvirt-bin restart


# Instance Storage
# ~~~~~~~~~~~~~~~~
#
# Nova stores each instance in its own directory.
mkdir -p "$NOVA_DIR/instances"
# You can specify a different disk to be mounted and used for backing the
# virtual machines.  If there is a partition labeled nova-instances we
# mount it (ext filesystems can be labeled via e2label).
if [ -L /dev/disk/by-label/nova-instances ]; then
    if ! mount -n | grep -q nova-instances; then
        sudo mount -L nova-instances "$NOVA_DIR/instances"
        sudo chown -R "$(whoami)" "$NOVA_DIR/instances"
    fi
fi

# Clean out the instances directory.  ${NOVA_DIR:?} aborts if NOVA_DIR is
# unset/empty so we can never expand to ``rm -rf /instances/*``.
sudo rm -rf "${NOVA_DIR:?}/instances/"*
fi
if [[ "$ENABLED_SERVICES" =~ "n-net" ]]; then
    # delete traces of nova networks from prior runs
    sudo killall dnsmasq || true
    rm -rf "${NOVA_DIR:?}/networks"
    mkdir -p "$NOVA_DIR/networks"
fi
# Storage Service
if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
    # We first do a bit of setup by creating the directories and
    # changing the permissions so we can run it as our user.

    USER_GROUP=$(id -g)
    sudo mkdir -p ${SWIFT_DATA_LOCATION}/drives
    sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_LOCATION}/drives

    # We then create a loopback disk and format it to XFS.
    if [[ ! -e ${SWIFT_DATA_LOCATION}/drives/images/swift.img ]]; then
        mkdir -p ${SWIFT_DATA_LOCATION}/drives/images
        sudo touch ${SWIFT_DATA_LOCATION}/drives/images/swift.img
        sudo chown $USER: ${SWIFT_DATA_LOCATION}/drives/images/swift.img

        # seek-only dd creates a sparse file of SWIFT_LOOPBACK_DISK_SIZE
        # without actually writing that many blocks.
        dd if=/dev/zero of=${SWIFT_DATA_LOCATION}/drives/images/swift.img \
            bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
        mkfs.xfs -f -i size=1024 ${SWIFT_DATA_LOCATION}/drives/images/swift.img
    fi

    # After the drive being created we mount the disk with a few mount
    # options to make it most efficient as possible for swift.
    mkdir -p ${SWIFT_DATA_LOCATION}/drives/sdb1
    if ! egrep -q ${SWIFT_DATA_LOCATION}/drives/sdb1 /proc/mounts; then
        sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \
            ${SWIFT_DATA_LOCATION}/drives/images/swift.img ${SWIFT_DATA_LOCATION}/drives/sdb1
    fi

    # We then create link to that mounted location so swift would know
    # where to go.
    for x in {1..4}; do sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x; done

    # We now have to emulate a few different servers into one we
    # create all the directories needed for swift
    tmpd=""
    for d in ${SWIFT_DATA_LOCATION}/drives/sdb1/{1..4} \
        ${SWIFT_CONFIG_LOCATION}/{object,container,account}-server \
        ${SWIFT_DATA_LOCATION}/{1..4}/node/sdb1 /var/run/swift; do
        [[ -d $d ]] && continue
        sudo install -o ${USER} -g $USER_GROUP -d $d
    done

    # We do want to make sure this is all owned by our user.
    sudo chown -R $USER: ${SWIFT_DATA_LOCATION}/{1..4}/node
    sudo chown -R $USER: ${SWIFT_CONFIG_LOCATION}

    # swift-init has a bug using /etc/swift until bug #885595 is fixed
    # we have to create a link
    sudo ln -s ${SWIFT_CONFIG_LOCATION} /etc/swift

    # Swift use rsync to syncronize between all the different
    # partitions (which make more sense when you have a multi-node
    # setup) we configure it with our version of rsync.
    sed -e "s/%GROUP%/${USER_GROUP}/;s/%USER%/$USER/;s,%SWIFT_DATA_LOCATION%,$SWIFT_DATA_LOCATION," $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf
    sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync

    # By default Swift will be installed with the tempauth middleware
    # which has some default username and password if you have
    # configured keystone it will checkout the directory.
    if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
        swift_auth_server=keystone
        # We need a special version of bin/swift which understand the
        # OpenStack api 2.0, we download it until this is getting
        # integrated in swift.
        sudo curl -s -o/usr/local/bin/swift \
            'https://review.openstack.org/gitweb?p=openstack/swift.git;a=blob_plain;f=bin/swift;hb=48bfda6e2fdf3886c98bd15649887d54b9a2574e'
    else
        swift_auth_server=tempauth
    fi

    # We do the install of the proxy-server and swift configuration
    # replacing a few directives to match our configuration.
    sed "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s/%USER%/$USER/;s/%SERVICE_TOKEN%/${SERVICE_TOKEN}/;s/%AUTH_SERVER%/${swift_auth_server}/" \
        $FILES/swift/proxy-server.conf | sudo tee ${SWIFT_CONFIG_LOCATION}/proxy-server.conf

    sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift/swift.conf > ${SWIFT_CONFIG_LOCATION}/swift.conf

    # We need to generate a object/account/proxy configuration
    # emulating 4 nodes on different ports we have a little function
    # that help us doing that.
    function generate_swift_configuration() {
        local server_type=$1
        local bind_port=$2
        local log_facility=$3
        local node_number

        for node_number in {1..4}; do
            node_path=${SWIFT_DATA_LOCATION}/${node_number}
            sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \
                $FILES/swift/${server_type}-server.conf > ${SWIFT_CONFIG_LOCATION}/${server_type}-server/${node_number}.conf
            # Each emulated node gets its own port and syslog facility.
            bind_port=$(( bind_port + 10 ))
            log_facility=$(( log_facility + 1 ))
        done
    }
    generate_swift_configuration object 6010 2
    generate_swift_configuration container 6011 2
    generate_swift_configuration account 6012 2


    # We create two helper scripts :
    #
    # - swift-remakerings
    #   Allow to recreate rings from scratch.
    # - swift-startmain
    #   Restart your full cluster.
    #
    sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s/%SWIFT_PARTITION_POWER_SIZE%/$SWIFT_PARTITION_POWER_SIZE/" $FILES/swift/swift-remakerings | \
        sudo tee /usr/local/bin/swift-remakerings
    sudo install -m755 $FILES/swift/swift-startmain /usr/local/bin/
    sudo chmod +x /usr/local/bin/swift-*

    # We then can start rsync.
    sudo /etc/init.d/rsync restart || :

    # Create our ring for the object/container/account.
    /usr/local/bin/swift-remakerings

    # And now we launch swift-startmain to get our cluster running
    # ready to be tested.
    /usr/local/bin/swift-startmain || :

    unset s swift_hash swift_auth_server tmpd
fi
# Volume Service
# --------------
if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then
    #
    # Configure a default volume group called 'nova-volumes' for the nova-volume
    # service if it does not yet exist.  If you don't wish to use a file backed
    # volume group, create your own volume group called 'nova-volumes' before
    # invoking stack.sh.
    #
    # By default, the backing file is 2G in size, and is stored in /opt/stack.
    #
    if ! sudo vgdisplay | grep -q nova-volumes; then
        VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DEST/nova-volumes-backing-file}
        VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M}
        # truncate creates a sparse file; losetup -f --show prints the
        # loop device it attached so we can hand it to vgcreate.
        truncate -s "$VOLUME_BACKING_FILE_SIZE" "$VOLUME_BACKING_FILE"
        DEV=$(sudo losetup -f --show "$VOLUME_BACKING_FILE")
        sudo vgcreate nova-volumes "$DEV"
    fi
    # Configure iscsitarget
    sudo sed 's/ISCSITARGET_ENABLE=false/ISCSITARGET_ENABLE=true/' -i /etc/default/iscsitarget
    sudo /etc/init.d/iscsitarget restart
fi
# Append a single flag line ($1) to the nova.conf being assembled under
# $NOVA_DIR/bin.  Requires NOVA_DIR to be set by the caller.
function add_nova_flag {
    echo "$1" >> "$NOVA_DIR/bin/nova.conf"
}
# (re)create nova.conf
rm -f "$NOVA_DIR/bin/nova.conf"
add_nova_flag "--verbose"
add_nova_flag "--nodaemon"
add_nova_flag "--allow_admin_api"
add_nova_flag "--scheduler_driver=$SCHEDULER"
add_nova_flag "--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf"
add_nova_flag "--fixed_range=$FIXED_RANGE"
# When Quantum provides networking, nova must use the QuantumManager; the
# openvswitch plugin additionally needs the ethernet vif driver.
if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
    add_nova_flag "--network_manager=nova.network.quantum.manager.QuantumManager"
    if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
        add_nova_flag "--libvirt_vif_type=ethernet"
        add_nova_flag "--libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtOpenVswitchDriver"
    fi
else
    add_nova_flag "--network_manager=nova.network.manager.$NET_MAN"
fi
add_nova_flag "--my_ip=$HOST_IP"
add_nova_flag "--public_interface=$PUBLIC_INTERFACE"
add_nova_flag "--vlan_interface=$VLAN_INTERFACE"
add_nova_flag "--sql_connection=$BASE_SQL_CONN/nova"
add_nova_flag "--libvirt_type=$LIBVIRT_TYPE"
add_nova_flag "--osapi_extensions_path=$OPENSTACKX_DIR/extensions"
add_nova_flag "--vncproxy_url=http://$HOST_IP:6080"
add_nova_flag "--vncproxy_wwwroot=$NOVNC_DIR/"
add_nova_flag "--api_paste_config=$NOVA_DIR/bin/nova-api-paste.ini"
add_nova_flag "--image_service=nova.image.glance.GlanceImageService"
add_nova_flag "--ec2_dmz_host=$EC2_DMZ_HOST"
add_nova_flag "--rabbit_host=$RABBIT_HOST"
add_nova_flag "--rabbit_password=$RABBIT_PASSWORD"
add_nova_flag "--glance_api_servers=$GLANCE_HOSTPORT"
if [ -n "$INSTANCES_PATH" ]; then
    add_nova_flag "--instances_path=$INSTANCES_PATH"
fi
if [ -n "$MULTI_HOST" ]; then
    add_nova_flag "--multi_host=$MULTI_HOST"
    add_nova_flag "--send_arp_for_ha=1"
fi
if [ "$SYSLOG" != "False" ]; then
    add_nova_flag "--use_syslog=1"
fi
# XenServer
# ---------
if [ "$VIRT_DRIVER" = 'xenserver' ]; then
    read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN."
    add_nova_flag "--connection_type=xenapi"
    # 169.254.0.1 is the XenServer dom0 address as seen from domU.
    add_nova_flag "--xenapi_connection_url=http://169.254.0.1"
    add_nova_flag "--xenapi_connection_username=root"
    add_nova_flag "--xenapi_connection_password=$XENAPI_PASSWORD"
    add_nova_flag "--flat_injected=False"
    add_nova_flag "--flat_interface=eth1"
    add_nova_flag "--flat_network_bridge=xenbr1"
    add_nova_flag "--public_interface=eth3"
else
    add_nova_flag "--flat_network_bridge=$FLAT_NETWORK_BRIDGE"
    if [ -n "$FLAT_INTERFACE" ]; then
        add_nova_flag "--flat_interface=$FLAT_INTERFACE"
    fi
fi
# Nova Database
# ~~~~~~~~~~~~~
# All nova components talk to a central database.  We will need to do this step
# only once for an entire cluster.
if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then
    # (re)create nova database
    mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS nova;'
    mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE nova;'

    # (re)populate the nova schema
    $NOVA_DIR/bin/nova-manage db sync
fi
# Keystone
# --------
if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
    # (re)create keystone database
    mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;'
    mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone;'

    # FIXME (anthony) keystone should use keystone.conf.example
    KEYSTONE_CONF=$KEYSTONE_DIR/etc/keystone.conf
    cp $FILES/keystone.conf $KEYSTONE_CONF
    # Fill in the %PLACEHOLDER% tokens shipped in the template config.
    sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/keystone,g" -i $KEYSTONE_CONF
    sudo sed -e "s,%DEST%,$DEST,g" -i $KEYSTONE_CONF

    # keystone_data.sh creates our admin user and our ``SERVICE_TOKEN``.
    KEYSTONE_DATA=$KEYSTONE_DIR/bin/keystone_data.sh
    cp $FILES/keystone_data.sh $KEYSTONE_DATA
    sudo sed -e "s,%HOST_IP%,$HOST_IP,g" -i $KEYSTONE_DATA
    sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $KEYSTONE_DATA
    sudo sed -e "s,%ADMIN_PASSWORD%,$ADMIN_PASSWORD,g" -i $KEYSTONE_DATA
    # initialize keystone with default users/endpoints
    BIN_DIR=$KEYSTONE_DIR/bin bash $KEYSTONE_DATA
fi
# Launch Services
# ===============

# nova api crashes if we start it with a regular screen command,
# so send the start command by forcing text into the window.
# Only run the services specified in ``ENABLED_SERVICES``

# Our screen helper: launch service $1 (command line $2) in a named screen
# window, but only when $1 appears in ENABLED_SERVICES.
function screen_it {
    # \015 is carriage return: appended to the command so screen "presses
    # enter" after stuffing the text into the window.
    NL=$(echo -ne '\015')
    if [[ "$ENABLED_SERVICES" =~ "$1" ]]; then
        screen -S stack -X screen -t $1
        # sleep to allow bash to be ready to be send the command - we are
        # creating a new window in screen and then sends characters, so if
        # bash isn't running by the time we send the command, nothing happens
        sleep 1
        screen -S stack -p $1 -X stuff "$2$NL"
    fi
}
# create a new named screen to run processes in
screen -d -m -S stack -t stack
sleep 1

# launch the glance registry service
if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then
    screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=etc/glance-registry.conf"
fi

# launch the glance api and wait for it to answer before continuing
if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
    screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=etc/glance-api.conf"
    echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
        echo "g-api did not start"
        exit 1
    fi
fi

# launch the keystone and wait for it to answer before continuing
if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
    screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF -d"
    echo "Waiting for keystone to start..."
    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:5000; do sleep 1; done"; then
        echo "keystone did not start"
        exit 1
    fi
fi

# launch the nova-api and wait for it to answer before continuing
if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
    screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api"
    echo "Waiting for nova-api to start..."
    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then
        echo "nova-api did not start"
        exit 1
    fi
fi
# Quantum
if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
    # Install deps
    # FIXME add to files/apts/quantum, but don't install if not needed!
    apt_get install openvswitch-switch openvswitch-datapath-dkms

    # Create database for the plugin/agent
    if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
        if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then
            mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum;'
        else
            echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin."
            exit 1
        fi
    fi
    QUANTUM_PLUGIN_INI_FILE=$QUANTUM_DIR/quantum/plugins.ini
    # Make sure we're using the openvswitch plugin
    sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE
    screen_it q-svc "cd $QUANTUM_DIR && export PYTHONPATH=.:$PYTHONPATH; python $QUANTUM_DIR/bin/quantum $QUANTUM_DIR/etc/quantum.conf"
fi

# Quantum agent (for compute nodes)
if [[ "$ENABLED_SERVICES" =~ "q-agt" ]]; then
    if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
        # Set up integration bridge: delete any stale bridge first so we
        # always start from a clean one.
        OVS_BRIDGE=${OVS_BRIDGE:-br-int}
        sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE
        sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE
        sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int
    fi
    # Start up the quantum <-> openvswitch agent
    screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_DIR/quantum/plugins/openvswitch/ovs_quantum_plugin.ini -v"
fi

# If we're using Quantum (i.e. q-svc is enabled), network creation has to
# happen after we've started the Quantum service.
if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then
    # create a small network
    $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE
    if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
        echo "Not creating floating IPs (not supported by QuantumManager)"
    else
        # create some floating ips
        $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE
    fi
fi
# Launching nova-compute should be as simple as running ``nova-compute`` but
# have to do a little more than that in our script.  Since we add the group
# ``libvirtd`` to our user in this script, when nova-compute is run it is
# within the context of our original shell (so our groups won't be updated).
# Use 'sg' to execute nova-compute as a member of the libvirtd group.
screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_DIR/bin/nova-compute"
screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume"
screen_it n-net "cd $NOVA_DIR && $NOVA_DIR/bin/nova-network"
screen_it n-sch "cd $NOVA_DIR && $NOVA_DIR/bin/nova-scheduler"
screen_it n-vnc "cd $NOVNC_DIR && ./utils/nova-wsproxy.py --flagfile $NOVA_DIR/bin/nova.conf --web . 6080"
# horizon is served by apache; tail its error log in the screen window so
# problems are visible alongside the other services.
screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log"
2011-09-13 08:24:50 +00:00
# Install Images
# ==============
2011-09-13 01:08:04 +00:00
2011-10-14 06:03:23 +00:00
# Upload an image to glance.
2011-10-03 05:08:24 +00:00
#
2011-10-14 06:03:23 +00:00
# The default image is a small ***TTY*** testing image, which lets you login
# the username/password of root/password.
#
# TTY also uses cloud-init, supporting login via keypair and sending scripts as
# userdata. See https://help.ubuntu.com/community/CloudInit for more on cloud-init
#
2011-11-02 16:57:11 +00:00
# Override ``IMAGE_URLS`` with a comma-separated list of uec images.
2011-10-19 17:30:19 +00:00
#
# * **natty**: http://uec-images.ubuntu.com/natty/current/natty-server-cloudimg-amd64.tar.gz
# * **oneiric**: http://uec-images.ubuntu.com/oneiric/current/oneiric-server-cloudimg-amd64.tar.gz
2011-10-03 03:42:56 +00:00
2011-10-03 04:01:28 +00:00
if [ [ " $ENABLED_SERVICES " = ~ "g-reg" ] ] ; then
2011-10-14 17:20:30 +00:00
# Create a directory for the downloaded image tarballs.
2011-10-03 03:42:56 +00:00
mkdir -p $FILES /images
2011-10-27 05:29:08 +00:00
# Option to upload legacy ami-tty, which works with xenserver
if [ $UPLOAD_LEGACY_TTY ] ; then
if [ ! -f $FILES /tty.tgz ] ; then
wget -c http://images.ansolabs.com/tty.tgz -O $FILES /tty.tgz
fi
tar -zxf $FILES /tty.tgz -C $FILES /images
RVAL = ` glance add -A $SERVICE_TOKEN name = "tty-kernel" is_public = true container_format = aki disk_format = aki < $FILES /images/aki-tty/image`
KERNEL_ID = ` echo $RVAL | cut -d":" -f2 | tr -d " " `
RVAL = ` glance add -A $SERVICE_TOKEN name = "tty-ramdisk" is_public = true container_format = ari disk_format = ari < $FILES /images/ari-tty/image`
RAMDISK_ID = ` echo $RVAL | cut -d":" -f2 | tr -d " " `
glance add -A $SERVICE_TOKEN name = "tty" is_public = true container_format = ami disk_format = ami kernel_id = $KERNEL_ID ramdisk_id = $RAMDISK_ID < $FILES /images/ami-tty/image
fi
2011-10-14 16:31:09 +00:00
for image_url in ${ IMAGE_URLS //,/ } ; do
# Downloads the image (uec ami+aki style), then extracts it.
2011-10-25 04:37:00 +00:00
IMAGE_FNAME = ` basename " $image_url " `
2011-10-25 07:10:21 +00:00
IMAGE_NAME = ` basename " $IMAGE_FNAME " .tar.gz`
2011-10-14 16:31:09 +00:00
if [ ! -f $FILES /$IMAGE_FNAME ] ; then
wget -c $image_url -O $FILES /$IMAGE_FNAME
fi
2011-09-13 01:08:04 +00:00
2011-10-14 16:31:09 +00:00
# Extract ami and aki files
tar -zxf $FILES /$IMAGE_FNAME -C $FILES /images
2011-09-13 01:08:04 +00:00
2011-10-14 16:31:09 +00:00
# Use glance client to add the kernel the root filesystem.
# We parse the results of the first upload to get the glance ID of the
# kernel for use when uploading the root filesystem.
RVAL = ` glance add -A $SERVICE_TOKEN name = " $IMAGE_NAME -kernel " is_public = true container_format = aki disk_format = aki < $FILES /images/$IMAGE_NAME -vmlinuz*`
KERNEL_ID = ` echo $RVAL | cut -d":" -f2 | tr -d " " `
glance add -A $SERVICE_TOKEN name = " $IMAGE_NAME " is_public = true container_format = ami disk_format = ami kernel_id = $KERNEL_ID < $FILES /images/$IMAGE_NAME .img
done
2011-09-15 23:52:43 +00:00
fi
# Fin
# ===

# Close the big logged subshell (opened earlier in the script) and tee its
# combined stdout/stderr into the logfile.
) 2>&1 | tee "${LOGFILE}"

# Check that the left side of the above pipe succeeded
for ret in "${PIPESTATUS[@]}"; do [ $ret -eq 0 ] || exit $ret; done

(
# Using the cloud
# ===============

echo ""
echo ""
echo ""

# If you installed the horizon on this server, then you should be able
# to access the site using your browser.
if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then
    echo "horizon is now available at http://$HOST_IP/"
fi

# If keystone is present, you can point nova cli to this server
if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
    echo "keystone is serving at http://$HOST_IP:5000/v2.0/"
    echo "examples on using novaclient command line is in exercise.sh"
    echo "the default users are: admin and demo"
    echo "the password: $ADMIN_PASSWORD"
fi

# indicate how long this took to run (bash maintained variable 'SECONDS')
echo "stack.sh completed in $SECONDS seconds."

) | tee -a "$LOGFILE"