Merge pull request #12 from fgrehm/basic-provider
Basic support for start, halt, reload, ssh, status and destroy commands
Commit 4d6e5ea287
21 changed files with 1059 additions and 159 deletions
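As a rough orientation (not part of the diff itself), each of the commands listed above ends up asking the provider for a middleware stack through the "action_#{name}" lookup added in this PR. A minimal Ruby sketch, assuming a Vagrant `machine` object is in scope:

# Illustrative only: mirrors the dispatch in the provider changes below,
# which builds "action_#{name}" and sends it to Vagrant::LXC::Action.
provider = machine.provider                 # Vagrant::LXC::Provider (machine is assumed)
[:start, :halt, :reload, :ssh, :destroy].each do |name|
  stack = provider.action(name)             # e.g. LXC::Action.action_ssh for :ssh
  puts "#{name} -> #{stack.class}"          # each is a Vagrant::Action::Builder
end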
.gitignore (vendored): 3 lines changed
@@ -22,4 +22,5 @@ doc/
.vagrant
/cache
/dummy-ubuntu-cloudimg.box
/boxes/**/*.tar.gz
/boxes/output/
boxes/ubuntu-cloud/metadata.json (new file): 7 lines
@@ -0,0 +1,7 @@
{
  "provider": "lxc",
  "vagrant-lxc-version": "0.0.1",
  "template-name": "ubuntu-cloud",
  "after-create-script": "setup-vagrant-user.sh",
  "tar-cache": "ubuntu-12.10-server-cloudimg-amd64-root.tar.gz"
}
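A rough sketch (not part of the commit) of how these keys are consumed, based on HandleBoxMetadata and Container#create further down in this diff; the box directory path below is only an example value:

require 'json'

metadata = JSON.parse(File.read('boxes/ubuntu-cloud/metadata.json'))
box_dir  = '/home/user/.vagrant.d/boxes/ubuntu-cloud/lxc'        # example location, not from the diff

template  = "vagrant-ubuntu-cloud-#{metadata['template-name']}"  # copied to /usr/share/lxc/templates as lxc-<template>
tar_cache = File.join(box_dir, metadata['tar-cache'])            # handed to lxc-create as -T
hook      = File.join(box_dir, metadata['after-create-script'])  # executed once right after lxc-create
puts [template, tar_cache, hook].inspect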
boxes/ubuntu-cloud/setup-vagrant-user.sh (new executable file): 58 lines
@@ -0,0 +1,58 @@
#!/bin/bash
# Argument = -r <path/to/rootfs> -i <container-ip> -k <vagrant-private-key-path>

CONTAINER_ROOTFS=
CONTAINER_IP=
VAGRANT_PRIVATE_KEY_PATH=

options=$(getopt -o r:i:k: -- "$@")
eval set -- "$options"

declare r CONTAINER_ROOTFS \
        i CONTAINER_IP \
        k VAGRANT_PRIVATE_KEY_PATH

while true
do
    case "$1" in
    -r) CONTAINER_ROOTFS=$2; shift 2;;
    -i) CONTAINER_IP=$2; shift 2;;
    -k) VAGRANT_PRIVATE_KEY_PATH=$2; shift 2;;
    *)  break ;;
    esac
done

if [[ -z $CONTAINER_ROOTFS ]] || [[ -z $CONTAINER_IP ]] || [[ -z $VAGRANT_PRIVATE_KEY_PATH ]]
then
    echo 'You forgot an argument!'
    exit 1
fi

remote_setup_script() {
  cat << EOF
useradd -d /home/vagrant -m vagrant -r -s /bin/bash
usermod -a -G admin vagrant
cp /etc/sudoers /etc/sudoers.orig
sed -i -e '/Defaults\s\+env_reset/a Defaults\texempt_group=admin' /etc/sudoers
sed -i -e 's/%admin\s\+ALL=(ALL)\s\+ALL/%admin ALL=NOPASSWD:ALL/g' /etc/sudoers
service sudo restart
sudo su vagrant -c "mkdir -p /home/vagrant/.ssh"
sudo su vagrant -c "curl -s -o /home/vagrant/.ssh/authorized_keys https://raw.github.com/mitchellh/vagrant/master/keys/vagrant.pub"
EOF
}

REMOTE_SETUP_SCRIPT_PATH="/tmp/setup-vagrant-user"

# Ensures the private key has the right permissions
# Might not be needed after: https://github.com/mitchellh/vagrant/commit/d304cca35d19c5bd370330c74f003b6ac46e7f4a
chmod 0600 $VAGRANT_PRIVATE_KEY_PATH

remote_setup_script > "${CONTAINER_ROOTFS}${REMOTE_SETUP_SCRIPT_PATH}"
chmod +x "${CONTAINER_ROOTFS}${REMOTE_SETUP_SCRIPT_PATH}"

ssh ubuntu@"$CONTAINER_IP" \
    -o 'StrictHostKeyChecking no' \
    -o 'UserKnownHostsFile /dev/null' \
    -i $VAGRANT_PRIVATE_KEY_PATH \
    -- \
    sudo $REMOTE_SETUP_SCRIPT_PATH
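For context, a hypothetical host-side invocation of the script above (not in the diff; the container name, IP and key path are made-up examples). This is essentially what Container#run_after_create_script assembles later in this changeset:

# All three flags are required; the script SSHes into the container as the
# stock "ubuntu" cloud-image user and creates the "vagrant" user remotely.
system 'boxes/ubuntu-cloud/setup-vagrant-user.sh',
       '-r', '/var/lib/lxc/abc123def456/rootfs',                 # example container rootfs
       '-i', '10.0.3.42',                                        # example IP handed out by LXC's dnsmasq
       '-k', File.expand_path('~/.vagrant.d/insecure_private_key') # example path to the insecure Vagrant key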
boxes/ubuntu-cloud/ubuntu-cloud (new executable file): 406 lines
@@ -0,0 +1,406 @@
#!/bin/bash

# template script for generating ubuntu container for LXC based on released cloud
# images
#
# Copyright © 2012 Serge Hallyn <serge.hallyn@canonical.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#

set -e

if [ -r /etc/default/lxc ]; then
    . /etc/default/lxc
fi

copy_configuration()
{
    path=$1
    rootfs=$2
    name=$3
    arch=$4
    release=$5

    if [ $arch = "i386" ]; then
        arch="i686"
    fi

    # if there is exactly one veth network entry, make sure it has an
    # associated hwaddr.
    nics=`grep -e '^lxc\.network\.type[ \t]*=[ \t]*veth' $path/config | wc -l`
    if [ $nics -eq 1 ]; then
        grep -q "^lxc.network.hwaddr" $path/config || cat <<EOF >> $path/config
lxc.network.hwaddr = 00:16:3e:$(openssl rand -hex 3| sed 's/\(..\)/\1:/g; s/.$//')
EOF
    fi

    grep -q "^lxc.rootfs" $path/config 2>/dev/null || echo "lxc.rootfs = $rootfs" >> $path/config
    cat <<EOF >> $path/config
lxc.utsname = $name

lxc.tty = 4
lxc.pts = 1024
lxc.mount = $path/fstab
lxc.arch = $arch
lxc.cap.drop = sys_module mac_admin
lxc.pivotdir = lxc_putold

# uncomment the next line to run the container unconfined:
#lxc.aa_profile = unconfined

lxc.cgroup.devices.deny = a
# Allow any mknod (but not using the node)
lxc.cgroup.devices.allow = c *:* m
lxc.cgroup.devices.allow = b *:* m
# /dev/null and zero
lxc.cgroup.devices.allow = c 1:3 rwm
lxc.cgroup.devices.allow = c 1:5 rwm
# consoles
lxc.cgroup.devices.allow = c 5:1 rwm
lxc.cgroup.devices.allow = c 5:0 rwm
#lxc.cgroup.devices.allow = c 4:0 rwm
#lxc.cgroup.devices.allow = c 4:1 rwm
# /dev/{,u}random
lxc.cgroup.devices.allow = c 1:9 rwm
lxc.cgroup.devices.allow = c 1:8 rwm
lxc.cgroup.devices.allow = c 136:* rwm
lxc.cgroup.devices.allow = c 5:2 rwm
# rtc
lxc.cgroup.devices.allow = c 254:0 rwm
#fuse
lxc.cgroup.devices.allow = c 10:229 rwm
#tun
lxc.cgroup.devices.allow = c 10:200 rwm
#full
lxc.cgroup.devices.allow = c 1:7 rwm
#hpet
lxc.cgroup.devices.allow = c 10:228 rwm
#kvm
lxc.cgroup.devices.allow = c 10:232 rwm
EOF

    cat <<EOF > $path/fstab
proc proc proc nodev,noexec,nosuid 0 0
sysfs sys sysfs defaults 0 0
EOF

    # rmdir /dev/shm for containers that have /run/shm
    # I'm afraid of doing rm -rf $rootfs/dev/shm, in case it did
    # get bind mounted to the host's /run/shm. So try to rmdir
    # it, and in case that fails move it out of the way.
    if [ ! -L $rootfs/dev/shm ] && [ -d $rootfs/run/shm ] && [ -e $rootfs/dev/shm ]; then
        mv $rootfs/dev/shm $rootfs/dev/shm.bak
        ln -s /run/shm $rootfs/dev/shm
    fi

    return 0
}

usage()
{
    cat <<EOF
LXC Container configuration for Ubuntu Cloud images.

Generic Options
[ -r | --release <release> ]: Release name of container, defaults to host
[ -a | --arch ]: Arhcitecture of container, defaults to host arcitecture
[ -C | --cloud ]: Configure container for use with meta-data service, defaults to no
[ -T | --tarball ]: Location of tarball
[ -d | --debug ]: Run with 'set -x' to debug errors
[ -s | --stream]: Use specified stream rather than 'released'

Options, mutually exclusive of "-C" and "--cloud":
[ -i | --hostid ]: HostID for cloud-init, defaults to random string
[ -u | --userdata ]: Cloud-init user-data file to configure container on start
[ -S | --auth-key ]: SSH Public key file to inject into container
[ -L | --nolocales ]: Do not copy host's locales into container

EOF
    return 0
}

options=$(getopt -o a:hp:r:n:Fi:CLS:T:ds:u: -l arch:,help,path:,release:,name:,flush-cache,hostid:,auth-key:,cloud,no_locales,tarball:,debug,stream:,userdata: -- "$@")
if [ $? -ne 0 ]; then
    usage $(basename $0)
    exit 1
fi
eval set -- "$options"

release=lucid
if [ -f /etc/lsb-release ]; then
    . /etc/lsb-release
    case "$DISTRIB_CODENAME" in
        lucid|natty|oneiric|precise|quantal)
            release=$DISTRIB_CODENAME
            ;;
    esac
fi

arch=$(arch)

# Code taken from debootstrap
if [ -x /usr/bin/dpkg ] && /usr/bin/dpkg --print-architecture >/dev/null 2>&1; then
    arch=`/usr/bin/dpkg --print-architecture`
elif type udpkg >/dev/null 2>&1 && udpkg --print-architecture >/dev/null 2>&1; then
    arch=`/usr/bin/udpkg --print-architecture`
else
    arch=$(arch)
    if [ "$arch" = "i686" ]; then
        arch="i386"
    elif [ "$arch" = "x86_64" ]; then
        arch="amd64"
    elif [ "$arch" = "armv7l" ]; then
        # note: arm images don't exist before oneiric; are called armhf in
        # precise and later; and are not supported by the query, so we don't actually
        # support them yet (see check later on). When Query2 is available,
        # we'll use that to enable arm images.
        arch="armel"
    fi
fi

debug=0
hostarch=$arch
cloud=0
locales=1
flushcache=0
stream="released"
while true
do
    case "$1" in
    -h|--help) usage $0 && exit 0;;
    -p|--path) path=$2; shift 2;;
    -n|--name) name=$2; shift 2;;
    -F|--flush-cache) flushcache=1; shift 1;;
    -r|--release) release=$2; shift 2;;
    -a|--arch) arch=$2; shift 2;;
    -i|--hostid) host_id=$2; shift 2;;
    -u|--userdata) userdata=$2; shift 2;;
    -C|--cloud) cloud=1; shift 1;;
    -S|--auth-key) auth_key=$2; shift 2;;
    -L|--no_locales) locales=0; shift 1;;
    -T|--tarball) tarball=$2; shift 2;;
    -d|--debug) debug=1; shift 1;;
    -s|--stream) stream=$2; shift 2;;
    --) shift 1; break ;;
    *) break ;;
    esac
done

if [ $debug -eq 1 ]; then
    set -x
fi

if [ "$arch" == "i686" ]; then
    arch=i386
fi

if [ $hostarch = "i386" -a $arch = "amd64" ]; then
    echo "can't create amd64 container on i386"
    exit 1
fi

if [ $arch != "i386" -a $arch != "amd64" ]; then
    echo "Only i386 and amd64 are supported by the ubuntu cloud template."
    exit 1
fi

if [ "$stream" != "daily" -a "$stream" != "released" ]; then
    echo "Only 'daily' and 'released' streams are supported"
    exit 1
fi

if [ -n "$userdata" ]; then
    if [ ! -f "$userdata" ]; then
        echo "Userdata ($userdata) does not exist"
        exit 1
    else
        userdata=`readlink -f $userdata`
    fi
fi

if [ -n "$auth_key" ]; then
    if [ ! -f "$auth_key" ]; then
        echo "--auth-key=${auth_key} must reference a file"
        exit 1
    fi
    auth_key=$(readlink -f "${auth_key}") ||
        { echo "failed to get full path for auth_key"; exit 1; }
fi

if [ -z "$path" ]; then
    echo "'path' parameter is required"
    exit 1
fi

if [ "$(id -u)" != "0" ]; then
    echo "This script should be run as 'root'"
    exit 1
fi

# detect rootfs
config="$path/config"
if grep -q '^lxc.rootfs' $config 2>/dev/null ; then
    rootfs=`grep 'lxc.rootfs =' $config | awk -F= '{ print $2 }'`
else
    rootfs=$path/rootfs
fi

type ubuntu-cloudimg-query
type wget

# determine the url, tarball, and directory names
# download if needed
cache="/var/cache/lxc/cloud-$release"

mkdir -p $cache

if [ -n "$tarball" ]; then
    url2="$tarball"
else
    url1=`ubuntu-cloudimg-query $release $stream $arch --format "%{url}\n"`
    url2=`echo $url1 | sed -e 's/.tar.gz/-root\0/'`
fi

filename=`basename $url2`

wgetcleanup()
{
    rm -f $filename
}

buildcleanup()
{
    cd $rootfs
    umount -l $cache/$xdir || true
    rm -rf $cache
}

# if the release doesn't have a *-rootfs.tar.gz, then create one from the
# cloudimg.tar.gz by extracting the .img, mounting it loopback, and creating
# a tarball from the mounted image.
build_root_tgz()
{
    url=$1
    filename=$2

    xdir=`mktemp -d -p .`
    tarname=`basename $url`
    imgname="$release-*-cloudimg-$arch.img"
    trap buildcleanup EXIT SIGHUP SIGINT SIGTERM
    if [ $flushcache -eq 1 -o ! -f $cache/$tarname ]; then
        rm -f $tarname
        echo "Downloading cloud image from $url"
        wget $url || { echo "Couldn't find cloud image $url."; exit 1; }
    fi
    echo "Creating new cached cloud image rootfs"
    tar --wildcards -zxf $tarname $imgname
    mount -o loop $imgname $xdir
    (cd $xdir; tar zcf ../$filename .)
    umount $xdir
    rm -f $tarname $imgname
    rmdir $xdir
    echo "New cloud image cache created"
    trap EXIT
    trap SIGHUP
    trap SIGINT
    trap SIGTERM
}

mkdir -p /var/lock/subsys/
(
    flock -x 200

    cd $cache
    if [ $flushcache -eq 1 ]; then
        echo "Clearing the cached images"
        rm -f $filename
    fi

    trap wgetcleanup EXIT SIGHUP SIGINT SIGTERM
    if [ ! -f $filename ]; then
        wget $url2 || build_root_tgz $url1 $filename
    fi
    trap EXIT
    trap SIGHUP
    trap SIGINT
    trap SIGTERM

    echo "Extracting container rootfs"
    mkdir -p $rootfs
    cd $rootfs
    tar --numeric-owner -zxf $cache/$filename

    if [ $cloud -eq 0 ]; then
        echo "Configuring for running outside of a cloud environment"
        echo "If you want to configure for a cloud evironment, please use '-- -C' to create the container"

        seed_d=$rootfs/var/lib/cloud/seed/nocloud-net
        rhostid=$(uuidgen | cut -c -8)
        host_id=${hostid:-$rhostid}
        mkdir -p $seed_d

        cat > "$seed_d/meta-data" <<EOF
instance-id: lxc-$host_id
EOF
        if [ -n "$auth_key" ]; then
            {
                echo "public-keys:" &&
                sed -e '/^$/d' -e 's,^,- ,' "$auth_key" "$auth_key"
            } >> "$seed_d/meta-data"
            [ $? -eq 0 ] ||
                { echo "failed to write public keys to metadata"; exit 1; }
        fi

        rm $rootfs/etc/hostname

        if [ $locales -eq 1 ]; then
            cp /usr/lib/locale/locale-archive $rootfs/usr/lib/locale/locale-archive
        fi

        if [ -f "$userdata" ]; then
            echo "Using custom user-data"
            cp $userdata $seed_d/user-data
        else

            if [ -z "$MIRROR" ]; then
                MIRROR="http://archive.ubuntu.com/ubuntu"
            fi

            cat > "$seed_d/user-data" <<EOF
#cloud-config
output: {all: '| tee -a /var/log/cloud-init-output.log'}
apt_mirror: $MIRROR
manage_etc_hosts: localhost
locale: $(/usr/bin/locale | awk -F= '/LANG=/ {print$NF}')
password: ubuntu
chpasswd: { expire: False }
EOF
        fi

    else

        echo "Configured for running in a cloud environment."
        echo "If you do not have a meta-data service, this container will likely be useless."

    fi
) 200>/var/lock/subsys/lxc-ubucloud

copy_configuration $path $rootfs $name $arch $release

echo "Container $name created."
exit 0

# vi: ts=4 expandtab
dev/Vagrantfile (vendored): 112 lines changed
@@ -1,111 +1,11 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

require '../lib/vagrant-lxc'

Vagrant.configure("2") do |config|
  # All Vagrant configuration is done here. The most common configuration
  # options are documented and commented below. For a complete reference,
  # please see the online documentation at vagrantup.com.

  # Every Vagrant virtual environment requires a box to build off of.
  config.vm.box = "base"

  # The url from where the 'config.vm.box' box will be fetched if it
  # doesn't already exist on the user's system.
  # config.vm.box_url = "http://domain.com/path/to/above.box"

  # Create a forwarded port mapping which allows access to a specific port
  # within the machine from a port on the host machine. In the example below,
  # accessing "localhost:8080" will access port 80 on the guest machine.
  # config.vm.network :forwarded_port, 80, 8080

  # Create a private network, which allows host-only access to the machine
  # using a specific IP.
  # config.vm.network :private_network, "192.168.33.10"

  # Create a public network, which generally matched to bridged network.
  # Bridged networks make the machine appear as another physical device on
  # your network.
  # config.vm.network :public_network

  # Share an additional folder to the guest VM. The first argument is
  # the path on the host to the actual folder. The second argument is
  # the path on the guest to mount the folder. And the optional third
  # argument is a set of non-required options.
  # config.vm.synced_folder "../data", "/vagrant_data"

  # Provider-specific configuration so you can fine-tune various
  # backing providers for Vagrant. These expose provider-specific options.
  # Example for VirtualBox:
  #
  # config.vm.provider :virtualbox do |vb|
  #   # Don't boot with headless mode
  #   vb.gui = true
  #
  #   # Use VBoxManage to customize the VM. For example to change memory:
  #   vb.customize ["modifyvm", :id, "--memory", "1024"]
  # end
  #
  # View the documentation for the provider you're using for more
  # information on available options.

  # Enable provisioning with Puppet stand alone. Puppet manifests
  # are contained in a directory path relative to this Vagrantfile.
  # You will need to create the manifests directory and a manifest in
  # the file base.pp in the manifests_path directory.
  #
  # An example Puppet manifest to provision the message of the day:
  #
  # # group { "puppet":
  # #   ensure => "present",
  # # }
  # #
  # # File { owner => 0, group => 0, mode => 0644 }
  # #
  # # file { '/etc/motd':
  # #   content => "Welcome to your Vagrant-built virtual machine!
  # #               Managed by Puppet.\n"
  # # }
  #
  # config.vm.provision :puppet do |puppet|
  #   puppet.manifests_path = "manifests"
  #   puppet.manifest_file = "base.pp"
  # end

  # Enable provisioning with chef solo, specifying a cookbooks path, roles
  # path, and data_bags path (all relative to this Vagrantfile), and adding
  # some recipes and/or roles.
  #
  # config.vm.provision :chef_solo do |chef|
  #   chef.cookbooks_path = "../my-recipes/cookbooks"
  #   chef.roles_path = "../my-recipes/roles"
  #   chef.data_bags_path = "../my-recipes/data_bags"
  #   chef.add_recipe "mysql"
  #   chef.add_role "web"
  #
  #   # You may also specify custom JSON attributes:
  #   chef.json = { :mysql_password => "foo" }
  # end

  # Enable provisioning with chef server, specifying the chef server URL,
  # and the path to the validation key (relative to this Vagrantfile).
  #
  # The Opscode Platform uses HTTPS. Substitute your organization for
  # ORGNAME in the URL and validation key.
  #
  # If you have your own Chef Server, use the appropriate URL, which may be
  # HTTP instead of HTTPS depending on your configuration. Also change the
  # validation key to validation.pem.
  #
  # config.vm.provision :chef_client do |chef|
  #   chef.chef_server_url = "https://api.opscode.com/organizations/ORGNAME"
  #   chef.validation_key_path = "ORGNAME-validator.pem"
  # end
  #
  # If you're using the Opscode platform, your validator client is
  # ORGNAME-validator, replacing ORGNAME with your organization name.
  #
  # If you have your own Chef Server, the default validation client name is
  # chef-validator, unless you changed the configuration.
  #
  #   chef.validation_client_name = "ORGNAME-validator"
  config.vm.box = "ubuntu-cloud"
  config.vm.provider :lxc do |lxc|
    # ... soon to come lxc configs...
  end
end
@@ -1 +0,0 @@
{"provider":"lxc"}
@@ -1,8 +1,10 @@
# TODO: Split action classes into their own files
require 'vagrant-lxc/action/base_action'
require 'vagrant-lxc/action/handle_box_metadata'

# TODO: Split action classes into their own files
module Vagrant
  module LXC
    module Actions
    module Action
      # This action is responsible for reloading the machine, which
      # brings it down, sucks in new configuration, and brings the
      # machine back up with the new configuration.
@@ -71,11 +73,12 @@ module Vagrant
b.use Vagrant::Action::Builtin::Call, Created do |env, b2|
  # If the VM is NOT created yet, then do the setup steps
  if !env[:result]
    b2.use HandleBoxMetadata
    b2.use Create
    # We'll probably have other actions down here...
  end
end
b.use action_start
b.use AfterCreate
end
end
@@ -129,14 +132,48 @@ module Vagrant
end
end

class BaseAction
  def initialize(app, env)
    @app = app
# This is the action that will exec into an SSH shell.
def self.action_ssh
  Vagrant::Action::Builder.new.tap do |b|
    b.use CheckLXC
    b.use CheckCreated
    # b.use CheckAccessible
    b.use CheckRunning
    b.use Vagrant::Action::Builtin::SSHExec
  end
end

# This is the action that will run a single SSH command.
def self.action_ssh_run
  Vagrant::Action::Builder.new.tap do |b|
    b.use CheckLXC
    b.use CheckCreated
    # b.use CheckAccessible
    b.use CheckRunning
    b.use Vagrant::Action::Builtin::SSHRun
  end
end

class CheckCreated < BaseAction
  def call(env)
    puts "TODO: Implement #{self.class.name}"
    unless env[:machine].state.created?
      raise Vagrant::Errors::VMNotCreatedError
    end

    # Call the next if we have one (but we shouldn't, since this
    # middleware is built to run with the Call-type middlewares)
    @app.call(env)
  end
end

class CheckRunning < BaseAction
  def call(env)
    unless env[:machine].state.running?
      raise Vagrant::Errors::VMNotCreatedError
    end

    # Call the next if we have one (but we shouldn't, since this
    # middleware is built to run with the Call-type middlewares)
    @app.call(env)
  end
end
@@ -165,17 +202,26 @@ module Vagrant
class Create < BaseAction
  def call(env)
    puts "TODO: Create container"
    env[:machine].id = 'TODO-set-a-proper-machine-id' unless env[:machine].id
    env[:machine].provider.container.create
    machine_id = env[:machine].provider.container.create(env[:machine].box.metadata)
    env[:machine].id = machine_id
    env[:just_created] = true
    @app.call env
  end
end

class AfterCreate < BaseAction
  def call(env)
    if env[:just_created] && (script = env[:machine].box.metadata['after-create-script'])
      env[:machine].provider.container.run_after_create_script script
    end
    @app.call env
  end
end

class Destroy < BaseAction
  def call(env)
    env[:machine].id = nil
    env[:machine].provider.container.destroy
    env[:machine].id = nil
    @app.call env
  end
end
lib/vagrant-lxc/action/base_action.rb (new file): 16 lines
@@ -0,0 +1,16 @@
module Vagrant
  module LXC
    module Action
      class BaseAction
        def initialize(app, env)
          @app = app
        end

        def call(env)
          puts "TODO: Implement #{self.class.name}"
          @app.call(env)
        end
      end
    end
  end
end
lib/vagrant-lxc/action/handle_box_metadata.rb (new file): 44 lines
@@ -0,0 +1,44 @@
require 'vagrant-lxc/action/base_action'

module Vagrant
  module LXC
    module Action
      # Prepare arguments to be used for lxc-create
      class HandleBoxMetadata < BaseAction
        LXC_TEMPLATES_PATH = Pathname.new("/usr/share/lxc/templates")

        def initialize(app, env)
          super
          @logger = Log4r::Logger.new("vagrant::lxc::action::handle_box_metadata")
        end

        def call(env)
          box           = env[:machine].box
          metadata      = box.metadata
          template_name = metadata['template-name']

          after_create = metadata['after-create-script'] ?
            box.directory.join(metadata['after-create-script']).to_s :
            nil

          metadata.merge!(
            'template-name'       => "vagrant-#{box.name}-#{template_name}",
            'tar-cache'           => box.directory.join(metadata['tar-cache']).to_s,
            'after-create-script' => after_create
          )

          # Prepends "lxc-" to the template file so that `lxc-create` is able to find it
          dest = LXC_TEMPLATES_PATH.join("lxc-#{metadata['template-name']}").to_s
          src  = box.directory.join(template_name).to_s

          @logger.debug('Copying LXC template into place')
          # This should only ask for administrative permission once, even
          # though its executed in multiple subshells.
          system(%Q[sudo su root -c "cp #{src} #{dest}"])

          @app.call(env)
        end
      end
    end
  end
end
@@ -1,51 +1,166 @@
# FIXME: Ruby 1.8 users dont have SecureRandom
require 'securerandom'

require 'vagrant/util/retryable'
require 'vagrant/util/subprocess'

require "vagrant-lxc/errors"

module Vagrant
  module LXC
    class Container
      CONTAINER_STATE_FILE_PATH = '/tmp/vagrant-lxc-container-state-%<id>s'
      # Include this so we can use `Subprocess` more easily.
      include Vagrant::Util::Retryable

      def initialize(machine)
        @machine = machine
      # This is raised if the container can't be found when initializing it with
      # an UUID.
      class NotFound < StandardError; end

      CONTAINERS_PATH = '/var/lib/lxc'

      attr_reader :name

      def initialize(name)
        @name   = name
        @logger = Log4r::Logger.new("vagrant::provider::lxc::container")
      end

      def create
        puts 'TODO: Create container'
      def validate!
        raise NotFound if @name && ! lxc(:ls).split("\n").include?(@name)
      end

      def create(metadata = {})
        # FIXME: Ruby 1.8 users dont have SecureRandom
        # @logger.info('Creating container...')
        @name = SecureRandom.hex(6)
        public_key = Vagrant.source_root.join('keys', 'vagrant.pub').expand_path.to_s

        # TODO: Handle errors
        lxc :create, '--template', metadata['template-name'], '--name', @name, '--', '-S', public_key, '-T', metadata['tar-cache']

        @name
      end

      def run_after_create_script(script)
        private_key = Vagrant.source_root.join('keys', 'vagrant').expand_path.to_s

        @logger.debug 'Running after-create-script from box metadata'
        cmd = [
          script,
          '-r', "#{CONTAINERS_PATH}/#{@name}/rootfs",
          '-k', private_key,
          '-i', dhcp_ip
        ]
        execute *cmd
      end

      def start
        puts 'TODO: Start container'
        update!(:running)
        lxc :start, '-d', '--name', @name
        wait_until :running
      end

      def halt
        update!(:poweroff)
        lxc :shutdown, '--name', @name
        wait_until :stopped
      end

      def destroy
        puts "TODO: Destroy container"
        File.delete(state_file_path) if state_file_path
        lxc :destroy, '--name', @name
      end

      def state
        # TODO: Grab the real machine state here
        read_state_from_file
      def wait_until(state)
        lxc :wait, '--name', @name, '--state', state.to_s.upcase
      end

      private
      def lxc(command, *args)
        execute('sudo', "lxc-#{command}", *args)
      end

      def update!(state)
        File.open(state_file_path, 'w') { |f| f.print state }
      end

      def read_state_from_file
        if File.exists?(state_file_path)
          File.read(state_file_path).to_sym
        elsif @machine.id
      def state
        if @name && lxc(:info, '--name', @name) =~ /^state:[^A-Z]+([A-Z]+)$/
          $1.downcase.to_sym
        elsif @name
          :unknown
        end
      end

      def state_file_path
        CONTAINER_STATE_FILE_PATH % {id: @machine.id}
      def dhcp_ip
        ip = ''
        # Right after creation lxc reports the container as running
        # before DNS is returning the right IP, so have to wait for a while
        retryable(:on => LXC::Errors::ExecuteError, :tries => 10, :sleep => 1) do
          # By default LXC supplies a dns server on 10.0.3.1 so we request the IP
          # of our target from there.
          # Tks to: https://github.com/neerolyte/vagueant/blob/master/bin/vagueant#L340
          r = (raw 'dig', @name, '@10.0.3.1', '+short')

          # If the command was a failure then raise an exception that is nicely
          # handled by Vagrant.
          if r.exit_code != 0
            if @interrupted
              @logger.info("Exit code != 0, but interrupted. Ignoring.")
            else
              raise LXC::Errors::ExecuteError, :command => command.inspect
            end
          end

          ip = r.stdout.gsub("\r\n", "\n").strip
          if ip.empty?
            raise LXC::Errors::ExecuteError, 'Unable to identify container ip'
          end
        end
        ip
      end

      # TODO: Review code below this line, it was pretty much a copy and paste from VirtualBox base driver
      def execute(*command, &block)
        # Get the options hash if it exists
        opts = {}
        opts = command.pop if command.last.is_a?(Hash)

        tries = 0
        tries = 3 if opts[:retryable]

        # Variable to store our execution result
        r = nil

        retryable(:on => LXC::Errors::ExecuteError, :tries => tries, :sleep => 1) do
          # Execute the command
          r = raw(*command, &block)

          # If the command was a failure, then raise an exception that is
          # nicely handled by Vagrant.
          if r.exit_code != 0
            if @interrupted
              @logger.info("Exit code != 0, but interrupted. Ignoring.")
            else
              raise LXC::Errors::ExecuteError, :command => command.inspect
            end
          end
        end

        # Return the output, making sure to replace any Windows-style
        # newlines with Unix-style.
        r.stdout.gsub("\r\n", "\n")
      end

      # Executes a command and returns the raw result object.
      def raw(*command, &block)
        int_callback = lambda do
          @interrupted = true
          @logger.info("Interrupted.")
        end

        # Append in the options for subprocess
        command << { :notify => [:stdout, :stderr] }

        Vagrant::Util::Busy.busy(int_callback) do
          Vagrant::Util::Subprocess.execute(*command, &block)
        end
      end
    end
  end
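A minimal usage sketch of the Container class from the hunk above (illustrative, not part of the commit; the template name and tarball path are examples). It assumes the lxc userspace tools and passwordless sudo are available:

require 'vagrant-lxc/container'

container = Vagrant::LXC::Container.new(nil)
name = container.create(
  'template-name' => 'vagrant-ubuntu-cloud-ubuntu-cloud',                      # example template name
  'tar-cache'     => '/path/to/ubuntu-12.10-server-cloudimg-amd64-root.tar.gz' # example rootfs tarball
)
container.start          # lxc-start -d --name <name>, then lxc-wait for RUNNING
puts container.state     # parsed from lxc-info output, e.g. :running
puts container.dhcp_ip   # resolved with `dig <name> @10.0.3.1 +short`
container.halt           # lxc-shutdown --name <name>, then lxc-wait for STOPPED
container.destroy        # lxc-destroy --name <name>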
lib/vagrant-lxc/errors.rb (new file): 9 lines
@@ -0,0 +1,9 @@
module Vagrant
  module LXC
    module Errors
      class ExecuteError < Vagrant::Errors::VagrantError
        error_key(:lxc_execute_error)
      end
    end
  end
end
@@ -1,7 +1,7 @@
module Vagrant
  module LXC
    class MachineState < Vagrant::MachineState
      CREATED_STATES = %w( running poweroff ).map!(&:to_sym)
      CREATED_STATES = %w( running stopped ).map!(&:to_sym)

      def initialize(state_id)
        short = state_id.to_s.gsub("_", " ")
@@ -14,7 +14,7 @@ module Vagrant
      end

      def off?
        self.id == :poweroff
        self.id == :stopped
      end

      def running?
@@ -1,4 +1,4 @@
require "vagrant-lxc/actions"
require "vagrant-lxc/action"
require "vagrant-lxc/container"
require "vagrant-lxc/machine_state"

@@ -6,14 +6,32 @@ require "log4r"
module Vagrant
  module LXC
    # DISCUSS: VirtualBox provider has a #machine_id_changed, do we need to handle it as well?
    class Provider < Vagrant.plugin("2", :provider)
      attr_reader :container

      def initialize(machine)
        @logger    = Log4r::Logger.new("vagrant::provider::lxc")
        @machine   = machine
        @container = Container.new(@machine)

        machine_id_changed
      end

      # If the machine ID changed, then we need to rebuild our underlying
      # container.
      def machine_id_changed
        id = @machine.id

        begin
          @logger.debug("Instantiating the container for: #{id.inspect}")
          @container = Container.new(id)
          @container.validate!
        rescue Container::NotFound
          # The container doesn't exist, so we probably have a stale
          # ID. Just clear the id out of the machine and reload it.
          @logger.debug("Container not found! Clearing saved machine ID and reloading.")
          id = nil
          retry
        end
      end

      # @see Vagrant::Plugin::V1::Provider#action
@@ -23,12 +41,28 @@ module Vagrant
        # given action.
        action_method = "action_#{name}"
        # TODO: Rename to singular
        return LXC::Actions.send(action_method) if LXC::Actions.respond_to?(action_method)
        return LXC::Action.send(action_method) if LXC::Action.respond_to?(action_method)
        nil
      end

      # Returns the SSH info for accessing the Container.
      def ssh_info
        # If the Container is not created then we cannot possibly SSH into it, so
        # we return nil.
        return nil if state == :not_created

        {
          :host => @container.dhcp_ip,
          :port => 22 # @driver.ssh_port(@machine.config.ssh.guest_port)
        }
      end

      def state
        LXC::MachineState.new(@container.state)
        state_id = nil
        state_id = :not_created if !@container.name
        state_id = @container.state if !state_id
        state_id = :unknown if !state_id
        LXC::MachineState.new(state_id)
      end

      def to_s
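An illustrative sketch (again, not part of the diff) of what Vagrant gets back from the provider pieces above when it runs `vagrant status` or `vagrant ssh`; the `machine` object is assumed to be in scope:

provider = machine.provider   # Vagrant::LXC::Provider

provider.state.id             # :not_created, :running, :stopped or :unknown (wraps Container#state)
provider.ssh_info             # { :host => <container IP from LXC's dnsmasq>, :port => 22 } once created
provider.action(:ssh)         # Vagrant::Action::Builder ending in Vagrant::Action::Builtin::SSHExec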
@@ -1,3 +1,8 @@
en:
  vagrant:
    errors:
      lxc_execute_error: |-
        There was an error executing %{command}

        For more information on the failure, enable detailed logging by setting
        the environment variable VAGRANT_LOG to DEBUG.
@@ -5,12 +5,11 @@ raise 'You should not run this script from the dev box' if ENV['USER'] == 'vagra
require 'bundler'
require 'json'

IMAGE_ROOT = 'https://cloud-images.ubuntu.com/releases/quantal/release-20130206'
IMAGE_NAME = 'ubuntu-12.10-server-cloudimg-amd64-root.tar.gz'
VAGRANT_REPO = 'https://raw.github.com/mitchellh/vagrant/master'
IMAGE_ROOT = 'https://cloud-images.ubuntu.com/releases/quantal/release-20130206'
IMAGE_NAME = 'ubuntu-12.10-server-cloudimg-amd64-root.tar.gz'

def download(source, destination)
  destination = "#{File.dirname __FILE__}/cache/#{destination}"
  destination = "#{File.dirname __FILE__}/#{destination}"
  return if File.exists?(destination)

  sh "wget #{source} -O #{destination}"
@@ -61,8 +60,8 @@ end
# Fetches vagrant submodule
`git submodule update --init`

# Cache container image between vagrant box destructions
download "#{IMAGE_ROOT}/#{IMAGE_NAME}", IMAGE_NAME
# Download container image for building the base ubuntu-cloud box
download "#{IMAGE_ROOT}/#{IMAGE_NAME}", "boxes/ubuntu-cloud/#{IMAGE_NAME}"

# Start vagrant
sh 'vagrant up'
@@ -5,6 +5,7 @@ module UnitExampleGroup
    Object.any_instance.stub(:system) { |*args, &block|
      UnitExampleGroup.prevent_system_calls(*args, &block)
    }
    require 'vagrant/util/subprocess'
    Vagrant::Util::Subprocess.stub(:execute) { |*args, &block|
      UnitExampleGroup.prevent_system_calls(*args, &block)
    }
spec/unit/action/handle_box_metadata_spec.rb (new file): 39 lines
@@ -0,0 +1,39 @@
require 'unit_helper'

require 'vagrant-lxc/action/handle_box_metadata'

describe Vagrant::LXC::Action::HandleBoxMetadata do
  let(:tar_cache)     { 'template.zip' }
  let(:template_name) { 'ubuntu-lts' }
  let(:after_create)  { 'setup-vagrant-user.sh' }
  let(:metadata)      { {'template-name' => template_name, 'tar-cache' => tar_cache, 'after-create-script' => after_create} }
  let(:box)           { mock(:box, name: 'box-name', metadata: metadata, directory: Pathname.new('/path/to/box')) }
  let(:machine)       { mock(:machine, box: box) }
  let(:app)           { mock(:app, call: true) }
  let(:env)           { {machine: machine} }

  subject { described_class.new(app, env) }

  before do
    subject.stub(:system)
    subject.call(env)
  end

  it 'prepends box directory to tar-cache' do
    metadata['tar-cache'].should == "#{box.directory.to_s}/#{tar_cache}"
  end

  it 'prepends box directory to after-create-script' do
    metadata['after-create-script'].should == "#{box.directory.to_s}/#{after_create}"
  end

  it 'prepends vagrant and box name to template-name' do
    metadata['template-name'].should == "vagrant-#{box.name}-#{template_name}"
  end

  it 'copies box template file to the right folder' do
    src  = box.directory.join(template_name).to_s
    dest = "/usr/share/lxc/templates/lxc-#{metadata['template-name']}"
    subject.should have_received(:system).with("sudo su root -c \"cp #{src} #{dest}\"")
  end
end
spec/unit/container_spec.rb (new file): 215 lines
@@ -0,0 +1,215 @@
require 'unit_helper'

require 'vagrant-lxc/container'

describe Vagrant::LXC::Container do
  # Default subject and container name for specs
  let(:name) { nil }
  subject { described_class.new(name) }

  describe 'container name validation' do
    let(:unknown_container) { described_class.new('unknown') }
    let(:valid_container)   { described_class.new('valid') }
    let(:new_container)     { described_class.new(nil) }

    before do
      unknown_container.stub(lxc: 'valid')
      valid_container.stub(lxc: 'valid')
    end

    it 'raises a NotFound error if an unknown container name gets provided' do
      expect {
        unknown_container.validate!
      }.to raise_error(Vagrant::LXC::Container::NotFound)
    end

    it 'does not raise a NotFound error if a valid container name gets provided' do
      expect {
        valid_container.validate!
      }.to_not raise_error(Vagrant::LXC::Container::NotFound)
    end

    it 'does not raise a NotFound error if nil is provider as name' do
      expect {
        new_container.validate!
      }.to_not raise_error(Vagrant::LXC::Container::NotFound)
    end
  end

  describe 'lxc commands execution' do
    let(:args) { @args }

    before do
      subject.stub(:execute) { |*args| @args = args }
      subject.lxc :command, '--state', 'RUNNING'
    end

    it 'prepends sudo' do
      args[0].should == 'sudo'
    end

    it 'uses the first argument as lxc command suffix' do
      args[1].should == 'lxc-command'
    end

    it 'pass through remaining arguments' do
      args[2].should == '--state'
      args[3].should == 'RUNNING'
    end
  end

  describe 'guard for container state' do
    let(:name) { 'random-container-name' }

    before do
      subject.stub :lxc
      subject.wait_until :running
    end

    it 'runs lxc-wait with the machine id and upcased state' do
      subject.should have_received(:lxc).with(
        :wait,
        '--name', name,
        '--state', 'RUNNING'
      )
    end
  end

  describe 'creation' do
    let(:name)            { 'random-container-name' }
    let(:template_name)   { 'template-name' }
    let(:tar_cache_path)  { '/path/to/tar/cache' }
    let(:public_key_path) { Vagrant.source_root.join('keys', 'vagrant.pub').expand_path.to_s }

    before do
      subject.stub(lxc: true)
      SecureRandom.stub(hex: name)
      subject.create 'template-name' => template_name, 'tar-cache' => tar_cache_path
    end

    it 'calls lxc-create with the right arguments' do
      subject.should have_received(:lxc).with(
        :create,
        '--template', template_name,
        '--name', name,
        '--',
        '-S', public_key_path,
        '-T', tar_cache_path
      )
    end
  end

  describe 'after create script execution' do
    let(:name)              { 'random-container-name' }
    let(:after_create_path) { '/path/to/after/create' }
    let(:execute_cmd)       { @execute_cmd }
    let(:priv_key_path)     { Vagrant.source_root.join('keys', 'vagrant').expand_path.to_s }
    let(:ip)                { '10.0.3.234' }

    before do
      subject.stub(dhcp_ip: ip)
      subject.stub(:execute) { |*args| @execute_cmd = args.join(' ') }
      subject.run_after_create_script after_create_path
    end

    it 'runs after-create-script when present passing required variables' do
      execute_cmd.should include after_create_path
      execute_cmd.should include "-r /var/lib/lxc/#{name}/rootfs"
      execute_cmd.should include "-k #{priv_key_path}"
      execute_cmd.should include "-i #{ip}"
    end
  end

  describe 'destruction' do
    let(:name) { 'container-name' }

    before do
      subject.stub(lxc: true)
      subject.destroy
    end

    it 'calls lxc-destroy with the right arguments' do
      subject.should have_received(:lxc).with(
        :destroy,
        '--name', name,
      )
    end
  end

  describe 'start' do
    let(:name) { 'container-name' }

    before do
      subject.stub(lxc: true, wait_until: true)
      subject.start
    end

    it 'calls lxc-start with the right arguments' do
      subject.should have_received(:lxc).with(
        :start,
        '-d',
        '--name', name
      )
    end

    it 'waits for container state to be RUNNING' do
      subject.should have_received(:wait_until).with(:running)
    end
  end

  describe 'halt' do
    let(:name) { 'random-container-name' }

    before do
      subject.stub(lxc: true, wait_until: true)
      subject.halt
    end

    it 'calls lxc-shutdown with the right arguments' do
      subject.should have_received(:lxc).with(
        :shutdown,
        '--name', name
      )
    end

    it 'waits for container state to be STOPPED' do
      subject.should have_received(:wait_until).with(:stopped)
    end
  end

  describe 'state' do
    let(:name) { 'random-container-name' }

    before do
      subject.stub(lxc: "state: STOPPED\npid: 2")
    end

    it 'calls lxc-info with the right arguments' do
      subject.state
      subject.should have_received(:lxc).with(
        :info,
        '--name', name
      )
    end

    it 'maps the output of lxc-info status out to a symbol' do
      subject.state.should == :stopped
    end
  end

  describe 'dhcp ip' do
    let(:name) { 'random-container-name' }
    let(:ip)   { "10.0.3.123" }

    before do
      subject.stub(:raw) {
        mock(stdout: "#{ip}\n", exit_code: 0)
      }
    end

    it 'digs the container ip from lxc dns server' do
      subject.dhcp_ip.should == ip
      subject.should have_received(:raw).with('dig', name, '@10.0.3.1', '+short')
    end
  end
end
@@ -29,8 +29,8 @@ describe Vagrant::LXC::MachineState do
    it { should_not be_off }
  end

  context 'when state id is :poweroff' do
    subject { described_class.new(:poweroff) }
  context 'when state id is :stopped' do
    subject { described_class.new(:stopped) }

    it { should be_created }
    it { should be_off }
tasks/boxes.rake (new file): 10 lines
@@ -0,0 +1,10 @@
namespace :boxes do
  namespace :build do
    desc 'Packages an Ubuntu cloud image as a Vagrant LXC box'
    task 'ubuntu-cloud' do
      sh 'mkdir -p boxes/output'
      sh 'cp cache/ubuntu-12.10-server-cloudimg-amd64-root.tar.gz boxes/ubuntu-cloud'
      sh 'cd boxes/ubuntu-cloud && tar -czf ../output/ubuntu-cloud.box ./*'
    end
  end
end
@@ -1,4 +0,0 @@
desc 'Packages a dummy Vagrant box to be used during development'
task :package_dummy_box do
  sh 'cd dummy-box-files/ && tar -czf ../dummy-ubuntu-cloudimg.box ./*'
end