author		James Shubin <james@shubin.ca>	2014-01-05 16:01:50 -0500
committer	James Shubin <james@shubin.ca>	2014-01-08 22:09:21 -0500
commit		ddd1f6671de0d2b2e86aa7f0f73ddc67b690b16a (patch)
tree		ec74546da6311fc881231386b98ded150c43d981
parent		36275d43f72ba1371042bd24fb3718df946f4d56 (diff)
This is Puppet-Gluster+Vagrant! (https://ttboj.wordpress.com/)
Puppet-Gluster, now with Vagrant! - Initial release. Happy hacking!
-rw-r--r--	.gitmodules						|  21
-rw-r--r--	README							|   1
-rw-r--r--	files/xml.py						|  42
-rw-r--r--	manifests/repo.pp					|   9
-rw-r--r--	manifests/volume.pp					|  22
-rw-r--r--	vagrant/README						|  27
-rw-r--r--	vagrant/gluster/.gitignore				|   3
-rw-r--r--	vagrant/gluster/Vagrantfile				| 351
-rw-r--r--	vagrant/gluster/puppet/files/README			|   2
-rw-r--r--	vagrant/gluster/puppet/hiera.yaml			|   7
-rw-r--r--	vagrant/gluster/puppet/hieradata/common.yaml		|   3
-rw-r--r--	vagrant/gluster/puppet/manifests/site.pp		| 123
-rw-r--r--	vagrant/gluster/puppet/modules/.gitignore		|   1
-rw-r--r--	vagrant/gluster/puppet/modules/Makefile			|  65
-rw-r--r--	vagrant/gluster/puppet/modules/README			|  22
m---------	vagrant/gluster/puppet/modules/apt			|   0
m---------	vagrant/gluster/puppet/modules/common			|   0
m---------	vagrant/gluster/puppet/modules/keepalived		|   0
m---------	vagrant/gluster/puppet/modules/puppet			|   0
m---------	vagrant/gluster/puppet/modules/shorewall		|   0
m---------	vagrant/gluster/puppet/modules/stdlib			|   0
m---------	vagrant/gluster/puppet/modules/yum			|   0
22 files changed, 692 insertions(+), 7 deletions(-)
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..c41791b
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,21 @@
+[submodule "vagrant/gluster/puppet/modules/stdlib"]
+ path = vagrant/gluster/puppet/modules/stdlib
+ url = https://github.com/purpleidea/puppetlabs-stdlib.git
+[submodule "vagrant/gluster/puppet/modules/apt"]
+ path = vagrant/gluster/puppet/modules/apt
+ url = https://github.com/purpleidea/puppetlabs-apt.git
+[submodule "vagrant/gluster/puppet/modules/common"]
+ path = vagrant/gluster/puppet/modules/common
+ url = https://github.com/purpleidea/puppet-common.git
+[submodule "vagrant/gluster/puppet/modules/keepalived"]
+ path = vagrant/gluster/puppet/modules/keepalived
+ url = https://github.com/purpleidea/puppet-keepalived.git
+[submodule "vagrant/gluster/puppet/modules/puppet"]
+ path = vagrant/gluster/puppet/modules/puppet
+ url = https://github.com/purpleidea/puppet-puppet.git
+[submodule "vagrant/gluster/puppet/modules/shorewall"]
+ path = vagrant/gluster/puppet/modules/shorewall
+ url = https://github.com/purpleidea/puppet-shorewall.git
+[submodule "vagrant/gluster/puppet/modules/yum"]
+ path = vagrant/gluster/puppet/modules/yum
+ url = https://github.com/purpleidea/puppet-yum.git
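
NOTE: a quick sketch of how these submodules would typically be pulled in
after cloning (assuming a standard git setup; the URL matches the Makefile
further down):

	$ git clone --recursive https://github.com/purpleidea/puppet-gluster.git
	# or, inside an existing clone:
	$ git submodule update --init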
diff --git a/README b/README
index 816a0b5..6fe966f 100644
--- a/README
+++ b/README
@@ -10,6 +10,7 @@ Module specific notes:
* This is _the_ puppet module for gluster. Accept no imitations!
* All the participating nodes, need to have an identical puppet-gluster config.
* Using gluster::simple is probably the best way to try this out.
+* This is easily deployed with vagrant. See the vagrant/ directory!
* You can use less of the available resources, if you only want to manage some.
* You can get CentOS and RHEL rpms from:
* http://download.gluster.org/pub/gluster/glusterfs/LATEST/CentOS/ or:
diff --git a/files/xml.py b/files/xml.py
index b75906f..b3150cd 100644
--- a/files/xml.py
+++ b/files/xml.py
@@ -21,6 +21,10 @@
# <BOOL>
# EXAMPLE:
+# $ gluster peer status --xml | ./xml.py stuck <PEER1> <PEER2> <PEERn>
+# <BOOL>
+
+# EXAMPLE:
# $ gluster volume info --xml <VOLNAME> | ./xml.py property --key <KEY>
# <VALUE>
@@ -53,6 +57,7 @@ import lxml.etree as etree
# "Invalid State" # 11
# };
VALID_PEERED = ['3']
+VALID_STUCK = ['4']
parser = argparse.ArgumentParser(description='gluster xml parsing tools')
#parser.add_argument('--debug', dest='debug', action='store_true', default=False)
@@ -65,6 +70,12 @@ parser_connected = subparsers.add_parser('connected')
parser_connected.add_argument('peers', type=str, nargs='*', action='store')
#
+# 'stuck' parser
+#
+parser_stuck = subparsers.add_parser('stuck')
+parser_stuck.add_argument('peers', type=str, nargs='*', action='store')
+
+#
# 'property' parser
#
parser_property = subparsers.add_parser('property')
@@ -123,6 +134,37 @@ if args.mode == 'connected':
	# must be good!
	sys.exit(0)
+# are any hosts 'stuck' ?
+elif args.mode == 'stuck':
+	store = {}
+	peers = args.peers
+
+	l = root.findall('.//peerStatus')
+	if len(l) != 1:
+		sys.exit(3)
+
+	for p in l[0].findall('.//peer'):
+		h = p.find('hostname').text
+		c = (str(p.find('connected').text) == '1')	# connected...?
+		s = (str(p.find('state').text) in VALID_STUCK)	# is it stuck ?
+		store[h] = c and s	# save for later...
+
+	# if no peers specified, assume we should check all...
+	if len(peers) == 0:
+		peers = store.keys()
+
+	for i in peers:
+		if i in store.keys():
+			if store[i]:
+				# someone is stuck
+				sys.exit(0)
+		else:
+			# we're looking for a peer that isn't peered yet
+			sys.exit(2)
+
+	# nobody is stuck
+	sys.exit(1)
+
elif args.mode == 'property':
	store = []
	for i in root.findall('.//option'):
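
EXAMPLE: the new 'stuck' mode is meant to be driven from a shell pipeline and
to answer through its exit code; a usage sketch (hostnames are illustrative):

	$ gluster peer status --xml | ./xml.py stuck annex2.example.com annex3.example.com
	$ echo $?
	# 0: some peer is stuck in state 4 ('Accepted peer request')
	# 1: nobody is stuck
	# 2: a requested peer isn't peered yet
	# 3: unexpected xml (not exactly one peerStatus section)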
diff --git a/manifests/repo.pp b/manifests/repo.pp
index 7198476..3fccd2d 100644
--- a/manifests/repo.pp
+++ b/manifests/repo.pp
@@ -22,8 +22,7 @@ class gluster::repo(
# if you leave this blank, we assume you want the latest version...
$version = ''
) {
- # XXX: this should be https !
- $base = 'http://download.gluster.org/pub/gluster/glusterfs/'
+ $base = 'https://download.gluster.org/pub/gluster/glusterfs/'
if "${version}" == '' {
# latest
@@ -87,8 +86,7 @@ class gluster::repo(
baseurl => "${base_arch}${arch}/",
enabled => true,
gpgcheck => true,
- # XXX: this should not be an http:// link, it should be a file!
- # XXX: it's not even https! how can you even prevent a mitm...!
+ # XXX: this should not be an https:// link, it should be a file
gpgkeys => ["${gpgkey}"],
ensure => present,
}
@@ -98,8 +96,7 @@ class gluster::repo(
# baseurl => "${base_arch}noarch/",
# enabled => true,
# gpgcheck => true,
- # # XXX: this should not be an http:// link, it should be a file!
- # # XXX: it's not even https! how can you even prevent a mitm...!
+ # # XXX: this should not be an https:// link, it should be a file
# gpgkeys => ["${gpgkey}"],
# ensure => present,
#}
diff --git a/manifests/volume.pp b/manifests/volume.pp
index 013915a..01ef8c5 100644
--- a/manifests/volume.pp
+++ b/manifests/volume.pp
@@ -202,6 +202,7 @@ define gluster::volume(
File["${vardir}/volume/create-${name}.sh"],
File["${vardir}/xml.py"], # status check
Gluster::Brick[$valid_bricks],
+ Exec["gluster-volume-stuck-${name}"],
],
default => [
Service['glusterd'],
@@ -209,14 +210,33 @@ define gluster::volume(
Package['fping'],
File["${vardir}/xml.py"], # status check
Gluster::Brick[$valid_bricks],
+ Exec["gluster-volume-stuck-${name}"],
+ ],
+ }
+
+ # work around stuck connection state (4) of: 'Accepted peer request'...
+ exec { "gluster-volume-stuck-${name}":
+ command => '/sbin/service glusterd reload',
+ logoutput => on_failure,
+ unless => "/usr/sbin/gluster volume list | /bin/grep -qxF '${name}' -", # reconnect if it doesn't exist
+ onlyif => sprintf("/usr/sbin/gluster peer status --xml | ${vardir}/xml.py stuck %s", $others),
+ notify => Common::Again::Delta['gluster-exec-again'],
+ require => [
+ Service['glusterd'],
+ File["${vardir}/xml.py"], # stuck check
+ Gluster::Brick[$valid_bricks],
],
}
# store command in a separate file to run as bash...
# NOTE: we sleep for 5 seconds to give glusterd a chance to
# settle down first if we're doing a hot (clean) puppet run
+	# NOTE: force is needed for now because of the following error:
+	# volume create: puppet: failed: The brick annex1.example.com:/var/lib/puppet/tmp/gluster/data/puppet is is being created in the root partition. It is recommended that you don't use the system's root partition for storage backend. Or use 'force' at the end of the command if you want to override this behavior.
+	# FIXME: it would be great to have an --allow-root-storage type option
+	# instead, so that we don't inadvertently force some other bad thing...
file { "${vardir}/volume/create-${name}.sh":
-	content => inline_template("#!/bin/bash\n/bin/sleep 5s && /usr/sbin/gluster volume create ${name} ${valid_replica}${valid_stripe}transport ${valid_transport} ${brick_spec} > >(/usr/bin/tee '/tmp/gluster-volume-create-${name}.stdout') 2> >(/usr/bin/tee '/tmp/gluster-volume-create-${name}.stderr' >&2) || (${rmdir_volume_dirs} && /bin/false)\nexit \$?\n"),
+	content => inline_template("#!/bin/bash\n/bin/sleep 5s && /usr/sbin/gluster volume create ${name} ${valid_replica}${valid_stripe}transport ${valid_transport} ${brick_spec} force > >(/usr/bin/tee '/tmp/gluster-volume-create-${name}.stdout') 2> >(/usr/bin/tee '/tmp/gluster-volume-create-${name}.stderr' >&2) || (${rmdir_volume_dirs} && /bin/false)\nexit \$?\n"),
owner => root,
group => root,
mode => 755,
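
NOTE: to make the unless/onlyif interplay concrete, a hedged shell equivalent
of what the exec above evaluates on each run (the 'puppet' volume name and the
xml.py path are illustrative):

	if ! /usr/sbin/gluster volume list | /bin/grep -qxF 'puppet' -; then
		if /usr/sbin/gluster peer status --xml | /var/lib/puppet/tmp/gluster/xml.py stuck; then
			/sbin/service glusterd reload	# kick glusterd to clear the stuck peers
		fi
	fi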
diff --git a/vagrant/README b/vagrant/README
new file mode 100644
index 0000000..e73ef0a
--- /dev/null
+++ b/vagrant/README
@@ -0,0 +1,27 @@
+This is Puppet-Gluster+Vagrant! (https://ttboj.wordpress.com/)
+
+You'll first need to get vagrant working. Here are some background articles:
+
+* https://ttboj.wordpress.com/2013/12/09/vagrant-on-fedora-with-libvirt/
+* https://ttboj.wordpress.com/2013/12/21/vagrant-vsftp-and-other-tricks/
+* https://ttboj.wordpress.com/2014/01/02/vagrant-clustered-ssh-and-screen/
+
+I've written a detailed article about all of this, which is available here:
+
+* https://ttboj.wordpress.com/2014/01/08/automatically-deploying-glusterfs-with-puppet-gluster-vagrant
+
+This will not work perfectly on Fedora 19. You must use Fedora 20 or greater.
+
+Once you're comfortable that vagrant is working properly, run this command:
+
+ vagrant up puppet && sudo -v && vagrant up
+
+Sit back and watch, or go have a beverage...
+The first run can take a while because it has to download/install a base image.
+
+When the above command completes, puppet will still be provisioning your hosts. Once the cluster state settles, puppet will then create a gluster volume.
+
+Happy hacking,
+
+James
+
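
NOTE: a hedged way to watch the cluster settle from the host machine, using
the annexN hostnames defined in the Vagrantfile below:

	$ vagrant status
	$ vagrant ssh annex1 -c 'sudo gluster peer status'
	$ vagrant ssh annex1 -c 'sudo gluster volume info'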
diff --git a/vagrant/gluster/.gitignore b/vagrant/gluster/.gitignore
new file mode 100644
index 0000000..3883822
--- /dev/null
+++ b/vagrant/gluster/.gitignore
@@ -0,0 +1,3 @@
+puppet-gluster.yaml
+.vagrant/
+.ssh/
diff --git a/vagrant/gluster/Vagrantfile b/vagrant/gluster/Vagrantfile
new file mode 100644
index 0000000..4233942
--- /dev/null
+++ b/vagrant/gluster/Vagrantfile
@@ -0,0 +1,351 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# Vagrantfile for GlusterFS using Puppet-Gluster
+# Copyright (C) 2010-2013+ James Shubin
+# Written by James Shubin <james@shubin.ca>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# NOTE: vagrant-libvirt needs to run in series (not in parallel) to avoid trying
+# to create the network twice, eg: 'vagrant up --no-parallel'. alternatively you
+# can build the first host (the puppet server) manually, and then the rest, eg:
+# 'vagrant up puppet && vagrant up' which ensures the puppet server is up first!
+# NOTE: https://github.com/pradels/vagrant-libvirt/issues/104 is the open bug...
+
+# README: https://ttboj.wordpress.com/2013/12/09/vagrant-on-fedora-with-libvirt/
+# README: https://ttboj.wordpress.com/2013/12/21/vagrant-vsftp-and-other-tricks/
+# ALSO: https://ttboj.wordpress.com/2014/01/02/vagrant-clustered-ssh-and-screen/
+# README: https://ttboj.wordpress.com/2014/01/08/automatically-deploying-glusterfs-with-puppet-gluster-vagrant
+
+# NOTE: this will not work properly on Fedora 19 or anything that does not have
+# the libvirt broadcast patch included. You can check which libvirt version you
+# have and see if that version tag is in the libvirt git or in your distro. eg:
+# git tag --contains 51e184e9821c3740ac9b52055860d683f27b0ab6 | grep <version#>
+# this is because old libvirt broke the vrrp broadcast packets from keepalived!
+
+# TODO: the /etc/hosts DNS setup is less than ideal, but I didn't implement
+# anything better yet. Please feel free to suggest something else!
+
+# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
+VAGRANTFILE_API_VERSION = '2'
+
+require 'ipaddr'
+require 'yaml'
+
+#
+# globals
+#
+domain = 'example.com'
+network = IPAddr.new '192.168.142.0/24'
+range = network.to_range.to_a
+cidr = (32-(Math.log(range.length)/Math.log(2))).to_i
+offset = 100 # start gluster hosts after here
+#puts range[0].to_s # network
+#puts range[1].to_s # router (reserved)
+#puts range[2].to_s # puppetmaster
+#puts range[3].to_s # vip
+
+# mutable by ARGV and settings file
+count = 4 # default number of gluster hosts to build
+version = '' # default gluster version (empty string means latest!)
+
+#
+# ARGV parsing
+#
+projectdir = File.expand_path File.dirname(__FILE__) # vagrant project dir!!
+f = File.join(projectdir, 'puppet-gluster.yaml')
+
+# load settings
+if File.exist?(f)
+ settings = YAML::load_file f
+ count = settings[:count]
+ version = settings[:version]
+end
+
+# ARGV parser
+skip = 0
+while skip < ARGV.length
+ #puts "#{skip}, #{ARGV[skip]}" # debug
+ if ARGV[skip].start_with?(arg='--gluster-count=')
+ v = ARGV.delete_at(skip).dup
+ v.slice! arg
+ #puts "#{arg}, #{v}" # debug
+
+ count = v.to_i # set gluster host count
+
+ elsif ARGV[skip].start_with?(arg='--gluster-version=')
+ v = ARGV.delete_at(skip).dup
+ v.slice! arg
+
+ version = v.to_s # set gluster version
+
+ else # skip over "official" vagrant args
+ skip = skip + 1
+ end
+end
+
+# save settings (ARGV overrides)
+settings = {:count => count, :version => version}
+File.open(f, 'w') do |file|
+ file.write settings.to_yaml
+end
+
+#puts "ARGV: #{ARGV}" # debug
+
+# erase host information from puppet so that the user can do partial rebuilds
+snoop = ARGV.select { |x| !x.start_with?('-') }
+if snoop.length > 1 and snoop[0] == 'destroy'
+ snoop.shift # left over array snoop should be list of hosts
+ if snoop.include?('puppet') # doesn't matter then...
+ snoop = []
+ end
+else
+ # important! clear snoop because we're not using 'destroy'
+ snoop = []
+end
+
+# figure out which hosts are getting destroyed
+destroy = ARGV.select { |x| !x.start_with?('-') }
+if destroy.length > 0 and destroy[0] == 'destroy'
+ destroy.shift # left over array destroy should be list of hosts or []
+ if destroy.length == 0
+ destroy = true # destroy everything
+ end
+else
+ destroy = false # destroy nothing
+end
+
+# figure out which hosts are getting provisioned
+provision = ARGV.select { |x| !x.start_with?('-') }
+if provision.length > 0 and ['up', 'provision'].include?(provision[0])
+ provision.shift # left over array provision should be list of hosts or []
+ if provision.length == 0
+ provision = true # provision everything
+ end
+else
+ provision = false # provision nothing
+end
+
+# XXX: workaround for: https://github.com/mitchellh/vagrant/issues/2447
+# only run on 'vagrant init' or if it's the first time running vagrant
+if (ARGV.length > 0 and ARGV[0] == 'init') or not(File.exist?(f))
+ `sudo systemctl restart nfs-server`
+ `firewall-cmd --permanent --zone public --add-service mountd`
+ `firewall-cmd --permanent --zone public --add-service rpc-bind`
+ `firewall-cmd --permanent --zone public --add-service nfs`
+ `firewall-cmd --reload`
+end
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+
+ #config.landrush.enable # TODO ?
+
+ #
+ # box (pre-built base image)
+ #
+ config.vm.box = 'centos-6' # i built it
+
+ # box source url
+ # TODO: this box should be GPG signed
+ config.vm.box_url = 'https://download.gluster.org/pub/gluster/purpleidea/vagrant/centos-6.box'
+
+ #
+ # cache
+ #
+ # TODO: this doesn't cache metadata, full offline operation not possible
+ config.cache.auto_detect = true
+ config.cache.enable :yum
+ #config.cache.enable :apt
+ if not ARGV.include?('--no-parallel') # when running in parallel,
+ config.cache.scope = :machine # use the per machine cache
+ end
+ config.cache.enable_nfs = true # sets nfs => true on the synced_folder
+ # the nolock option is required, otherwise the NFSv3 client will try to
+ # access the NLM sideband protocol to lock files needed for /var/cache/
+ # all of this can be avoided by using NFSv4 everywhere. die NFSv3, die!
+ config.cache.mount_options = ['rw', 'vers=3', 'tcp', 'nolock']
+
+ #
+ # vip
+ #
+ vip_ip = range[3].to_s
+ vip_hostname = 'annex'
+
+ #
+ # puppetmaster
+ #
+ puppet_ip = range[2].to_s
+ puppet_hostname = 'puppet'
+ fv = File.join(projectdir, '.vagrant', "#{puppet_hostname}-hosts.done")
+ if destroy.is_a?(TrueClass) or (destroy.is_a?(Array) and destroy.include?(puppet_hostname))
+ if File.exists?(fv) # safety
+ puts "Unlocking shell provisioning for: #{puppet_hostname}..."
+ File.delete(fv) # delete hosts token
+ end
+ end
+
+ #puppet_fqdn = "#{puppet_hostname}.#{domain}"
+ config.vm.define :puppet, :primary => true do |vm|
+ vm.vm.hostname = puppet_hostname
+ # red herring network so that management happens here...
+ vm.vm.network :private_network,
+ :ip => "10.10.10.10",
+ #:libvirt__dhcp_enabled => false, # XXX: not allowed here
+ :libvirt__network_name => 'default'
+
+ # this is the real network that we'll use...
+ vm.vm.network :private_network,
+ :ip => puppet_ip,
+ :libvirt__dhcp_enabled => false,
+ :libvirt__network_name => 'gluster'
+
+ #vm.landrush.host puppet_hostname, puppet_ip # TODO ?
+
+ # ensure the gluster module is present for provisioning...
+ if provision.is_a?(TrueClass) or (provision.is_a?(Array) and provision.include?(puppet_hostname))
+ cwd = `pwd`
+ mod = File.join(projectdir, 'puppet', 'modules')
+ `cd #{mod} && make gluster &> /dev/null && cd #{cwd}`
+ end
+
+ #
+ # shell
+ #
+ if not File.exists?(fv) # only modify /etc/hosts once
+ if provision.is_a?(TrueClass) or (provision.is_a?(Array) and provision.include?(puppet_hostname))
+ File.open(fv, 'w') {} # touch
+ end
+ vm.vm.provision 'shell', inline: 'puppet resource host localhost.localdomain ip=127.0.0.1 host_aliases=localhost'
+ vm.vm.provision 'shell', inline: "puppet resource host #{puppet_hostname} ensure=absent" # so that fqdn works
+
+ vm.vm.provision 'shell', inline: "puppet resource host #{vip_hostname}.#{domain} ip=#{vip_ip} host_aliases=#{vip_hostname} ensure=present"
+ vm.vm.provision 'shell', inline: "puppet resource host #{puppet_hostname}.#{domain} ip=#{puppet_ip} host_aliases=#{puppet_hostname} ensure=present"
+ (1..count).each do |i|
+ h = "annex#{i}"
+ ip = range[offset+i].to_s
+ vm.vm.provision 'shell', inline: "puppet resource host #{h}.#{domain} ip=#{ip} host_aliases=#{h} ensure=present"
+ end
+ end
+ #
+ # puppet (apply)
+ #
+ vm.vm.provision :puppet do |puppet|
+ puppet.module_path = 'puppet/modules'
+ puppet.manifests_path = 'puppet/manifests'
+ puppet.manifest_file = 'site.pp'
+ # custom fact
+ puppet.facter = {
+ 'vagrant' => '1',
+ #'vagrant_puppet_allow' => "#{network.to_s}/#{cidr}", # TODO ?
+ }
+ end
+ end
+
+ #
+ # annex
+ #
+ (1..count).each do |i|
+ h = "annex#{i}"
+ ip = range[offset+i].to_s # eg: "192.168.142.#{100+i}"
+ #fqdn = "annex#{i}.#{domain}"
+ fvx = File.join(projectdir, '.vagrant', "#{h}-hosts.done")
+ if destroy.is_a?(TrueClass) or (destroy.is_a?(Array) and destroy.include?(h))
+ if File.exists?(fvx) # safety
+ puts "Unlocking shell provisioning for: #{h}..."
+ File.delete(fvx) # delete hosts token
+ end
+ end
+
+ if snoop.include?(h) # should we clean this machine?
+ cmd = "puppet cert clean #{h}.#{domain}"
+ puts "Running 'puppet cert clean' for: #{h}..."
+ `vagrant ssh #{puppet_hostname} -c 'sudo #{cmd}'`
+ cmd = "puppet node deactivate #{h}.#{domain}"
+ puts "Running 'puppet node deactivate' for: #{h}..."
+ `vagrant ssh #{puppet_hostname} -c 'sudo #{cmd}'`
+ end
+
+ config.vm.define h.to_sym do |vm|
+ vm.vm.hostname = h
+ # red herring network so that management happens here...
+ vm.vm.network :private_network,
+ :ip => "10.10.10.1#{i}",
+ :libvirt__network_name => 'default'
+
+ # this is the real network that we'll use...
+ vm.vm.network :private_network,
+ :ip => ip,
+ :libvirt__dhcp_enabled => false,
+ :libvirt__network_name => 'gluster'
+
+ #vm.landrush.host h, ip # TODO ?
+
+ #
+ # shell
+ #
+ if not File.exists?(fvx) # only modify /etc/hosts once
+ if provision.is_a?(TrueClass) or (provision.is_a?(Array) and provision.include?(puppet_hostname))
+ File.open(fvx, 'w') {} # touch
+ end
+ vm.vm.provision 'shell', inline: 'puppet resource host localhost.localdomain ip=127.0.0.1 host_aliases=localhost'
+ vm.vm.provision 'shell', inline: "puppet resource host #{h} ensure=absent" # so that fqdn works
+
+ vm.vm.provision 'shell', inline: "puppet resource host #{vip_hostname}.#{domain} ip=#{vip_ip} host_aliases=#{vip_hostname} ensure=present"
+ vm.vm.provision 'shell', inline: "puppet resource host #{puppet_hostname}.#{domain} ip=#{puppet_ip} host_aliases=#{puppet_hostname} ensure=present"
+ #vm.vm.provision 'shell', inline: "[ ! -e /root/puppet-cert-is-clean ] && ssh -o 'StrictHostKeyChecking=no' #{puppet_hostname} puppet cert clean #{h}.#{domain} ; touch /root/puppet-cert-is-clean"
+ (1..count).each do |j| # hosts entries for all hosts
+ oh = "annex#{j}"
+ oip = range[offset+j].to_s # eg: "192.168.142.#{100+i}"
+ vm.vm.provision 'shell', inline: "puppet resource host #{oh}.#{domain} ip=#{oip} host_aliases=#{oh} ensure=present"
+ end
+ end
+ #
+ # puppet (agent)
+ #
+ vm.vm.provision :puppet_server do |puppet|
+ #puppet.puppet_node = "#{h}" # redundant
+ #puppet.puppet_server = "#{puppet_hostname}.#{domain}"
+ puppet.puppet_server = puppet_hostname
+ #puppet.options = '--verbose --debug'
+ puppet.options = '--test' # see the output
+ puppet.facter = {
+ 'vagrant' => '1',
+ 'vagrant_gluster_vip' => vip_ip,
+ 'gluster_package_version' => version,
+ }
+ end
+ end
+ end
+
+ #
+ # misc
+ #
+ #config.vm.synced_folder "hacking/", "/vagrant_hacking", :nfs => true, :mount_options => ['rw', 'vers=3', 'tcp']
+
+ #
+ # libvirt
+ #
+ config.vm.provider :libvirt do |libvirt|
+ libvirt.driver = 'qemu'
+ # leave out to connect directly with qemu:///system
+ #libvirt.host = 'localhost'
+ libvirt.connect_via_ssh = false
+ libvirt.username = 'root'
+ libvirt.storage_pool_name = 'default'
+ #libvirt.default_network = 'default' # XXX: this does nothing
+ end
+
+end
+
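
NOTE: putting the ARGV parsing and the settings file together, a usage sketch
(the version string is illustrative):

	$ vagrant up --gluster-count=2 --gluster-version=3.4.2-1.el6 --no-parallel
	$ cat puppet-gluster.yaml	# the custom flags persist for later runs
	---
	:count: 2
	:version: 3.4.2-1.el6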
diff --git a/vagrant/gluster/puppet/files/README b/vagrant/gluster/puppet/files/README
new file mode 100644
index 0000000..3f5a63a
--- /dev/null
+++ b/vagrant/gluster/puppet/files/README
@@ -0,0 +1,2 @@
+This is Puppet-Gluster+Vagrant! (https://ttboj.wordpress.com/)
+
diff --git a/vagrant/gluster/puppet/hiera.yaml b/vagrant/gluster/puppet/hiera.yaml
new file mode 100644
index 0000000..5aaf25d
--- /dev/null
+++ b/vagrant/gluster/puppet/hiera.yaml
@@ -0,0 +1,7 @@
+---
+:backends:
+ - yaml
+:yaml:
+ :datadir: /etc/puppet/hieradata/
+:hierarchy:
+ - common
diff --git a/vagrant/gluster/puppet/hieradata/common.yaml b/vagrant/gluster/puppet/hieradata/common.yaml
new file mode 100644
index 0000000..d0fa005
--- /dev/null
+++ b/vagrant/gluster/puppet/hieradata/common.yaml
@@ -0,0 +1,3 @@
+---
+welcome: 'This is Puppet-Gluster+Vagrant! (https://ttboj.wordpress.com/)'
+# vim:expandtab ts=8 sw=8 sta
diff --git a/vagrant/gluster/puppet/manifests/site.pp b/vagrant/gluster/puppet/manifests/site.pp
new file mode 100644
index 0000000..fa849c5
--- /dev/null
+++ b/vagrant/gluster/puppet/manifests/site.pp
@@ -0,0 +1,123 @@
+node default {
+ # this will get put on every host...
+ $url = 'https://ttboj.wordpress.com/'
+ file { '/etc/motd':
+ content => "This is Puppet-Gluster+Vagrant! (${url})\n",
+ }
+}
+
+# puppetmaster
+node puppet inherits default {
+
+ class { '::puppet::server':
+ pluginsync => true, # do we want to enable pluginsync?
+ storeconfigs => true, # do we want to enable storeconfigs?
+ autosign => [
+ '*', # FIXME: this is a temporary solution
+ #"*.${domain}", # FIXME: this is a temporary solution
+ ],
+ #allow_duplicate_certs => true, # redeploy without cert clean
+ #allow => XXX, # also used in fileserver.conf
+ repo => true, # automatic repos
+ shorewall => false, # XXX: for now...
+ start => true,
+ }
+
+ class { '::puppet::deploy':
+ path => '/vagrant/puppet/', # puppet folder is put here...
+ backup => false, # don't use puppet to backup...
+ }
+}
+
+node /^annex\d+$/ inherits default { # annex{1,2,..N}
+
+ #include firewall # XXX: for now...
+
+ class { '::puppet::client':
+ #start => true,
+ start => false, # useful for testing manually...
+ }
+
+ # this is a simple way to setup gluster
+ class { '::gluster::simple':
+ vip => "${::vagrant_gluster_vip}", # from vagrant
+ vrrp => true,
+ shorewall => false, # XXX: for now...
+ }
+}
+
+#node /^client\d+$/ inherits default { # annex{1,2,..N}
+#
+# class { '::puppet::client':
+# #start => true,
+# start => false, # useful for testing manually...
+# }
+#
+# class { '::gluster::client':
+# shorewall => false, # XXX: for now...
+# }
+#}
+
+class firewall {
+
+ $FW = '$FW' # make using $FW in shorewall easier
+
+ class { '::shorewall::configuration':
+ # NOTE: no configuration specifics are needed at the moment
+ }
+
+ shorewall::zone { ['net', 'man']:
+ type => 'ipv4',
+ options => [], # these aren't really needed right now
+ }
+
+ # management zone interface used by vagrant-libvirt
+ shorewall::interface { 'man':
+ interface => 'MAN_IF',
+ broadcast => 'detect',
+ physical => 'eth0', # XXX: set manually!
+ options => ['dhcp', 'tcpflags', 'routefilter', 'nosmurfs', 'logmartians'],
+ comment => 'Management zone.', # FIXME: verify options
+ }
+
+ # XXX: eth1 'dummy' zone to trick vagrant-libvirt into leaving me alone
+ # <no interface definition needed>
+
+ # net zone that gluster uses to communicate
+ shorewall::interface { 'net':
+ interface => 'NET_IF',
+ broadcast => 'detect',
+ physical => 'eth2', # XXX: set manually!
+ options => ['tcpflags', 'routefilter', 'nosmurfs', 'logmartians'],
+ comment => 'Public internet zone.', # FIXME: verify options
+ }
+
+ # TODO: is this policy really what we want ? can we try to limit this ?
+ shorewall::policy { '$FW-net':
+ policy => 'ACCEPT', # TODO: shouldn't we whitelist?
+ }
+
+ shorewall::policy { '$FW-man':
+ policy => 'ACCEPT', # TODO: shouldn't we whitelist?
+ }
+
+ ####################################################################
+ #ACTION SOURCE DEST PROTO DEST SOURCE ORIGINAL
+ # PORT PORT(S) DEST
+ shorewall::rule { 'ssh': rule => "
+ SSH/ACCEPT net $FW
+ SSH/ACCEPT man $FW
+ ", comment => 'Allow SSH'}
+
+ shorewall::rule { 'ping': rule => "
+ #Ping/DROP net $FW
+ Ping/ACCEPT net $FW
+ Ping/ACCEPT man $FW
+ ", comment => 'Allow ping from the `bad` net zone'}
+
+ shorewall::rule { 'icmp': rule => "
+ ACCEPT $FW net icmp
+ ACCEPT $FW man icmp
+ ", comment => 'Allow icmp from the firewall zone'}
+}
+
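
NOTE: since the puppet client service is left with start => false above
(useful for testing manually), a hedged sketch of driving an agent run by
hand:

	$ vagrant ssh annex1 -c 'sudo puppet agent --test'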
diff --git a/vagrant/gluster/puppet/modules/.gitignore b/vagrant/gluster/puppet/modules/.gitignore
new file mode 100644
index 0000000..8a495bf
--- /dev/null
+++ b/vagrant/gluster/puppet/modules/.gitignore
@@ -0,0 +1 @@
+gluster/
diff --git a/vagrant/gluster/puppet/modules/Makefile b/vagrant/gluster/puppet/modules/Makefile
new file mode 100644
index 0000000..ff66d16
--- /dev/null
+++ b/vagrant/gluster/puppet/modules/Makefile
@@ -0,0 +1,65 @@
+# Makefile for pulling in git modules for Vagrant deployment for Puppet-Gluster
+# Copyright (C) 2010-2013+ James Shubin
+# Written by James Shubin <james@shubin.ca>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# NOTE: if we remove a module, it won't get purged from the destination!
+
+# NOTE: this script can sync puppet-gluster to a specific sha1sum commit, or it
+# can sync all of the repos to git master. This option can be useful for devel.
+
+BASE = 'https://github.com/purpleidea'
+MODULES := \
+ puppet-common \
+ puppet-gluster \
+ puppet-keepalived \
+ puppet-puppet \
+ puppet-shorewall \
+ puppet-yum \
+ puppetlabs-apt \
+ puppetlabs-stdlib
+# NOTE: set to a git commit id if we need a specific commit for vagrant builds
+# NOTE: remember that new commits to master should change this to a specific id
+# if they will break the vagrant build process. hopefully we don't forget this!
+SHA1SUM := master
+
+.PHONY: all modules gluster
+.SILENT: all modules gluster
+
+all:
+
+#
+# modules
+#
+# clone, and then pull
+modules:
+	basename `pwd` | grep -q '^modules' || exit 1	# run in a modules dir!
+	for i in $(MODULES); do \
+		j=`echo $$i | awk -F '-' '{print $$2}'`; \
+		[ -d "$$j" ] || git clone --depth 1 $(BASE)/$$i.git $$j; \
+		[ -d "$$j" ] && cd $$j && git pull; cd ..; \
+	done
+
+#
+# gluster
+#
+# just clone and pull this one
+gluster:
+	basename `pwd` | grep -q '^modules' || exit 1	# run in a modules dir!
+	i='puppet-gluster'; \
+	j=`echo $$i | awk -F '-' '{print $$2}'`; \
+	[ -d "$$j" ] || git clone ../../../../. $$j; \
+	[ -d "$$j" ] && cd $$j && git checkout master && git pull && git checkout $(SHA1SUM); cd ..
+
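
NOTE: a hedged sketch of how this Makefile is meant to be invoked; the
Vagrantfile above runs 'make gluster' automatically during provisioning:

	$ cd vagrant/gluster/puppet/modules
	$ make modules	# clone (or pull) all of the module dependencies
	$ make gluster	# clone puppet-gluster itself from the parent repo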
diff --git a/vagrant/gluster/puppet/modules/README b/vagrant/gluster/puppet/modules/README
new file mode 100644
index 0000000..c1837a5
--- /dev/null
+++ b/vagrant/gluster/puppet/modules/README
@@ -0,0 +1,22 @@
+This directory contains the puppet (git) module dependencies for Puppet-Gluster. They are included as git submodules for convenience and version compatibility. The Puppet-Gluster module itself is cloned in by a Makefile on provisioning.
+
+The one problem is Puppet-Gluster itself, since it is the parent repository to this sub-directory. There were a few options:
+
+1) Maintain the vagrant/ directory as a separate git project.
+This would make a lot of sense, but I wanted to keep the vagrant portions bundled with Puppet-Gluster, since the two are so closely connected, and for ease of distribution. In this situation, the vagrant/puppet/modules/ directory would include the Puppet-Gluster submodule along with all the other puppet (git) modules.
+
+2) Fill the vagrant/puppet/modules/ directory with git submodules.
+This would make a lot of sense because you can reference specific commits, and it's easy to recursively clone all of the necessary code for a vagrant run. The problem is that recursively referencing Puppet-Gluster itself might be a little awkward for some hackers to understand. One inconvenience is that to update the modules/ directory, you'd have to first push your code changes to the server, get the sha1 commit hash, and then change the submodule pointer in a secondary commit. This would apparently cause a cascade of extra cloning on each new commit.
+
+3) Fill the vagrant/puppet/modules/ directory with git submodules & 1 symlink.
+This option seems to be the best solution. As in #2, we use git submodules. For the tricky Puppet-Gluster recursion scenario, we symlink the correct parent directory so that the relevant puppet code is present for the puppet::deploy. This only works if the provisioner follows the symlinks. For vagrant-libvirt, rsync needs the --copy-dirlinks option added.
+
+4) Maintain a Makefile and sync in Puppet-Gluster as needed.
+This is what I've adopted for now. It works, and is mostly straightforward. If you can find a better solution, please let me know!
+
+Hope this gives you some helpful background, and thanks to #git for consulting.
+
+Happy hacking,
+
+James
+
diff --git a/vagrant/gluster/puppet/modules/apt b/vagrant/gluster/puppet/modules/apt
new file mode 160000
+Subproject 5b54eda3668a42b12e21696e500b40a27005bf2
diff --git a/vagrant/gluster/puppet/modules/common b/vagrant/gluster/puppet/modules/common
new file mode 160000
+Subproject e494d38fb912274273facd84af20a00fb27b045
diff --git a/vagrant/gluster/puppet/modules/keepalived b/vagrant/gluster/puppet/modules/keepalived
new file mode 160000
+Subproject 64b3c92f884b42836a37e215fefc809aa7f0db3
diff --git a/vagrant/gluster/puppet/modules/puppet b/vagrant/gluster/puppet/modules/puppet
new file mode 160000
+Subproject e434b38393064d61afc7b791aca986c4ef4fc05
diff --git a/vagrant/gluster/puppet/modules/shorewall b/vagrant/gluster/puppet/modules/shorewall
new file mode 160000
+Subproject c1960f6b0ec75ddc4b464b30a15d1406c717850
diff --git a/vagrant/gluster/puppet/modules/stdlib b/vagrant/gluster/puppet/modules/stdlib
new file mode 160000
+Subproject 44c181ec0e230768b8dce10de57f9b32638e66e
diff --git a/vagrant/gluster/puppet/modules/yum b/vagrant/gluster/puppet/modules/yum
new file mode 160000
+Subproject cfeff4b522676a67448a611158dbbf8116bf857