author     James Shubin <james@shubin.ca>   2014-01-21 05:02:58 -0500
committer  James Shubin <james@shubin.ca>   2014-01-21 10:46:34 -0500
commit     51fea724f4b606326489636a0ffb7e9673600f9b (patch)
tree       5d9fc128b49fc08316b72416c6b36fc7d8e9ab98
parent     bee4993d4304730da27424dbdc73819b99d8ab5b (diff)
download   puppet-gluster-51fea724f4b606326489636a0ffb7e9673600f9b.tar.gz
           puppet-gluster-51fea724f4b606326489636a0ffb7e9673600f9b.tar.xz
           puppet-gluster-51fea724f4b606326489636a0ffb7e9673600f9b.zip
Add client mounting and associated magic.
* Rename gluster::client to gluster::mount
* Add support to gluster::mount
* Add client machines and mounts to vagrant setup
* Fixed version interface for gluster::mount and gluster::server
* Improved firewall support for gluster::mount
* Update examples to use gluster::mount instead of gluster::client
* Update documentation
* Other small fixes
-rw-r--r--  DOCUMENTATION.md                                                      49
-rw-r--r--  examples/gluster-nfs-ipa-example.pp                                    6
-rw-r--r--  examples/mount-example.pp (renamed from examples/client-example.pp)    6
-rw-r--r--  manifests/client.pp                                                   79
-rw-r--r--  manifests/mount.pp                                                   171
-rw-r--r--  manifests/mount/base.pp (renamed from manifests/client/base.pp)       32
-rw-r--r--  manifests/repo.pp                                                      4
-rw-r--r--  manifests/rulewrapper.pp                                              47
-rw-r--r--  manifests/server.pp                                                   35
-rw-r--r--  manifests/volume.pp                                                    7
-rw-r--r--  puppet-gluster-documentation.pdf                                     bin 199294 -> 202620 bytes
-rw-r--r--  vagrant/README                                                         8
-rw-r--r--  vagrant/gluster/Vagrantfile                                          138
-rw-r--r--  vagrant/gluster/puppet/manifests/site.pp                              43
14 files changed, 483 insertions, 142 deletions
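
The headline change is the rename of gluster::client to gluster::mount. For manifests tracking this module, a minimal migration sketch (mount point, server and volume names are illustrative):

```puppet
# Before this commit (type now removed):
#gluster::client { '/mnt/gshared':
#	server => 'annex.example.com:/gshared',
#	rw     => true,
#}
# After this commit, the same mount uses the new type name:
gluster::mount { '/mnt/gshared':
	server => 'annex.example.com:/gshared',
	rw     => true,
}
```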
diff --git a/DOCUMENTATION.md b/DOCUMENTATION.md
index 0953ac2..8d9efa7 100644
--- a/DOCUMENTATION.md
+++ b/DOCUMENTATION.md
@@ -33,6 +33,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
* [Simple setup](#simple-setup)
* [Elastic setup](#elastic-setup)
* [Advanced setup](#advanced-setup)
+ * [Client setup](#client-setup)
4. [Usage/FAQ - Notes on management and frequently asked questions](#usage-and-frequently-asked-questions)
5. [Reference - Class and type reference](#reference)
* [gluster::simple](#glustersimple)
@@ -42,6 +43,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
* [gluster::brick](#glusterbrick)
* [gluster::volume](#glustervolume)
* [gluster::volume::property](#glustervolumeproperty)
+ * [gluster::mount](#glustermount)
6. [Examples - Example configurations](#examples)
7. [Limitations - Puppet versions, OS compatibility, etc...](#limitations)
8. [Development - Background on module development](#development)
@@ -168,6 +170,24 @@ gluster::volume::property { 'examplevol#auth.reject':
}
```
+###Client setup
+
+Mounting a GlusterFS volume on a client is fairly straightforward. Simply use
+the 'gluster::mount' type.
+
+```puppet
+ gluster::mount { '/mnt/gluster/puppet/':
+ server => 'annex.example.com:/puppet',
+ rw => true,
+ shorewall => false,
+ }
+```
+
+In this example, 'annex.example.com' points to the VIP of the GlusterFS
+cluster. Using the VIP for mounting increases the chance that you'll get an
+available server when you try to mount. This generally works better than RRDNS
+or similar schemes.
+
##Usage and frequently asked questions
All management should be done by manipulating the arguments on the appropriate
@@ -257,6 +277,7 @@ If you feel that a well used option needs documenting here, please contact me.
* [gluster::brick](#glusterbrick): Brick type for each defined brick, per host.
* [gluster::volume](#glustervolume): Volume type for each defined volume.
* [gluster::volume::property](#glustervolumeproperty): Manages properties for each volume.
+* [gluster::mount](#glustermount): Client volume mount point management.
###gluster::simple
This is gluster::simple. It should probably take care of 80% of all use cases.
@@ -462,6 +483,34 @@ you don't use all the others.
####`value`
The value to be used for this volume property.
+###gluster::mount
+Main type used to mount GlusterFS volumes. This type offers special features
+such as shorewall integration and repo support.
+
+####`server`
+Server specification to use when mounting. Format is _<server>:/volume_. You
+may use an _FQDN_ or an _IP address_ to specify the server.
+
+####`rw`
+Mount read-write or read-only. Defaults to read-only. Specify _true_ for
+read-write.
+
+####`mounted`
+The mounted argument from the standard mount type. Defaults to _true_ (_mounted_).
+
+####`repo`
+Boolean to select whether you want automatic repository (package) management.
+
+####`version`
+Specify which GlusterFS version you'd like to use.
+
+####`ip`
+IP address of this client. This is usually auto-detected, but you can choose
+your own value manually in case there are multiple options available.
+
+####`shorewall`
+Boolean to specify whether puppet-shorewall integration should be used.
+
##Examples
For example configurations, please consult the [examples/](https://github.com/purpleidea/puppet-gluster/tree/master/examples) directory in the git
source repository. It is available from:
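
As a consolidated view of the parameters documented above, here is a hedged sketch combining them (the hostname, version string, and IP address are illustrative):

```puppet
gluster::mount { '/mnt/gluster/puppet/':
	server    => 'annex.example.com:/puppet', # <server>:/volume
	rw        => true,         # read-write; the default is read-only
	mounted   => true,         # ensure mounted (the default)
	repo      => true,         # manage the package repository automatically
	version   => '3.4.2',      # pin a GlusterFS version (illustrative)
	ip        => '192.0.2.13', # override the auto-detected client IP
	shorewall => false,        # no puppet-shorewall integration
}
```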
diff --git a/examples/gluster-nfs-ipa-example.pp b/examples/gluster-nfs-ipa-example.pp
index 739c5c4..b08c7f2 100644
--- a/examples/gluster-nfs-ipa-example.pp
+++ b/examples/gluster-nfs-ipa-example.pp
@@ -1,9 +1,9 @@
-# gluster::client example using puppet-nfs and puppet-ipa to serve up your data
+# gluster::mount example using puppet-nfs and puppet-ipa to serve up your data!
# NOTE: you'll need to consult puppet-ipa/examples/ to setup the freeipa server
# mount a share on your nfs server, at the moment that nfs server is a SPOF :-(
$gvip = '203.0.113.42'
-gluster::client { '/export/homes':
+gluster::mount { '/export/homes':
server => "${gvip}:/homes",
rw => true,
mounted => true,
@@ -31,7 +31,7 @@ nfs::server::export { '/homes/': # name is the client mountpoint
tagas => 'homes',
safety => false, # be super clever (see the module docs)
comment => 'Export home directories for ws*',
- require => Gluster::Client['/export/homes/'],
+ require => Gluster::Mount['/export/homes/'],
}
# and here is how you can collect / mount ~automatically on the client:
diff --git a/examples/client-example.pp b/examples/mount-example.pp
index 2e3c8d8..90930e3 100644
--- a/examples/client-example.pp
+++ b/examples/mount-example.pp
@@ -1,11 +1,11 @@
-# gluster::client example
+# gluster::mount example
# This is the recommended way of mounting puppet-gluster.
# NOTE: It makes sense to use the VIP as the server to mount from, since it
# stays HA if one of the other nodes goes down.
# mount a share on one of the gluster hosts (note the added require)
$annex_loc_vip_1 = '172.16.1.80'
-gluster::client { '/mnt/gshared':
+gluster::mount { '/mnt/gshared':
server => "${annex_loc_vip_1}:/gshared",
rw => true,
mounted => true,
@@ -13,7 +13,7 @@ gluster::client { '/mnt/gshared':
}
# mount a share on a client somewhere
-gluster::client { '/mnt/some_mount_point':
+gluster::mount { '/mnt/some_mount_point':
server => "${annex_loc_vip_1}:/some_volume_name",
rw => true,
mounted => true,
diff --git a/manifests/client.pp b/manifests/client.pp
deleted file mode 100644
index 5ffe054..0000000
--- a/manifests/client.pp
+++ /dev/null
@@ -1,79 +0,0 @@
-# GlusterFS module by James
-# Copyright (C) 2010-2013+ James Shubin
-# Written by James Shubin <james@shubin.ca>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# XXX: try mounting with: glusterfs --volfile-server=<server-address> --volfile-id=<volume-name> <mount-point> --xlator-option='*dht*.assert-no-child-down=yes' # TODO: quotes or not?
-define gluster::client(
- $server, # NOTE: use a vip as server hostname
- $rw = false, # mount read only (true) or rw (false)
-# $suid = false, # mount with suid (true) or nosuid (false) # TODO: will this work with gluster ?
- $mounted = true # useful if we want to pull in the group
- # defs, but not actually mount (testing)
-) {
- #mount -t glusterfs brick1.example.com:/test /test
- include gluster::client::base
-
- $rw_bool = $rw ? {
- true => 'rw',
- default => 'ro',
- }
-
- # TODO: will this work with gluster ?
- #$suid_bool = $suid ? {
- # true => 'suid',
- # default => 'nosuid',
- #}
-
- $mounted_bool = $mounted ? {
- true => mounted,
- default => unmounted,
- }
-
- # make an empty directory for the mount point
- file { "${name}":
- ensure => directory, # make sure this is a directory
- recurse => false, # don't recurse into directory
- purge => false, # don't purge unmanaged files
- force => false, # don't purge subdirs and links
- }
-
- # Mount Options:
- # * backupvolfile-server=server-name
- # * fetch-attempts=N (where N is number of attempts)
- # * log-level=loglevel
- # * log-file=logfile
- # * direct-io-mode=[enable|disable]
- # * ro (for readonly mounts)
- # * acl (for enabling posix-ACLs)
- # * worm (making the mount WORM - Write Once, Read Many type)
- # * selinux (enable selinux on GlusterFS mount
- mount { "${name}":
- atboot => true,
- ensure => $mounted_bool,
- device => "${server}",
- fstype => 'glusterfs',
- options => "defaults,_netdev,${rw_bool}", # TODO: will $suid_bool work with gluster ?
- dump => '0', # fs_freq: 0 to skip file system dumps
- pass => '0', # fs_passno: 0 to skip fsck on boot
- require => [
- Package[['glusterfs', 'glusterfs-fuse']],
- File["${name}"], # the mountpoint
- Exec['gluster-fuse'], # ensure fuse is loaded
- ],
- }
-}
-
-# vim: ts=8
diff --git a/manifests/mount.pp b/manifests/mount.pp
new file mode 100644
index 0000000..27b6baa
--- /dev/null
+++ b/manifests/mount.pp
@@ -0,0 +1,171 @@
+# GlusterFS module by James
+# Copyright (C) 2010-2013+ James Shubin
+# Written by James Shubin <james@shubin.ca>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# XXX: try mounting with: glusterfs --volfile-server=<server-address> --volfile-id=<volume-name> <mount-point> --xlator-option='*dht*.assert-no-child-down=yes' # TODO: quotes or not?
+define gluster::mount(
+ $server, # NOTE: use a vip as server hostname
+ $rw = false, # mount read only (true) or rw (false)
+# $suid = false, # mount with suid (true) or nosuid (false) # TODO: will this work with gluster ?
+ $mounted = true, # useful if we want to pull in the group
+ # defs, but not actually mount (testing)
+ $repo = true, # add a repo automatically? true or false
+ $version = '', # pick a specific version (defaults to latest)
+ $ip = '', # you can specify which ip address to use (if multiple)
+ $shorewall = false
+) {
+ #mount -t glusterfs brick1.example.com:/test /test
+ #include gluster::mount::base
+ #class { '::gluster::mount::base':
+ # repo => $repo,
+ # version => $version,
+ #}
+ $params = {
+ 'repo' => $repo,
+ 'version' => $version,
+ }
+ # because multiple gluster::mount types are allowed on the same server,
+ # we include with the ensure_resource function to avoid identical calls
+ ensure_resource('class', '::gluster::mount::base', $params)
+
+ # eg: vip:/volume
+ $split = split($server, ':') # do some $server parsing
+ $host = $split[0] # host fqdn or ip (eg: vip)
+ # NOTE: technically $path should be everything BUT split[0]. This
+ # lets our $path include colons if for some reason they're needed.
+ #$path = $split[1] # volume
+ # TODO: create substring function
+ $path = inline_template("<%= '${server}'.slice('${host}'.length+1, '${server}'.length-'${host}'.length-1) %>")
+ $short_path = sprintf("%s", regsubst($path, '\/$', '')) # no trailing
+ #$valid_path = sprintf("%s/", regsubst($path, '\/$', ''))
+ $volume = sprintf("%s", regsubst($short_path, '^\/', '')) # no leading
+
+ if ! ( "${host}:${path}" == "${server}" ) {
+ fail('The $server must match a $host:$path pattern.')
+ }
+
+ if ! ( "${host}:/${volume}" == "${server}" ) {
+ fail('The $server must match a $host:/$volume pattern.')
+ }
+
+ $short_name = sprintf("%s", regsubst("${name}", '\/$', '')) # no trailing
+ $long_name = sprintf("%s/", regsubst("${name}", '\/$', '')) # trailing...
+
+ $valid_ip = "${ip}" ? {
+ '' => "${::gluster_host_ip}" ? { # smart fact...
+ '' => "${::ipaddress}", # puppet picks!
+ default => "${::gluster_host_ip}", # smart
+ },
+ default => "${ip}", # user selected
+ }
+ if "${valid_ip}" == '' {
+ fail('No valid IP exists!')
+ }
+
+ if $shorewall {
+ $safename = regsubst("${name}", '/', '_', 'G') # make /'s safe
+ @@shorewall::rule { "glusterd-management-${fqdn}-${safename}":
+ #@@shorewall::rule { "glusterd-management-${volume}-${fqdn}":
+ action => 'ACCEPT',
+ source => '', # override this on collect...
+ source_ips => ["${valid_ip}"],
+ dest => '$FW',
+ proto => 'tcp',
+ port => '24007',
+ comment => 'Allow incoming tcp:24007 from each glusterd.',
+ tag => 'gluster_firewall_management',
+ ensure => present,
+ }
+
+ # wrap shorewall::rule in a fake type so that we can add $match
+ #@@shorewall::rule { "gluster-volume-${fqdn}-${safename}":
+ @@gluster::rulewrapper { "gluster-volume-${fqdn}-${safename}":
+ action => 'ACCEPT',
+ source => '', # override this on collect...
+ source_ips => ["${valid_ip}"],
+ dest => '$FW',
+ proto => 'tcp',
+ port => '', # override this on collect...
+ #comment => "${fqdn}",
+ comment => 'Allow incoming tcp port from glusterfsds.',
+ tag => 'gluster_firewall_volume',
+ match => "${volume}", # used for collection
+ ensure => present,
+ }
+ }
+
+ $rw_bool = $rw ? {
+ true => 'rw',
+ default => 'ro',
+ }
+
+ # TODO: will this work with gluster ?
+ #$suid_bool = $suid ? {
+ # true => 'suid',
+ # default => 'nosuid',
+ #}
+
+ $mounted_bool = $mounted ? {
+ false => unmounted,
+ default => mounted,
+ }
+
+ # ensure parent directories exist
+ exec { "gluster-mount-mkdir-${name}":
+ command => "/bin/mkdir -p '${long_name}'",
+ creates => "${long_name}",
+ logoutput => on_failure,
+ before => File["${long_name}"],
+ }
+
+ # make an empty directory for the mount point
+ file { "${long_name}": # ensure a trailing slash
+ ensure => directory, # make sure this is a directory
+ recurse => false, # don't recurse into directory
+ purge => false, # don't purge unmanaged files
+ force => false, # don't purge subdirs and links
+ alias => "${short_name}", # don't allow duplicates name's
+ }
+
+ # Mount Options:
+ # * backupvolfile-server=server-name
+ # * fetch-attempts=N (where N is number of attempts)
+ # * log-level=loglevel
+ # * log-file=logfile
+ # * direct-io-mode=[enable|disable]
+ # * ro (for readonly mounts)
+ # * acl (for enabling posix-ACLs)
+ # * worm (making the mount WORM - Write Once, Read Many type)
+ # * selinux (enable selinux on GlusterFS mount)
+ # XXX: consider mounting only if some exported resource, collected and turned into a fact shows that the volume is available...
+ # XXX: or something... consider adding the notify => Poke[] functionality
+ mount { "${short_name}":
+ atboot => true,
+ ensure => $mounted_bool,
+ device => "${server}",
+ fstype => 'glusterfs',
+ options => "defaults,_netdev,${rw_bool}", # TODO: will $suid_bool work with gluster ?
+ dump => '0', # fs_freq: 0 to skip file system dumps
+ pass => '0', # fs_passno: 0 to skip fsck on boot
+ require => [
+ Package[['glusterfs', 'glusterfs-fuse']],
+ File["${long_name}"], # the mountpoint
+ Exec['gluster-fuse'], # ensure fuse is loaded!
+ ],
+ }
+}
+
+# vim: ts=8
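
The $server parsing in this new type is worth tracing once: the slice-based inline_template keeps any extra colons in the path, unlike taking split[1] directly. A standalone sketch of what it yields (the input string is illustrative):

```puppet
$server = 'annex.example.com:/puppet'  # illustrative <host>:/<volume> input
$split  = split($server, ':')          # => ['annex.example.com', '/puppet']
$host   = $split[0]                    # => 'annex.example.com'
# everything after the first ':', so colons inside the path would survive:
$path   = inline_template("<%= '${server}'.slice('${host}'.length+1, '${server}'.length-'${host}'.length-1) %>")
$volume = regsubst(regsubst($path, '\/$', ''), '^\/', '') # => 'puppet'
notice("host=${host} path=${path} volume=${volume}")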
diff --git a/manifests/client/base.pp b/manifests/mount/base.pp
index 9ec232b..441ebe5 100644
--- a/manifests/client/base.pp
+++ b/manifests/mount/base.pp
@@ -15,10 +15,36 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-class gluster::client::base {
- # TODO: ensure these are from our 'gluster' repo
+class gluster::mount::base(
+ $repo = true, # add a repo automatically? true or false
+ $version = '' # pick a specific version (defaults to latest)
+) {
+ include gluster::vardir
+ #$vardir = $::gluster::vardir::module_vardir # with trailing slash
+ $vardir = regsubst($::gluster::vardir::module_vardir, '\/$', '')
+
+ # if we use ::mount and ::server on the same machine, this could clash,
+ # so we use the ensure_resource function to allow identical duplicates!
+ $rname = "${version}" ? {
+ '' => 'gluster',
+ default => "gluster-${version}",
+ }
+ if $repo {
+ $params = {
+ 'version' => "${version}",
+ }
+ ensure_resource('gluster::repo', "${rname}", $params)
+ }
+
package { ['glusterfs', 'glusterfs-fuse']:
- ensure => present,
+ ensure => "${version}" ? {
+ '' => present,
+ default => "${version}",
+ },
+ require => $repo ? {
+ false => undef,
+ default => Gluster::Repo["${rname}"],
+ },
}
# FIXME: choose a reliable and correct way to ensure fuse is loaded
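
Both gluster::mount::base and gluster::server now declare the repo via stdlib's ensure_resource, which is what makes co-locating a client and a server on one node safe. A minimal sketch of the semantics (title and version are illustrative):

```puppet
$params = { 'version' => '3.4.2' }
ensure_resource('gluster::repo', 'gluster-3.4.2', $params)
# an identical second call is a no-op rather than a duplicate declaration:
ensure_resource('gluster::repo', 'gluster-3.4.2', $params)
# the same title with different params would still fail the compile.
```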
diff --git a/manifests/repo.pp b/manifests/repo.pp
index ca5cecf..3c45ee0 100644
--- a/manifests/repo.pp
+++ b/manifests/repo.pp
@@ -15,7 +15,7 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-class gluster::repo(
+define gluster::repo(
# if you specify 'x.y', it will find the latest x.y.*
# if you specify 'x.y.z', it will stick to that version
# anything omitted is taken to mean "latest"
@@ -96,7 +96,7 @@ class gluster::repo(
include ::yum
#yum::repos::repo { "gluster-${arch}":
- yum::repos::repo { 'gluster':
+ yum::repos::repo { "${name}":
baseurl => "${base_arch}${arch}/",
enabled => true,
gpgcheck => true,
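
Turning gluster::repo into a define is what permits the per-version resource titles above; the inner yum::repos::repo now inherits that unique ${name}. For example (version illustrative):

```puppet
gluster::repo { 'gluster-3.4.2':
	version => '3.4.2',
}
# ...matching the $rname naming scheme in ::mount::base and ::server.
```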
diff --git a/manifests/rulewrapper.pp b/manifests/rulewrapper.pp
new file mode 100644
index 0000000..4ac6419
--- /dev/null
+++ b/manifests/rulewrapper.pp
@@ -0,0 +1,47 @@
+# GlusterFS module by James
+# Copyright (C) 2012-2013+ James Shubin
+# Written by James Shubin <james@shubin.ca>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# NOTE: this wraps shorewall::rule so that we can add on additional fake 'tags'
+define gluster::rulewrapper(
+ $action = '',
+ $source = '',
+ $source_ips = [],
+ $dest = '',
+ $dest_ips = [],
+ $proto = '',
+ $port = [],
+ $sport = [],
+ $original = [],
+ $comment = '',
+ $ensure = present,
+ $match = '' # additional tag parameter
+) {
+ shorewall::rule { "${name}":
+ action => "${action}",
+ source => "${source}",
+ source_ips => $source_ips,
+ dest => "${dest}",
+ dest_ips => $dest_ips,
+ proto => "${proto}",
+ port => $port,
+ sport => $sport,
+ comment => "${comment}",
+ ensure => $ensure,
+ }
+}
+
+# vim: ts=8
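
Note that $match is deliberately not forwarded to shorewall::rule; it only gives collectors an extra key to filter exported rules per volume. The collection side appears later in the volume.pp hunk; a condensed sketch (zone and port values are illustrative):

```puppet
Gluster::Rulewrapper <<| tag == 'gluster_firewall_volume' and match == 'examplevol' |>> {
	source => 'net',   # the collector's shorewall zone (illustrative)
	port   => '49152', # the per-volume brick port (illustrative)
}
```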
diff --git a/manifests/server.pp b/manifests/server.pp
index 158f873..247f4d4 100644
--- a/manifests/server.pp
+++ b/manifests/server.pp
@@ -29,30 +29,17 @@ class gluster::server(
) {
$FW = '$FW' # make using $FW in shorewall easier
- # $gluster_package_version is a fact; commonly set by vagrant
- if "${version}" == '' and "${gluster_package_version}" == '' {
- $valid_version = ''
- } else {
- if "${version}" != '' and "${gluster_package_version}" != '' {
- warning('Requested GlusterFS version specified twice!')
- if "${version}" != "${gluster_package_version}" {
- fail('Requested GlusterFS version mismatch!')
- }
- $valid_version = "${version}"
- } elsif "${version}" != '' {
- $valid_version = "${version}"
- } elsif "${gluster_package_version}" != '' {
- $valid_version = "${gluster_package_version}"
- } else {
- fail('Programming error!')
- }
+ # if we use ::mount and ::server on the same machine, this could clash,
+ # so we use the ensure_resource function to allow identical duplicates!
+ $rname = "${version}" ? {
+ '' => 'gluster',
+ default => "gluster-${version}",
}
-
- # ensure these are from a gluster repo
if $repo {
- class { '::gluster::repo':
- version => "${valid_version}",
+ $params = {
+ 'version' => "${version}",
}
+ ensure_resource('gluster::repo', "${rname}", $params)
}
package { 'moreutils': # for scripts needing: 'sponge'
@@ -61,13 +48,13 @@ class gluster::server(
}
package { 'glusterfs-server':
- ensure => "${valid_version}" ? {
+ ensure => "${version}" ? {
'' => present,
- default => "${valid_version}",
+ default => "${version}",
},
require => $repo ? {
false => undef,
- default => Class['::gluster::repo'],
+ default => Gluster::Repo["${rname}"],
},
}
diff --git a/manifests/volume.pp b/manifests/volume.pp
index 01ef8c5..513a431 100644
--- a/manifests/volume.pp
+++ b/manifests/volume.pp
@@ -346,6 +346,13 @@ define gluster::volume(
source => "${zone}", # use our source zone
before => Service['glusterd'],
}
+
+ Gluster::Rulewrapper <<| tag == 'gluster_firewall_volume' and match == "${name}" |>> {
+ #Shorewall::Rule <<| tag == 'gluster_firewall_volume' and match == "${name}" |>> {
+ source => "${zone}", # use our source zone
+ port => "${port}", # comma separated string or list
+ before => Service['glusterd'],
+ }
}
# fsm variables and boilerplate
diff --git a/puppet-gluster-documentation.pdf b/puppet-gluster-documentation.pdf
index a9c2885..02e5e1c 100644
--- a/puppet-gluster-documentation.pdf
+++ b/puppet-gluster-documentation.pdf
Binary files differ
diff --git a/vagrant/README b/vagrant/README
index e73ef0a..7918d11 100644
--- a/vagrant/README
+++ b/vagrant/README
@@ -14,12 +14,16 @@ This will not work perfectly on Fedora 19. You must use Fedora 20 or greater.
Once you're comfortable that vagrant is working properly, run this command:
- vagrant up puppet && sudo -v && vagrant up
+ vagrant up puppet && sudo -v && vagrant up annex{1..4} --no-parallel
Sit back and watch, or go have a beverage...
The first run can take a while because it has to download/install a base image.
-When the above command completes, puppet will still be provisioning your hosts. Once the cluster state settles, puppet will then create a gluster volume.
+When the above command completes, puppet will still be provisioning your hosts.
+Once the cluster state settles, puppet will create and start a gluster volume.
+Once your volume is started, you can build a few clients:
+
+ vagrant up client{1..2} --gluster-clients=2
Happy hacking,
diff --git a/vagrant/gluster/Vagrantfile b/vagrant/gluster/Vagrantfile
index a36dffd..8408d9d 100644
--- a/vagrant/gluster/Vagrantfile
+++ b/vagrant/gluster/Vagrantfile
@@ -61,7 +61,8 @@ offset = 100 # start gluster hosts after here
count = 4 # default number of gluster hosts to build
version = '' # default gluster version (empty string means latest!)
firewall = false # default firewall enabled (FIXME: default to true when keepalived bug is fixed)
-cachier = false # default cachier usage (FIXME: default to true when patches are upstream)
+clients = 1 # default number of gluster clients to build
+cachier = false # default cachier usage
#
# ARGV parsing
@@ -75,6 +76,7 @@ if File.exist?(f)
count = settings[:count]
version = settings[:version]
firewall = settings[:firewall]
+ clients = settings[:clients]
cachier = settings[:cachier]
end
@@ -106,15 +108,21 @@ while skip < ARGV.length
firewall = true
end
+ elsif ARGV[skip].start_with?(arg='--gluster-clients=')
+ v = ARGV.delete_at(skip).dup
+ v.slice! arg
+
+ clients = v.to_i # set gluster client count
+
elsif ARGV[skip].start_with?(arg='--gluster-cachier=')
v = ARGV.delete_at(skip).dup
v.slice! arg
cachier = v.to_s # set cachier flag
- if ['false', 'no'].include?(cachier.downcase)
- cachier = false
- else
+ if ['true', 'yes'].include?(cachier.downcase)
cachier = true
+ else
+ cachier = false
end
else # skip over "official" vagrant args
@@ -123,7 +131,7 @@ while skip < ARGV.length
end
# save settings (ARGV overrides)
-settings = {:count => count, :version => version, :firewall => firewall, :cachier => cachier}
+settings = {:count => count, :version => version, :firewall => firewall, :clients => clients, :cachier => cachier}
File.open(f, 'w') do |file|
file.write settings.to_yaml
end
@@ -190,6 +198,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
#
# cache
#
+ # NOTE: you should probably erase the cache between rebuilds if you are
+ # installing older package versions. This is because the newer packages
+ # will get cached, and then subsequently might get silently installed!!
if cachier
# TODO: this doesn't cache metadata, full offline operation not possible
config.cache.auto_detect = true
@@ -229,7 +240,8 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
vm.vm.hostname = puppet_hostname
# red herring network so that management happens here...
vm.vm.network :private_network,
- :ip => "10.10.10.10",
+ :ip => "10.10.1.#{1+10}", # FIXME: remove the +10, see: https://github.com/mitchellh/vagrant/issues/2868
+ :libvirt__netmask => '255.255.0.0',
#:libvirt__dhcp_enabled => false, # XXX: not allowed here
:libvirt__network_name => 'default'
@@ -265,6 +277,13 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
ip = range[offset+i].to_s
vm.vm.provision 'shell', inline: "puppet resource host #{h}.#{domain} ip=#{ip} host_aliases=#{h} ensure=present"
end
+
+ # hosts entries for all clients
+ (1..clients).each do |i|
+ h = "client#{i}"
+ ip = range[offset+count+i].to_s
+ vm.vm.provision 'shell', inline: "puppet resource host #{h}.#{domain} ip=#{ip} host_aliases=#{h} ensure=present"
+ end
end
#
# puppet (apply)
@@ -310,7 +329,8 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
vm.vm.hostname = h
# red herring network so that management happens here...
vm.vm.network :private_network,
- :ip => "10.10.10.1#{i}",
+ :ip => "10.10.2.#{i+10}", # FIXME: remove the +10, see: https://github.com/mitchellh/vagrant/issues/2868
+ :libvirt__netmask => '255.255.0.0',
:libvirt__network_name => 'default'
# this is the real network that we'll use...
@@ -325,7 +345,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# shell
#
if not File.exists?(fvx) # only modify /etc/hosts once
- if provision.is_a?(TrueClass) or (provision.is_a?(Array) and provision.include?(puppet_hostname))
+ if provision.is_a?(TrueClass) or (provision.is_a?(Array) and provision.include?(h))
File.open(fvx, 'w') {} # touch
end
vm.vm.provision 'shell', inline: 'puppet resource host localhost.localdomain ip=127.0.0.1 host_aliases=localhost'
@@ -334,11 +354,107 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
vm.vm.provision 'shell', inline: "puppet resource host #{vip_hostname}.#{domain} ip=#{vip_ip} host_aliases=#{vip_hostname} ensure=present"
vm.vm.provision 'shell', inline: "puppet resource host #{puppet_hostname}.#{domain} ip=#{puppet_ip} host_aliases=#{puppet_hostname} ensure=present"
#vm.vm.provision 'shell', inline: "[ ! -e /root/puppet-cert-is-clean ] && ssh -o 'StrictHostKeyChecking=no' #{puppet_hostname} puppet cert clean #{h}.#{domain} ; touch /root/puppet-cert-is-clean"
- (1..count).each do |j| # hosts entries for all hosts
+ # hosts entries for all hosts
+ (1..count).each do |j|
+ oh = "annex#{j}"
+ oip = range[offset+j].to_s # eg: "192.168.142.#{100+i}"
+ vm.vm.provision 'shell', inline: "puppet resource host #{oh}.#{domain} ip=#{oip} host_aliases=#{oh} ensure=present"
+ end
+
+ # hosts entries for all clients
+ (1..clients).each do |j|
+ oh = "client#{j}"
+ oip = range[offset+count+j].to_s
+ vm.vm.provision 'shell', inline: "puppet resource host #{oh}.#{domain} ip=#{oip} host_aliases=#{oh} ensure=present"
+ end
+ end
+ #
+ # puppet (agent)
+ #
+ vm.vm.provision :puppet_server do |puppet|
+ #puppet.puppet_node = "#{h}" # redundant
+ #puppet.puppet_server = "#{puppet_hostname}.#{domain}"
+ puppet.puppet_server = puppet_hostname
+ #puppet.options = '--verbose --debug'
+ puppet.options = '--test' # see the output
+ puppet.facter = {
+ 'vagrant' => '1',
+ 'vagrant_gluster_vip' => vip_ip,
+ 'vagrant_gluster_vip_fqdn' => "#{vip_hostname}.#{domain}",
+ 'vagrant_gluster_firewall' => firewall ? 'true' : 'false',
+ 'vagrant_gluster_version' => version,
+ }
+ end
+ end
+ end
+
+ #
+ # client
+ #
+ (1..clients).each do |i|
+ h = "client#{i}"
+ ip = range[offset+count+i].to_s
+ #fqdn = "annex#{i}.#{domain}"
+ fvy = File.join(projectdir, '.vagrant', "#{h}-hosts.done")
+ if destroy.is_a?(TrueClass) or (destroy.is_a?(Array) and destroy.include?(h))
+ if File.exists?(fvy) # safety
+ puts "Unlocking shell provisioning for: #{h}..."
+ File.delete(fvy) # delete hosts token
+ end
+ end
+
+ if snoop.include?(h) # should we clean this machine?
+ cmd = "puppet cert clean #{h}.#{domain}"
+ puts "Running 'puppet cert clean' for: #{h}..."
+ `vagrant ssh #{puppet_hostname} -c 'sudo #{cmd}'`
+ cmd = "puppet node deactivate #{h}.#{domain}"
+ puts "Running 'puppet node deactivate' for: #{h}..."
+ `vagrant ssh #{puppet_hostname} -c 'sudo #{cmd}'`
+ end
+
+ config.vm.define h.to_sym do |vm|
+ vm.vm.hostname = h
+ # red herring network so that management happens here...
+ vm.vm.network :private_network,
+ :ip => "10.10.3.#{i+10}", # FIXME: remove the +10, see: https://github.com/mitchellh/vagrant/issues/2868
+ :libvirt__netmask => '255.255.0.0',
+ :libvirt__network_name => 'default'
+
+ # this is the real network that we'll use...
+ vm.vm.network :private_network,
+ :ip => ip,
+ :libvirt__dhcp_enabled => false,
+ :libvirt__network_name => 'gluster'
+
+ #vm.landrush.host h, ip # TODO ?
+
+ #
+ # shell
+ #
+ if not File.exists?(fvy) # only modify /etc/hosts once
+ if provision.is_a?(TrueClass) or (provision.is_a?(Array) and provision.include?(h))
+ File.open(fvy, 'w') {} # touch
+ end
+ vm.vm.provision 'shell', inline: 'puppet resource host localhost.localdomain ip=127.0.0.1 host_aliases=localhost'
+ vm.vm.provision 'shell', inline: "puppet resource host #{h} ensure=absent" # so that fqdn works
+
+ vm.vm.provision 'shell', inline: "puppet resource host #{vip_hostname}.#{domain} ip=#{vip_ip} host_aliases=#{vip_hostname} ensure=present"
+ vm.vm.provision 'shell', inline: "puppet resource host #{puppet_hostname}.#{domain} ip=#{puppet_ip} host_aliases=#{puppet_hostname} ensure=present"
+ # hosts entries for all hosts
+ (1..count).each do |j|
oh = "annex#{j}"
oip = range[offset+j].to_s # eg: "192.168.142.#{100+i}"
vm.vm.provision 'shell', inline: "puppet resource host #{oh}.#{domain} ip=#{oip} host_aliases=#{oh} ensure=present"
end
+
+ # NOTE: we don't add ip's of other clients
+ # because it's probably not needed...
+ # hosts entries for all clients
+ #(1..clients).each do |j|
+ # oh = "client#{j}"
+ # oip = range[offset+count+j].to_s
+ # vm.vm.provision 'shell', inline: "puppet resource host #{oh}.#{domain} ip=#{oip} host_aliases=#{oh} ensure=present"
+ #end
end
#
# puppet (agent)
@@ -352,9 +468,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
puppet.facter = {
'vagrant' => '1',
'vagrant_gluster_vip' => vip_ip,
+ 'vagrant_gluster_vip_fqdn' => "#{vip_hostname}.#{domain}",
'vagrant_gluster_firewall' => firewall ? 'true' : 'false',
- 'vagrant_gluster_allow' => (1..count).map{|z| range[offset+z].to_s}.join(','),
- 'gluster_package_version' => version,
+ 'vagrant_gluster_version' => version,
}
end
end
diff --git a/vagrant/gluster/puppet/manifests/site.pp b/vagrant/gluster/puppet/manifests/site.pp
index c13726c..a26200f 100644
--- a/vagrant/gluster/puppet/manifests/site.pp
+++ b/vagrant/gluster/puppet/manifests/site.pp
@@ -51,7 +51,9 @@ node /^annex\d+$/ inherits default { # annex{1,2,..N}
# this is a simple way to setup gluster
class { '::gluster::simple':
+ volume => 'puppet',
vip => "${::vagrant_gluster_vip}", # from vagrant
+ version => "${::vagrant_gluster_version}",
vrrp => true,
shorewall => "${::vagrant_gluster_firewall}" ? {
'false' => false,
@@ -60,21 +62,32 @@ node /^annex\d+$/ inherits default { # annex{1,2,..N}
}
}
-#node /^client\d+$/ inherits default { # annex{1,2,..N}
-#
-# class { '::puppet::client':
-# #start => true,
-# start => false, # useful for testing manually...
-# }
-#
-# class { '::gluster::client':
-# shorewall => "${::vagrant_gluster_firewall}" ? {
-# 'false' => false,
-# default => true,
-# },
-#
-# }
-#}
+node /^client\d+$/ inherits default { # client{1,2,..N}
+
+ if "${::vagrant_gluster_firewall}" != 'false' {
+ include firewall
+ }
+
+ class { '::puppet::client':
+ #start => true,
+ start => false, # useful for testing manually...
+ }
+
+ $host = "${::vagrant_gluster_vip_fqdn}" ? {
+ '' => "${::vagrant_gluster_vip}",
+ default => "${::vagrant_gluster_vip_fqdn}",
+ }
+
+ gluster::mount { '/mnt/gluster/puppet/':
+ server => "${host}:/puppet",
+ rw => true,
+ version => "${::vagrant_gluster_version}",
+ shorewall => "${::vagrant_gluster_firewall}" ? {
+ 'false' => false,
+ default => true,
+ },
+ }
+}
class firewall {