author	James Shubin <james@shubin.ca>	2014-01-05 16:01:50 -0500
committer	James Shubin <james@shubin.ca>	2014-01-08 22:09:21 -0500
commit	ddd1f6671de0d2b2e86aa7f0f73ddc67b690b16a (patch)
tree	ec74546da6311fc881231386b98ded150c43d981 /manifests/volume.pp
parent	36275d43f72ba1371042bd24fb3718df946f4d56 (diff)
This is Puppet-Gluster+Vagrant! (https://ttboj.wordpress.com/)
Puppet-Gluster, now with Vagrant! - Initial release. Happy hacking!
Diffstat (limited to 'manifests/volume.pp')
-rw-r--r--	manifests/volume.pp | 22
1 file changed, 21 insertions(+), 1 deletion(-)
diff --git a/manifests/volume.pp b/manifests/volume.pp
index 013915a..01ef8c5 100644
--- a/manifests/volume.pp
+++ b/manifests/volume.pp
@@ -202,6 +202,7 @@ define gluster::volume(
File["${vardir}/volume/create-${name}.sh"],
File["${vardir}/xml.py"], # status check
Gluster::Brick[$valid_bricks],
+ Exec["gluster-volume-stuck-${name}"],
],
default => [
Service['glusterd'],
@@ -209,14 +210,33 @@ define gluster::volume(
 			Package['fping'],
 			File["${vardir}/xml.py"],	# status check
 			Gluster::Brick[$valid_bricks],
+			Exec["gluster-volume-stuck-${name}"],
+		],
+	}
+
+	# work around stuck connection state (4) of: 'Accepted peer request'...
+	exec { "gluster-volume-stuck-${name}":
+		command => '/sbin/service glusterd reload',
+		logoutput => on_failure,
+		unless => "/usr/sbin/gluster volume list | /bin/grep -qxF '${name}' -",	# reconnect if it doesn't exist
+		onlyif => sprintf("/usr/sbin/gluster peer status --xml | ${vardir}/xml.py stuck %s", $others),
+		notify => Common::Again::Delta['gluster-exec-again'],
+		require => [
+			Service['glusterd'],
+			File["${vardir}/xml.py"],	# stuck check
+			Gluster::Brick[$valid_bricks],
 		],
 	}
 	# store command in a separate file to run as bash...
 	# NOTE: we sleep for 5 seconds to give glusterd a chance to
 	#	settle down first if we're doing a hot (clean) puppet run
+	# NOTE: force is needed for now because of the following error:
+	#	volume create: puppet: failed: The brick annex1.example.com:/var/lib/puppet/tmp/gluster/data/puppet is is being created in the root partition. It is recommended that you don't use the system's root partition for storage backend. Or use 'force' at the end of the command if you want to override this behavior.
+	# FIXME: it would be great to have an --allow-root-storage type option
+	#	instead, so that we don't inadvertently force some other bad thing...
file { "${vardir}/volume/create-${name}.sh":
- content => inline_template("#!/bin/bash\n/bin/sleep 5s && /usr/sbin/gluster volume create ${name} ${valid_replica}${valid_stripe}transport ${valid_transport} ${brick_spec} > >(/usr/bin/tee '/tmp/gluster-volume-create-${name}.stdout') 2> >(/usr/bin/tee '/tmp/gluster-volume-create-${name}.stderr' >&2) || (${rmdir_volume_dirs} && /bin/false)\nexit \$?\n"),
+ content => inline_template("#!/bin/bash\n/bin/sleep 5s && /usr/sbin/gluster volume create ${name} ${valid_replica}${valid_stripe}transport ${valid_transport} ${brick_spec} force > >(/usr/bin/tee '/tmp/gluster-volume-create-${name}.stdout') 2> >(/usr/bin/tee '/tmp/gluster-volume-create-${name}.stderr' >&2) || (${rmdir_volume_dirs} && /bin/false)\nexit \$?\n"),
owner => root,
group => root,
mode => 755,
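
The stuck-peer workaround above pipes the CLI's XML peer status into the module's xml.py helper, which exits successfully when a peer is wedged in connection state 4 ('Accepted peer request'), so the onlyif fires and glusterd gets reloaded. A minimal bash sketch of an equivalent test, assuming the --xml output wraps each peer's numeric state in a <state> element (the real xml.py also matches against the hostnames passed in via $others, which this grep-only version skips):

	#!/bin/bash
	# sketch only: exit 0 when any peer reports connection state 4,
	# i.e. 'Accepted peer request', which would make the exec's onlyif
	# trigger the '/sbin/service glusterd reload' workaround
	/usr/sbin/gluster peer status --xml | /bin/grep -q '<state>4</state>'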
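
To make the force change concrete, the inline_template above renders to roughly the following script for a hypothetical two-brick replica 2 volume named 'puppet' (the first host and brick path come from the error message quoted in the NOTE; the second host is invented for illustration). The force keyword goes after the brick list, which is where the gluster CLI expects it:

	#!/bin/bash
	# example expansion only; the tee redirections of stdout/stderr and
	# the rmdir cleanup on failure from the real template are omitted
	/bin/sleep 5s && /usr/sbin/gluster volume create puppet replica 2 \
		transport tcp \
		annex1.example.com:/var/lib/puppet/tmp/gluster/data/puppet \
		annex2.example.com:/var/lib/puppet/tmp/gluster/data/puppet \
		force
	exit $?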