author     James Shubin <james@shubin.ca>    2012-07-27 19:32:06 -0400
committer  James Shubin <james@shubin.ca>    2012-07-27 19:32:06 -0400
commit     00c004343a0d40ec3c12ab0285dac6eb45533732 (patch)
tree       abe8e07e242b2991a4ce1be75d4b37222bd46a04 /manifests/volume.pp
parent     954431b60f54040007f044a078a6ce66cac11242 (diff)
Sadly, I could not read my own code, so I had to revert back to tabs. Sorry bodepd!
Diffstat (limited to 'manifests/volume.pp')
-rw-r--r--    manifests/volume.pp    163
1 file changed, 82 insertions, 81 deletions
diff --git a/manifests/volume.pp b/manifests/volume.pp
index 21d7fb4..ffcfe18 100644
--- a/manifests/volume.pp
+++ b/manifests/volume.pp
@@ -16,96 +16,97 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
define gluster::volume(
- $bricks = [],
- $transport = 'tcp',
- $replica = 1,
- $stripe = 1,
- $start = undef # start volume ? true, false (stop it) or undef
+ $bricks = [],
+ $transport = 'tcp',
+ $replica = 1,
+ $stripe = 1,
+ $start = undef # start volume ? true, false (stop it) or undef
) {
- # TODO: if using rdma, maybe we should pull in the rdma package... ?
- $valid_transport = $transport ? {
- 'rdma' => 'rdma',
- 'tcp,rdma' => 'tcp,rdma',
- default => 'tcp',
- }
+ # TODO: if using rdma, maybe we should pull in the rdma package... ?
+ $valid_transport = $transport ? {
+ 'rdma' => 'rdma',
+ 'tcp,rdma' => 'tcp,rdma',
+ default => 'tcp',
+ }
- $valid_replica = $replica ? {
- '1' => '',
- default => "replica ${replica} ",
- }
+ $valid_replica = $replica ? {
+ '1' => '',
+ default => "replica ${replica} ",
+ }
- $valid_stripe = $stripe ? {
- '1' => '',
- default => "stripe ${stripe} ",
- }
+ $valid_stripe = $stripe ? {
+ '1' => '',
+ default => "stripe ${stripe} ",
+ }
- #Gluster::Brick[$bricks] -> Gluster::Volume[$name] # volume requires bricks
+ #Gluster::Brick[$bricks] -> Gluster::Volume[$name] # volume requires bricks
- # get the bricks that match our fqdn, and append /$name to their path.
- # return only these paths, which can be used to build the volume dirs.
- $volume_dirs = split(inline_template("<%= bricks.find_all{|x| x.split(':')[0] == '${fqdn}' }.collect {|y| y.split(':')[1].chomp('/')+'/${name}' }.join(' ') %>"), ' ')
+ # get the bricks that match our fqdn, and append /$name to their path.
+ # return only these paths, which can be used to build the volume dirs.
+ $volume_dirs = split(inline_template("<%= bricks.find_all{|x| x.split(':')[0] == '${fqdn}' }.collect {|y| y.split(':')[1].chomp('/')+'/${name}' }.join(' ') %>"), ' ')
- file { $volume_dirs:
- ensure => directory, # make sure this is a directory
- recurse => false, # don't recurse into directory
- purge => false, # don't purge unmanaged files
- force => false, # don't purge subdirs and links
- before => Exec["gluster-volume-create-${name}"],
- require => Gluster::Brick[$bricks],
- }
+ file { $volume_dirs:
+ ensure => directory, # make sure this is a directory
+ recurse => false, # don't recurse into directory
+ purge => false, # don't purge unmanaged files
+ force => false, # don't purge subdirs and links
+ before => Exec["gluster-volume-create-${name}"],
+ require => Gluster::Brick[$bricks],
+ }
- # add /${name} to the end of each: brick:/path entry
- $brick_spec = inline_template("<%= bricks.collect {|x| ''+x.chomp('/')+'/${name}' }.join(' ') %>")
+ # add /${name} to the end of each: brick:/path entry
+ $brick_spec = inline_template("<%= bricks.collect {|x| ''+x.chomp('/')+'/${name}' }.join(' ') %>")
- # EXAMPLE: gluster volume create test replica 2 transport tcp annex1.example.com:/storage1a/test annex2.example.com:/storage2a/test annex3.example.com:/storage3b/test annex4.example.com:/storage4b/test annex1.example.com:/storage1c/test annex2.example.com:/storage2c/test annex3.example.com:/storage3d/test annex4.example.com:/storage4d/test
- # NOTE: this should only happen on one host
- # FIXME: there might be a theoretical race condition if this runs at
- # exactly the same time time on more than one host.
- # FIXME: this should probably fail on at least N-1 nodes before it
- # succeeds because it probably shouldn't work until all the bricks are
- # available, which per node will happen right before this runs.
- exec { "/usr/sbin/gluster volume create ${name} ${valid_replica}${valid_stripe}transport ${valid_transport} ${brick_spec}":
- logoutput => on_failure,
- unless => "/usr/sbin/gluster volume list | /bin/grep -qxF '${name}' -", # add volume if it doesn't exist
- #before => TODO?,
- #require => Gluster::Brick[$bricks],
- alias => "gluster-volume-create-${name}",
- }
+ # EXAMPLE: gluster volume create test replica 2 transport tcp annex1.example.com:/storage1a/test annex2.example.com:/storage2a/test annex3.example.com:/storage3b/test annex4.example.com:/storage4b/test annex1.example.com:/storage1c/test annex2.example.com:/storage2c/test annex3.example.com:/storage3d/test annex4.example.com:/storage4d/test
+ # NOTE: this should only happen on one host
+ # FIXME: there might be a theoretical race condition if this runs at
+ # exactly the same time time on more than one host.
+ # FIXME: this should probably fail on at least N-1 nodes before it
+ # succeeds because it probably shouldn't work until all the bricks are
+ # available, which per node will happen right before this runs.
+ exec { "/usr/sbin/gluster volume create ${name} ${valid_replica}${valid_stripe}transport ${valid_transport} ${brick_spec}":
+ logoutput => on_failure,
+ unless => "/usr/sbin/gluster volume list | /bin/grep -qxF '${name}' -", # add volume if it doesn't exist
+ #before => TODO?,
+ #require => Gluster::Brick[$bricks],
+ alias => "gluster-volume-create-${name}",
+ }
- # TODO:
- #if $shorewall {
- # shorewall::rule { 'gluster-TODO':
- # rule => "
- # ACCEPT ${zone} $FW tcp 24009:${endport}
- # ",
- # comment => 'TODO',
- # before => Service['glusterd'],
- # }
- #}
+ # TODO:
+ #if $shorewall {
+ # shorewall::rule { 'gluster-TODO':
+ # rule => "
+ # ACCEPT ${zone} $FW tcp 24009:${endport}
+ # ",
+ # comment => 'TODO',
+ # before => Service['glusterd'],
+ # }
+ #}
- if $start == true {
- # try to start volume if stopped
- exec { "/usr/sbin/gluster volume start ${name}":
- logoutput => on_failure,
- unless => "/usr/sbin/gluster volume status ${name}", # returns false if stopped
- require => Exec["gluster-volume-create-${name}"],
- alias => "gluster-volume-start-${name}",
- }
- } elsif ( $start == false ) {
- # try to stop volume if running
- # NOTE: this will still succeed even if a client is mounted
- # NOTE: This uses `yes` to workaround the: Stopping volume will
- # make its data inaccessible. Do you want to continue? (y/n)
- # TODO: http://community.gluster.org/q/how-can-i-make-automatic-scripts/
- # TODO: gluster --mode=script volume stop ...
- exec { "/usr/bin/yes | /usr/sbin/gluster volume stop ${name}":
- logoutput => on_failure,
- onlyif => "/usr/sbin/gluster volume status ${name}", # returns true if started
- require => Exec["gluster-volume-create-${name}"],
- alias => "gluster-volume-stop-${name}",
- }
- } else {
- # don't manage volume run state
- }
+ if $start == true {
+ # try to start volume if stopped
+ exec { "/usr/sbin/gluster volume start ${name}":
+ logoutput => on_failure,
+ unless => "/usr/sbin/gluster volume status ${name}", # returns false if stopped
+ require => Exec["gluster-volume-create-${name}"],
+ alias => "gluster-volume-start-${name}",
+ }
+ } elsif ( $start == false ) {
+ # try to stop volume if running
+ # NOTE: this will still succeed even if a client is mounted
+ # NOTE: This uses `yes` to workaround the: Stopping volume will
+ # make its data inaccessible. Do you want to continue? (y/n)
+ # TODO: http://community.gluster.org/q/how-can-i-make-automatic-scripts/
+ # TODO: gluster --mode=script volume stop ...
+ exec { "/usr/bin/yes | /usr/sbin/gluster volume stop ${name}":
+ logoutput => on_failure,
+ onlyif => "/usr/sbin/gluster volume status ${name}", # returns true if started
+ require => Exec["gluster-volume-create-${name}"],
+ alias => "gluster-volume-stop-${name}",
+ }
+ } else {
+ # don't manage volume run state
+ }
}
+# vim: ts=8
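
For reference, a minimal usage sketch of the gluster::volume define as it stands after this commit. This declaration is not part of the commit: the hostnames and brick paths are the illustrative ones from the EXAMPLE comment in the code, and the matching gluster::brick resources are assumed to be declared elsewhere for each host.

	# Assumed example declaration (illustrative hosts/paths; the referenced
	# gluster::brick resources must already be defined for these bricks).
	gluster::volume { 'test':
		replica   => 2,
		transport => 'tcp',
		bricks    => [
			'annex1.example.com:/storage1a',
			'annex2.example.com:/storage2a',
			'annex3.example.com:/storage3b',
			'annex4.example.com:/storage4b',
		],
		start     => true,	# try to start the volume once created
	}

On hosts whose fqdn matches one of the bricks, this would first create the corresponding /<brickpath>/test directories, then run a create command of the form shown in the EXAMPLE comment above, and finally start the volume because start is true.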