author    James Shubin <james@shubin.ca>  2012-07-26 16:01:22 -0400
committer James Shubin <james@shubin.ca>  2012-07-26 16:01:22 -0400
commit    04b30ca4c784e2bfaadeea28f33214e666f1222b (patch)
tree      6c6166c067cde426a421da9c4c45133238056dd7
parent    61cf89af55bd57a224f0e600746a05a71fdb4c95 (diff)
Change tabs to spaces as per bodepd's insistence :)
-rw-r--r--  manifests/brick.pp       | 326
-rw-r--r--  manifests/client.pp      | 108
-rw-r--r--  manifests/client/base.pp |  48
-rw-r--r--  manifests/host.pp        | 120
-rw-r--r--  manifests/server.pp      | 260
-rw-r--r--  manifests/volume.pp      | 180
6 files changed, 521 insertions, 521 deletions
diff --git a/manifests/brick.pp b/manifests/brick.pp
index b99ab0c..819b384 100644
--- a/manifests/brick.pp
+++ b/manifests/brick.pp
@@ -16,175 +16,175 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class gluster::brick::xfs {
- package { 'xfsprogs':
- ensure => present,
- }
+ package { 'xfsprogs':
+ ensure => present,
+ }
}
class gluster::brick::ext4 {
- package { 'e2fsprogs':
- ensure => present,
- }
+ package { 'e2fsprogs':
+ ensure => present,
+ }
}
define gluster::brick(
- $dev, # /dev/sdc, /dev/disk/by-id/scsi-36003048007e14f0014ca2743150a5471
- $fsuuid, # set a uuid for this fs
- $labeltype = '', # gpt
- $fstype = '', # xfs
- $xfs_inode64 = false,
- $xfs_nobarrier = false,
- $ro = false, # use for emergencies only- you want your fs rw
- $force = false, # if true, this will overwrite any xfs fs it sees, useful for rebuilding gluster and wiping data. NOTE: there are other safeties in place to stop this.
- $areyousure = false # do you allow puppet to do dangerous things ?
+ $dev, # /dev/sdc, /dev/disk/by-id/scsi-36003048007e14f0014ca2743150a5471
+ $fsuuid, # set a uuid for this fs
+ $labeltype = '', # gpt
+ $fstype = '', # xfs
+ $xfs_inode64 = false,
+ $xfs_nobarrier = false,
+ $ro = false, # use for emergencies only- you want your fs rw
+ $force = false, # if true, this will overwrite any xfs fs it sees, useful for rebuilding gluster and wiping data. NOTE: there are other safeties in place to stop this.
+ $areyousure = false # do you allow puppet to do dangerous things ?
) {
- # eg: annex1.example.com:/storage1a
- $split = split($name, ':') # do some $name parsing
- $host = $split[0] # host fqdn
- $mount = $split[1] # brick mount
-
- if ! ( "${host}:${mount}" == "${name}" ) {
- fail('The brick $name must match a $host-$mount pattern.')
- }
-
- Gluster::Host[$host] -> Gluster::Brick[$name] # brick requires host
-
- $ro_bool = $ro ? { # this has been added as a convenience
- true => 'ro',
- default => 'rw',
- }
-
- $valid_labeltype = $labeltype ? {
- # 'msdos' => 'msdos', # TODO
- default => 'gpt',
- }
-
- $valid_fstype = $fstype ? {
- 'ext4' => 'ext4', # TODO
- default => 'xfs',
- }
-
- $force_flag = $force ? {
- true => 'f',
- default => '',
- }
-
- # XFS mount options:
- # http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=blob;f=Documentation/filesystems/xfs.txt;hb=HEAD
- if ( $valid_fstype == 'xfs' ) {
- # exec requires
- include gluster::brick::xfs
- $exec_requires = [Package['xfsprogs']]
-
- # mkfs w/ uuid command
- # NOTE: the -f forces creation when it sees an old xfs part
- # TODO: xfs_admin doesn't have a --quiet flag. silence it...
- $exec_mkfs = "/sbin/mkfs.${valid_fstype} -q${force_flag} `/bin/readlink -e ${dev}`1 && /usr/sbin/xfs_admin -U '${fsuuid}' `/bin/readlink -e ${dev}`1"
-
- # By default, XFS allocates inodes to reflect their on-disk
- # location. However, because some 32-bit userspace applications
- # are not compatible with inode numbers greater than 232, XFS
- # will allocate all inodes in disk locations which result in
- # 32-bit inode numbers. This can lead to decreased performance
- # on very large filesystems (i.e. larger than 2 terabytes),
- # because inodes are skewed to the beginning of the block
- # device, while data is skewed towards the end.
- # To address this, use the inode64 mount option. This option
- # configures XFS to allocate inodes and data across the entire
- # file system, which can improve performance.
- $option01 = $xfs_inode64 ? {
- true => 'inode64',
- default => '',
- }
-
- # By default, XFS uses write barriers to ensure file system
- # integrity even when power is lost to a device with write
- # caches enabled. For devices without write caches, or with
- # battery-backed write caches, disable barriers using the
- # nobarrier option.
- $option02 = $xfs_nobarrier ? {
- true => 'nobarrier',
- default => '',
- }
-
- $options_list = ["${option01}", "${option02}"]
-
- } elsif ( $valid_fstype == 'ext4' ) {
- # exec requires
- include gluster::brick::ext4
- $exec_requires = [Package['e2fsprogs']]
-
- # mkfs w/ uuid command
- $exec_mkfs = "/sbin/mkfs.${valid_fstype} -U '${fsuuid}' `/bin/readlink -e ${dev}`1"
-
- # mount options
- $options_list = [] # TODO
- }
-
- # put all the options in an array, remove the empty ones, and join with
- # commas (this removes ',,' double comma uglyness)
- # adding 'defaults' here ensures no ',' (leading comma) in mount command
- $mount_options = inline_template('<%= (["defaults"]+options_list).delete_if {|x| x.empty? }.join(",") %>')
-
- $exec_noop = $areyousure ? {
- true => false,
- default => true,
- }
-
- # if we're on itself
- if ( "${fqdn}" == "${host}" ) {
-
- # first get the device ready
-
- # the scary parted command to run...
- $exec_mklabel = "/sbin/parted -s -m -a optimal ${dev} mklabel ${valid_labeltype}"
- $exec_mkpart = "/sbin/parted -s -m -a optimal ${dev} mkpart primary 0% 100%"
- $scary_exec = "${exec_mklabel} && ${exec_mkpart} && ${exec_mkfs}" # the command
- if $exec_noop {
- notify { 'noop mode:':
- message => "${scary_exec}",
- }
- }
-
- exec { "${scary_exec}":
- logoutput => on_failure,
- unless => [ # if one element is true, this *doesn't* run
- "/usr/bin/test -e `/bin/readlink -e ${dev}`1", # does partition 1 exist ?
- "/usr/bin/test -e /dev/disk/by-uuid/${fsuuid}",
- "/bin/false", # TODO: add more criteria
- ],
- require => $exec_requires,
- timeout => 3600, # set to something very long
- noop => $exec_noop,
- alias => "gluster-brick-make-${name}",
- }
-
- # make an empty directory for the mount point
- file { "${mount}":
- ensure => directory, # make sure this is a directory
- recurse => false, # don't recurse into directory
- purge => false, # don't purge unmanaged files
- force => false, # don't purge subdirs and links
- require => Exec["gluster-brick-make-${name}"],
- }
-
- mount { "${mount}":
- atboot => true,
- ensure => mounted,
- device => "UUID=${fsuuid}",
- fstype => "${valid_fstype}",
- # noatime,nodiratime to save gluster from silly updates
- options => "${mount_options},${ro_bool},noatime,nodiratime,noexec", # TODO: is nodev? nosuid? noexec? a good idea?
- dump => '0', # fs_freq: 0 to skip file system dumps
- # NOTE: technically this should be '2', to `fsck.xfs`
- # after the rootfs ('1'), but fsck.xfs actually does
- # 'nothing, successfully', so it's irrelevant, because
- # xfs uses xfs_check and friends only when suspect.
- pass => '2', # fs_passno: 0 to skip fsck on boot
- require => [
- File["${mount}"],
- ],
- }
- }
+ # eg: annex1.example.com:/storage1a
+ $split = split($name, ':') # do some $name parsing
+ $host = $split[0] # host fqdn
+ $mount = $split[1] # brick mount
+
+ if ! ( "${host}:${mount}" == "${name}" ) {
+ fail('The brick $name must match a $host-$mount pattern.')
+ }
+
+ Gluster::Host[$host] -> Gluster::Brick[$name] # brick requires host
+
+ $ro_bool = $ro ? { # this has been added as a convenience
+ true => 'ro',
+ default => 'rw',
+ }
+
+ $valid_labeltype = $labeltype ? {
+ #'msdos' => 'msdos', # TODO
+ default => 'gpt',
+ }
+
+ $valid_fstype = $fstype ? {
+ 'ext4' => 'ext4', # TODO
+ default => 'xfs',
+ }
+
+ $force_flag = $force ? {
+ true => 'f',
+ default => '',
+ }
+
+ # XFS mount options:
+ # http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=blob;f=Documentation/filesystems/xfs.txt;hb=HEAD
+ if ( $valid_fstype == 'xfs' ) {
+ # exec requires
+ include gluster::brick::xfs
+ $exec_requires = [Package['xfsprogs']]
+
+ # mkfs w/ uuid command
+ # NOTE: the -f forces creation when it sees an old xfs part
+ # TODO: xfs_admin doesn't have a --quiet flag. silence it...
+ $exec_mkfs = "/sbin/mkfs.${valid_fstype} -q${force_flag} `/bin/readlink -e ${dev}`1 && /usr/sbin/xfs_admin -U '${fsuuid}' `/bin/readlink -e ${dev}`1"
+
+ # By default, XFS allocates inodes to reflect their on-disk
+ # location. However, because some 32-bit userspace applications
+ # are not compatible with inode numbers greater than 232, XFS
+ # will allocate all inodes in disk locations which result in
+ # 32-bit inode numbers. This can lead to decreased performance
+ # on very large filesystems (i.e. larger than 2 terabytes),
+ # because inodes are skewed to the beginning of the block
+ # device, while data is skewed towards the end.
+ # To address this, use the inode64 mount option. This option
+ # configures XFS to allocate inodes and data across the entire
+ # file system, which can improve performance.
+ $option01 = $xfs_inode64 ? {
+ true => 'inode64',
+ default => '',
+ }
+
+ # By default, XFS uses write barriers to ensure file system
+ # integrity even when power is lost to a device with write
+ # caches enabled. For devices without write caches, or with
+ # battery-backed write caches, disable barriers using the
+ # nobarrier option.
+ $option02 = $xfs_nobarrier ? {
+ true => 'nobarrier',
+ default => '',
+ }
+
+ $options_list = ["${option01}", "${option02}"]
+
+ } elsif ( $valid_fstype == 'ext4' ) {
+ # exec requires
+ include gluster::brick::ext4
+ $exec_requires = [Package['e2fsprogs']]
+
+ # mkfs w/ uuid command
+ $exec_mkfs = "/sbin/mkfs.${valid_fstype} -U '${fsuuid}' `/bin/readlink -e ${dev}`1"
+
+ # mount options
+ $options_list = [] # TODO
+ }
+
+ # put all the options in an array, remove the empty ones, and join with
+ # commas (this removes ',,' double comma uglyness)
+ # adding 'defaults' here ensures no ',' (leading comma) in mount command
+ $mount_options = inline_template('<%= (["defaults"]+options_list).delete_if {|x| x.empty? }.join(",") %>')
+
+ $exec_noop = $areyousure ? {
+ true => false,
+ default => true,
+ }
+
+ # if we're on itself
+ if ( "${fqdn}" == "${host}" ) {
+
+ # first get the device ready
+
+ # the scary parted command to run...
+ $exec_mklabel = "/sbin/parted -s -m -a optimal ${dev} mklabel ${valid_labeltype}"
+ $exec_mkpart = "/sbin/parted -s -m -a optimal ${dev} mkpart primary 0% 100%"
+ $scary_exec = "${exec_mklabel} && ${exec_mkpart} && ${exec_mkfs}" # the command
+ if $exec_noop {
+ notify { 'noop mode:':
+ message => "${scary_exec}",
+ }
+ }
+
+ exec { "${scary_exec}":
+ logoutput => on_failure,
+ unless => [ # if one element is true, this *doesn't* run
+ "/usr/bin/test -e `/bin/readlink -e ${dev}`1", # does partition 1 exist ?
+ "/usr/bin/test -e /dev/disk/by-uuid/${fsuuid}",
+ "/bin/false", # TODO: add more criteria
+ ],
+ require => $exec_requires,
+ timeout => 3600, # set to something very long
+ noop => $exec_noop,
+ alias => "gluster-brick-make-${name}",
+ }
+
+ # make an empty directory for the mount point
+ file { "${mount}":
+ ensure => directory, # make sure this is a directory
+ recurse => false, # don't recurse into directory
+ purge => false, # don't purge unmanaged files
+ force => false, # don't purge subdirs and links
+ require => Exec["gluster-brick-make-${name}"],
+ }
+
+ mount { "${mount}":
+ atboot => true,
+ ensure => mounted,
+ device => "UUID=${fsuuid}",
+ fstype => "${valid_fstype}",
+ # noatime,nodiratime to save gluster from silly updates
+ options => "${mount_options},${ro_bool},noatime,nodiratime,noexec", # TODO: is nodev? nosuid? noexec? a good idea?
+ dump => '0', # fs_freq: 0 to skip file system dumps
+ # NOTE: technically this should be '2', to `fsck.xfs`
+ # after the rootfs ('1'), but fsck.xfs actually does
+ # 'nothing, successfully', so it's irrelevant, because
+ # xfs uses xfs_check and friends only when suspect.
+ pass => '2', # fs_passno: 0 to skip fsck on boot
+ require => [
+ File["${mount}"],
+ ],
+ }
+ }
}
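
For reference, a minimal sketch of how the gluster::brick define shown above might be declared. The brick name reuses the annex1.example.com:/storage1a example from the comments; the device path and fsuuid are placeholders, not values from this commit:

gluster::brick { 'annex1.example.com:/storage1a':
    dev         => '/dev/sdc',                                # whole block device to partition and format
    fsuuid      => '00000000-0000-0000-0000-000000000001',    # placeholder uuid; pick a unique one per brick
    fstype      => 'xfs',
    xfs_inode64 => true,
    areyousure  => true,    # without this, the destructive parted/mkfs exec runs in noop mode
}

Note that a matching gluster::host { 'annex1.example.com': ... } must also be declared, since the define adds a Gluster::Host -> Gluster::Brick dependency.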
diff --git a/manifests/client.pp b/manifests/client.pp
index d6d0e45..b4123e9 100644
--- a/manifests/client.pp
+++ b/manifests/client.pp
@@ -17,62 +17,62 @@
# XXX: try mounting with: glusterfs --volfile-server=<server-address> --volfile-id=<volume-name> <mount-point> --xlator-option='*dht*.assert-no-child-down=yes' # TODO: quotes or not?
define gluster::client(
- $server, # NOTE: use a vip as server hostname
- $rw = false, # mount read only (true) or rw (false)
-# $suid = false, # mount with suid (true) or nosuid (false) # TODO: will this work with gluster ?
- $mounted = true # useful if we want to pull in the group
- # defs, but not actually mount (testing)
+ $server, # NOTE: use a vip as server hostname
+ $rw = false, # mount read only (true) or rw (false)
+# $suid = false, # mount with suid (true) or nosuid (false) # TODO: will this work with gluster ?
+ $mounted = true # useful if we want to pull in the group
+ # defs, but not actually mount (testing)
) {
- #mount -t glusterfs brick1.example.com:/test /test
- include gluster::client::base
+ #mount -t glusterfs brick1.example.com:/test /test
+ include gluster::client::base
- $rw_bool = $rw ? {
- true => 'rw',
- default => 'ro',
- }
+ $rw_bool = $rw ? {
+ true => 'rw',
+ default => 'ro',
+ }
- # TODO: will this work with gluster ?
- #$suid_bool = $suid ? {
- # true => 'suid',
- # default => 'nosuid',
- #}
-
- $mounted_bool = $mounted ? {
- true => mounted,
- default => unmounted,
- }
-
- # make an empty directory for the mount point
- file { "${name}":
- ensure => directory, # make sure this is a directory
- recurse => false, # don't recurse into directory
- purge => false, # don't purge unmanaged files
- force => false, # don't purge subdirs and links
- }
-
- # Mount Options:
- # * backupvolfile-server=server-name
- # * fetch-attempts=N (where N is number of attempts)
- # * log-level=loglevel
- # * log-file=logfile
- # * direct-io-mode=[enable|disable]
- # * ro (for readonly mounts)
- # * acl (for enabling posix-ACLs)
- # * worm (making the mount WORM - Write Once, Read Many type)
- # * selinux (enable selinux on GlusterFS mount
- mount { "${name}":
- atboot => true,
- ensure => $mounted_bool,
- device => "${server}",
- fstype => 'glusterfs',
- options => "defaults,_netdev,${rw_bool}", # TODO: will $suid_bool work with gluster ?
- dump => '0', # fs_freq: 0 to skip file system dumps
- pass => '0', # fs_passno: 0 to skip fsck on boot
- require => [
- Package[['glusterfs', 'glusterfs-fuse']],
- File["${name}"], # the mountpoint
- Exec['gluster-fuse'], # ensure fuse is loaded
- ],
- }
+ # TODO: will this work with gluster ?
+ #$suid_bool = $suid ? {
+ # true => 'suid',
+ # default => 'nosuid',
+ #}
+
+ $mounted_bool = $mounted ? {
+ true => mounted,
+ default => unmounted,
+ }
+
+ # make an empty directory for the mount point
+ file { "${name}":
+ ensure => directory, # make sure this is a directory
+ recurse => false, # don't recurse into directory
+ purge => false, # don't purge unmanaged files
+ force => false, # don't purge subdirs and links
+ }
+
+ # Mount Options:
+ # * backupvolfile-server=server-name
+ # * fetch-attempts=N (where N is number of attempts)
+ # * log-level=loglevel
+ # * log-file=logfile
+ # * direct-io-mode=[enable|disable]
+ # * ro (for readonly mounts)
+ # * acl (for enabling posix-ACLs)
+ # * worm (making the mount WORM - Write Once, Read Many type)
+ # * selinux (enable selinux on GlusterFS mount
+ mount { "${name}":
+ atboot => true,
+ ensure => $mounted_bool,
+ device => "${server}",
+ fstype => 'glusterfs',
+ options => "defaults,_netdev,${rw_bool}", # TODO: will $suid_bool work with gluster ?
+ dump => '0', # fs_freq: 0 to skip file system dumps
+ pass => '0', # fs_passno: 0 to skip fsck on boot
+ require => [
+ Package[['glusterfs', 'glusterfs-fuse']],
+ File["${name}"], # the mountpoint
+ Exec['gluster-fuse'], # ensure fuse is loaded
+ ],
+ }
}
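
A hedged usage sketch for the gluster::client define above, reusing the brick1.example.com:/test mount example from the comment; the hostname and volume name are illustrative only:

gluster::client { '/test':
    server  => 'brick1.example.com:/test',    # hostname (a vip is recommended) plus the volume name
    rw      => true,
    mounted => true,
}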
diff --git a/manifests/client/base.pp b/manifests/client/base.pp
index 0aadb1b..1cbfde2 100644
--- a/manifests/client/base.pp
+++ b/manifests/client/base.pp
@@ -16,32 +16,32 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class gluster::client::base {
- # TODO: ensure these are from our 'gluster' repo
- package { ['glusterfs', 'glusterfs-fuse']:
- ensure => present,
- }
+ # TODO: ensure these are from our 'gluster' repo
+ package { ['glusterfs', 'glusterfs-fuse']:
+ ensure => present,
+ }
- # FIXME: choose a reliable and correct way to ensure fuse is loaded
- #[root@test2 ~]# dmesg | grep -i fuse
- #[root@test2 ~]# modprobe fuse
- #[root@test2 ~]# dmesg | grep -i fuse
- #fuse init (API version 7.13)
- #[root@test2 ~]#
+ # FIXME: choose a reliable and correct way to ensure fuse is loaded
+ #[root@test2 ~]# dmesg | grep -i fuse
+ #[root@test2 ~]# modprobe fuse
+ #[root@test2 ~]# dmesg | grep -i fuse
+ #fuse init (API version 7.13)
+ #[root@test2 ~]#
- # modprobe fuse if it's missing
- exec { '/sbin/modprobe fuse':
- logoutput => on_failure,
- onlyif => '/usr/bin/test -z "`/bin/dmesg | grep -i fuse`"',
- alias => 'gluster-fuse',
- }
+ # modprobe fuse if it's missing
+ exec { '/sbin/modprobe fuse':
+ logoutput => on_failure,
+ onlyif => '/usr/bin/test -z "`/bin/dmesg | grep -i fuse`"',
+ alias => 'gluster-fuse',
+ }
- # TODO: will this autoload the fuse module?
- #file { '/etc/modprobe.d/fuse.conf':
- # content => "fuse\n", # TODO: "install fuse /sbin/modprobe --ignore-install fuse ; /bin/true\n" ?
- # owner => root,
- # group => root,
- # mode => 644, # u=rw,go=r
- # ensure => present,
- #}
+ # TODO: will this autoload the fuse module?
+ #file { '/etc/modprobe.d/fuse.conf':
+ # content => "fuse\n", # TODO: "install fuse /sbin/modprobe --ignore-install fuse ; /bin/true\n" ?
+ # owner => root,
+ # group => root,
+ # mode => 644, # u=rw,go=r
+ # ensure => present,
+ #}
}
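
The FIXME above notes that grepping dmesg is not a reliable test for the fuse module. One possible alternative (not part of this commit, just a sketch) would be to key off lsmod instead:

exec { '/sbin/modprobe fuse':
    logoutput => on_failure,
    unless    => "/sbin/lsmod | /bin/grep -qw '^fuse'",    # skip if the module is already loaded
    alias     => 'gluster-fuse',
}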
diff --git a/manifests/host.pp b/manifests/host.pp
index e1b7876..00f6a18 100644
--- a/manifests/host.pp
+++ b/manifests/host.pp
@@ -16,66 +16,66 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
define gluster::host(
- $uuid
+ $uuid
) {
- # if we're on itself
- if ( "${fqdn}" == "${name}" ) {
- # set a unique uuid per host
- file { '/var/lib/glusterd/glusterd.info':
- content => template('gluster/glusterd.info.erb'),
- owner => root,
- group => root,
- mode => 644, # u=rw,go=r
- ensure => present,
- require => File['/var/lib/glusterd/'],
- }
- } else {
- # set uuid=
- exec { "/bin/echo 'uuid=${uuid}' >> '/var/lib/glusterd/peers/${uuid}'":
- logoutput => on_failure,
- unless => "/bin/grep -qF 'uuid=' '/var/lib/glusterd/peers/${uuid}'",
- notify => File['/var/lib/glusterd/peers/'], # propagate the notify up
- before => File["/var/lib/glusterd/peers/${uuid}"],
- alias => "gluster-host-uuid-${name}",
- # FIXME: doing this causes a dependency cycle! adding
- # the Package[] require doesn't. It would be most
- # correct to require the peers/ folder, but since it's
- # not working, requiring the Package[] will still give
- # us the same result. (Package creates peers/ folder).
- # NOTE: it's possible the cycle is a bug in puppet or a
- # bug in the dependencies somewhere else in this module.
- #require => File['/var/lib/glusterd/peers/'],
- require => Package['glusterfs-server'],
- }
-
- # set state=
- exec { "/bin/echo 'state=3' >> '/var/lib/glusterd/peers/${uuid}'":
- logoutput => on_failure,
- unless => "/bin/grep -qF 'state=' '/var/lib/glusterd/peers/${uuid}'",
- notify => File['/var/lib/glusterd/peers/'], # propagate the notify up
- before => File["/var/lib/glusterd/peers/${uuid}"],
- require => Exec["gluster-host-uuid-${name}"],
- alias => "gluster-host-state-${name}",
- }
-
- # set hostname1=...
- exec { "/bin/echo 'hostname1=${name}' >> '/var/lib/glusterd/peers/${uuid}'":
- logoutput => on_failure,
- unless => "/bin/grep -qF 'hostname1=' '/var/lib/glusterd/peers/${uuid}'",
- notify => File['/var/lib/glusterd/peers/'], # propagate the notify up
- before => File["/var/lib/glusterd/peers/${uuid}"],
- require => Exec["gluster-host-state-${name}"],
- }
-
- # tag the file so it doesn't get removed by purge
- file { "/var/lib/glusterd/peers/${uuid}":
- ensure => present,
- notify => File['/var/lib/glusterd/peers/'], # propagate the notify up
- owner => root,
- group => root,
- # NOTE: this mode was found by inspecting the process
- mode => 600, # u=rw,go=
- }
- }
+ # if we're on itself
+ if ( "${fqdn}" == "${name}" ) {
+ # set a unique uuid per host
+ file { '/var/lib/glusterd/glusterd.info':
+ content => template('gluster/glusterd.info.erb'),
+ owner => root,
+ group => root,
+ mode => 644, # u=rw,go=r
+ ensure => present,
+ require => File['/var/lib/glusterd/'],
+ }
+ } else {
+ # set uuid=
+ exec { "/bin/echo 'uuid=${uuid}' >> '/var/lib/glusterd/peers/${uuid}'":
+ logoutput => on_failure,
+ unless => "/bin/grep -qF 'uuid=' '/var/lib/glusterd/peers/${uuid}'",
+ notify => File['/var/lib/glusterd/peers/'], # propagate the notify up
+ before => File["/var/lib/glusterd/peers/${uuid}"],
+ alias => "gluster-host-uuid-${name}",
+ # FIXME: doing this causes a dependency cycle! adding
+ # the Package[] require doesn't. It would be most
+ # correct to require the peers/ folder, but since it's
+ # not working, requiring the Package[] will still give
+ # us the same result. (Package creates peers/ folder).
+ # NOTE: it's possible the cycle is a bug in puppet or a
+ # bug in the dependencies somewhere else in this module.
+ #require => File['/var/lib/glusterd/peers/'],
+ require => Package['glusterfs-server'],
+ }
+
+ # set state=
+ exec { "/bin/echo 'state=3' >> '/var/lib/glusterd/peers/${uuid}'":
+ logoutput => on_failure,
+ unless => "/bin/grep -qF 'state=' '/var/lib/glusterd/peers/${uuid}'",
+ notify => File['/var/lib/glusterd/peers/'], # propagate the notify up
+ before => File["/var/lib/glusterd/peers/${uuid}"],
+ require => Exec["gluster-host-uuid-${name}"],
+ alias => "gluster-host-state-${name}",
+ }
+
+ # set hostname1=...
+ exec { "/bin/echo 'hostname1=${name}' >> '/var/lib/glusterd/peers/${uuid}'":
+ logoutput => on_failure,
+ unless => "/bin/grep -qF 'hostname1=' '/var/lib/glusterd/peers/${uuid}'",
+ notify => File['/var/lib/glusterd/peers/'], # propagate the notify up
+ before => File["/var/lib/glusterd/peers/${uuid}"],
+ require => Exec["gluster-host-state-${name}"],
+ }
+
+ # tag the file so it doesn't get removed by purge
+ file { "/var/lib/glusterd/peers/${uuid}":
+ ensure => present,
+ notify => File['/var/lib/glusterd/peers/'], # propagate the notify up
+ owner => root,
+ group => root,
+ # NOTE: this mode was found by inspecting the process
+ mode => 600, # u=rw,go=
+ }
+ }
}
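
For context, gluster::host is meant to be declared once per peer, keyed by fqdn with a stable uuid; a rough sketch with a placeholder uuid:

gluster::host { 'annex1.example.com':
    uuid => '00000000-0000-0000-0000-0000000000a1',    # placeholder; each host needs its own persistent uuid
}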
diff --git a/manifests/server.pp b/manifests/server.pp
index c61c9ed..2a77917 100644
--- a/manifests/server.pp
+++ b/manifests/server.pp
@@ -16,136 +16,136 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class gluster::server(
- $hosts = [], # this should be a list of fqdn's # TODO: we could easily just setup gluster/shorewall by ip address instead of hostname!
- $ips = [], # this should be a list of ip's for each in hosts[] # TODO: i would have rather this happen with a local dns resolver, but I can't figure out how to make one! # NOTE: this can be overcome probably by using exported resources or dns names in shorewall (bad)
- $clients = [], # list of allowed client ip's
- #$vip = '', # vip of the cluster (optional, but recommended)
- $nfs = false, # TODO
- $shorewall = false,
- $zone = 'net', # TODO: allow a list of zones
- $allow = 'all'
+ $hosts = [], # this should be a list of fqdn's # TODO: we could easily just setup gluster/shorewall by ip address instead of hostname!
+ $ips = [], # this should be a list of ip's for each in hosts[] # TODO: i would have rather this happen with a local dns resolver, but I can't figure out how to make one! # NOTE: this can be overcome probably by using exported resources or dns names in shorewall (bad)
+ $clients = [], # list of allowed client ip's
+ #$vip = '', # vip of the cluster (optional, but recommended)
+ $nfs = false, # TODO
+ $shorewall = false,
+ $zone = 'net', # TODO: allow a list of zones
+ $allow = 'all'
) {
- # TODO: ensure these are from our 'gluster' repo
- package { 'glusterfs-server':
- ensure => present,
- }
-
- # NOTE: not that we necessarily manage anything in here at the moment...
- file { '/etc/glusterfs/':
- ensure => directory, # make sure this is a directory
- recurse => false, # TODO: eventually...
- purge => false, # TODO: eventually...
- force => false, # TODO: eventually...
- owner => root,
- group => root,
- mode => 644,
- #notify => Service['glusterd'], # TODO: ???
- require => Package['glusterfs-server'],
- }
-
- file { '/etc/glusterfs/glusterd.vol':
- content => template('gluster/glusterd.vol.erb'), # NOTE: currently no templating is being done
- owner => root,
- group => root,
- mode => 644, # u=rw,go=r
- ensure => present,
- require => File['/etc/glusterfs/'],
- }
-
- file { '/var/lib/glusterd/':
- ensure => directory, # make sure this is a directory
- recurse => false, # TODO: eventually...
- purge => false, # TODO: eventually...
- force => false, # TODO: eventually...
- owner => root,
- group => root,
- mode => 644,
- #notify => Service['glusterd'], # TODO: eventually...
- require => File['/etc/glusterfs/glusterd.vol'],
- }
-
- file { '/var/lib/glusterd/peers/':
- ensure => directory, # make sure this is a directory
- recurse => true, # recursively manage directory
- purge => true,
- force => true,
- owner => root,
- group => root,
- mode => 644,
- notify => Service['glusterd'],
- require => File['/var/lib/glusterd/'],
- }
-
- if $shorewall {
- if $allow == 'all' {
- $net = 'net'
- } else {
- $net = "net:${allow}"
- }
- # TODO: could the facter values help here ?
- #$other_host_ips = inline_template("<%= ips.delete_if {|x| x == '${ipaddress}' }.join(',') %>") # list of ips except myself
- $source_ips = inline_template("<%= (ips+clients).uniq.delete_if {|x| x.empty? }.join(',') %>")
- #$all_ips = inline_template("<%= (ips+[vip]+clients).uniq.delete_if {|x| x.empty? }.join(',') %>")
- #$list_of_hosts_except_myself = split(inline_template("<%= host_list.delete_if {|x| x == '${fqdn}' }.join(' ') %>"), ' ')
-
- ############################################################################
- # ACTION SOURCE DEST PROTO DEST SOURCE ORIGINAL
- # PORT PORT(S) DEST
-
- # TODO: I've never seen anything connect on 24008. Is it ever used?
- shorewall::rule { 'glusterd':
- rule => "
- ACCEPT ${zone}:${source_ips} $FW tcp 24007:24008
- ",
- comment => 'Allow incoming tcp:24007-24008 from each other glusterd or client.',
- before => Service['glusterd'],
- }
-
- # TODO: Use the correct port range
- shorewall::rule { 'glusterfsd-easyfw':
- rule => "
- ACCEPT ${zone}:${source_ips} $FW tcp 24009:25009 # XXX: Use the correct port range
- ",
- comment => 'Allow incoming tcp:24009-25009 from each other glusterfsd and clients.',
- before => Service['glusterd'],
- }
-
- # TODO: is this only used for nfs?
- shorewall::rule { 'gluster-111':
- rule => "
- ACCEPT ${zone}:${source_ips} $FW tcp 111
- ACCEPT ${zone}:${source_ips} $FW udp 111
- ",
- comment => 'Allow tcp/udp 111.',
- before => Service['glusterd'],
- }
-
- # XXX: WIP
- #$endport = inline_template('<%= 24009+hosts.count %>') # XXX: is there one brick per server or two ? what does 'brick' mean in the context of open ports?
- #$nfs_endport = inline_template('<%= 38465+hosts.count %>') # XXX: is there one brick per server or two ? what does 'brick' mean in the context of open ports?
- #shorewall::rule { 'gluster-24000':
- # rule => "
- # ACCEPT ${zone} $FW tcp 24007,24008
- # ACCEPT ${zone} $FW tcp 24009:${endport}
- # ",
- # comment => 'Allow 24000s for gluster',
- # before => Service['glusterd'],
- #}
-
- if $nfs { # FIXME: TODO
- shorewall::rule { 'gluster-nfs': rule => "
- ACCEPT $(net} $FW tcp 38465:${nfs_endport}
- ", comment => 'Allow nfs for gluster'}
- }
- }
-
- # start service only after the firewall is opened and hosts are defined
- service { 'glusterd':
- enable => true, # start on boot
- ensure => running, # ensure it stays running
- hasstatus => false, # FIXME: BUG: https://bugzilla.redhat.com/show_bug.cgi?id=836007
- hasrestart => true, # use restart, not start; stop
- require => Gluster::Host[$hosts],
- }
+ # TODO: ensure these are from our 'gluster' repo
+ package { 'glusterfs-server':
+ ensure => present,
+ }
+
+ # NOTE: not that we necessarily manage anything in here at the moment...
+ file { '/etc/glusterfs/':
+ ensure => directory, # make sure this is a directory
+ recurse => false, # TODO: eventually...
+ purge => false, # TODO: eventually...
+ force => false, # TODO: eventually...
+ owner => root,
+ group => root,
+ mode => 644,
+ #notify => Service['glusterd'], # TODO: ???
+ require => Package['glusterfs-server'],
+ }
+
+ file { '/etc/glusterfs/glusterd.vol':
+ content => template('gluster/glusterd.vol.erb'), # NOTE: currently no templating is being done
+ owner => root,
+ group => root,
+ mode => 644, # u=rw,go=r
+ ensure => present,
+ require => File['/etc/glusterfs/'],
+ }
+
+ file { '/var/lib/glusterd/':
+ ensure => directory, # make sure this is a directory
+ recurse => false, # TODO: eventually...
+ purge => false, # TODO: eventually...
+ force => false, # TODO: eventually...
+ owner => root,
+ group => root,
+ mode => 644,
+ #notify => Service['glusterd'], # TODO: eventually...
+ require => File['/etc/glusterfs/glusterd.vol'],
+ }
+
+ file { '/var/lib/glusterd/peers/':
+ ensure => directory, # make sure this is a directory
+ recurse => true, # recursively manage directory
+ purge => true,
+ force => true,
+ owner => root,
+ group => root,
+ mode => 644,
+ notify => Service['glusterd'],
+ require => File['/var/lib/glusterd/'],
+ }
+
+ if $shorewall {
+ if $allow == 'all' {
+ $net = 'net'
+ } else {
+ $net = "net:${allow}"
+ }
+ # TODO: could the facter values help here ?
+ #$other_host_ips = inline_template("<%= ips.delete_if {|x| x == '${ipaddress}' }.join(',') %>") # list of ips except myself
+ $source_ips = inline_template("<%= (ips+clients).uniq.delete_if {|x| x.empty? }.join(',') %>")
+ #$all_ips = inline_template("<%= (ips+[vip]+clients).uniq.delete_if {|x| x.empty? }.join(',') %>")
+ #$list_of_hosts_except_myself = split(inline_template("<%= host_list.delete_if {|x| x == '${fqdn}' }.join(' ') %>"), ' ')
+
+ ############################################################################
+ # ACTION SOURCE DEST PROTO DEST SOURCE ORIGINAL
+ # PORT PORT(S) DEST
+
+ # TODO: I've never seen anything connect on 24008. Is it ever used?
+ shorewall::rule { 'glusterd':
+ rule => "
+ ACCEPT ${zone}:${source_ips} $FW tcp 24007:24008
+ ",
+ comment => 'Allow incoming tcp:24007-24008 from each other glusterd or client.',
+ before => Service['glusterd'],
+ }
+
+ # TODO: Use the correct port range
+ shorewall::rule { 'glusterfsd-easyfw':
+ rule => "
+ ACCEPT ${zone}:${source_ips} $FW tcp 24009:25009 # XXX: Use the correct port range
+ ",
+ comment => 'Allow incoming tcp:24009-25009 from each other glusterfsd and clients.',
+ before => Service['glusterd'],
+ }
+
+ # TODO: is this only used for nfs?
+ shorewall::rule { 'gluster-111':
+ rule => "
+ ACCEPT ${zone}:${source_ips} $FW tcp 111
+ ACCEPT ${zone}:${source_ips} $FW udp 111
+ ",
+ comment => 'Allow tcp/udp 111.',
+ before => Service['glusterd'],
+ }
+
+ # XXX: WIP
+ #$endport = inline_template('<%= 24009+hosts.count %>') # XXX: is there one brick per server or two ? what does 'brick' mean in the context of open ports?
+ #$nfs_endport = inline_template('<%= 38465+hosts.count %>') # XXX: is there one brick per server or two ? what does 'brick' mean in the context of open ports?
+ #shorewall::rule { 'gluster-24000':
+ # rule => "
+ # ACCEPT ${zone} $FW tcp 24007,24008
+ # ACCEPT ${zone} $FW tcp 24009:${endport}
+ # ",
+ # comment => 'Allow 24000s for gluster',
+ # before => Service['glusterd'],
+ #}
+
+ if $nfs { # FIXME: TODO
+ shorewall::rule { 'gluster-nfs': rule => "
+ ACCEPT $(net} $FW tcp 38465:${nfs_endport}
+ ", comment => 'Allow nfs for gluster'}
+ }
+ }
+
+ # start service only after the firewall is opened and hosts are defined
+ service { 'glusterd':
+ enable => true, # start on boot
+ ensure => running, # ensure it stays running
+ hasstatus => false, # FIXME: BUG: https://bugzilla.redhat.com/show_bug.cgi?id=836007
+ hasrestart => true, # use restart, not start; stop
+ require => Gluster::Host[$hosts],
+ }
}
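
A rough usage sketch for the gluster::server class above; the hostnames and addresses are illustrative:

class { 'gluster::server':
    hosts     => ['annex1.example.com', 'annex2.example.com'],
    ips       => ['192.168.1.101', '192.168.1.102'],
    clients   => ['192.168.1.103'],    # client ip allowed through the firewall
    shorewall => true,
}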
diff --git a/manifests/volume.pp b/manifests/volume.pp
index 13c31fe..54716d4 100644
--- a/manifests/volume.pp
+++ b/manifests/volume.pp
@@ -16,96 +16,96 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
define gluster::volume(
- $bricks = [],
- $transport = 'tcp',
- $replica = 1,
- $stripe = 1,
- $start = undef # start volume ? true, false (stop it) or undef
+ $bricks = [],
+ $transport = 'tcp',
+ $replica = 1,
+ $stripe = 1,
+ $start = undef # start volume ? true, false (stop it) or undef
) {
- # TODO: if using rdma, maybe we should pull in the rdma package... ?
- $valid_transport = $transport ? {
- 'rdma' => 'rdma',
- 'tcp,rdma' => 'tcp,rdma',
- default => 'tcp',
- }
-
- $valid_replica = $replica ? {
- '1' => '',
- default => "replica ${replica} ",
- }
-
- $valid_stripe = $stripe ? {
- '1' => '',
- default => "stripe ${stripe} ",
- }
-
- #Gluster::Brick[$bricks] -> Gluster::Volume[$name] # volume requires bricks
-
- # get the bricks that match our fqdn, and append /$name to their path.
- # return only these paths, which can be used to build the volume dirs.
- $volume_dirs = split(inline_template("<%= bricks.find_all{|x| x.split(':')[0] == '${fqdn}' }.collect {|y| y.split(':')[1].chomp('/')+'/${name}' }.join(' ') %>"), ' ')
-
- file { $volume_dirs:
- ensure => directory, # make sure this is a directory
- recurse => false, # don't recurse into directory
- purge => false, # don't purge unmanaged files
- force => false, # don't purge subdirs and links
- before => Exec["gluster-volume-create-${name}"],
- require => Gluster::Brick[$bricks],
- }
-
- # add /${name} to the end of each: brick:/path entry
- $brick_spec = inline_template("<%= bricks.collect {|x| ''+x.chomp('/')+'/${name}' }.join(' ') %>")
-
- # EXAMPLE: gluster volume create test replica 2 transport tcp annex1.example.com:/storage1a/test annex2.example.com:/storage2a/test annex3.example.com:/storage3b/test annex4.example.com:/storage4b/test annex1.example.com:/storage1c/test annex2.example.com:/storage2c/test annex3.example.com:/storage3d/test annex4.example.com:/storage4d/test
- # NOTE: this should only happen on one host
- # FIXME: there might be a theoretical race condition if this runs at
- # exactly the same time time on more than one host.
- # FIXME: this should probably fail on at least N-1 nodes before it
- # succeeds because it probably shouldn't work until all the bricks are
- # available, which per node will happen right before this runs.
- exec { "/usr/sbin/gluster volume create ${name} ${valid_replica}${valid_stripe}transport ${valid_transport} ${brick_spec}":
- logoutput => on_failure,
- unless => "/usr/sbin/gluster volume list | /bin/grep -qxF '${name}' -", # add volume if it doesn't exist
- #before => TODO?,
- #require => Gluster::Brick[$bricks],
- alias => "gluster-volume-create-${name}",
- }
-
- # TODO:
- #if $shorewall {
- # shorewall::rule { 'gluster-TODO':
- # rule => "
- # ACCEPT ${zone} $FW tcp 24009:${endport}
- # ",
- # comment => 'TODO',
- # before => Service['glusterd'],
- # }
- #}
-
- if $start == true {
- # try to start volume if stopped
- exec { "/usr/sbin/gluster volume start ${name}":
- logoutput => on_failure,
- unless => "/usr/sbin/gluster volume status ${name}", # returns false if stopped
- require => Exec["gluster-volume-create-${name}"],
- alias => "gluster-volume-start-${name}",
- }
- } elsif ( $start == false ) {
- # try to stop volume if running
- # NOTE: this will still succeed even if a client is mounted
- # NOTE: This uses `yes` to workaround the: Stopping volume will
- # make its data inaccessible. Do you want to continue? (y/n)
- # TODO: http://community.gluster.org/q/how-can-i-make-automatic-scripts/
- # TODO: gluster --mode=script volume stop ...
- exec { "/usr/bin/yes | /usr/sbin/gluster volume stop ${name}":
- logoutput => on_failure,
- onlyif => "/usr/sbin/gluster volume status ${name}", # returns true if started
- require => Exec["gluster-volume-create-${name}"],
- alias => "gluster-volume-stop-${name}",
- }
- } else {
- # don't manage volume run state
- }
+ # TODO: if using rdma, maybe we should pull in the rdma package... ?
+ $valid_transport = $transport ? {
+ 'rdma' => 'rdma',
+ 'tcp,rdma' => 'tcp,rdma',
+ default => 'tcp',
+ }
+
+ $valid_replica = $replica ? {
+ '1' => '',
+ default => "replica ${replica} ",
+ }
+
+ $valid_stripe = $stripe ? {
+ '1' => '',
+ default => "stripe ${stripe} ",
+ }
+
+ #Gluster::Brick[$bricks] -> Gluster::Volume[$name] # volume requires bricks
+
+ # get the bricks that match our fqdn, and append /$name to their path.
+ # return only these paths, which can be used to build the volume dirs.
+ $volume_dirs = split(inline_template("<%= bricks.find_all{|x| x.split(':')[0] == '${fqdn}' }.collect {|y| y.split(':')[1].chomp('/')+'/${name}' }.join(' ') %>"), ' ')
+
+ file { $volume_dirs:
+ ensure => directory, # make sure this is a directory
+ recurse => false, # don't recurse into directory
+ purge => false, # don't purge unmanaged files
+ force => false, # don't purge subdirs and links
+ before => Exec["gluster-volume-create-${name}"],
+ require => Gluster::Brick[$bricks],
+ }
+
+ # add /${name} to the end of each: brick:/path entry
+ $brick_spec = inline_template("<%= bricks.collect {|x| ''+x.chomp('/')+'/${name}' }.join(' ') %>")
+
+ # EXAMPLE: gluster volume create test replica 2 transport tcp annex1.example.com:/storage1a/test annex2.example.com:/storage2a/test annex3.example.com:/storage3b/test annex4.example.com:/storage4b/test annex1.example.com:/storage1c/test annex2.example.com:/storage2c/test annex3.example.com:/storage3d/test annex4.example.com:/storage4d/test
+ # NOTE: this should only happen on one host
+ # FIXME: there might be a theoretical race condition if this runs at
+ # exactly the same time time on more than one host.
+ # FIXME: this should probably fail on at least N-1 nodes before it
+ # succeeds because it probably shouldn't work until all the bricks are
+ # available, which per node will happen right before this runs.
+ exec { "/usr/sbin/gluster volume create ${name} ${valid_replica}${valid_stripe}transport ${valid_transport} ${brick_spec}":
+ logoutput => on_failure,
+ unless => "/usr/sbin/gluster volume list | /bin/grep -qxF '${name}' -", # add volume if it doesn't exist
+ #before => TODO?,
+ #require => Gluster::Brick[$bricks],
+ alias => "gluster-volume-create-${name}",
+ }
+
+ # TODO:
+ #if $shorewall {
+ # shorewall::rule { 'gluster-TODO':
+ # rule => "
+ # ACCEPT ${zone} $FW tcp 24009:${endport}
+ # ",
+ # comment => 'TODO',
+ # before => Service['glusterd'],
+ # }
+ #}
+
+ if $start == true {
+ # try to start volume if stopped
+ exec { "/usr/sbin/gluster volume start ${name}":
+ logoutput => on_failure,
+ unless => "/usr/sbin/gluster volume status ${name}", # returns false if stopped
+ require => Exec["gluster-volume-create-${name}"],
+ alias => "gluster-volume-start-${name}",
+ }
+ } elsif ( $start == false ) {
+ # try to stop volume if running
+ # NOTE: this will still succeed even if a client is mounted
+ # NOTE: This uses `yes` to workaround the: Stopping volume will
+ # make its data inaccessible. Do you want to continue? (y/n)
+ # TODO: http://community.gluster.org/q/how-can-i-make-automatic-scripts/
+ # TODO: gluster --mode=script volume stop ...
+ exec { "/usr/bin/yes | /usr/sbin/gluster volume stop ${name}":
+ logoutput => on_failure,
+ onlyif => "/usr/sbin/gluster volume status ${name}", # returns true if started
+ require => Exec["gluster-volume-create-${name}"],
+ alias => "gluster-volume-stop-${name}",
+ }
+ } else {
+ # don't manage volume run state
+ }
}
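
Finally, a minimal gluster::volume sketch in the spirit of the CLI example quoted in the diff; the brick names reuse the illustrative annex hosts from the comments:

gluster::volume { 'test':
    bricks  => [
        'annex1.example.com:/storage1a',
        'annex2.example.com:/storage2a',
    ],
    replica => 2,
    start   => true,
}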