summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJames Shubin <james@shubin.ca>2012-08-23 15:22:51 -0400
committerJames Shubin <james@shubin.ca>2012-08-23 15:22:51 -0400
commitf4dd75604f81cb11d9e85ea41f881bd578d7a5ca (patch)
treeb1c7d43c274e77d5f204a78c00fc4a094a03c78d
parentf6966c004fef1e80af3c52ac034693077037fab2 (diff)
downloadpuppet-gluster-f4dd75604f81cb11d9e85ea41f881bd578d7a5ca.tar.gz
puppet-gluster-f4dd75604f81cb11d9e85ea41f881bd578d7a5ca.tar.xz
puppet-gluster-f4dd75604f81cb11d9e85ea41f881bd578d7a5ca.zip
Experimental gluster::wrapper support.
This patch adds experimental gluster::wrapper support. This should eventually be the best way to use puppet-gluster. Unfortunately this has been largely untested because it requires newer Ruby and Puppet features not yet available in CentOS 6.x. Please test and enjoy.
-rw-r--r--examples/wrapper-example.pp152
-rw-r--r--manifests/wrapper.pp131
2 files changed, 283 insertions, 0 deletions
diff --git a/examples/wrapper-example.pp b/examples/wrapper-example.pp
new file mode 100644
index 0000000..bae6614
--- /dev/null
+++ b/examples/wrapper-example.pp
@@ -0,0 +1,152 @@
+# gluster::wrapper example
+# This is the recommended way of using puppet-gluster.
+# NOTE: I have broken down the trees into pieces to make them easier to read.
+# You can do it exactly like this, use giant trees, or even generate the tree
+# using your favourite puppet tool.
+# NOTE: These tree objects are actually just nested ruby hashes.
+
+class { 'gluster::wrapper':
+ nodetree => $nodetree,
+ volumetree => $volumetree,
+ # NOTE: this is the virtual ip as managed by keepalived. At this time,
+ # you must set up this part on your own. Using the VIP is recommended.
+	# NOTE: you can set this to any of the node IPs to manage Puppet from
+ # a single master, or you can leave it blank to get the nodes to race.
+ vip => '172.16.1.80',
+}
+
+$brick1a = {
+ dev => '/dev/disk/by-id/scsi-36003048007e26c00173ad3b633a2ef67', # /dev/sda
+ labeltype => 'gpt',
+ fstype => 'xfs',
+ fsuuid => '1ae49642-7f34-4886-8d23-685d13867fb1',
+ xfs_inode64 => true,
+ xfs_nobarrier => true,
+ areyousure => true,
+}
+
+$brick1c = {
+ dev => '/dev/disk/by-id/scsi-36003048007e26c00173ad3b633a36700', # /dev/sdb
+ labeltype => 'gpt',
+ fstype => 'xfs',
+ fsuuid => '1c9ee010-9cd1-4d81-9a73-f0788d5b3e33',
+ xfs_inode64 => true,
+ xfs_nobarrier => true,
+ areyousure => true,
+}
+
+$brick2a = {
+ dev => '/dev/disk/by-id/scsi-36003048007df450014ca27ee19eaec55', # /dev/sdc
+ labeltype => 'gpt',
+ fstype => 'xfs',
+ fsuuid => '2affe5e3-c10d-4d42-a887-cf8993a6c7b5',
+ xfs_inode64 => true,
+ xfs_nobarrier => true,
+ areyousure => true,
+}
+
+$brick2c = {
+ dev => '/dev/disk/by-id/scsi-36003048007df450014ca280e1bda8e70', # /dev/sdd
+ labeltype => 'gpt',
+ fstype => 'xfs',
+ fsuuid => '2c434d6c-7800-4eec-9121-483bee2336f6',
+ xfs_inode64 => true,
+ xfs_nobarrier => true,
+ areyousure => true,
+}
+
+$brick3b = {
+ dev => '/dev/disk/by-id/scsi-36003048007e14f0014ca2722130bc87c', # /dev/sdc
+ labeltype => 'gpt',
+ fstype => 'xfs',
+ fsuuid => '3b79d76b-a519-493c-9f21-ca35524187ef',
+ xfs_inode64 => true,
+ xfs_nobarrier => true,
+ areyousure => true,
+}
+
+$brick3d = {
+ dev => '/dev/disk/by-id/scsi-36003048007e14f0014ca2743150a5471', # /dev/sdd
+ labeltype => 'gpt',
+ fstype => 'xfs',
+ fsuuid => '3d125f8a-c3c3-490d-a606-453401e9bc21',
+ xfs_inode64 => true,
+ xfs_nobarrier => true,
+ areyousure => true,
+}
+
+$brick4b = {
+ dev => '/dev/disk/by-id/scsi-36003048007e36700174029270d81faa1', # /dev/sdc
+ labeltype => 'gpt',
+ fstype => 'xfs',
+ fsuuid => '4bf21ae6-06a0-44ad-ab48-ea23417e4e44',
+ xfs_inode64 => true,
+ xfs_nobarrier => true,
+ areyousure => true,
+}
+
+$brick4d = {
+ dev => '/dev/disk/by-id/scsi-36003048007e36700174029270d82724d', # /dev/sdd
+ labeltype => 'gpt',
+ fstype => 'xfs',
+ fsuuid => '4dfa7e50-2315-44d3-909b-8e9423def6e5',
+ xfs_inode64 => true,
+ xfs_nobarrier => true,
+ areyousure => true,
+}
+
+$nodetree = {
+ 'annex1.example.com' => {
+ 'ip' => '172.16.1.81',
+ 'uuid' => '1f660ca2-2c78-4aa0-8f4d-21608218c69c',
+ 'bricks' => {
+ '/mnt/storage1a' => $brick1a,
+ '/mnt/storage1c' => $brick1c,
+ },
+ },
+ 'annex2.example.com' => {
+ 'ip' => '172.16.1.82',
+ 'uuid' => '2fbe6e2f-f6bc-4c2d-a301-62fa90c459f8',
+ 'bricks' => {
+ '/mnt/storage2a' => $brick2a,
+ '/mnt/storage2c' => $brick2c,
+ },
+ },
+ 'annex3.example.com' => {
+ 'ip' => '172.16.1.83',
+ 'uuid' => '3f5a86fd-6956-46ca-bb80-65278dc5b945',
+ 'bricks' => {
+ '/mnt/storage3b' => $brick3b,
+ '/mnt/storage3d' => $brick3d,
+ },
+ },
+ 'annex4.example.com' => {
+ 'ip' => '172.16.1.84',
+ 'uuid' => '4f8e3157-e1e3-4f13-b9a8-51e933d53915',
+ 'bricks' => {
+ '/mnt/storage4b' => $brick4b,
+ '/mnt/storage4d' => $brick4d,
+ },
+ }
+}
+
+$volumetree = {
+ 'examplevol1' => {
+ 'transport' => 'tcp',
+ 'replica' => 2,
+ 'clients' => ['172.16.1.143'], # for the auth.allow and $FW
+ # NOTE: if you *don't* specify a bricks argument, the full list
+ # of bricks above will be used for your new volume. This is the
+ # usual thing that you want to do. Alternatively you can choose
+ # your own bricks[] if you're doing something special or weird!
+ #'bricks' => [],
+ },
+
+ 'examplevol2' => {
+ 'transport' => 'tcp',
+ 'replica' => 2,
+ 'clients' => ['172.16.1.143', '172.16.1.253'],
+ #'bricks' => [],
+ }
+}
+
diff --git a/manifests/wrapper.pp b/manifests/wrapper.pp
new file mode 100644
index 0000000..2bea8af
--- /dev/null
+++ b/manifests/wrapper.pp
@@ -0,0 +1,131 @@
+# Simple? gluster module by James
+# Copyright (C) 2010-2012 James Shubin
+# Written by James Shubin <james@shubin.ca>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+class gluster::wrapper(
+ $nodetree,
+ $volumetree,
+ $vip = '', # vip of the cluster (optional but recommended)
+
+ $nfs = false, # TODO in server.pp
+ $shorewall = false,
+ $zone = 'net', # TODO: allow a list of zones
+ $allow = 'all'
+) {
+ #
+ # build gluster::server
+ #
+
+ $hosts = split(inline_template("<%= nodetree.keys.join(',') %>"), ',')
+ $ips = split(inline_template('<%= nodetree.map{ |host,value| \'#{value["ip"]}\' }.join(",") %>'), ',')
+
+ class { 'gluster::server':
+ hosts => $hosts,
+ ips => $ips,
+#XXX: TODO? clients => XXX,
+ nfs => $nfs,
+ shorewall => $shorewall,
+ zone => $zone,
+ allow => $allow,
+ }
+
+ #
+ # build gluster::host
+ #
+
+ # EXAMPLE:
+ #gluster::host { 'annex1.example.com':
+ # # use uuidgen to make these
+ # uuid => '1f660ca2-2c78-4aa0-8f4d-21608218c69c',
+ #}
+
+	# filter the nodetree so that only host elements with UUIDs are left
+ # XXX: each_with_object doesn't exist in rhel6 ruby, so use inject
+ #$hosttree = inline_template('<%= nodetree.each_with_object({}) {|(x,y), h| h[x] = y.select{ |key,value| ["uuid"].include?(key) } }.to_yaml %>')
+ $hosttree = inline_template('<%= nodetree.inject({}) {|h, (x,y)| h[x] = y.select{ |key,value| ["uuid"].include?(key) }; h }.to_yaml %>')
+ # newhash = oldhash.inject({}) { |h,(k,v)| h[k] = some_operation(v); h } # XXX: does this form work ?
+ create_resources('gluster::host', loadyaml($hosttree))
+
+ #
+ # build gluster::brick
+ #
+
+ # EXAMPLE:
+ #gluster::brick { 'annex1.example.com:/mnt/storage1a':
+ # dev => '/dev/disk/by-id/scsi-36003048007e26c00173ad3b633a2ef67', # /dev/sda
+ # labeltype => 'gpt',
+ # fstype => 'xfs',
+ # fsuuid => '1ae49642-7f34-4886-8d23-685d13867fb1',
+ # xfs_inode64 => true,
+ # xfs_nobarrier => true,
+ # areyousure => true,
+ #}
+
+ # filter the nodetree and build out each brick element from the hosts
+ $bricktree = inline_template('<%= r = {}; nodetree.each {|x,y| y["bricks"].each {|k,v| r[x+":"+k] = v} }; r.to_yaml %>')
+ # this version removes any invalid keys from the brick specifications
+ #$bricktree = inline_template('<%= r = {}; nodetree.each {|x,y| y["bricks"].each {|k,v| r[x+":"+k] = v.select{ |key,value| ["dev", "labeltype", "fstype", "fsuuid", "..."].include?(key) } } }; r.to_yaml %>')
+ create_resources('gluster::brick', loadyaml($bricktree))
+
+ #
+ # build gluster::volume
+ #
+
+ # EXAMPLE:
+ #gluster::volume { 'examplevol':
+ # replica => 2,
+ # bricks => $brick_list,
+ # start => undef, # i'll start this myself
+ #}
+
+ # to be used as default gluster::volume brick list
+ $bricklist = split(inline_template("<%= bricktree.keys.join(',') %>"), ',')
+
+ # semi ok method:
+ #$volumetree_defaults_all = {
+ # "bricks" => $bricklist,
+ # "transport" => 'tcp',
+ # "replica" => 1,
+ # "stripe" => 1,
+ # "vip" => $vip,
+ # "start" => undef, # ?
+ #}
+ #$semi_ok = inline_template('<%= volumetree.each_with_object({}) {|(x,y), h| h[x] = volumetree_defaults_all.each_with_object({}) {|(xx,yy), hh| hh[xx] = y.fetch(xx, volumetree_defaults_all[xx])} }.to_yaml %>')
+
+ # good method
+ $volumetree_defaults = {
+ 'bricks' => $bricklist,
+ 'vip' => $vip,
+ }
+ # loop through volumetree... if special defaults are missing, then add!
+ $volumetree_updated = inline_template('<%= volumetree.each_with_object({}) {|(x,y), h| h[x] = y; volumetree_defaults.each {|k,v| h[k] = h.fetch(k, v)} }.to_yaml %>')
+ create_resources('gluster::volume', loadyaml($volumetree_updated))
+
+ #
+ # build gluster::volume::property (auth.allow)
+ #
+
+ # EXAMPLE:
+ #gluster::volume::property { 'examplevol#auth.allow':
+ # value => '192.0.2.13,198.51.100.42,203.0.113.69',
+ #}
+
+ #$simplewrongname = inline_template('<%= volumetree.each_with_object({}) {|(x,y), h| h[x+"#auth.allow"] = y.select{ |key,value| ["clients"].include?(key) } }.to_yaml %>')
+ $propertytree = inline_template('<%= volumetree.each_with_object({}) {|(x,y), h| h[x+"#auth.allow"] = { "value" => y.fetch("clients", []) } }.to_yaml %>')
+ create_resources('gluster::volume::property', loadyaml($propertytree))
+}
+
+# vim: ts=8