author    James Shubin <james@shubin.ca>  2014-04-10 13:32:08 -0400
committer James Shubin <james@shubin.ca>  2014-05-05 18:34:34 -0400
commit    a80a7a64835d450c168c4cede18ed156095a4fd7 (patch)
tree      e9712359f950a89c0519e0073f821c161db529c0 /vagrant
parent    a3f50eb8b5886b09651ed80b613a690aafea5042 (diff)
Support adding multiple additional disks to gluster machines.
You can easily configure this by setting the disks integer in the puppet-gluster.yaml file or with the --gluster-disks= parameter. If this is set to the default of zero (0), then no disks are added. This patch depends on: https://github.com/pradels/vagrant-libvirt/issues/162
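For example, either of the following would attach two extra disks per host (the value 2 is illustrative; the leading colon on the YAML key is needed because the settings file round-trips a Ruby hash with symbol keys, as the Vagrantfile's save/load code below shows):

    # puppet-gluster.yaml
    :disks: 2

    # or, equivalently, on the command line:
    vagrant up --gluster-disks=2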
Diffstat (limited to 'vagrant')
-rw-r--r--  vagrant/gluster/Vagrantfile               23
-rw-r--r--  vagrant/gluster/puppet/manifests/site.pp  35
2 files changed, 58 insertions, 0 deletions
diff --git a/vagrant/gluster/Vagrantfile b/vagrant/gluster/Vagrantfile
index 1ed3a24..cdb71d0 100644
--- a/vagrant/gluster/Vagrantfile
+++ b/vagrant/gluster/Vagrantfile
@@ -65,6 +65,7 @@ netmask2 = IPAddr.new('255.255.255.255').mask(cidr2).to_s
# mutable by ARGV and settings file
count = 4 # default number of gluster hosts to build
+disks = 0 # default number of disks to attach (after the host os)
bricks = 0 # default number of bricks to build (0 defaults to 1)
version = '' # default gluster version (empty string means latest!)
firewall = false # default firewall enabled (FIXME: default to true when keepalived bug is fixed)
@@ -85,6 +86,7 @@ f = File.join(projectdir, 'puppet-gluster.yaml')
if File.exist?(f)
settings = YAML::load_file f
count = settings[:count]
+ disks = settings[:disks]
bricks = settings[:bricks]
version = settings[:version]
firewall = settings[:firewall]
@@ -107,6 +109,12 @@ while skip < ARGV.length
count = v.to_i # set gluster host count
+ elsif ARGV[skip].start_with?(arg='--gluster-disks=')
+ v = ARGV.delete_at(skip).dup
+ v.slice! arg
+
+ disks = v.to_i # set gluster disk count
+
elsif ARGV[skip].start_with?(arg='--gluster-bricks=')
v = ARGV.delete_at(skip).dup
v.slice! arg
@@ -179,6 +187,7 @@ end
# save settings (ARGV overrides)
settings = {
:count => count,
+ :disks => disks,
:bricks => bricks,
:version => version,
:firewall => firewall,
@@ -449,6 +458,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
puppet.options = '--test' # see the output
puppet.facter = {
'vagrant' => '1',
+ 'vagrant_gluster_disks' => disks.to_s,
'vagrant_gluster_bricks' => bricks.to_s,
'vagrant_gluster_replica' => replica.to_s,
'vagrant_gluster_layout' => layout,
@@ -459,6 +469,19 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
'vagrant_gluster_version' => version,
}
end
+
+ vm.vm.provider :libvirt do |libvirt|
+ # add additional disks to the os
+ (1..disks).each do |j| # if disks is 0, this passes :)
+ #print "disk: #{j}"
+ libvirt.storage :file,
+ #:path => '', # auto!
+ #:device => 'vdb', # auto!
+ #:size => '10G', # auto!
+ :type => 'qcow2'
+
+ end
+ end
end
end
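For reference, with disks set to 2 the new provider block amounts to two plugin calls like these (a sketch; the guest device names are hedged guesses at what vagrant-libvirt auto-assigns after the /dev/vda OS disk, which is the behaviour the linked issue #162 asks for):

    libvirt.storage :file, :type => 'qcow2' # first extra disk, e.g. /dev/vdb
    libvirt.storage :file, :type => 'qcow2' # second extra disk, e.g. /dev/vdc

With disks left at the default of 0, the range (1..0) is empty, so the loop body never runs and no storage is attached.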
diff --git a/vagrant/gluster/puppet/manifests/site.pp b/vagrant/gluster/puppet/manifests/site.pp
index 55183f5..426099d 100644
--- a/vagrant/gluster/puppet/manifests/site.pp
+++ b/vagrant/gluster/puppet/manifests/site.pp
@@ -49,6 +49,30 @@ node /^annex\d+$/ inherits default { # annex{1,2,..N}
start => false, # useful for testing manually...
}
+ # build a list of hashes with ordered vdX devices
+ # (s='';q=i;(q, r = (q - 1).divmod(26)) && s.prepend(('a'..'z').to_a[r]) until q.zero?;'/dev/vd'+s)
+ $skip = 1 # skip over 1 disk (eg: /dev/vda from the host)
+ $disks = "${::vagrant_gluster_disks}"
+ $disks_yaml = inline_template("<%= (1+@skip.to_i..@disks.to_i+@skip.to_i).collect { |i| { 'dev' => (s='';q=i;(q, r = (q - 1).divmod(26)) && s.insert(0, ('a'..'z').to_a[r]) until q.zero?;'/dev/vd'+s) } }.to_yaml %>")
+ #$brick_params_defaults = [ # this is one possible example data set
+ # {'dev' => '/dev/vdb'},
+ # {'dev' => '/dev/vdc'},
+ # {'dev' => '/dev/vdd'},
+ # {'dev' => '/dev/vde'},
+ #]
+ $brick_params_defaults = parseyaml($disks_yaml)
+ notice(inline_template('disks: <%= YAML::load(@disks_yaml).inspect %>'))
+ #notify { 'disks':
+ # message => inline_template('disks: <%= YAML::load(@disks_yaml).inspect %>'),
+ #}
+
+ $brick_param_defaults = {
+ # TODO: set these from vagrant variables...
+ 'lvm' => false,
+ 'xfs_inode64' => true,
+ 'force' => true,
+ }
+
# this is a simple way to setup gluster
class { '::gluster::simple':
volume => 'puppet',
@@ -63,6 +87,17 @@ node /^annex\d+$/ inherits default { # annex{1,2,..N}
'false' => false,
default => true,
},
+ # NOTE: this is brick_params_defaults NOT param! param is below
+ brick_params_defaults => "${::vagrant_gluster_disks}" ? {
+ '0' => undef,
+ # NOTE: _each_ host will have N bricks with these devs!
+ default => $brick_params_defaults,
+ },
+ brick_param_defaults => "${::vagrant_gluster_disks}" ? {
+ '0' => undef,
+ # NOTE: _each_ brick will use these...
+ default => $brick_param_defaults,
+ },
}
}
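The inline_template above packs a base-26 conversion into one line: index 1 maps to 'a', 26 to 'z', 27 to 'aa', exactly like spreadsheet column names. Unrolled into plain Ruby (a sketch; the helper name and the example values for skip and disks are illustrative), the same logic reads:

    # map a 1-based index to a virtio device name:
    # 1 => /dev/vda, 2 => /dev/vdb, ..., 26 => /dev/vdz, 27 => /dev/vdaa
    def vd_device(i)
        s = ''
        q = i
        until q.zero?
            q, r = (q - 1).divmod(26)
            s.prepend(('a'..'z').to_a[r])
        end
        '/dev/vd' + s
    end

    skip = 1    # skip /dev/vda, the host OS disk
    disks = 4   # illustrative value of the vagrant_gluster_disks fact
    (1 + skip..disks + skip).collect { |i| { 'dev' => vd_device(i) } }
    # => [{'dev'=>'/dev/vdb'}, {'dev'=>'/dev/vdc'},
    #     {'dev'=>'/dev/vdd'}, {'dev'=>'/dev/vde'}]

which matches the commented example data set in the hunk above.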