forked from cerner/cerner_splunk
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathVagrantfile
366 lines (325 loc) · 13.5 KB
/
Vagrantfile
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Refuse to run under Vagrant versions older than 1.4.1.
Vagrant.require_version '>= 1.4.1'

# Both plugins are required for provisioning below; bail out early when absent.
required_plugins = %w[vagrant-ohai vagrant-omnibus]
required_plugins.each do |plugin|
  fail "Missing #{plugin}. Please install it!" unless Vagrant.has_plugin?(plugin)
end
# Static topology for every VM in this test bed: a fixed private-network ip,
# a hostname, and host => guest forwarded ports (Splunk web UI 8000 and the
# management port 8089, where applicable; chef-zero listens on 4000).
@network = {
  chef: { ip: '33.33.33.33', hostname: 'chef', ports: { 4000 => 4000 } },
  c1_search: { ip: '33.33.33.10', hostname: 'search.splunk', ports: { 8001 => 8000, 8091 => 8089 } },
  c1_master: { ip: '33.33.33.11', hostname: 'master.splunk', ports: { 8002 => 8000, 8092 => 8089 } },
  c1_slave1: { ip: '33.33.33.12', hostname: 'slave01.splunk', ports: { 8003 => 8000, 8093 => 8089 } },
  c1_slave2: { ip: '33.33.33.13', hostname: 'slave02.splunk', ports: { 8004 => 8000, 8094 => 8089 } },
  c1_slave3: { ip: '33.33.33.14', hostname: 'slave03.splunk', ports: { 8005 => 8000, 8095 => 8089 } },
  s_standalone: { ip: '33.33.33.20', hostname: 'splunk2', ports: { 8006 => 8000, 8096 => 8089 } },
  s_license: { ip: '33.33.33.30', hostname: 'splunk-license', ports: { 8007 => 8000, 8097 => 8089 } },
  c2_boot1: { ip: '33.33.33.15', hostname: 'search01.splunk', ports: { 8008 => 8000, 8098 => 8089 } },
  c2_boot2: { ip: '33.33.33.16', hostname: 'search02.splunk', ports: { 8009 => 8000, 8099 => 8089 } },
  c2_captain: { ip: '33.33.33.17', hostname: 'search03.splunk', ports: { 8010 => 8000, 8100 => 8089 } },
  c2_newnode: { ip: '33.33.33.18', hostname: 'search04.splunk', ports: { 8011 => 8000, 8101 => 8089 } },
  c2_deployer: { ip: '33.33.33.28', hostname: 'deployer.splunk', ports: { 8012 => 8000, 8102 => 8089 } },
  s1_slave1: { ip: '33.33.33.31', hostname: 's1.slave01.splunk', ports: { 8013 => 8000, 8103 => 8089 } },
  s1_slave2: { ip: '33.33.33.32', hostname: 's1.slave02.splunk', ports: { 8014 => 8000, 8104 => 8089 } },
  s1_slave3: { ip: '33.33.33.34', hostname: 's1.slave03.splunk', ports: { 8015 => 8000, 8105 => 8089 } },
  s1_master: { ip: '33.33.33.35', hostname: 's1.master.splunk', ports: { 8016 => 8000, 8106 => 8089 } },
  s2_search: { ip: '33.33.33.36', hostname: 's2.search.splunk', ports: { 8017 => 8000, 8107 => 8089 } },
  s2_slave1: { ip: '33.33.33.37', hostname: 's2.slave01.splunk', ports: { 8018 => 8000, 8108 => 8089 } },
  s2_slave2: { ip: '33.33.33.38', hostname: 's2.slave02.splunk', ports: { 8019 => 8000, 8109 => 8089 } },
  f_default: { ip: '33.33.33.50', hostname: 'default.forward', ports: { 9090 => 8089 } },
  f_debian: { ip: '33.33.33.51', hostname: 'debian.forward', ports: { 9091 => 8089 } },
  f_heavy: { ip: '33.33.33.52', hostname: 'heavy.forward', ports: { 9092 => 8089 } },
  f_win2012r2: { ip: '33.33.33.53', hostname: 'windowsforward', ports: { 9093 => 8089 } }
}

# Address of the chef-zero server VM, used when building chef_server_url.
@chefip = @network[:chef][:ip]

# Network sanity checks: every ip, hostname, and forwarded host port must be
# unique across the entire topology, or port forwarding / DNS would collide.
ips = @network.map { |_, entry| entry[:ip] }
fail 'Non-unique ips' unless ips.uniq.size == ips.size
hostnames = @network.map { |_, entry| entry[:hostname] }
fail 'Non-unique hostnames' unless hostnames.uniq.size == hostnames.size
host_ports = @network.map { |_, entry| entry[:ports].keys }.flatten
fail 'Non-unique ports' unless host_ports.uniq.size == host_ports.size
# Pin the omnibus-installed Chef client to major version 12 on a machine.
#
# machine_config - a Vagrant machine configuration object (vagrant-omnibus
#                  exposes the `omnibus` config namespace on it).
def default_omnibus(machine_config)
  machine_config.omnibus.chef_version = '12'
end
# Wire a machine's hostname, private-network ip, and forwarded ports from the
# @network topology table.
#
# Each topology entry may be consumed exactly once: the entry is deleted from
# @network on use, so a second call with the same name (or an unknown name)
# fails loudly.
#
# config          - the Vagrant machine configuration object.
# name            - Symbol key into @network.
# splunk_password - when true (default), add a shell provisioner that prints
#                   the generated Splunk admin password after provisioning.
def network(config, name, splunk_password = true)
  net = @network.delete(name)
  # BUG FIX: this was `throw`, which without a matching `catch` block raises
  # UncaughtThrowError and loses the message. `fail` raises a RuntimeError
  # with the intended message, consistent with the sanity checks above.
  fail "Unknown or duplicate config #{name}" unless net
  config.vm.hostname = net[:hostname]
  config.vm.network :private_network, ip: net[:ip]
  net[:ports].each do |hostport, guestport|
    config.vm.network :forwarded_port, guest: guestport, host: hostport, auto_correct: true
  end
  config.berkshelf.enabled = false if Vagrant.has_plugin? 'vagrant-berkshelf'
  config.vm.provision :shell, inline: 'cat /etc/splunk/password; echo' if splunk_password
end
# Apply the chef_client provisioner settings shared by every machine.
#
# client      - the chef_client provisioner configuration object.
# name        - Symbol node name; also selects the per-node client key pem.
# environment - Chef environment to join (defaults to 'splunk_server').
def chef_defaults(client, name, environment = 'splunk_server')
  client.node_name = name.to_s
  client.environment = environment
  client.chef_server_url = "http://#{@chefip}:4000/"
  client.validation_key_path = 'vagrant_repo/fake-key.pem'
  client.client_key_path = "/vagrant/vagrant_repo/pems/#{name}.pem"
  client.encrypted_data_bag_secret_key_path = 'vagrant_repo/encrypted_data_bag_secret'
  # Use this role only when you have setup local package mirroring per the readme.
  # client.add_role 'splunk_mirrors_local'
  client.add_role 'splunk_monitors'
end
# Main machine definitions: one chef-zero server VM plus the Splunk fleet
# (indexer clusters c1/s1/s2, search-head cluster c2, standalone and license
# servers, and the forwarder test boxes).
Vagrant.configure('2') do |config|
# Default box for all machines unless a define overrides it.
config.vm.box = 'bento/centos-6.7'
# vagrant-ohai: report eth1 (the private network NIC) as the primary NIC.
config.ohai.primary_nic = 'eth1'
if Vagrant.has_plugin? 'vagrant-berkshelf'
# Explicitly disable the deprecated berkshelf plugin so it cannot interfere;
# berks is invoked manually inside the chef VM instead (see below).
config.berkshelf.enabled = false
# https://github.com/berkshelf/vagrant-berkshelf/issues/180
config.berkshelf.berksfile_path = 'this_is_a_deprecated_plugin_and_i_do_not_want_to_use_it'
end
# Baseline VirtualBox settings; most machines bump memory in their own define.
config.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--natdnsproxy1', 'off']
vb.customize ['modifyvm', :id, '--natdnshostresolver1', 'off']
vb.customize ['modifyvm', :id, '--memory', 128]
end
# The chef-zero server VM: installs ChefDK, runs chef-zero on port 4000, and
# uploads the vagrant_repo (plus cookbooks via berks) so the other machines
# can converge against it. Also serves packaged apps over HTTP on port 5000.
config.vm.define :chef do |cfg|
# NOTE(review): this mutates the OUTER `config`, not `cfg` — presumably
# intentional so vagrant-omnibus skips its install on this box, but verify
# it does not leak into other machines' configs.
config.omnibus.chef_version = nil
cfg.vm.provision :shell, inline: 'rpm -q chefdk || curl -L https://omnitruck.chef.io/install.sh | bash -s -- -P chefdk'
# KNIFE_ONLY: stash the node objects so `knife upload` skips them; otherwise
# restart chef-zero from scratch.
if ENV['KNIFE_ONLY']
cfg.vm.provision :shell, inline: 'cd /vagrant/vagrant_repo; mv nodes .nodes.bak', privileged: false
else
cfg.vm.provision :shell, inline: 'kill -9 $(ps f -fA | grep [c]hef-zero | awk \'{print $2}\'); echo "killed chef-zero"', privileged: false
end
# knife upload will take care of json files,
# We then need to run through any ruby files as well
# We use berks to upload everything here as well, as we could be on VPN :)
cfg.vm.provision :shell, inline: <<-'SCRIPT'.gsub(/^\s+/, ''), privileged: false
export PATH=$PATH:/opt/chefdk/bin:/opt/chefdk/embedded/bin
nohup chef-zero -H 0.0.0.0 -p 4000 2>&1 > /dev/null &
cd /vagrant/vagrant_repo
knife upload .
berks install -b ../Berksfile
berks upload -b ../Berksfile --no-freeze
SCRIPT
# Restore the stashed node objects after the upload.
if ENV['KNIFE_ONLY']
cfg.vm.provision :shell, inline: 'cd /vagrant/vagrant_repo; mv .nodes.bak nodes', privileged: false
end
# Package each app under vagrant_repo/apps into a tgz (stamping SNAPSHOT
# versions with a UTC timestamp) and serve them with a tiny Ruby httpd on
# port 5000 for the Splunk machines to download.
app_gen = <<-'SCRIPT'.gsub(/^\s+/, '')
mkdir -p "$HOME/app_service"
rm -rf "$HOME/app_service/*"
cd /vagrant/vagrant_repo/files
cp -R lookups "$HOME/app_service/"
cd /vagrant/vagrant_repo/apps
timestamp=`date -u +%Y%m%d%H%M%S`
for D in *; do
if [ -d "${D}" ]; then
if [ -f "${D}/default/app.conf" ]; then
cp "${D}/default/app.conf" "app.conf.bak"
sed -i "s/^\(version \?= \?.\+ SNAPSHOT\)$/\1_`echo $timestamp`/" "${D}/default/app.conf"
fi
tar czf "$HOME/app_service/$D.tgz" "$D"
[ -f "app.conf.bak" ] && mv "app.conf.bak" "${D}/default/app.conf"
fi
done
cd "$HOME"
netstat -nl | grep -q :5000 || nohup /opt/chefdk/embedded/bin/ruby -run -e httpd "$HOME/app_service" -p5000 2>&1 > /dev/null &
sleep 10
SCRIPT
# Unless REGEN_APPS is set, skip re-tarring apps that are already packaged.
app_gen.sub!('tar', '[ -f "$HOME/app_service/$D.tgz" ] || tar') unless ENV['REGEN_APPS']
cfg.vm.provision :shell, inline: app_gen, privileged: false
# No Splunk on this box, so skip the password echo provisioner.
network cfg, :chef, false
end
# Splunk license server.
config.vm.define :s_license do |cfg|
# NOTE(review): default_omnibus is passed the OUTER `config` here and in
# every define below (not `cfg`) — confirm this scoping is intended.
default_omnibus config
cfg.vm.provision :chef_client do |chef|
chef_defaults chef, :s_license, 'splunk_license'
chef.add_recipe 'cerner_splunk::license_server'
end
network cfg, :s_license
end
# Cluster c1: master + 3 slaves + search head, in the default environment.
config.vm.define :c1_master do |cfg|
default_omnibus config
cfg.vm.provision :chef_client do |chef|
chef_defaults chef, :c1_master
chef.add_recipe 'cerner_splunk::cluster_master'
end
network cfg, :c1_master
end
# Cruisin' Mos Espa In my Delorean ...
(1..3).each do |n|
symbol = "c1_slave#{n}".to_sym
config.vm.define symbol do |cfg|
default_omnibus config
cfg.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', 256]
end
cfg.vm.provision :chef_client do |chef|
chef_defaults chef, symbol
chef.add_recipe 'cerner_splunk::cluster_slave'
# Uncomment the line below to set predefined GUIDs on the cluster slaves (for playing with license pooling)
# chef.add_recipe 'cerner_splunk_test::configure_guids'
end
network cfg, symbol
end
end
# Multisite cluster: site1 (master + 3 slaves) and site2 (2 slaves + search).
config.vm.define :s1_master do |cfg|
default_omnibus config
cfg.vm.provision :chef_client do |chef|
chef_defaults chef, :s1_master, 'splunk_site1'
chef.add_recipe 'cerner_splunk::cluster_master'
end
network cfg, :s1_master
end
(1..3).each do |n|
symbol = "s1_slave#{n}".to_sym
config.vm.define symbol do |cfg|
default_omnibus config
cfg.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', 256]
end
cfg.vm.provision :chef_client do |chef|
chef_defaults chef, symbol, 'splunk_site1'
chef.add_recipe 'cerner_splunk::cluster_slave'
end
network cfg, symbol
end
end
(1..2).each do |n|
symbol = "s2_slave#{n}".to_sym
config.vm.define symbol do |cfg|
default_omnibus config
cfg.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', 256]
end
cfg.vm.provision :chef_client do |chef|
chef_defaults chef, symbol, 'splunk_site2'
chef.add_recipe 'cerner_splunk::cluster_slave'
end
network cfg, symbol
end
end
config.vm.define :s2_search do |cfg|
default_omnibus config
cfg.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', 256]
end
cfg.vm.provision :chef_client do |chef|
chef_defaults chef, :s2_search, 'splunk_site2'
chef.add_recipe 'cerner_splunk::search_head'
end
network cfg, :s2_search
end
config.vm.define :c1_search do |cfg|
default_omnibus config
cfg.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', 256]
end
cfg.vm.provision :chef_client do |chef|
chef_defaults chef, :c1_search
chef.add_recipe 'cerner_splunk::search_head'
end
network cfg, :c1_search
end
# Search-head cluster c2: two bootstrap members, a captain, a deployer, and a
# later-added member (c2_newnode).
(1..2).each do |n|
symbol = "c2_boot#{n}".to_sym
config.vm.define symbol do |cfg|
default_omnibus config
cfg.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', 256]
end
cfg.vm.provision :chef_client do |chef|
chef_defaults chef, symbol
chef.add_recipe 'cerner_splunk::shc_search_head'
# Mark these nodes as SHC bootstrap members via node attributes.
chef.json = {
'splunk' => {
'bootstrap_shc_member' => true
}
}
end
network cfg, symbol
end
end
config.vm.define :c2_captain do |cfg|
default_omnibus config
cfg.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', 256]
end
cfg.vm.provision :chef_client do |chef|
chef_defaults chef, :c2_captain
chef.add_recipe 'cerner_splunk::shc_captain'
end
network cfg, :c2_captain
end
config.vm.define :c2_deployer do |cfg|
default_omnibus config
cfg.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', 256]
end
cfg.vm.provision :chef_client do |chef|
chef_defaults chef, :c2_deployer
chef.add_recipe 'cerner_splunk::shc_deployer'
end
network cfg, :c2_deployer
end
config.vm.define :c2_newnode do |cfg|
default_omnibus config
cfg.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', 256]
end
cfg.vm.provision :chef_client do |chef|
chef_defaults chef, :c2_newnode
chef.add_recipe 'cerner_splunk::shc_search_head'
end
network cfg, :c2_newnode
end
# A single standalone Splunk server.
config.vm.define :s_standalone do |cfg|
default_omnibus config
cfg.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', 256]
end
cfg.vm.provision :chef_client do |chef|
chef_defaults chef, :s_standalone, 'splunk_standalone'
chef.add_recipe 'cerner_splunk::server'
end
network cfg, :s_standalone
end
# Forwarder test boxes: default (universal forwarder), debian, heavy, windows.
config.vm.define :f_default do |cfg|
default_omnibus config
cfg.vm.provision :chef_client do |chef|
chef_defaults chef, :f_default, 'splunk_standalone'
chef.add_recipe 'cerner_splunk'
chef.add_recipe 'cerner_splunk_test'
end
network cfg, :f_default
end
config.vm.define :f_debian do |cfg|
default_omnibus config
cfg.vm.box = 'bento/ubuntu-12.04'
cfg.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', 256]
end
cfg.vm.provision :chef_client do |chef|
chef_defaults chef, :f_debian, 'splunk_standalone'
chef.add_recipe 'cerner_splunk'
end
network cfg, :f_debian
end
config.vm.define :f_heavy do |cfg|
default_omnibus config
cfg.vm.provision :chef_client do |chef|
chef_defaults chef, :f_heavy, 'splunk_standalone'
chef.add_recipe 'cerner_splunk::heavy_forwarder'
end
network cfg, :f_heavy
end
config.vm.define :f_win2012r2 do |cfg|
cfg.vm.box = 'opentable/win-2012r2-standard-amd64-nocm'
# Without the line below here or in the box, vagrant-omnibus breaks on windows.
# Reference: https://github.com/chef/vagrant-omnibus/issues/90#issuecomment-51816397
cfg.vm.guest = :windows
default_omnibus config
# config below prevents the installation of latest Chef on the box.
# Reference: https://github.com/chef/vagrant-omnibus/issues/118
config.omnibus.install_url = 'https://packages.chef.io/files/stable/chef/12.18.31/windows/2012r2/chef-client-12.18.31-1-x64.msi'
cfg.vm.provider :virtualbox do |vb|
vb.customize ['modifyvm', :id, '--memory', 1024]
end
cfg.vm.provision :chef_client do |chef|
chef_defaults chef, :f_win2012r2, 'splunk_standalone'
chef.add_role 'splunk_monitors_windows'
chef.add_recipe 'cerner_splunk'
end
# No password echo: the inline `cat /etc/splunk/password` shell provisioner
# would not work on a Windows guest.
network cfg, :f_win2012r2, false
end
end