fhcap-cli 0.4.5 → 0.4.6

Files changed (53)
  1. checksums.yaml +4 -4
  2. data/.gitignore +1 -0
  3. data/CHANGELOG.md +6 -0
  4. data/README.md +1 -1
  5. data/fhcap-cli.gemspec +2 -1
  6. data/lib/cookbooks/provision/libraries/provision.rb +26 -11
  7. data/lib/cookbooks/provision/recipes/aws_cluster_create.rb +7 -0
  8. data/lib/cookbooks/provision/recipes/aws_cluster_create_elb.rb +6 -1
  9. data/lib/cookbooks/provision/recipes/aws_cluster_destroy.rb +17 -16
  10. data/lib/cookbooks/provision/recipes/cluster_bootstrap.rb +4 -0
  11. data/lib/cookbooks/provision/recipes/cluster_bootstrap_instances.rb +37 -0
  12. data/lib/cookbooks/provision/recipes/cluster_create_instances.rb +11 -9
  13. data/lib/cookbooks/provision/recipes/cluster_provision_instances.rb +12 -11
  14. data/lib/cookbooks/provision/recipes/galera_reset_cluster.rb +40 -36
  15. data/lib/cookbooks/provision/recipes/mongo_reset_cluster.rb +10 -9
  16. data/lib/cookbooks/provision/recipes/rabbitmq_reset_cluster.rb +3 -1
  17. data/lib/extensions/chef/solr/query/regexpable_query.rb +17 -0
  18. data/lib/fhcap/chef-dk/chef_runner.rb +4 -1
  19. data/lib/fhcap/cluster.rb +66 -14
  20. data/lib/fhcap/cookbook.rb +1 -0
  21. data/lib/fhcap/kitchen.rb +27 -4
  22. data/lib/fhcap/provider.rb +1 -1
  23. data/lib/fhcap/repos_helper.rb +1 -1
  24. data/lib/fhcap/tasks/chef/chef_task_base.rb +3 -1
  25. data/lib/fhcap/tasks/chef/chef_zero_server.rb +1 -0
  26. data/lib/fhcap/tasks/chef/cookbook/update_version.rb +1 -2
  27. data/lib/fhcap/tasks/chef/provisioning/chef_provisioning_task_base.rb +19 -4
  28. data/lib/fhcap/tasks/chef/provisioning/create.rb +6 -3
  29. data/lib/fhcap/tasks/cluster/cluster_task_base.rb +1 -0
  30. data/lib/fhcap/tasks/cluster/create.rb +4 -201
  31. data/lib/fhcap/tasks/cluster/create_dns_records.rb +108 -0
  32. data/lib/fhcap/tasks/cluster/create_environment.rb +1 -0
  33. data/lib/fhcap/tasks/cluster/generate.rb +135 -0
  34. data/lib/fhcap/tasks/provider/add.rb +11 -0
  35. data/lib/fhcap/version.rb +1 -1
  36. data/spec/fhcap/tasks/cluster/create_dns_records_spec.rb +64 -0
  37. data/spec/fhcap/tasks/cluster/create_spec.rb +34 -13
  38. data/spec/fhcap/tasks/cluster/generate_spec.rb +44 -0
  39. data/templates/chef/environment_core.json.erb +8 -3
  40. data/templates/chef/environment_farm.json.erb +5 -0
  41. data/templates/chef/environment_mbaas.json.erb +11 -2
  42. data/templates/chef/environment_single.json.erb +17 -8
  43. data/templates/cluster/aws/common.json.erb +1 -1
  44. data/templates/cluster/aws/core-3node.json.erb +14 -2
  45. data/templates/cluster/aws/core-small-9node.json.erb +13 -1
  46. data/templates/cluster/aws/mbaas-3node.json.erb +14 -2
  47. data/templates/cluster/aws/single-blank.json.erb +6 -0
  48. data/templates/cluster/aws/single.json.erb +13 -1
  49. data/templates/cluster/openstack/core-3node.json.erb +1 -1
  50. data/templates/kitchen/kitchen.docker.yml.erb +0 -3
  51. data/templates/kitchen/kitchen.generate.yml.erb +0 -1
  52. data/templates/kitchen/kitchen.vagrant.yml.erb +37 -0
  53. metadata +29 -5
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: 2404936a843382f70df3f7f0c834b3ed5891ab4f
-  data.tar.gz: b3edd1294c2b6ea919a7d81642bf464adf29808a
+  metadata.gz: 78bf8ef12f1a57a0cea0fb7d43f449ebaa837d3e
+  data.tar.gz: 20bd845bfbed5c9adf26e8d4b5785737c5ed7a73
 SHA512:
-  metadata.gz: 78843714ea979e07b5dffc9a4dec0f75e3fc626480bab516f27879e2ce106ca6cfeef9867fa3862ddd9a43dd1546055967a9d1e01be585f4d1d49fe1e01cc2d8
-  data.tar.gz: 1d8872f549528b47f72ec8f465763b94beb8d907c7d9d43a6ab4d38606f306aa3a84db13284c729891546ec83f225f35405a0aa0cb255c504d6bdbac621cf6f9
+  metadata.gz: 114792fc44c7c251413a224373f8ad1d196742eabac60c3dd65fb221194f2afa2be27c0e39d70dce77a9c99c631212aacd3001d2c79947bc37908acbcd5b5ea5
+  data.tar.gz: 57d6da1fe18c0233a7cfeb02643c2c7a045b3fd3308f972fd013aaae72ee356d30f934c4a3b9e7aeedb1984c36019c53c6d6e99ea6a3c162a0ba2a1084af4f67
data/.gitignore CHANGED
@@ -8,3 +8,4 @@
 /spec/reports/
 /tmp/
 /lib/nodes/
+.idea
data/CHANGELOG.md CHANGED
@@ -1,4 +1,10 @@
 
+## 0.4.6
+
+* [RHMAP-3031] - Add aws-tags (Name, Organisation, Environment and Role) to all instances
+* [RHMAP-3815] - Add dry-run option to all chef_provisions tasks, sets why-run option on chef run (chef-provisioning)
+* [RHMAP-4038] - Fix aws templates to set correct required security group protocols/ports for environment.
+
 ## 0.4.5
 
 * [RHMAP-3789] - Implement series option for cluster provision task to allow switching between parallel/series provisions of cluster nodes.
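
For reference on RHMAP-3815: the dry-run option maps onto Chef's why-run mode (the chef_runner.rb hunk at the end of this diff sets `Chef::Config.why_run`). A minimal sketch of the effect, with `dry_run` as an illustrative local rather than the actual CLI wiring:

```ruby
require 'chef'

dry_run = true # stands in for the task's new dry-run option

# In why-run mode chef-client reports the actions it would take
# without actually converging any resources.
Chef::Config.why_run = dry_run
```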
data/README.md CHANGED
@@ -133,7 +133,7 @@ fhcap provider add --name os1internal --type openstack --credentials os-auth-url
 fhcap provider add --name fheng --type aws
 ```
 
-A prompt will be displayed for any credentaisl that are required but not passed to the command directly
+A prompt will be displayed for any credentials that are required but not passed to the command directly
 
 ## Usage
 
data/fhcap-cli.gemspec CHANGED
@@ -25,6 +25,7 @@ Gem::Specification.new do |spec|
   spec.add_dependency 'public_suffix', '= 1.5.2'
   spec.add_dependency 'erubis', '= 2.7.0'
   spec.add_dependency 'launchy', '= 2.4.3'
+  spec.add_dependency 'deep_merge', '= 1.0.1'
   spec.add_dependency 'librarian-chef', '= 0.0.4'
   spec.add_dependency 'knife-block', '= 0.2.1'
   spec.add_dependency 'knife-cookbook-readme', '= 0.2.1'
@@ -38,7 +39,7 @@ Gem::Specification.new do |spec|
   spec.add_dependency 'chef-provisioning-vagrant', '= 0.10.0'
   spec.add_dependency 'chef-provisioning-fog', '= 0.15.0'
   spec.add_dependency 'test-kitchen', '= 1.4.2'
-  spec.add_dependency 'kitchen-sync', '= 1.1.1'
+  spec.add_dependency 'kitchen-sync', '= 2.1.1'
   spec.add_dependency 'kitchen-vagrant', '= 0.14.0'
   spec.add_dependency 'kitchen-ec2', '= 0.8.0'
   spec.add_dependency 'kitchen-docker', '= 2.1.0'
data/lib/cookbooks/provision/libraries/provision.rb CHANGED
@@ -1,3 +1,5 @@
+require 'deep_merge'
+
 def node_names_for(cluster_config)
   org_name = cluster_config[:id]
   #If we are using chef-client in local mode (-z) we can't search :-(
@@ -25,8 +27,8 @@ def inbound_rules_for(org_name, ingress)
   end.flatten
 end
 
-def machine_options_for(provider, org_name, environment, instance_options)
-  send(:"#{provider}_machine_options", org_name, environment, instance_options)
+def machine_options_for(provider, org_name, environment, instance_name, instance_options)
+  send(:"#{provider}_machine_options", org_name, environment, instance_name, instance_options)
 end
 
 def bootstrap_options_for(provider, org_name, environment, instance_options)
@@ -39,10 +41,11 @@ end
 
 #AWS
 
-def aws_machine_options(org_name, environment, instance_options)
+def aws_machine_options(org_name, environment, instance_name, instance_options)
   {
       :bootstrap_options => bootstrap_options_for('aws', org_name, environment, instance_options),
       :convergence_options => converge_options_for('aws', org_name, environment, instance_options),
+      :aws_tags => {'Name' => instance_name, 'fh-tag-org' => org_name, 'fh-tag-environment' => [org_name,environment].join('-'), 'fh-tag-role' => instance_name.split('-').last.gsub(/\d+/, "")}
   }
 end
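
A side note on the new aws_tags hash above: the fh-tag-role value is derived from the instance name by taking the last hyphen-separated segment and stripping digits. A quick illustration with a made-up instance name:

```ruby
instance_name = 'fheng-dev-mbaas1' # hypothetical name, for illustration only

# Last segment with trailing digits removed gives the role tag.
role = instance_name.split('-').last.gsub(/\d+/, "")
puts role # => "mbaas"
```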
 
@@ -67,7 +70,7 @@ end
 
 #OpenStack
 
-def openstack_machine_options(org_name, environment, instance_options)
+def openstack_machine_options(org_name, environment, instance_name, instance_options)
   {
       :bootstrap_options => bootstrap_options_for('openstack', org_name, environment, instance_options),
       :convergence_options => converge_options_for('openstack', org_name, environment, instance_options),
@@ -144,19 +147,31 @@ def with_cluster_config(node, &block)
   block.call cluster_config
 end
 
-def with_cluster_instances(node, &block)
+def with_cluster_instances(node, &block)
+  env_regexp = node[:'env-regexp'] || '.'
+  node_regexp = node[:'node-regexp'] || '.'
   with_cluster_config(node) do |cluster_config|
     org_name = cluster_config[:id]
     default_instance_options = cluster_config[:default_instance_options]
     instances = {}
     cluster_config[:environments].each do |env, env_config|
       chef_environment = fh_name_for(org_name, env)
-      instances[chef_environment] = {}
-
-      env_config[:instances].each do |instance_name, instance_config|
-        instances[chef_environment][instance_name_for(org_name, env, instance_name)] = {}
-        instances[chef_environment][instance_name_for(org_name, env, instance_name)][:instance_config] = instance_config
-        instances[chef_environment][instance_name_for(org_name, env, instance_name)][:machine_options] = machine_options_for(cluster_config[:driver], org_name, env, default_instance_options.merge(instance_config[cluster_config[:driver].to_sym] || {}))
+      if chef_environment =~ /#{env_regexp}/
+        instances[chef_environment] = {}
+
+        env_config[:instances].each do |instance_name, instance_config|
+          instance_name = instance_name_for(org_name, env, instance_name)
+          if instance_name =~ /#{node_regexp}/
+            instances[chef_environment][instance_name] = {}
+            instances[chef_environment][instance_name][:instance_config] = instance_config
+
+            machine_options = machine_options_for(cluster_config[:driver], org_name, env, instance_name, default_instance_options.merge(instance_config[cluster_config[:driver].to_sym] || {}))
+            machine_options.deep_merge!(JSON.parse(node[:machine_options].to_json, {:symbolize_names => true})) if node[:machine_options]
+            machine_options.deep_merge!(JSON.parse(instance_config[:machine_options].to_json, {:symbolize_names => true})) if instance_config[:machine_options]
+
+            instances[chef_environment][instance_name][:machine_options] = machine_options
+          end
+        end
       end
     end
     block.call instances
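
For context on the machine_options handling above: deep_merge! (the new gemspec dependency) merges nested hashes recursively, so node-level and per-instance machine_options can override individual keys of the provider defaults without replacing whole sub-hashes. A standalone sketch with made-up option hashes:

```ruby
require 'deep_merge'

defaults = { :bootstrap_options => { :instance_type => 'm3.medium', :key_name => 'fheng' } }
override = { :bootstrap_options => { :instance_type => 'm3.large' } }

# Only the overridden key changes; :key_name from the defaults survives.
defaults.deep_merge!(override)
puts defaults.inspect
# => {:bootstrap_options=>{:instance_type=>"m3.large", :key_name=>"fheng"}}
```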
data/lib/cookbooks/provision/recipes/aws_cluster_create.rb CHANGED
@@ -21,6 +21,13 @@ aws_vpc vpc_name do
   main_routes '0.0.0.0/0' => :internet_gateway
 end
 
+# Create Security Groups with empty rules first to avoid group dependent rule conflicts
+cluster_config[:security_groups].each do |sg_name, sg_config|
+  aws_security_group security_group_name_for(org_name, sg_name) do
+    vpc vpc_name
+  end
+end
+
 # Create Security Groups
 cluster_config[:security_groups].each do |sg_name, sg_config|
   aws_security_group security_group_name_for(org_name, sg_name) do
data/lib/cookbooks/provision/recipes/aws_cluster_create_elb.rb CHANGED
@@ -23,16 +23,21 @@ cluster_config[:environments].each do |env, env_config|
     certificate_file = File.join(node[:local_repo_path], node[:local_repo_clusters_dir], 'ssl_certs', "#{listener[:server_certificate]}-certificate.pem")
     private_key_file = File.join(node[:local_repo_path], node[:local_repo_clusters_dir], 'ssl_certs', "#{listener[:server_certificate]}-private-key.pem")
 
+    cert_name = listener[:server_certificate]
+
     if File.exists?(certificate_file) && File.exists?(private_key_file)
       certificate_body = File.open(certificate_file, "rb").read
       private_key = File.open(private_key_file, "rb").read
 
-      cert_name = listener[:server_certificate]
       lb_certs[index] = aws_server_certificate cert_name do
         certificate_body certificate_body
         private_key private_key
       end
     else
+      cert_name = listener[:server_certificate]
+      lb_certs[index] = aws_server_certificate cert_name do
+        action :nothing
+      end
       Chef::Log.warn("Could not find #{listener[:server_certificate]} locally to upload. Is this certificate already registered? ELB create will fail if it does not exist!!")
     end
   end
data/lib/cookbooks/provision/recipes/aws_cluster_destroy.rb CHANGED
@@ -5,27 +5,38 @@ cluster_config = cluster_config_for(node)
 
 org_name = cluster_config[:id]
 
-node_names = node_names_for(cluster_config)
+include_recipe "provision::cluster_destroy_instances"
 
-machine_batch do
-  machines node_names
-  action :destroy
+cluster_config[:environments].each do |env, env_config|
+  env_config[:load_balancers].each do |lb_name, lb_config|
+    lb_name = load_balancer_name_for(org_name, env, lb_name)
+    load_balancer lb_name do
+      action :destroy
+    end
+  end if env_config[:load_balancers]
 end
 
-# Clear all rules from Security Groups
+# Clear all rules from Security Groups so that all dependent objects are cleared. This can potentially re-create security groups, so we have to force the destroy again below.
 cluster_config[:security_groups].each do |sg_name, sg_config|
   aws_security_group security_group_name_for(org_name, sg_name) do
     inbound_rules []
   end
 end
 
+# Destroy all security groups. vpc purge would do this, but since we could have potentially re-created the group above, we do it here to be sure its removed.
+cluster_config[:security_groups].each do |sg_name, sg_config|
+  aws_security_group security_group_name_for(org_name, sg_name) do
+    action :destroy
+    ignore_failure true
+  end
+end
+
 vpc_name = vpc_name_for(org_name)
 aws_vpc vpc_name do
   action :purge
 end
 
 cluster_config[:environments].each do |env, env_config|
-
   aws_route_table route_table_name_for(org_name, env) do
     action :destroy
   end
@@ -35,16 +46,6 @@ cluster_config[:environments].each do |env, env_config|
     action :destroy
   end
 end
-
-end
-
-cluster_config[:environments].each do |env, env_config|
-  env_config[:load_balancers].each do |lb_name, lb_config|
-    lb_name = load_balancer_name_for(org_name, env, lb_name)
-    load_balancer lb_name do
-      action :destroy
-    end
-  end if env_config[:load_balancers]
 end
 
 aws_key_pair key_pair_name_for(org_name) do
data/lib/cookbooks/provision/recipes/cluster_bootstrap.rb ADDED
@@ -0,0 +1,4 @@
+
+include_recipe 'provision::common'
+include_recipe "provision::#{node['driver']}"
+include_recipe "provision::cluster_bootstrap_instances"
data/lib/cookbooks/provision/recipes/cluster_bootstrap_instances.rb ADDED
@@ -0,0 +1,37 @@
+include_recipe 'provision::common'
+include_recipe "provision::#{node['driver']}"
+
+with_cluster_instances(node) do |cluster_instances|
+  machine_batch "cluster_bootstrap_instances-converge_missing_nodes" do
+    cluster_instances.each do |chef_environment, instances|
+      current_nodes = search(:node, "chef_environment:#{chef_environment} AND network:*").map { |n| n.name }
+      required_nodes = instances.keys
+      missing_nodes = required_nodes - current_nodes
+
+      missing_nodes.each do |instance_name|
+        machine_options = instances[instance_name][:machine_options]
+        machine instance_name do
+          chef_environment chef_environment
+          machine_options machine_options
+          run_list []
+        end
+      end
+    end
+    max_simultaneous node['max_simultaneous']
+    action :converge
+  end
+
+  machine_batch "cluster_bootstrap_instances-setup_nodes" do
+    cluster_instances.each do |chef_environment, instances|
+      instances.each do |name, cfg|
+        machine name do
+          chef_environment chef_environment
+          machine_options cfg[:machine_options]
+          run_list cfg[:instance_config][:run_list]
+        end
+      end
+    end
+    max_simultaneous node['max_simultaneous']
+    action :setup
+  end
+end
data/lib/cookbooks/provision/recipes/cluster_create_instances.rb CHANGED
@@ -1,15 +1,17 @@
+include_recipe "provision::common"
+include_recipe "provision::#{node['driver']}"
+
 with_cluster_instances(node) do |cluster_instances|
-  cluster_instances.each do |chef_environment, instances|
-    with_chef_environment chef_environment do
-      machine_batch 'cluster_create_instances' do
-        instances.each do |name, cfg|
-          machine name do
-            chef_environment chef_environment
-            machine_options(cfg[:machine_options])
-            run_list []
-          end
+  machine_batch "cluster_create_instances" do
+    cluster_instances.each do |chef_environment, instances|
+      instances.each do |name, cfg|
+        machine name do
+          chef_environment chef_environment
+          machine_options cfg[:machine_options]
         end
       end
     end
+    max_simultaneous node['max_simultaneous']
+    action :ready
   end
 end
data/lib/cookbooks/provision/recipes/cluster_provision_instances.rb CHANGED
@@ -1,17 +1,18 @@
+include_recipe 'provision::common'
+include_recipe "provision::#{node['driver']}"
+
 with_cluster_instances(node) do |cluster_instances|
-  cluster_instances.each do |chef_environment, instances|
-    with_chef_environment chef_environment do
-      machine_batch 'cluster_provision_instances' do
-        max_simultaneous node['max_simultaneous']
-        instances.each do |name, cfg|
-          machine name do
-            chef_environment chef_environment
-            run_list cfg[:instance_config][:run_list]
-            machine_options(cfg[:machine_options])
-            converge true
-          end
+  machine_batch "cluster_provision_instances" do
+    cluster_instances.each do |chef_environment, instances|
+      instances.each do |name, cfg|
+        machine name do
+          chef_environment chef_environment
+          machine_options cfg[:machine_options]
+          run_list cfg[:instance_config][:run_list]
        end
      end
    end
+    max_simultaneous node['max_simultaneous']
+    action :converge_only
   end
 end
data/lib/cookbooks/provision/recipes/galera_reset_cluster.rb CHANGED
@@ -7,53 +7,57 @@ with_cluster_instances(node) do |cluster_instances|
   cluster_instances.each do |chef_environment, instances|
     with_chef_environment chef_environment do
 
-      search_role = 'galera.*_server'
+      # We have to deal with arbiters after the servers or things go all wrong :-/
+      %w{galera_server galera_arbiter_server}.each do |role|
 
-      search_nodes = search(:node, "chef_environment:#{chef_environment} AND roles:#{search_role}").map { |n| n.name }
+        search_nodes = search(:node, "chef_environment:#{chef_environment} AND roles:#{role}").map { |n| n.name }
 
-      run_list_query = /role\[#{search_role}\]/
+        run_list_query = /role\[#{role}\]/
 
-      galera_instances = instances.select do |name, cfg|
-        if cfg[:instance_config] && cfg[:instance_config][:run_list] && cfg[:instance_config][:run_list].index { |s| s =~ run_list_query }
-          tmp_run_list = []
-          tmp_run_list << cfg[:instance_config][:run_list][cfg[:instance_config][:run_list].index { |s| s =~ run_list_query }]
-          cfg[:instance_config][:tmp_run_list] = tmp_run_list
-          true
-        else
-          false
+        galera_instances = instances.select do |name, cfg|
+          if cfg[:instance_config] && cfg[:instance_config][:run_list] && cfg[:instance_config][:run_list].index { |s| s =~ run_list_query }
+            node.default['tmp_node_run_list'] = {} unless node['tmp_node_run_list']
+            tmp_run_list = node['tmp_node_run_list'][name] ? node['tmp_node_run_list'][name].dup : []
+            tmp_run_list << cfg[:instance_config][:run_list][cfg[:instance_config][:run_list].index { |s| s =~ run_list_query }]
+            cfg[:instance_config][:tmp_run_list] = tmp_run_list
+            node.default['tmp_node_run_list'][name] = tmp_run_list
+            true
+          else
+            false
+          end
         end
-      end
 
-      #Delete the installed flag to force a re-install of galera, Note DO NOT run this in a prod setup that is being used for real.
-      galera_instances.each do |name, cfg|
-        machine_execute "galera_reset_cluster - #{name}: Remove /root/.galera_installed" do
-          command 'sudo rm -rf /root/.galera_installed'
-          machine name
+        #Delete the installed flag to force a re-install of galera, Note DO NOT run this in a prod setup that is being used for real.
+        galera_instances.each do |name, cfg|
+          machine_execute "galera_reset_cluster - #{name}: Remove /root/.galera_installed" do
+            command 'sudo rm -rf /root/.galera_installed'
+            machine name
+          end
         end
-      end
 
-      # Chef run to force the install recipe for galera to run, depending on weather nodes exist on the server already or not, we will need to do this once or twice.
-      batch_provisions = search_nodes.length > 1 ? 1 : 2
-      batch_provisions.times do |index|
-        machine_batch "galera_reset_cluster: Batch Provison #{index + 1}/#{batch_provisions}" do
-          galera_instances.each do |name, cfg|
-            machine name do
-              chef_environment chef_environment
-              machine_options(cfg[:machine_options])
-              run_list cfg[:instance_config][:tmp_run_list]
-              converge true
+        # Chef run to force the install recipe for galera to run, depending on weather nodes exist on the server already or not, we will need to do this once or twice.
+        batch_provisions = search_nodes.length > 1 ? 1 : 2
+        batch_provisions.times do |index|
+          machine_batch "galera_reset_cluster: Batch Provison #{index + 1}/#{batch_provisions}" do
+            galera_instances.each do |name, cfg|
+              machine name do
+                chef_environment chef_environment
+                machine_options(cfg[:machine_options])
+                run_list cfg[:instance_config][:tmp_run_list]
+                converge true
+              end
            end
          end
        end
-      end
 
-      # Chef run on each node in series to setup clustering
-      galera_instances.each do |name, cfg|
-        machine name do
-          chef_environment chef_environment
-          machine_options(cfg[:machine_options])
-          run_list cfg[:instance_config][:tmp_run_list]
-          converge true
+        # Chef run on each node in series to setup clustering
+        galera_instances.each do |name, cfg|
+          machine name do
+            chef_environment chef_environment
+            machine_options(cfg[:machine_options])
+            run_list cfg[:instance_config][:tmp_run_list]
+            converge true
+          end
        end
      end
 
data/lib/cookbooks/provision/recipes/mongo_reset_cluster.rb CHANGED
@@ -5,27 +5,28 @@ with_cluster_instances(node) do |cluster_instances|
   cluster_instances.each do |chef_environment, instances|
     with_chef_environment chef_environment do
 
-      search_role = '.*mongo.*_server'
+      # It always take 3 provisions for mongo to be working in a clustered setup.
+      #
+      # 1. install mongo and update chef server
+      # 2. configure mongo configs with all the other nodes and setup replicaset
+      # 3. Add any users to what should be a now working mongo cluster
 
+      search_role = '.*mongo.*_server'
       run_list_query = /role\[#{search_role}\]/
 
       mongo_instances = instances.select do |name, cfg|
         if cfg[:instance_config] && cfg[:instance_config][:run_list] && cfg[:instance_config][:run_list].index { |s| s =~ run_list_query }
-          tmp_run_list = []
+          node.default['tmp_node_run_list'] = {} unless node['tmp_node_run_list']
+          tmp_run_list = node['tmp_node_run_list'][name] ? node['tmp_node_run_list'][name].dup : []
           tmp_run_list << cfg[:instance_config][:run_list][cfg[:instance_config][:run_list].index { |s| s =~ run_list_query }]
           cfg[:instance_config][:tmp_run_list] = tmp_run_list
+          node.default['tmp_node_run_list'][name] = tmp_run_list
          true
        else
          false
        end
      end
 
-      # It always take 3 provisions for mongo to be working in a clustered setup.
-      #
-      # 1. install mongo and update chef server
-      # 2. configure mongo configs with all the other nodes and setup replicaset
-      # 3. Add any users to what should be a now working mongo cluster
-
       batch_provisions = 3
       batch_provisions.times do |index|
         machine_batch "mongo_reset_cluster: Batch Provison #{index + 1}/#{batch_provisions}" do
@@ -34,9 +35,9 @@ with_cluster_instances(node) do |cluster_instances|
               chef_environment chef_environment
               machine_options(cfg[:machine_options])
               run_list cfg[:instance_config][:tmp_run_list]
-              converge true
            end
          end
+          action :converge
        end
      end
 
data/lib/cookbooks/provision/recipes/rabbitmq_reset_cluster.rb CHANGED
@@ -13,9 +13,11 @@ with_cluster_instances(node) do |cluster_instances|
 
       rabbitmq_instances = instances.select do |name, cfg|
         if cfg[:instance_config] && cfg[:instance_config][:run_list] && cfg[:instance_config][:run_list].index { |s| s =~ run_list_query }
-          tmp_run_list = []
+          node.default['tmp_node_run_list'] = {} unless node['tmp_node_run_list']
+          tmp_run_list = node['tmp_node_run_list'][name] ? node['tmp_node_run_list'][name].dup : []
           tmp_run_list << cfg[:instance_config][:run_list][cfg[:instance_config][:run_list].index { |s| s =~ run_list_query }]
           cfg[:instance_config][:tmp_run_list] = tmp_run_list
+          node.default['tmp_node_run_list'][name] = tmp_run_list
          true
        else
          false
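
The node.default['tmp_node_run_list'] change appears in all three reset recipes (galera, mongo, rabbitmq): instead of starting from an empty array, each recipe now appends its matching role to whatever an earlier recipe already recorded for the same node. A plain-Ruby sketch of that accumulation pattern (ordinary hashes standing in for Chef node attributes, names made up):

```ruby
tmp_node_run_list = {}
name = 'fheng-dev-platform1' # hypothetical node name

[['role[galera_server]'], ['role[rabbitmq_server]']].each do |matched_entries|
  # Reuse anything recorded for this node so far, then append the new match.
  tmp_run_list = tmp_node_run_list[name] ? tmp_node_run_list[name].dup : []
  tmp_run_list.concat(matched_entries)
  tmp_node_run_list[name] = tmp_run_list
end

puts tmp_node_run_list.inspect
# => {"fheng-dev-platform1"=>["role[galera_server]", "role[rabbitmq_server]"]}
```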
data/lib/extensions/chef/solr/query/regexpable_query.rb ADDED
@@ -0,0 +1,17 @@
+#Fixes search issue https://github.com/chef/chef-zero/pull/158
+module ChefZero
+  module Solr
+    module Query
+      class RegexpableQuery
+
+        original_verbose, $VERBOSE = $VERBOSE, nil
+
+        WORD_CHARACTER = "[A-Za-z0-9@._':\-]"
+        NON_WORD_CHARACTER = "[^A-Za-z0-9@._':\-]"
+
+        $VERBOSE = original_verbose
+
+      end
+    end
+  end
+end
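
A note on the $VERBOSE shuffle in that patch: reassigning an existing Ruby constant normally prints an "already initialized constant" warning, so the patch silences warnings while redefining WORD_CHARACTER / NON_WORD_CHARACTER and then restores the previous verbosity. A minimal standalone illustration of the idiom:

```ruby
class Example
  GREETING = "hello"
end

# Temporarily disable warnings, redefine the constant, then restore verbosity.
original_verbose, $VERBOSE = $VERBOSE, nil
class Example
  GREETING = "goodbye"
end
$VERBOSE = original_verbose

puts Example::GREETING # => "goodbye"
```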
data/lib/fhcap/chef-dk/chef_runner.rb CHANGED
@@ -18,14 +18,16 @@ module Fhcap
     attr_reader :run_list
     attr_reader :private_key_paths
     attr_reader :node_attrs
+    attr_reader :dry_run
 
-    def initialize(cookbook_path, run_list, node_attrs={}, private_key_paths=[])
+    def initialize(cookbook_path, run_list, dry_run=false, node_attrs={}, private_key_paths=[])
       @cookbook_path = File.expand_path(cookbook_path)
       @run_list = run_list
       @private_key_paths = private_key_paths
       @node_attrs = node_attrs
       @formatter = nil
       @ohai = nil
+      @dry_run = dry_run
     end
 
     def converge
@@ -70,6 +72,7 @@ module Fhcap
       Chef::Config.private_key_paths = (Chef::Config.private_key_paths + private_key_paths).compact.uniq
       Chef::Config.color = true
       Chef::Config.diff_disabled = true
+      Chef::Config.why_run = dry_run
 
       # atomic file operations on Windows require Administrator privileges to be able to read the SACL from a file
       # Using file_staging_uses_destdir(true) will get us inherited permissions indirectly on tempfile creation