hybrid_platforms_conductor 32.13.0 → 32.14.0

Files changed (141)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +42 -0
  3. data/README.md +10 -3
  4. data/bin/get_impacted_nodes +1 -1
  5. data/bin/setup +6 -1
  6. data/docs/executables/check-node.md +1 -1
  7. data/docs/executables/deploy.md +1 -1
  8. data/docs/executables/free_ips.md +1 -1
  9. data/docs/executables/free_veids.md +1 -1
  10. data/docs/executables/get_impacted_nodes.md +1 -1
  11. data/docs/executables/last_deploys.md +1 -1
  12. data/docs/executables/nodes_to_deploy.md +1 -1
  13. data/docs/executables/report.md +1 -1
  14. data/docs/executables/run.md +1 -1
  15. data/docs/executables/setup.md +1 -1
  16. data/docs/executables/ssh_config.md +1 -1
  17. data/docs/executables/test.md +1 -1
  18. data/docs/plugins.md +1 -0
  19. data/docs/plugins/platform_handler/serverless_chef.md +105 -0
  20. data/docs/tutorial.md +10 -6
  21. data/docs/tutorial/01_installation.md +14 -12
  22. data/docs/tutorial/02_first_node.md +14 -12
  23. data/docs/tutorial/03_scale.md +14 -12
  24. data/docs/tutorial/04_test.md +26 -14
  25. data/docs/tutorial/05_extend_with_plugins.md +17 -17
  26. data/examples/tutorial/01_installation/my-platforms/Gemfile +3 -0
  27. data/examples/tutorial/01_installation/my-platforms/hpc_config.rb +0 -0
  28. data/examples/tutorial/02_first_node/my-platforms/Gemfile +3 -0
  29. data/examples/tutorial/02_first_node/my-platforms/hpc_config.rb +1 -0
  30. data/examples/tutorial/02_first_node/my-service-conf-repo/inventory.yaml +13 -0
  31. data/examples/tutorial/02_first_node/my-service-conf-repo/my-service.conf.erb +3 -0
  32. data/examples/tutorial/02_first_node/my-service-conf-repo/service_my-service.rb +58 -0
  33. data/examples/tutorial/02_first_node/node/my-service.conf +4 -0
  34. data/examples/tutorial/03_scale/my-platforms/Gemfile +3 -0
  35. data/examples/tutorial/03_scale/my-platforms/hpc_config.rb +1 -0
  36. data/examples/tutorial/03_scale/my-platforms/my_commands.bash +2 -0
  37. data/examples/tutorial/03_scale/my-service-conf-repo/inventory.yaml +90 -0
  38. data/examples/tutorial/03_scale/my-service-conf-repo/my-service.conf.erb +3 -0
  39. data/examples/tutorial/03_scale/my-service-conf-repo/service_my-service.rb +58 -0
  40. data/examples/tutorial/03_scale/my-service-conf-repo/service_web-hello.rb +43 -0
  41. data/examples/tutorial/03_scale/node/my-service.conf +4 -0
  42. data/examples/tutorial/03_scale/web_docker_image/Dockerfile +33 -0
  43. data/examples/tutorial/03_scale/web_docker_image/hello_world.txt +1 -0
  44. data/examples/tutorial/03_scale/web_docker_image/hpc_root.key +27 -0
  45. data/examples/tutorial/03_scale/web_docker_image/hpc_root.key.pub +1 -0
  46. data/examples/tutorial/03_scale/web_docker_image/main.go +43 -0
  47. data/examples/tutorial/03_scale/web_docker_image/start.sh +7 -0
  48. data/examples/tutorial/03_scale/web_docker_image/test.bash +6 -0
  49. data/examples/tutorial/04_test/my-platforms/Gemfile +3 -0
  50. data/examples/tutorial/04_test/my-platforms/hpc_config.rb +12 -0
  51. data/examples/tutorial/04_test/my-platforms/images/debian_10/Dockerfile +13 -0
  52. data/examples/tutorial/04_test/my-platforms/my_commands.bash +2 -0
  53. data/examples/tutorial/04_test/my-service-conf-repo/inventory.yaml +100 -0
  54. data/examples/tutorial/04_test/my-service-conf-repo/my-service.conf.erb +3 -0
  55. data/examples/tutorial/04_test/my-service-conf-repo/service_my-service.rb +58 -0
  56. data/examples/tutorial/04_test/my-service-conf-repo/service_web-hello.rb +43 -0
  57. data/examples/tutorial/04_test/node/my-service.conf +4 -0
  58. data/examples/tutorial/04_test/web_docker_image/Dockerfile +33 -0
  59. data/examples/tutorial/04_test/web_docker_image/hello_world.txt +1 -0
  60. data/examples/tutorial/04_test/web_docker_image/hpc_root.key +27 -0
  61. data/examples/tutorial/04_test/web_docker_image/hpc_root.key.pub +1 -0
  62. data/examples/tutorial/04_test/web_docker_image/main.go +43 -0
  63. data/examples/tutorial/04_test/web_docker_image/start.sh +7 -0
  64. data/examples/tutorial/04_test/web_docker_image/test.bash +6 -0
  65. data/examples/tutorial/05_extend_with_plugins/dev-servers-conf-repo/hosts.json +12 -0
  66. data/examples/tutorial/05_extend_with_plugins/dev-servers-conf-repo/install-gcc.bash +14 -0
  67. data/examples/tutorial/05_extend_with_plugins/dev-servers-conf-repo/install-python.bash +14 -0
  68. data/examples/tutorial/05_extend_with_plugins/dev_docker_image/Dockerfile +20 -0
  69. data/examples/tutorial/05_extend_with_plugins/dev_docker_image/hpc_root.key +27 -0
  70. data/examples/tutorial/05_extend_with_plugins/dev_docker_image/hpc_root.key.pub +1 -0
  71. data/examples/tutorial/05_extend_with_plugins/my-platforms/Gemfile +4 -0
  72. data/examples/tutorial/05_extend_with_plugins/my-platforms/hpc_config.rb +13 -0
  73. data/examples/tutorial/05_extend_with_plugins/my-platforms/images/debian_10/Dockerfile +13 -0
  74. data/examples/tutorial/05_extend_with_plugins/my-platforms/my_commands.bash +2 -0
  75. data/examples/tutorial/05_extend_with_plugins/my-service-conf-repo/inventory.yaml +100 -0
  76. data/examples/tutorial/05_extend_with_plugins/my-service-conf-repo/my-service.conf.erb +3 -0
  77. data/examples/tutorial/05_extend_with_plugins/my-service-conf-repo/service_my-service.rb +58 -0
  78. data/examples/tutorial/05_extend_with_plugins/my-service-conf-repo/service_web-hello.rb +43 -0
  79. data/examples/tutorial/05_extend_with_plugins/my_hpc_plugins/lib/my_hpc_plugins/hpc_plugins/platform_handler/json_bash.rb +115 -0
  80. data/examples/tutorial/05_extend_with_plugins/my_hpc_plugins/lib/my_hpc_plugins/hpc_plugins/report/web_report.rb +52 -0
  81. data/examples/tutorial/05_extend_with_plugins/my_hpc_plugins/lib/my_hpc_plugins/hpc_plugins/test/root_space.rb +44 -0
  82. data/examples/tutorial/05_extend_with_plugins/my_hpc_plugins/my_hpc_plugins.gemspec +15 -0
  83. data/examples/tutorial/05_extend_with_plugins/node/my-service.conf +4 -0
  84. data/examples/tutorial/05_extend_with_plugins/web_docker_image/Dockerfile +33 -0
  85. data/examples/tutorial/05_extend_with_plugins/web_docker_image/hello_world.txt +1 -0
  86. data/examples/tutorial/05_extend_with_plugins/web_docker_image/hpc_root.key +27 -0
  87. data/examples/tutorial/05_extend_with_plugins/web_docker_image/hpc_root.key.pub +1 -0
  88. data/examples/tutorial/05_extend_with_plugins/web_docker_image/main.go +43 -0
  89. data/examples/tutorial/05_extend_with_plugins/web_docker_image/start.sh +7 -0
  90. data/examples/tutorial/05_extend_with_plugins/web_docker_image/test.bash +6 -0
  91. data/lib/hybrid_platforms_conductor/deployer.rb +2 -1
  92. data/lib/hybrid_platforms_conductor/hpc_plugins/platform_handler/serverless_chef.rb +440 -0
  93. data/lib/hybrid_platforms_conductor/hpc_plugins/platform_handler/serverless_chef/dsl_parser.rb +51 -0
  94. data/lib/hybrid_platforms_conductor/hpc_plugins/platform_handler/serverless_chef/recipes_tree_builder.rb +271 -0
  95. data/lib/hybrid_platforms_conductor/hpc_plugins/test/vulnerabilities.rb +1 -0
  96. data/lib/hybrid_platforms_conductor/nodes_handler.rb +9 -5
  97. data/lib/hybrid_platforms_conductor/version.rb +1 -1
  98. data/spec/hybrid_platforms_conductor_test.rb +3 -0
  99. data/spec/hybrid_platforms_conductor_test/api/deployer/provisioner_spec.rb +23 -0
  100. data/spec/hybrid_platforms_conductor_test/api/nodes_handler/cmdbs_plugins_api_spec.rb +11 -0
  101. data/spec/hybrid_platforms_conductor_test/api/platform_handlers/serverless_chef/config_dsl_spec.rb +17 -0
  102. data/spec/hybrid_platforms_conductor_test/api/platform_handlers/serverless_chef/deploy_output_parsing_spec.rb +94 -0
  103. data/spec/hybrid_platforms_conductor_test/api/platform_handlers/serverless_chef/diff_impacts_spec.rb +317 -0
  104. data/spec/hybrid_platforms_conductor_test/api/platform_handlers/serverless_chef/inventory_spec.rb +65 -0
  105. data/spec/hybrid_platforms_conductor_test/api/platform_handlers/serverless_chef/packaging_spec.rb +213 -0
  106. data/spec/hybrid_platforms_conductor_test/api/platform_handlers/serverless_chef/services_deployment_spec.rb +268 -0
  107. data/spec/hybrid_platforms_conductor_test/helpers/serverless_chef_helpers.rb +53 -0
  108. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/1_node/chef_versions.yml +3 -0
  109. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/1_node/nodes/node.json +14 -0
  110. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/1_node/policyfiles/test_policy.rb +3 -0
  111. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/data_bags/chef_versions.yml +3 -0
  112. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/data_bags/data_bags/my_bag/my_item.json +4 -0
  113. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/data_bags/nodes/node.json +14 -0
  114. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/data_bags/policyfiles/test_policy.rb +3 -0
  115. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/recipes/cookbooks/test_cookbook_1/recipes/default.rb +1 -0
  116. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/recipes/cookbooks/test_cookbook_2/libraries/default.rb +4 -0
  117. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/recipes/cookbooks/test_cookbook_2/recipes/default.rb +1 -0
  118. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/recipes/cookbooks/test_cookbook_2/recipes/other_recipe.rb +1 -0
  119. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/recipes/cookbooks/test_cookbook_2/resources/my_resource.rb +1 -0
  120. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/recipes/nodes/node1.json +10 -0
  121. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/recipes/nodes/node2.json +10 -0
  122. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/recipes/policyfiles/test_policy_1.rb +4 -0
  123. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/recipes/policyfiles/test_policy_2.rb +4 -0
  124. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/several_cookbooks/config.rb +1 -0
  125. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/several_cookbooks/cookbooks/test_cookbook_1/recipes/default.rb +1 -0
  126. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/several_cookbooks/nodes/node1.json +10 -0
  127. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/several_cookbooks/nodes/node2.json +10 -0
  128. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/several_cookbooks/other_cookbooks/test_cookbook_2/libraries/default.rb +4 -0
  129. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/several_cookbooks/other_cookbooks/test_cookbook_2/recipes/default.rb +1 -0
  130. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/several_cookbooks/other_cookbooks/test_cookbook_2/recipes/other_recipe.rb +1 -0
  131. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/several_cookbooks/other_cookbooks/test_cookbook_2/resources/my_resource.rb +1 -0
  132. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/several_cookbooks/policyfiles/test_policy_1.rb +4 -0
  133. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/several_cookbooks/policyfiles/test_policy_2.rb +4 -0
  134. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/several_nodes/chef_versions.yml +3 -0
  135. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/several_nodes/nodes/local.json +10 -0
  136. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/several_nodes/nodes/node1.json +10 -0
  137. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/several_nodes/nodes/node2.json +10 -0
  138. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/several_nodes/policyfiles/test_policy_1.rb +3 -0
  139. data/spec/hybrid_platforms_conductor_test/serverless_chef_repositories/several_nodes/policyfiles/test_policy_2.rb +3 -0
  140. data/tools/generate_mermaid +1 -1
  141. metadata +260 -86

data/examples/tutorial/05_extend_with_plugins/my_hpc_plugins/lib/my_hpc_plugins/hpc_plugins/platform_handler/json_bash.rb
@@ -0,0 +1,115 @@
+ require 'json'
+ require 'hybrid_platforms_conductor/platform_handler'
+
+ module MyHpcPlugins
+
+   module HpcPlugins
+
+     module PlatformHandler
+
+       # A nice platform handler to handle platforms of our team, using json inventory and bash scripts.
+       class JsonBash < HybridPlatformsConductor::PlatformHandler
+
+         # Get the list of known nodes.
+         # [API] - This method is mandatory.
+         #
+         # Result::
+         # * Array<String>: List of node names
+         def known_nodes
+           # This method is used to get the list of nodes that are handled by the platform
+           # In our case we read our json file to get this information, and use just the first part of the hostname as the node's name.
+           JSON.parse(File.read("#{repository_path}/hosts.json")).keys.map { |hostname| hostname.split('.').first }
+         end
+
+         # Get the metadata of a given node.
+         # [API] - This method is mandatory.
+         #
+         # Parameters::
+         # * *node* (String): Node to read metadata from
+         # Result::
+         # * Hash<Symbol,Object>: The corresponding metadata
+         def metadata_for(node)
+           # All nodes handled by this platform are running a debian buster image and we derive their name from their hostname.
+           {
+             hostname: "#{node}.hpc_tutorial.org",
+             image: 'debian_10'
+           }
+         end
+
+         # Return the services for a given node
+         # [API] - This method is mandatory.
+         #
+         # Parameters::
+         # * *node* (String): node to read configuration from
+         # Result::
+         # * Array<String>: The corresponding services
+         def services_for(node)
+           # This info is taken from our JSON inventory file
+           [JSON.parse(File.read("#{repository_path}/hosts.json"))["#{node}.hpc_tutorial.org"]]
+         end
+
+         # Get the list of services we can deploy
+         # [API] - This method is mandatory.
+         #
+         # Result::
+         # * Array<String>: The corresponding services
+         def deployable_services
+           # This info is taken by listing existing bash scripts
+           Dir.glob("#{repository_path}/install-*.bash").map { |file| File.basename(file).match(/install-(.*)\.bash/)[1] }
+         end
+
+         # Get the list of actions to perform to deploy on a given node.
+         # Those actions can be executed in parallel with other deployments on other nodes. They must be thread safe.
+         # [API] - This method is mandatory.
+         # [API] - @cmd_runner is accessible.
+         # [API] - @actions_executor is accessible.
+         #
+         # Parameters::
+         # * *node* (String): Node to deploy on
+         # * *service* (String): Service to be deployed
+         # * *use_why_run* (Boolean): Do we use a why-run mode? [default = true]
+         # Result::
+         # * Array< Hash<Symbol,Object> >: List of actions to be done
+         def actions_to_deploy_on(node, service, use_why_run: true)
+           # This method returns all the actions to execute to deploy on a node.
+           # The use_why_run switch is on if the deployment should just be simulated.
+           # Those actions (bash commands, scp of files, ruby code...) should be thread safe as they can be executed in parallel with other deployment actions for other nodes in case of a concurrent deployment on several nodes.
+           # In our case it's very simple: we just call our bash script on the node's hostname.
+           [{ bash: "#{repository_path}/install-#{service}.bash #{@nodes_handler.get_hostname_of(node)} #{use_why_run ? 'check' : ''}" }]
+         end
+
+         # Parse stdout and stderr of a given deploy run and get the list of tasks with their status
+         # [API] - This method is mandatory.
+         #
+         # Parameters::
+         # * *stdout* (String): stdout to be parsed
+         # * *stderr* (String): stderr to be parsed
+         # Result::
+         # * Array< Hash<Symbol,Object> >: List of task properties. The following properties should be returned, among free ones:
+         #   * *name* (String): Task name
+         #   * *status* (Symbol): Task status. Should be one of:
+         #     * *:changed*: The task has been changed
+         #     * *:identical*: The task has not been changed
+         #   * *diffs* (String): Differences, if any
+         def parse_deploy_output(stdout, stderr)
+           # In our case our bash scripts return the last line as a status, so use it.
+           [{
+             name: 'Install tool',
+             status:
+               case stdout.split("\n").last
+               when 'OK'
+                 :identical
+               else
+                 :changed
+               end,
+             diffs: stdout
+           }]
+         end
+
+       end
+
+     end
+
+   end
+
+ end
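
For reference, this JsonBash handler expects its inventory in a hosts.json file at the root of the platform repository (the dev-servers-conf-repo/hosts.json added in this release): a JSON object mapping each node's FQDN to the service to deploy on it, the service name matching an install-<service>.bash script in the same repository (install-gcc.bash and install-python.bash here). A minimal sketch, with illustrative hostnames:

  {
    "devserver1.hpc_tutorial.org": "gcc",
    "devserver2.hpc_tutorial.org": "python"
  }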

data/examples/tutorial/05_extend_with_plugins/my_hpc_plugins/lib/my_hpc_plugins/hpc_plugins/report/web_report.rb
@@ -0,0 +1,52 @@
+ require 'hybrid_platforms_conductor/report'
+
+ module MyHpcPlugins
+
+   module HpcPlugins
+
+     module Report
+
+       # Publish reports to our web reporting tool
+       class WebReport < HybridPlatformsConductor::Report
+
+         # Give the list of supported locales by this report generator
+         # [API] - This method is mandatory.
+         #
+         # Result::
+         # * Array<Symbol>: List of supported locales
+         def self.supported_locales
+           # This method has to publish the list of translations it accepts.
+           [:en]
+         end
+
+         # Create a report for a list of nodes, in a given locale
+         # [API] - This method is mandatory.
+         #
+         # Parameters::
+         # * *nodes* (Array<String>): List of nodes
+         # * *locale_code* (Symbol): The locale code
+         def report_for(nodes, locale_code)
+           # This method simply provides a report for a given list of nodes in the desired locale.
+           # The locale will be one of the supported ones.
+           # Generate the report in a file to be uploaded on web10.
+           File.write(
+             '/tmp/web_report.txt',
+             @platforms_handler.known_platforms.map do |platform|
+               "= Inventory for platform #{platform.repository_path} of type #{platform.platform_type}:\n" +
+               platform.known_nodes.map do |node|
+                 "* Node #{node} (IP: #{@nodes_handler.get_host_ip_of(node)}, Hostname: #{@nodes_handler.get_hostname_of(node)})."
+               end.join("\n")
+             end.join("\n")
+           )
+           # Upload the file on our web10 instance
+           system 'scp -o StrictHostKeyChecking=no /tmp/web_report.txt root@web10.hpc_tutorial.org:/root/hello_world.txt'
+           out 'Upload successful'
+         end
+
+       end
+
+     end
+
+   end
+
+ end
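
Since this report plugin scp's the generated file to /root/hello_world.txt on web10, and that path is exactly the file served by the Go web server defined later in this changeset, the published report can be read back with a plain HTTP request once the tutorial's web10 container is up (same pattern as the test.bash script below):

  curl http://web10.hpc_tutorial.org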

data/examples/tutorial/05_extend_with_plugins/my_hpc_plugins/lib/my_hpc_plugins/hpc_plugins/test/root_space.rb
@@ -0,0 +1,44 @@
+ module MyHpcPlugins
+
+   module HpcPlugins
+
+     module Test
+
+       # Check root space
+       class RootSpace < HybridPlatformsConductor::Test
+
+         # Run test using SSH commands on the node.
+         # Instead of executing the SSH commands directly on each node for each test, this method returns the list of commands to run and the test framework then groups them in 1 SSH connection.
+         # [API] - @node can be used to adapt the command with the node.
+         #
+         # Result::
+         # * Hash<String,Object>: For each command to execute, information regarding the assertion.
+         #   * Values can be:
+         #     * Proc: The code block making the test given the stdout of the command. Here is the Proc description:
+         #       * Parameters::
+         #         * *stdout* (Array<String>): List of lines of the stdout of the command.
+         #         * *stderr* (Array<String>): List of lines of the stderr of the command.
+         #         * *return_code* (Integer): The return code of the command.
+         #     * Hash<Symbol,Object>: More complete information, that can contain the following keys:
+         #       * *validator* (Proc): The proc containing the assertions to perform (as described above). This key is mandatory.
+         #       * *timeout* (Integer): Timeout to wait for this command to execute.
+         def test_on_node
+           # If this method is defined, it will be used to execute SSH commands on each node that is being tested.
+           # For each SSH command, a validator code block will be called with the stdout of the command run remotely on the node.
+           # In place of a simple validator code block, a more complex structure can be used to give more info (for example timeout).
+           {
+             'du -sk /root' => proc do |stdout|
+               # stdout contains the output of our du command
+               used_kb = stdout.first.split.first.to_i
+               error "Root space used is #{used_kb}KB - too much!" if used_kb > 1024
+             end
+           }
+         end
+
+       end
+
+     end
+
+   end
+
+ end
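
This test plugin uses the simple Proc form of the test_on_node return value. As documented in the method's comments above, a command can instead map to a Hash when more control is needed; a hypothetical variant of the same check with an explicit timeout (the 30s value is illustrative):

  {
    'du -sk /root' => {
      validator: proc do |stdout|
        # stdout contains the output of our du command
        used_kb = stdout.first.split.first.to_i
        error "Root space used is #{used_kb}KB - too much!" if used_kb > 1024
      end,
      timeout: 30
    }
  }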

data/examples/tutorial/05_extend_with_plugins/my_hpc_plugins/my_hpc_plugins.gemspec
@@ -0,0 +1,15 @@
+ Gem::Specification.new do |s|
+   s.name = 'my_hpc_plugins'
+   s.version = '0.0.1'
+   s.date = '2021-04-29'
+   s.authors = ['Me myself!']
+   s.email = ['me-myself@my-domain.com']
+   s.summary = 'My awesome plugins for Hybrid Platforms Conductor'
+   s.files = Dir['{bin,lib,spec}/**/*']
+   Dir['bin/**/*'].each do |exec_name|
+     s.executables << File.basename(exec_name)
+   end
+   # Dependencies
+   # Make sure we use a compatible version of hybrid_platforms_conductor
+   s.add_dependency 'hybrid_platforms_conductor', '~> 32.12'
+ end
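
For the plugins gem to be discovered, the tutorial's my-platforms/Gemfile (which grows from 3 to 4 lines in this release) has to reference it alongside hybrid_platforms_conductor. A plausible sketch only; the rubygems source URL and the relative path to the plugin repository are assumptions, not taken from the diff:

  source 'http://rubygems.org'

  gem 'hybrid_platforms_conductor'
  gem 'my_hpc_plugins', path: '../my_hpc_plugins'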

data/examples/tutorial/05_extend_with_plugins/node/my-service.conf
@@ -0,0 +1,4 @@
+ service-port: 1107
+ service-timeout: 60
+ service-logs: stdout
+

data/examples/tutorial/05_extend_with_plugins/web_docker_image/Dockerfile
@@ -0,0 +1,33 @@
+ # syntax=docker/dockerfile:1
+ # Pull the image containing Go
+ FROM golang:1.16.3-buster
+
+ # Install the web server
+ # Create the message file to be displayed by the web server
+ COPY hello_world.txt /root/hello_world.txt
+ # Copy the code
+ COPY main.go /codebase/src/main.go
+ # Build the binary
+ RUN cd /codebase && go build -v -o /codebase/bin/server ./src/main.go
+ # Set the env which will be available at runtime
+ ENV PORT=80
+ EXPOSE 80
+
+ # Install sshd
+ RUN apt-get update && apt-get install -y openssh-server
+ RUN mkdir /var/run/sshd
+ # Activate root login
+ RUN sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config
+ # Speed-up considerably ssh performance and avoid huge lags and timeouts without DNS
+ RUN sed -i 's/#UseDNS yes/UseDNS no/' /etc/ssh/sshd_config
+ EXPOSE 22
+
+ # Upload our root key for key authentication of root
+ COPY hpc_root.key.pub /root/.ssh/authorized_keys
+ RUN chmod 700 /root/.ssh
+ RUN chmod 400 /root/.ssh/authorized_keys
+
+ # Startup script
+ COPY start.sh /start.sh
+ RUN chmod +x /start.sh
+ CMD ["/start.sh"]
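
To try this image locally, it can be built and started with plain docker commands; the image and container names below are illustrative, not taken from the tutorial scripts:

  docker build -t hpc_tutorial_web web_docker_image
  docker run --hostname web1.hpc_tutorial.org --name web1 -d hpc_tutorial_web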

data/examples/tutorial/05_extend_with_plugins/web_docker_image/hpc_root.key
@@ -0,0 +1,27 @@
+ -----BEGIN OPENSSH PRIVATE KEY-----
+ b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn
+ NhAAAAAwEAAQAAAQEAu5oVFddWtHFKA8+PnRssWieXrnMUPM8+qTdQrxoMaNacFWN+pBv3
+ Nj61GphCANoEPBvgcm7ltgZa1EumAZh4NJFi7AqM4Z2AFsqsvzRS1HNFRClEwr576iRkMc
+ ECIr0JmxeOscVcDMx0/9uFSfON7DiuStOOpIJIiQfjtFBsLFpzdfsVfZ3zc18Hxugqsbq0
+ t0bsTZdjRKnIYeKTSXKbF2n9b/dAf2tBtpywpun1WXQul7w1z20m9zv955k4kq3rAhSIPH
+ aOSE+QCXlICYqwmP2KVKCG6XEqSWjF4ujzcE7PlKIK1gsXdfum6GL23X8k8njBIjRzq6a0
+ Uabp65tvVQAAA8hgF2y/YBdsvwAAAAdzc2gtcnNhAAABAQC7mhUV11a0cUoDz4+dGyxaJ5
+ eucxQ8zz6pN1CvGgxo1pwVY36kG/c2PrUamEIA2gQ8G+BybuW2BlrUS6YBmHg0kWLsCozh
+ nYAWyqy/NFLUc0VEKUTCvnvqJGQxwQIivQmbF46xxVwMzHT/24VJ843sOK5K046kgkiJB+
+ O0UGwsWnN1+xV9nfNzXwfG6CqxurS3RuxNl2NEqchh4pNJcpsXaf1v90B/a0G2nLCm6fVZ
+ dC6XvDXPbSb3O/3nmTiSresCFIg8do5IT5AJeUgJirCY/YpUoIbpcSpJaMXi6PNwTs+Uog
+ rWCxd1+6boYvbdfyTyeMEiNHOrprRRpunrm29VAAAAAwEAAQAAAQEAj5IE7vkkvuUweqgf
+ RWGlWL2SJIyngT4tuzy8/7SUqKsNOoUxRxCr6ZSl7tt1L8eplALVi135aIuGeZAHVJNF8x
+ 0Mso4EcSmi/E5RU+cMONCawuY+XDeBB5igIT+PjdxT/KBTsRLUNmtGYwSyBf1hsNVT9ang
+ MWAPeOdTyQv/LRUe+O/ZKQ/h/X51BEwSXqz32RmlmZdMnDacIJ0NWdbcbAEtB4JT83S20d
+ bycQNOxGVmyevOGjyTXfUX6FhtYT7A7zzqNUL8UstSdajWeTZvVGd0C7Ob1kxaU3tnAwFr
+ 0sHTPHX7FQQFGhiU8gBZHOJ/PQzo+qqcCSsM3FcJlpVw5QAAAIAPYGXt7diWFYEhZVfj3n
+ KA0lX7LjQms0en4CoSkN4BELQJnaaqlTkrqN7PNL/WbmeavzDqQMk1jb9Q2+gra826THX5
+ GGtbFPFb9TSwBNi+zsyVURKEzqNYSw28ytKIdj98VW1qqQoolgTc5qoXG1JW5f5bEPbzUT
+ LXPP5j+YvuCgAAAIEA8NVJc5vXzJxbdgBxOPPHaPCAZ28V15Ggy+Bj1K/9G6FHqa/SFhy0
+ tCpBDNB/POX2UXCQDLteTIDodj9h2Yat6PJP9RKYCC9F5+/fU/0Y/tvD/p/uNhwEuqToex
+ gBUoL+r7/G3qYK13n8AKwrvb2wiYgP3aUVW7AyOier1MKt4OsAAACBAMdqmcj+5fNSTuOg
+ iPgGylUSIYnJ0w2jjZIaX96aat/w+SgciLhenbwpcf/9E8MDYKbXyhArdQvOgkEVogEI4s
+ PrFG66ogawPuRUUsuZBhm/b535EEvNAlkC8fMPVuKfOIsHJs8Cwcy7QPZSZu5pyKznpnCT
+ UlYGT6aB0GGoeeC/AAAAEWFkbWluQGV4YW1wbGUuY29tAQ==
+ -----END OPENSSH PRIVATE KEY-----

data/examples/tutorial/05_extend_with_plugins/web_docker_image/hpc_root.key.pub
@@ -0,0 +1 @@
+ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7mhUV11a0cUoDz4+dGyxaJ5eucxQ8zz6pN1CvGgxo1pwVY36kG/c2PrUamEIA2gQ8G+BybuW2BlrUS6YBmHg0kWLsCozhnYAWyqy/NFLUc0VEKUTCvnvqJGQxwQIivQmbF46xxVwMzHT/24VJ843sOK5K046kgkiJB+O0UGwsWnN1+xV9nfNzXwfG6CqxurS3RuxNl2NEqchh4pNJcpsXaf1v90B/a0G2nLCm6fVZdC6XvDXPbSb3O/3nmTiSresCFIg8do5IT5AJeUgJirCY/YpUoIbpcSpJaMXi6PNwTs+UogrWCxd1+6boYvbdfyTyeMEiNHOrprRRpunrm29V admin@example.com

data/examples/tutorial/05_extend_with_plugins/web_docker_image/main.go
@@ -0,0 +1,43 @@
+ package main
+
+ import (
+     "fmt"
+     "io/ioutil"
+     "log"
+     "net/http"
+     "os"
+ )
+
+ const homepageEndPoint = "/"
+
+ // StartWebServer the webserver
+ func StartWebServer() {
+     http.HandleFunc(homepageEndPoint, handleHomepage)
+     port := os.Getenv("PORT")
+     if len(port) == 0 {
+         panic("Environment variable PORT is not set")
+     }
+
+     log.Printf("Starting web server to listen on endpoints [%s] and port %s",
+         homepageEndPoint, port)
+     if err := http.ListenAndServe(":"+port, nil); err != nil {
+         panic(err)
+     }
+ }
+
+ func handleHomepage(w http.ResponseWriter, r *http.Request) {
+     urlPath := r.URL.Path
+     log.Printf("Web request received on url path %s", urlPath)
+     content, content_err := ioutil.ReadFile("/root/hello_world.txt")
+     if content_err != nil {
+         fmt.Printf("Failed to read message to display, err: %s", content_err)
+     }
+     _, write_err := w.Write(content)
+     if write_err != nil {
+         fmt.Printf("Failed to write response, err: %s", write_err)
+     }
+ }
+
+ func main() {
+     StartWebServer()
+ }

data/examples/tutorial/05_extend_with_plugins/web_docker_image/start.sh
@@ -0,0 +1,7 @@
+ #!/bin/bash
+
+ # Start sshd as a daemon
+ /usr/sbin/sshd
+
+ # Start web server
+ sh -c /codebase/bin/server

data/examples/tutorial/05_extend_with_plugins/web_docker_image/test.bash
@@ -0,0 +1,6 @@
+ #!/bin/bash
+
+ for ((i=1;i<=10;i++));
+ do
+   echo "Container web$i: $(curl http://web$i.hpc_tutorial.org 2>/dev/null)"
+ done

data/lib/hybrid_platforms_conductor/deployer.rb
@@ -318,8 +318,9 @@ module HybridPlatformsConductor
    actions_executor: @actions_executor
  )
  instance.with_running_instance(stop_on_exit: true, destroy_on_exit: !reuse_instance, port: 22) do
-   # Test-provisioned nodes have SSH Session Exec capabilities
+   # Test-provisioned nodes have SSH Session Exec capabilities and are not local
    sub_executable.nodes_handler.override_metadata_of node, :ssh_session_exec, 'true'
+   sub_executable.nodes_handler.override_metadata_of node, :local_node, false
    # Test-provisioned nodes use default sudo
    sub_executable.config.sudo_procs.replace(sub_executable.config.sudo_procs.map do |sudo_proc_info|
      {

data/lib/hybrid_platforms_conductor/hpc_plugins/platform_handler/serverless_chef.rb
@@ -0,0 +1,440 @@
+ require 'fileutils'
+ require 'json'
+ require 'yaml'
+ require 'hybrid_platforms_conductor/platform_handler'
+ require 'hybrid_platforms_conductor/hpc_plugins/platform_handler/serverless_chef/dsl_parser'
+ require 'hybrid_platforms_conductor/hpc_plugins/platform_handler/serverless_chef/recipes_tree_builder'
+
+ module HybridPlatformsConductor
+
+   module HpcPlugins
+
+     module PlatformHandler
+
+       # Handle a Chef repository without using a Chef Infra Server.
+       # Inventory is read from nodes/*.json.
+       # Services are defined from policy files in policyfiles/*.rb.
+       # Roles are not supported as they are considered made obsolete with the usage of policies by the Chef community.
+       # Required Chef versions are taken from a chef_versions.yml file containing the following keys:
+       # * *workstation* (String): The Chef Workstation version to be installed during setup (can be specified as major.minor only)
+       # * *client* (String): The Chef Infra Client version to be installed during nodes deployment (can be specified as major.minor only)
+       class ServerlessChef < HybridPlatformsConductor::PlatformHandler
+
+         # Add a Mixin to the DSL parsing the platforms configuration file.
+         # This can be used by any plugin to add plugin-specific configuration getters and setters, accessible later from NodesHandler instances.
+         # An optional initializer can also be given.
+         # [API] - Those calls are optional
+         module MyDSLExtension
+
+           # The list of library helpers we know include some recipes.
+           # This is used when parsing some recipe code: if such a helper is encountered then we assume a dependency on a given recipe.
+           # Hash< Symbol, Array<String> >: List of recipes definitions per helper name.
+           attr_reader :known_helpers_including_recipes
+
+           # Initialize the DSL
+           def init_serverless_chef
+             @known_helpers_including_recipes = {}
+           end
+
+           # Define helpers including recipes
+           #
+           # Parameters::
+           # * *included_recipes* (Hash< Symbol, Array<String> >): List of recipes definitions per helper name.
+           def helpers_including_recipes(included_recipes)
+             @known_helpers_including_recipes.merge!(included_recipes)
+           end
+
+         end
+         self.extend_config_dsl_with MyDSLExtension, :init_serverless_chef
+
+         # Setup the platform, install dependencies...
+         # [API] - This method is optional.
+         # [API] - @cmd_runner is accessible.
+         def setup
+           required_version = YAML.load_file("#{@repository_path}/chef_versions.yml")['workstation']
+           Bundler.with_unbundled_env do
+             exit_status, stdout, _stderr = @cmd_runner.run_cmd '/opt/chef-workstation/bin/chef --version', expected_code: [0, 127]
+             existing_version =
+               if exit_status == 127
+                 'not installed'
+               else
+                 expected_match = stdout.match(/^Chef Workstation version: (.+)\.\d+$/)
+                 expected_match.nil? ? 'unreadable' : expected_match[1]
+               end
+             log_debug "Current Chef version: #{existing_version}. Required version: #{required_version}"
+             @cmd_runner.run_cmd "curl -L https://omnitruck.chef.io/install.sh | sudo bash -s -- -P chef-workstation -v #{required_version}" unless existing_version == required_version
+           end
+         end
+
+         # Get the list of known nodes.
+         # [API] - This method is mandatory.
+         #
+         # Result::
+         # * Array<String>: List of node names
+         def known_nodes
+           Dir.glob("#{@repository_path}/nodes/*.json").map { |file| File.basename(file, '.json') }
+         end
+
+         # Get the metadata of a given node.
+         # [API] - This method is mandatory.
+         #
+         # Parameters::
+         # * *node* (String): Node to read metadata from
+         # Result::
+         # * Hash<Symbol,Object>: The corresponding metadata
+         def metadata_for(node)
+           (json_for(node)['normal'] || {}).transform_keys(&:to_sym)
+         end
+
+         # Return the services for a given node
+         # [API] - This method is mandatory.
+         #
+         # Parameters::
+         # * *node* (String): node to read configuration from
+         # Result::
+         # * Array<String>: The corresponding services
+         def services_for(node)
+           [json_for(node)['policy_name']]
+         end
+
+         # Get the list of services we can deploy
+         # [API] - This method is mandatory.
+         #
+         # Result::
+         # * Array<String>: The corresponding services
+         def deployable_services
+           Dir.glob("#{@repository_path}/policyfiles/*.rb").map { |file| File.basename(file, '.rb') }
+         end
+
+         # Package the repository, ready to be deployed on artefacts or directly to a node.
+         # [API] - This method is optional.
+         # [API] - @cmd_runner is accessible.
+         # [API] - @actions_executor is accessible.
+         #
+         # Parameters::
+         # * *services* (Hash< String, Array<String> >): Services to be deployed, per node
+         # * *secrets* (Hash): Secrets to be used for deployment
+         # * *local_environment* (Boolean): Are we deploying to a local environment?
+         def package(services:, secrets:, local_environment:)
+           # Make a stamp of the info that has been packaged, so that we don't package it again if useless
+           package_info = {
+             secrets: secrets,
+             commit: info[:commit].nil? ? Time.now.utc.strftime('%F %T') : info[:commit][:id],
+             other_files:
+               if info[:status].nil?
+                 {}
+               else
+                 Hash[
+                   (info[:status][:added_files] + info[:status][:changed_files] + info[:status][:untracked_files]).
+                     sort.
+                     map { |f| [f, File.mtime("#{@repository_path}/#{f}").strftime('%F %T')] }
+                 ]
+               end,
+             deleted_files: info[:status].nil? ? [] : info[:status][:deleted_files].sort
+           }
+           # Each service is packaged individually.
+           services.values.flatten.sort.uniq.each do |service|
+             package_dir = "dist/#{local_environment ? 'local' : 'prod'}/#{service}"
+             package_info_file = "#{@repository_path}/#{package_dir}/hpc_package.info"
+             current_package_info = File.exist?(package_info_file) ? JSON.parse(File.read(package_info_file)).transform_keys(&:to_sym) : {}
+             unless current_package_info == package_info
+               Bundler.with_unbundled_env do
+                 # If the policy lock file does not exist, generate it
+                 @cmd_runner.run_cmd "cd #{@repository_path} && /opt/chef-workstation/bin/chef install policyfiles/#{service}.rb" unless File.exist?("#{@repository_path}/policyfiles/#{service}.lock.json")
+                 extra_cp_data_bags = File.exist?("#{@repository_path}/data_bags") ? " && cp -ar data_bags/ #{package_dir}/" : ''
+                 @cmd_runner.run_cmd "cd #{@repository_path} && \
+                   sudo rm -rf #{package_dir} && \
+                   /opt/chef-workstation/bin/chef export policyfiles/#{service}.rb #{package_dir}#{extra_cp_data_bags}"
+               end
+               unless @cmd_runner.dry_run
+                 # Create secrets file
+                 secrets_file = "#{@repository_path}/#{package_dir}/data_bags/hpc_secrets/hpc_secrets.json"
+                 FileUtils.mkdir_p(File.dirname(secrets_file))
+                 File.write(secrets_file, secrets.merge(id: 'hpc_secrets').to_json)
+                 # Remember the package info
+                 File.write(package_info_file, package_info.to_json)
+               end
+             end
+           end
+         end
+
+         # Prepare deployments.
+         # This method is called just before getting and executing the actions to be deployed.
+         # It is called once per platform.
+         # [API] - This method is optional.
+         # [API] - @cmd_runner is accessible.
+         # [API] - @actions_executor is accessible.
+         #
+         # Parameters::
+         # * *services* (Hash< String, Array<String> >): Services to be deployed, per node
+         # * *secrets* (Hash): Secrets to be used for deployment
+         # * *local_environment* (Boolean): Are we deploying to a local environment?
+         # * *why_run* (Boolean): Are we deploying in why-run mode?
+         def prepare_for_deploy(services:, secrets:, local_environment:, why_run:)
+           @local_env = local_environment
+         end
+
+         # Get the list of actions to perform to deploy on a given node.
+         # Those actions can be executed in parallel with other deployments on other nodes. They must be thread safe.
+         # [API] - This method is mandatory.
+         # [API] - @cmd_runner is accessible.
+         # [API] - @actions_executor is accessible.
+         #
+         # Parameters::
+         # * *node* (String): Node to deploy on
+         # * *service* (String): Service to be deployed
+         # * *use_why_run* (Boolean): Do we use a why-run mode? [default = true]
+         # Result::
+         # * Array< Hash<Symbol,Object> >: List of actions to be done
+         def actions_to_deploy_on(node, service, use_why_run: true)
+           package_dir = "#{@repository_path}/dist/#{@local_env ? 'local' : 'prod'}/#{service}"
+           # Generate the nodes attributes file
+           unless @cmd_runner.dry_run
+             FileUtils.mkdir_p "#{package_dir}/nodes"
+             File.write("#{package_dir}/nodes/#{node}.json", (known_nodes.include?(node) ? metadata_for(node) : {}).merge(@nodes_handler.metadata_of(node)).to_json)
+           end
+           if @nodes_handler.get_use_local_chef_of(node)
+             # Just run the chef-client directly from the packaged repository
+             [{ bash: "cd #{package_dir} && sudo SSL_CERT_DIR=/etc/ssl/certs /opt/chef-workstation/bin/chef-client --local-mode --json-attributes nodes/#{node}.json#{use_why_run ? ' --why-run' : ''}" }]
+           else
+             # Upload the package and run it from the node
+             package_name = File.basename(package_dir)
+             chef_versions_file = "#{@repository_path}/chef_versions.yml"
+             raise "Missing file #{chef_versions_file} specifying the Chef Infra Client version to be deployed" unless File.exist?(chef_versions_file)
+             required_chef_client_version = YAML.load_file(chef_versions_file)['client']
+             sudo = (@actions_executor.connector(:ssh).ssh_user == 'root' ? '' : "#{@nodes_handler.sudo_on(node)} ")
+             [
+               {
+                 # Install dependencies
+                 remote_bash: [
+                   'set -e',
+                   'set -o pipefail',
+                   "if [ -n \"$(command -v apt)\" ]; then #{sudo}apt update && #{sudo}apt install -y curl build-essential ; else #{sudo}yum groupinstall 'Development Tools' && #{sudo}yum install -y curl ; fi",
+                   'mkdir -p ./hpc_deploy',
+                   "curl --location https://omnitruck.chef.io/install.sh | tac | tac | #{sudo}bash -s -- -d /opt/artefacts -v #{required_chef_client_version} -s once"
+                 ]
+               },
+               {
+                 scp: { package_dir => './hpc_deploy' },
+                 remote_bash: [
+                   'set -e',
+                   "cd ./hpc_deploy/#{package_name}",
+                   "#{sudo}SSL_CERT_DIR=/etc/ssl/certs /opt/chef/bin/chef-client --local-mode --chef-license=accept --json-attributes nodes/#{node}.json#{use_why_run ? ' --why-run' : ''}",
+                   'cd ..',
+                   "#{sudo}rm -rf #{package_name}"
+                 ]
+               }
+             ]
+           end
+         end
+
+         # Parse stdout and stderr of a given deploy run and get the list of tasks with their status
+         # [API] - This method is mandatory.
+         #
+         # Parameters::
+         # * *stdout* (String): stdout to be parsed
+         # * *stderr* (String): stderr to be parsed
+         # Result::
+         # * Array< Hash<Symbol,Object> >: List of task properties. The following properties should be returned, among free ones:
+         #   * *name* (String): Task name
+         #   * *status* (Symbol): Task status. Should be one of:
+         #     * *:changed*: The task has been changed
+         #     * *:identical*: The task has not been changed
+         #   * *diffs* (String): Differences, if any
+         def parse_deploy_output(stdout, stderr)
+           tasks = []
+           current_task = nil
+           stdout.split("\n").each do |line|
+             # Remove control chars and spaces around
+             case line.gsub(/\e\[[^\x40-\x7E]*[\x40-\x7E]/, '').strip
+             when /^\* (\w+\[[^\]]+\]) action (.+)$/
+               # New task
+               task_name = $1
+               task_action = $2
+               current_task = {
+                 name: task_name,
+                 action: task_action,
+                 status: :identical
+               }
+               tasks << current_task
+             when /^- (.+)$/
+               # Diff on the current task
+               diff_description = $1
+               unless current_task.nil?
+                 current_task[:diffs] = '' unless current_task.key?(:diffs)
+                 current_task[:diffs] << "#{diff_description}\n"
+                 current_task[:status] = :changed
+               end
+             end
+           end
+           tasks
+         end
+
+         # Get the list of impacted nodes and services from a files diff.
+         # [API] - This method is optional
+         #
+         # Parameters::
+         # * *files_diffs* (Hash< String, Hash< Symbol, Object > >): List of diffs info, per file name having a diff. Diffs info have the following properties:
+         #   * *moved_to* (String): The new file path, in case it has been moved [optional]
+         #   * *diff* (String): The diff content
+         # Result::
+         # * Array<String>: The list of nodes impacted by this diff
+         # * Array<String>: The list of services impacted by this diff
+         # * Boolean: Are there some files that have a global impact (meaning all nodes are potentially impacted by this diff)?
+         def impacts_from(files_diffs)
+           impacted_nodes = []
+           impacted_services = []
+           # List of impacted [cookbook, recipe]
+           # Array< [Symbol, Symbol] >
+           impacted_recipes = []
+           impacted_global = false
+           files_diffs.keys.sort.each do |impacted_file|
+             if impacted_file =~ /^policyfiles\/([^\/]+)\.rb$/
+               log_debug "[#{impacted_file}] - Impacted service: #{$1}"
+               impacted_services << $1
+             elsif impacted_file =~ /^policyfiles\/([^\/]+)\.lock.json$/
+               log_debug "[#{impacted_file}] - Impacted service: #{$1}"
+               impacted_services << $1
+             elsif impacted_file =~ /^nodes\/([^\/]+)\.json/
+               log_debug "[#{impacted_file}] - Impacted node: #{$1}"
+               impacted_nodes << $1
+             else
+               cookbook_path = known_cookbook_paths.find { |cookbooks_path| impacted_file =~ /^#{Regexp.escape(cookbooks_path)}\/.+$/ }
+               if cookbook_path.nil?
+                 # Global file
+                 log_debug "[#{impacted_file}] - Global file impacted"
+                 impacted_global = true
+               else
+                 # File belonging to a cookbook
+                 cookbook_name, file_path = impacted_file.match(/^#{cookbook_path}\/(\w+)\/(.+)$/)[1..2]
+                 cookbook = cookbook_name.to_sym
+                 # Small helper to register a recipe
+                 register = proc do |source, recipe_name, cookbook_name: cookbook|
+                   cookbook_name = cookbook_name.to_sym if cookbook_name.is_a?(String)
+                   log_debug "[#{impacted_file}] - Impacted recipe from #{source}: #{cookbook_name}::#{recipe_name}"
+                   impacted_recipes << [cookbook_name, recipe_name.to_sym]
+                 end
+                 case file_path
+                 when /recipes\/(.+)\.rb/
+                   register.call('direct', $1)
+                 when /attributes\/.+\.rb/, 'metadata.rb'
+                   # Consider all recipes are impacted
+                   Dir.glob("#{@repository_path}/#{cookbook_path}/#{cookbook}/recipes/*.rb") do |recipe_path|
+                     register.call('attributes', File.basename(recipe_path, '.rb'))
+                   end
+                 when /(templates|files)\/(.+)/
+                   # Find recipes using this file name
+                   included_file = File.basename($2)
+                   template_regexp = /["']#{Regexp.escape(included_file)}["']/
+                   Dir.glob("#{@repository_path}/#{cookbook_path}/#{cookbook}/recipes/*.rb") do |recipe_path|
+                     register.call("included file #{included_file}", File.basename(recipe_path, '.rb')) if File.read(recipe_path) =~ template_regexp
+                   end
+                 when /resources\/(.+)/
+                   # Find any recipe using this resource
+                   included_resource = "#{cookbook}_#{File.basename($1, '.rb')}"
+                   resource_regexp = /(\W|^)#{Regexp.escape(included_resource)}(\W|$)/
+                   known_cookbook_paths.each do |cookbooks_path|
+                     Dir.glob("#{@repository_path}/#{cookbooks_path}/**/recipes/*.rb") do |recipe_path|
+                       if File.read(recipe_path) =~ resource_regexp
+                         cookbook_name, recipe_name = recipe_path.match(/#{cookbooks_path}\/(\w+)\/recipes\/(\w+)\.rb/)[1..2]
+                         register.call("included resource #{included_resource}", recipe_name, cookbook_name: cookbook_name)
+                       end
+                     end
+                   end
+                 when /libraries\/(.+)/
+                   # Find any recipe using methods from this library
+                   lib_methods_regexps = File.read("#{@repository_path}/#{impacted_file}").scan(/(\W|^)def\s+(\w+)(\W|$)/).map { |_grp1, method_name, _grp2| /(\W|^)#{Regexp.escape(method_name)}(\W|$)/ }
+                   known_cookbook_paths.each do |cookbooks_path|
+                     Dir.glob("#{@repository_path}/#{cookbooks_path}/**/recipes/*.rb") do |recipe_path|
+                       file_content = File.read(recipe_path)
+                       found_lib_regexp = lib_methods_regexps.find { |regexp| file_content =~ regexp }
+                       unless found_lib_regexp.nil?
+                         cookbook_name, recipe_name = recipe_path.match(/#{cookbooks_path}\/(\w+)\/recipes\/(\w+)\.rb/)[1..2]
+                         register.call("included library helper #{found_lib_regexp.source[6..-7]}", recipe_name, cookbook_name: cookbook_name)
+                       end
+                     end
+                   end
+                 when 'README.md', 'README.rdoc', 'CHANGELOG.md', '.rubocop.yml'
+                   # Ignore them
+                 else
+                   log_warn "[#{impacted_file}] - Unknown impact for cookbook file belonging to #{cookbook}"
+                   # Consider all recipes are impacted by default
+                   Dir.glob("#{@repository_path}/#{cookbook_path}/#{cookbook}/recipes/*.rb") do |recipe_path|
+                     register.call('attributes', File.basename(recipe_path, '.rb'))
+                   end
+                 end
+               end
+             end
+           end
+
+           # Devise the impacted services from the impacted recipes we just found.
+           impacted_recipes.uniq!
+           log_debug "* #{impacted_recipes.size} impacted recipes:\n#{impacted_recipes.map { |(cookbook, recipe)| "#{cookbook}::#{recipe}" }.sort.join("\n")}"
+
+           recipes_tree = RecipesTreeBuilder.new(@config, self).full_recipes_tree
+           [
+             impacted_nodes,
+             (
+               impacted_services +
+                 # Gather the list of services using the impacted recipes
+                 impacted_recipes.map do |(cookbook, recipe)|
+                   recipe_info = recipes_tree.dig cookbook, recipe
+                   recipe_info.nil? ? [] : recipe_info[:used_by_policies]
+                 end.flatten
+             ).sort.uniq,
+             impacted_global
+           ]
+         end
+
+         # Return the list of possible cookbook paths from this repository only.
+         # Returned paths are relative to the repository path.
+         #
+         # Result::
+         # * Array<String>: Known cookbook paths
+         def known_cookbook_paths
+           # Keep a cache of it for performance.
+           unless defined?(@cookbook_paths)
+             config_file = "#{@repository_path}/config.rb"
+             @cookbook_paths = (
+               ['cookbooks'] +
+                 if File.exist?(config_file)
+                   # Read the knife configuration to get cookbook paths
+                   dsl_parser = DslParser.new
+                   dsl_parser.parse(config_file)
+                   cookbook_path_call = dsl_parser.calls.find { |call_info| call_info[:method] == :cookbook_path }
+                   cookbook_path_call.nil? ? [] : cookbook_path_call[:args].first
+                 else
+                   []
+                 end
+             ).
+               map do |dir|
+                 # Only keep dirs that actually exist and are part of our repository
+                 full_path = dir.start_with?('/') ? dir : File.expand_path("#{@repository_path}/#{dir}")
+                 full_path.start_with?(@repository_path) && File.exist?(full_path) ? full_path.gsub("#{@repository_path}/", '') : nil
+               end.
+               compact.
+               sort.
+               uniq
+           end
+           @cookbook_paths
+         end
+
+         private
+
+         # Return the JSON associated to a node
+         #
+         # Parameters::
+         # * *node* (String): The node to search for
+         # Result::
+         # * Hash: JSON object of this node
+         def json_for(node)
+           JSON.parse(File.read("#{@repository_path}/nodes/#{node}.json"))
+         end
+
+       end
+
+     end
+
+   end
+
+ end
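
Two configuration points of this new serverless_chef platform handler are worth illustrating. First, the chef_versions.yml file it reads (also added as 3-line fixtures under the specs) pins the Chef Workstation version used at setup time and the Chef Infra Client version installed on nodes at deploy time; a minimal sketch, with illustrative version numbers:

  ---
  workstation: '21.4'
  client: '16.13'

Second, the helpers_including_recipes DSL extension lets a platforms' hpc_config.rb declare that a cookbook library helper pulls in given recipes, so that impacts_from can follow those indirect dependencies when computing impacted services from a diff; a hypothetical declaration (helper and recipe names are invented for the example):

  helpers_including_recipes(
    install_my_web_app: ['my_cookbook::web_app']
  )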