bulk_data_test_kit 0.10.1 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. checksums.yaml +4 -4
  2. data/config/presets/bulk_data_v101_bulk_data_server.json +43 -0
  3. data/config/presets/bulk_data_v101_inferno_reference_server.json +43 -0
  4. data/config/presets/bulk_data_v200_bulk_data_server.json +43 -0
  5. data/config/presets/bulk_data_v200_inferno_reference_server.json +43 -0
  6. data/lib/bulk_data_test_kit/igs/put_ig_package_dot_tgz_here +0 -0
  7. data/lib/bulk_data_test_kit/metadata.rb +45 -0
  8. data/lib/bulk_data_test_kit/requirements/bulk-data-test-kit_out_of_scope_requirements.csv +1 -0
  9. data/lib/bulk_data_test_kit/requirements/bulk-data-test-kit_requirements.csv +465 -0
  10. data/lib/bulk_data_test_kit/requirements/generated/bulk-data-test-kit_requirements_coverage.csv +442 -0
  11. data/lib/bulk_data_test_kit/requirements/hl7.fhir.uv.bulkdata_2.0.0_reqs.xlsx +0 -0
  12. data/lib/bulk_data_test_kit/v1.0.1/bulk_data_smart_backend_services_v101_group.rb +21 -0
  13. data/lib/bulk_data_test_kit/v1.0.1/bulk_data_smart_discovery_v101_contents_test.rb +77 -0
  14. data/lib/bulk_data_test_kit/v1.0.1/bulk_data_smart_discovery_v101_group.rb +15 -0
  15. data/lib/bulk_data_test_kit/v1.0.1/bulk_data_test_suite.rb +3 -5
  16. data/lib/bulk_data_test_kit/{v1.0.1/bulk_data_smart_backend_services_group.rb → v2.0.0/bulk_data_smart_backend_services_v200_group.rb} +3 -3
  17. data/lib/bulk_data_test_kit/v2.0.0/bulk_data_test_suite.rb +3 -5
  18. data/lib/bulk_data_test_kit/v2.0.0_client/bulk_data_client_delete_group.rb +27 -0
  19. data/lib/bulk_data_test_kit/v2.0.0_client/bulk_data_client_delete_test.rb +29 -0
  20. data/lib/bulk_data_test_kit/v2.0.0_client/bulk_data_client_delete_wait_test.rb +44 -0
  21. data/lib/bulk_data_test_kit/v2.0.0_client/bulk_data_client_export_group.rb +31 -0
  22. data/lib/bulk_data_test_kit/v2.0.0_client/bulk_data_client_export_wait_test.rb +45 -0
  23. data/lib/bulk_data_test_kit/v2.0.0_client/bulk_data_client_kick_off_test.rb +45 -0
  24. data/lib/bulk_data_test_kit/v2.0.0_client/bulk_data_client_output_test.rb +28 -0
  25. data/lib/bulk_data_test_kit/v2.0.0_client/bulk_data_client_status_test.rb +29 -0
  26. data/lib/bulk_data_test_kit/v2.0.0_client/bulk_data_client_test_suite.rb +92 -0
  27. data/lib/bulk_data_test_kit/v2.0.0_client/docs/suite_description.md +33 -0
  28. data/lib/bulk_data_test_kit/v2.0.0_client/endpoints/delete.rb +22 -0
  29. data/lib/bulk_data_test_kit/v2.0.0_client/endpoints/kick_off.rb +36 -0
  30. data/lib/bulk_data_test_kit/v2.0.0_client/endpoints/output.rb +31 -0
  31. data/lib/bulk_data_test_kit/v2.0.0_client/endpoints/status.rb +39 -0
  32. data/lib/bulk_data_test_kit/v2.0.0_client/export_types.rb +9 -0
  33. data/lib/bulk_data_test_kit/v2.0.0_client/postman/delete.postman_collection.json +133 -0
  34. data/lib/bulk_data_test_kit/v2.0.0_client/postman/system_export.postman_collection.json +181 -0
  35. data/lib/bulk_data_test_kit/v2.0.0_client/tags.rb +13 -0
  36. data/lib/bulk_data_test_kit/v2.0.0_client/urls.rb +45 -0
  37. data/lib/bulk_data_test_kit/version.rb +2 -1
  38. data/lib/bulk_data_test_kit.rb +3 -1
  39. data/lib/inferno_requirements_tools/ext/inferno_core/runnable.rb +22 -0
  40. data/lib/inferno_requirements_tools/rake/rakefile_template +37 -0
  41. data/lib/inferno_requirements_tools/tasks/collect_requirements.rb +233 -0
  42. data/lib/inferno_requirements_tools/tasks/requirements_coverage.rb +283 -0
  43. data/lib/requirements_config.yaml +14 -0
  44. data/lib/template_requirements_config.yaml +11 -0
  45. metadata +54 -15
@@ -0,0 +1,233 @@
+ # frozen_string_literal: true
+
+ require 'csv'
+ require 'roo'
+ require 'yaml'
+ require 'pry'
+
+ module InfernoRequirementsTools
+   module Tasks
+     # This class manages the collection of requirements details from
+     # requirements planning Excel workbooks into a CSV representation.
+     # Currently splits out Requirements and Planned Not Tested Requirements
+     # into two separate files.
+     #
+     # The `run` method will generate the files.
+     # The `run_check` method will check whether the previously generated files are up-to-date.
+     class CollectRequirements
+       VERSION = '0.1.0' # update when making meaningful changes to this class for tracking used versions
+       CONFIG = YAML.load_file(File.join('lib', 'requirements_config.yaml'))
+
+       TEST_KIT_ID = CONFIG['test_kit_id']
+       INPUT_SETS = CONFIG['requirement_sets']
+
+       # Derivative constants
+       TEST_KIT_CODE_FOLDER = TEST_KIT_ID.gsub('-', '_')
+       INPUT_HEADERS =
+         [
+           'ID*',
+           'URL*',
+           'Requirement*',
+           'Conformance*',
+           'Actor*',
+           'Sub-Requirement(s)',
+           'Conditionality',
+           'Verifiable?',
+           'Verifiability Details',
+           'Planning To Test?',
+           'Planning To Test Details'
+         ].freeze
+       REQUIREMENTS_OUTPUT_HEADERS =
+         [
+           'Req Set',
+           'ID',
+           'URL',
+           'Requirement',
+           'Conformance',
+           'Actor',
+           'Sub-Requirement(s)',
+           'Conditionality'
+         ].freeze
+       REQUIREMENTS_OUTPUT_FILE_NAME = "#{TEST_KIT_ID}_requirements.csv".freeze
+       REQUIREMENTS_OUTPUT_FILE =
+         File.join('lib', TEST_KIT_CODE_FOLDER, 'requirements', REQUIREMENTS_OUTPUT_FILE_NAME).freeze
+       PLANNED_NOT_TESTED_OUTPUT_HEADERS = ['Req Set', 'ID', 'Reason', 'Details'].freeze
+       PLANNED_NOT_TESTED_OUTPUT_FILE_NAME = "#{TEST_KIT_ID}_out_of_scope_requirements.csv".freeze
+       PLANNED_NOT_TESTED_OUTPUT_FILE =
+         File.join('lib', TEST_KIT_CODE_FOLDER, 'requirements', PLANNED_NOT_TESTED_OUTPUT_FILE_NAME).freeze
+
+       def available_input_worksheets
+         @available_input_worksheets ||= Dir.glob(File.join(@input_directory, '*.xlsx')).reject { |f| f.include?('~$') }
+       end
+
+       # Of the form:
+       # {
+       #   req_set_id_1: [row1, row2, row3, ...],
+       #   req_set_id_2: [row1, row2, row3, ...]
+       # }
+       def input_requirement_sets
+         @input_requirement_sets ||= INPUT_SETS.each_with_object({}) do |req_set_config, req_sets_hash|
+           req_set_id = req_set_config['id']
+           req_set_file = available_input_worksheets.find { |worksheet_file| worksheet_file.include?(req_set_id) }
+
+           req_sets_hash[req_set_id] = parse_requirement_set(req_set_file, req_set_config) unless req_set_file.nil?
+         end
+       end
+
+       def parse_requirement_set(req_set_file, req_set_config)
+         CSV.parse(Roo::Spreadsheet.open(req_set_file).sheet('Requirements').to_csv,
+                   headers: true).map do |row|
+           row_hash = row.to_h.slice(*INPUT_HEADERS)
+           req_set_config['actor_map'].each do |actor_mapping|
+             row_hash['Actor*']&.gsub!(actor_mapping['spec'], actor_mapping['test_kit'])
+           end
+           row_hash
+         end
+       end
+
+       def new_requirements_csv
+         @new_requirements_csv ||=
+           CSV.generate(+"\xEF\xBB\xBF") do |csv| # start with an unnecessary BOM to make viewing in Excel easier
+             csv << REQUIREMENTS_OUTPUT_HEADERS
+
+             input_requirement_sets.each do |req_set_id, input_rows|
+               input_rows.each do |input_row| # NOTE: use row order from source file
+                 csv << REQUIREMENTS_OUTPUT_HEADERS.map do |header|
+                   header == 'Req Set' ? req_set_id : input_row[header] || input_row["#{header}*"]
+                 end
+               end
+             end
+           end
+       end
+
+       def old_requirements_csv
+         @old_requirements_csv ||= File.read(REQUIREMENTS_OUTPUT_FILE)
+       end
+
+       def new_planned_not_tested_csv
+         @new_planned_not_tested_csv ||=
+           CSV.generate(+"\xEF\xBB\xBF") do |csv| # start with an unnecessary BOM to make viewing in Excel easier
+             csv << PLANNED_NOT_TESTED_OUTPUT_HEADERS
+
+             input_requirement_sets.each do |req_set_id, input_rows|
+               input_rows.each do |row|
+                 if spreadsheet_value_falsy?(row['Verifiable?'])
+                   csv << [req_set_id, row['ID*'], 'Not Verifiable', row['Verifiability Details']]
+                 elsif spreadsheet_value_falsy?(row['Planning To Test?'])
+                   csv << [req_set_id, row['ID*'], 'Not Tested', row['Planning To Test Details']]
+                 end
+               end
+             end
+           end
+       end
+
+       def old_planned_not_tested_csv
+         @old_planned_not_tested_csv ||= File.read(PLANNED_NOT_TESTED_OUTPUT_FILE)
+       end
+
+       def run(input_directory)
+         @input_directory = input_directory
+         check_presence_of_input_files
+
+         update_requirements =
+           if File.exist?(REQUIREMENTS_OUTPUT_FILE)
+             if old_requirements_csv == new_requirements_csv
+               puts "'#{REQUIREMENTS_OUTPUT_FILE_NAME}' file is up to date."
+               false
+             else
+               puts 'Requirements set has changed.'
+               true
+             end
+           else
+             puts "No existing #{REQUIREMENTS_OUTPUT_FILE_NAME}."
+             true
+           end
+
+         if update_requirements
+           puts "Writing to file #{REQUIREMENTS_OUTPUT_FILE}..."
+           File.write(REQUIREMENTS_OUTPUT_FILE, new_requirements_csv, encoding: Encoding::UTF_8)
+         end
+
+         update_planned_not_tested =
+           if File.exist?(PLANNED_NOT_TESTED_OUTPUT_FILE)
+             if old_planned_not_tested_csv == new_planned_not_tested_csv
+               puts "'#{PLANNED_NOT_TESTED_OUTPUT_FILE_NAME}' file is up to date."
+               false
+             else
+               puts 'Planned Not Tested Requirements set has changed.'
+               true
+             end
+           else
+             puts "No existing #{PLANNED_NOT_TESTED_OUTPUT_FILE_NAME}."
+             true
+           end
+
+         if update_planned_not_tested
+           puts "Writing to file #{PLANNED_NOT_TESTED_OUTPUT_FILE}..."
+           File.write(PLANNED_NOT_TESTED_OUTPUT_FILE, new_planned_not_tested_csv, encoding: Encoding::UTF_8)
+         end
+
+         puts 'Done.'
+       end
+
+       def run_check(input_directory)
+         @input_directory = input_directory
+         check_presence_of_input_files
+
+         requirements_ok =
+           if File.exist?(REQUIREMENTS_OUTPUT_FILE)
+             if old_requirements_csv == new_requirements_csv
+               puts "'#{REQUIREMENTS_OUTPUT_FILE_NAME}' file is up to date."
+               true
+             else
+               puts "#{REQUIREMENTS_OUTPUT_FILE_NAME} file is out of date."
+               false
+             end
+           else
+             puts "No existing #{REQUIREMENTS_OUTPUT_FILE_NAME} file."
+             false
+           end
+
+         planned_not_tested_requirements_ok =
+           if File.exist?(PLANNED_NOT_TESTED_OUTPUT_FILE)
+             if old_planned_not_tested_csv == new_planned_not_tested_csv
+               puts "'#{PLANNED_NOT_TESTED_OUTPUT_FILE_NAME}' file is up to date."
+               true
+             else
+               puts "#{PLANNED_NOT_TESTED_OUTPUT_FILE_NAME} file is out of date."
+               false
+             end
+           else
+             puts "No existing #{PLANNED_NOT_TESTED_OUTPUT_FILE_NAME} file."
+             false
+           end
+
+         return if planned_not_tested_requirements_ok && requirements_ok
+
+         puts <<~MESSAGE
+           Check Failed. To resolve, run:
+
+             bundle exec rake "requirements:collect[<input_directory>]"
+
+         MESSAGE
+         exit(1)
+       end
+
+       def check_presence_of_input_files
+         INPUT_SETS.each do |req_set_config|
+           next if input_requirement_sets.key?(req_set_config['id'])
+
+           puts %(
+             Could not find input file for set #{req_set_config['id']} in directory #{@input_directory}. Aborting requirements
+             collection...
+           )
+           exit(1)
+         end
+       end
+
+       def spreadsheet_value_falsy?(str)
+         str&.downcase == 'no' || str&.downcase == 'false'
+       end
+     end
+   end
+ end
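
The two entry points above are meant to be driven from Rake; the release also ships a `rakefile_template` (file 40 in the list). As a rough sketch of that wiring, assuming task names matching the `requirements:collect[<input_directory>]` invocation printed by `run_check`; the shipped template may differ:

    # Hypothetical Rakefile wiring for the task above; task names are assumptions.
    require_relative 'lib/inferno_requirements_tools/tasks/collect_requirements'

    namespace :requirements do
      desc 'Collect requirements from Excel workbooks into CSV files'
      task :collect, [:input_directory] do |_t, args|
        InfernoRequirementsTools::Tasks::CollectRequirements.new.run(args[:input_directory])
      end
    end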
@@ -0,0 +1,283 @@
+ # frozen_string_literal: true
+
+ require 'csv'
+ require 'yaml'
+ require_relative '../ext/inferno_core/runnable'
+
+ module InfernoRequirementsTools
+   module Tasks
+     # This class manages the mapping of test kit tests to requirements that they verify
+     # and creates a CSV file with the tests that cover each requirement.
+     # It expects a CSV file in the repo at `lib/[test kit id]/requirements/[test kit id]_requirements.csv`
+     # that serves as the source of the requirement set for the test kit. The requirements in
+     # this file are identified by a requirement set and an ID, and tests, groups, and suites
+     # within the test kit can claim that they verify a requirement by including a reference
+     # to that requirement in the form <requirement set>@<id> in their `verifies_requirements` field.
+     # Requirements that are out of scope can be listed in a companion file
+     # `lib/[test kit id]/requirements/[test kit id]_out_of_scope_requirements.csv`.
+     #
+     # The `run` method generates a CSV file at
+     # `lib/[test kit id]/requirements/generated/[test kit id]_requirements_coverage.csv`.
+     # This file will be identical to the input spreadsheet, plus an additional column which holds a comma-separated
+     # list of Inferno test IDs that test each requirement. These test IDs are Inferno short-form IDs that represent
+     # the position of the test within its group and suite. For example, the fifth test in the second group will have
+     # an ID of 2.05. This ID is also shown in the Inferno web UI.
+     #
+     # The `run_check` method will check whether the previously generated file is up-to-date.
+     class RequirementsCoverage
+       VERSION = '0.1.0' # update when making meaningful changes to this class for tracking used versions
+       CONFIG = YAML.load_file(File.join('lib', 'requirements_config.yaml'))
+
+       TEST_KIT_ID = CONFIG['test_kit_id']
+       TEST_SUITES = CONFIG['suites'].map do |test_suite|
+         Object.const_get(test_suite['class_name'])
+       end
+
+       SUITE_ID_TO_ACTOR_MAP = CONFIG['suites'].each_with_object({}) do |test_suite, hash|
+         hash[test_suite['id']] = test_suite['suite_actor']
+       end
+
+       # Derivative constants
+       TEST_KIT_CODE_FOLDER = TEST_KIT_ID.gsub('-', '_')
+       INPUT_HEADERS = [
+         'Req Set',
+         'ID',
+         'URL',
+         'Requirement',
+         'Conformance',
+         'Actor',
+         'Sub-Requirement(s)',
+         'Conditionality'
+       ].freeze
+       SHORT_ID_HEADER = 'Short ID(s)'
+       FULL_ID_HEADER = 'Full ID(s)'
+       INPUT_FILE_NAME = "#{TEST_KIT_ID}_requirements.csv".freeze
+       INPUT_FILE = File.join('lib', TEST_KIT_CODE_FOLDER, 'requirements', INPUT_FILE_NAME).freeze
+       NOT_TESTED_FILE_NAME = "#{TEST_KIT_ID}_out_of_scope_requirements.csv".freeze
+       NOT_TESTED_FILE = File.join('lib', TEST_KIT_CODE_FOLDER, 'requirements', NOT_TESTED_FILE_NAME).freeze
+       OUTPUT_HEADERS = INPUT_HEADERS + TEST_SUITES.flat_map do |suite|
+         ["#{suite.title} #{SHORT_ID_HEADER}", "#{suite.title} #{FULL_ID_HEADER}"]
+       end
+       OUTPUT_FILE_NAME = "#{TEST_KIT_ID}_requirements_coverage.csv".freeze
+       OUTPUT_FILE_DIRECTORY = File.join('lib', TEST_KIT_CODE_FOLDER, 'requirements', 'generated')
+       OUTPUT_FILE = File.join(OUTPUT_FILE_DIRECTORY, OUTPUT_FILE_NAME).freeze
+
+       def input_rows
+         @input_rows ||=
+           CSV.parse(File.open(INPUT_FILE, 'r:bom|utf-8'), headers: true).map do |row|
+             row.to_h.slice(*INPUT_HEADERS)
+           end
+       end
+
+       def not_tested_requirements_map
+         @not_tested_requirements_map ||= load_not_tested_requirements
+       end
+
+       def load_not_tested_requirements
+         return {} unless File.exist?(NOT_TESTED_FILE)
+
+         not_tested_requirements = {}
+         CSV.parse(File.open(NOT_TESTED_FILE, 'r:bom|utf-8'), headers: true).each do |row|
+           row_hash = row.to_h
+           not_tested_requirements["#{row_hash['Req Set']}@#{row_hash['ID']}"] = row_hash
+         end
+
+         not_tested_requirements
+       end
+
+       # Of the form:
+       # {
+       #   'req-id-1': [
+       #     { short_id: 'short-id-1', full_id: 'long-id-1', suite_id: 'suite-id-1' },
+       #     { short_id: 'short-id-2', full_id: 'long-id-2', suite_id: 'suite-id-2' }
+       #   ],
+       #   'req-id-2': [{ short_id: 'short-id-3', full_id: 'long-id-3', suite_id: 'suite-id-3' }],
+       #   ...
+       # }
+       def inferno_requirements_map
+         @inferno_requirements_map ||= TEST_SUITES.each_with_object({}) do |suite, requirements_map|
+           serialize_requirements(suite, 'suite', suite.id, requirements_map)
+           suite.groups.each do |group|
+             map_group_requirements(group, suite.id, requirements_map)
+           end
+         end
+       end
+
+       def new_csv
+         @new_csv ||=
+           CSV.generate(+"\xEF\xBB\xBF") do |csv|
+             csv << OUTPUT_HEADERS
+             input_rows.each do |row| # NOTE: use row order from source file
+               next if row['Conformance'] == 'DEPRECATED' # filter out deprecated rows
+
+               TEST_SUITES.each do |suite|
+                 suite_actor = SUITE_ID_TO_ACTOR_MAP[suite.id]
+                 if row['Actor']&.include?(suite_actor)
+                   add_suite_tests_for_row(row, suite)
+                 else
+                   row["#{suite.title} #{SHORT_ID_HEADER}"] = 'NA'
+                   row["#{suite.title} #{FULL_ID_HEADER}"] = 'NA'
+                 end
+               end
+               csv << row.values
+             end
+           end
+       end
+
+       def add_suite_tests_for_row(row, suite)
+         set_and_req_id = "#{row['Req Set']}@#{row['ID']}"
+         items = get_items_for_requirement(set_and_req_id, suite)
+         short_ids = items[0]
+         full_ids = items[1]
+         if short_ids.blank? && not_tested_requirements_map.key?(set_and_req_id)
+           row["#{suite.title} #{SHORT_ID_HEADER}"] = 'Not Tested'
+           row["#{suite.title} #{FULL_ID_HEADER}"] = 'Not Tested'
+         else
+           row["#{suite.title} #{SHORT_ID_HEADER}"] = short_ids&.join(', ')
+           row["#{suite.title} #{FULL_ID_HEADER}"] = full_ids&.join(', ')
+         end
+       end
+
+       def get_items_for_requirement(set_and_req_id, suite)
+         suite_requirement_items = inferno_requirements_map[set_and_req_id]&.filter do |item|
+           item[:suite_id] == suite.id
+         end
+         [
+           suite_requirement_items&.map { |item| item[:short_id] },
+           suite_requirement_items&.map { |item| item[:full_id] }
+         ]
+       end
+
+       def input_requirement_ids
+         @input_requirement_ids ||= input_rows.map { |row| "#{row['Req Set']}@#{row['ID']}" }
+       end
+
+       # The requirements present in Inferno that aren't in the input spreadsheet
+       def unmatched_requirements_map
+         @unmatched_requirements_map ||= inferno_requirements_map.except(*input_requirement_ids)
+       end
+
+       def old_csv
+         @old_csv ||= File.read(OUTPUT_FILE)
+       end
+
+       def run
+         unless File.exist?(INPUT_FILE)
+           puts "Could not find input file: #{INPUT_FILE}. Aborting requirements coverage generation..."
+           exit(1)
+         end
+
+         if unmatched_requirements_map.any?
+           puts "WARNING: The following requirements indicated in the test kit are not present in #{INPUT_FILE_NAME}"
+           output_requirements_map_table(unmatched_requirements_map)
+         end
+
+         if File.exist?(OUTPUT_FILE)
+           if old_csv == new_csv
+             puts "'#{OUTPUT_FILE_NAME}' file is up to date."
+             return
+           else
+             puts 'Requirements coverage has changed.'
+           end
+         else
+           puts "No existing #{OUTPUT_FILE_NAME}."
+         end
+
+         puts "Writing to file #{OUTPUT_FILE}..."
+         FileUtils.mkdir_p(OUTPUT_FILE_DIRECTORY)
+         File.write(OUTPUT_FILE, new_csv)
+         puts 'Done.'
+       end
+
+       def run_check
+         unless File.exist?(INPUT_FILE)
+           puts "Could not find input file: #{INPUT_FILE}. Aborting requirements coverage check..."
+           exit(1)
+         end
+
+         if unmatched_requirements_map.any?
+           puts "The following requirements indicated in the test kit are not present in #{INPUT_FILE_NAME}"
+           output_requirements_map_table(unmatched_requirements_map)
+         end
+
+         if File.exist?(OUTPUT_FILE)
+           if old_csv == new_csv
+             puts "'#{OUTPUT_FILE_NAME}' file is up to date."
+             return unless unmatched_requirements_map.any?
+           else
+             puts <<~MESSAGE
+               #{OUTPUT_FILE_NAME} file is out of date.
+               To regenerate the file, run:
+
+                 bundle exec rake requirements:generate_coverage
+
+             MESSAGE
+           end
+         else
+           puts <<~MESSAGE
+             No existing #{OUTPUT_FILE_NAME} file.
+             To generate the file, run:
+
+               bundle exec rake requirements:generate_coverage
+
+           MESSAGE
+         end
+
+         puts 'Check failed.'
+         exit(1)
+       end
+
+       def map_group_requirements(group, suite_id, requirements_map)
+         serialize_requirements(group, group.short_id, suite_id, requirements_map)
+         group.tests&.each { |test| serialize_requirements(test, test.short_id, suite_id, requirements_map) }
+         group.groups&.each { |subgroup| map_group_requirements(subgroup, suite_id, requirements_map) }
+       end
+
+       def serialize_requirements(runnable, short_id, suite_id, requirements_map)
+         runnable.verifies_requirements&.each do |requirement_id|
+           requirement_id_string = requirement_id.to_s
+
+           requirements_map[requirement_id_string] ||= []
+           requirements_map[requirement_id_string] << { short_id:, full_id: runnable.id, suite_id: }
+         end
+       end
+
+       # Output the requirements in the map like so:
+       #
+       # requirement_id | short_id   | full_id
+       # ---------------+------------+----------
+       # req-id-1       | short-id-1 | full-id-1
+       # req-id-2       | short-id-2 | full-id-2
+       #
+       def output_requirements_map_table(requirements_map)
+         headers = %w[requirement_id short_id full_id]
+         col_widths = headers.map(&:length)
+         col_widths[0] = [col_widths[0], requirements_map.keys.map(&:length).max].max
+         col_widths[1] = ([col_widths[1]] + requirements_map.values.flatten.map { |item| item[:short_id].length }).max
+         col_widths[2] = ([col_widths[2]] + requirements_map.values.flatten.map { |item| item[:full_id].length }).max
+         col_widths.map! { |width| width + 3 } # widen the columns in place for padding
+
+         puts [
+           headers[0].ljust(col_widths[0]),
+           headers[1].ljust(col_widths[1]),
+           headers[2].ljust(col_widths[2])
+         ].join(' | ')
+         puts col_widths.map { |width| '-' * width }.join('-+-')
+         output_requirements_map_table_contents(requirements_map, col_widths)
+         puts
+       end
+
+       def output_requirements_map_table_contents(requirements_map, col_widths)
+         requirements_map.each do |requirement_id, runnables|
+           runnables.each do |runnable|
+             puts [
+               requirement_id.ljust(col_widths[0]),
+               runnable[:short_id].ljust(col_widths[1]),
+               runnable[:full_id].ljust(col_widths[2])
+             ].join(' | ')
+           end
+         end
+       end
+     end
+   end
+ end
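
As the class comment explains, a runnable claims a requirement by listing a `<requirement set>@<id>` reference in its `verifies_requirements` field (the accessor comes from the `ext/inferno_core/runnable.rb` extension, file 39 above). A minimal sketch with a made-up requirement ID; real IDs live in the shipped bulk-data-test-kit_requirements.csv:

    # Illustrative only: the group and the requirement ID below are hypothetical.
    module BulkDataTestKit
      class ExampleExportGroup < Inferno::TestGroup
        id :example_export_group
        title 'Example group claiming requirement coverage'

        # <requirement set>@<id>, matching a row in the requirements CSV
        verifies_requirements 'hl7.fhir.uv.bulkdata_2.0.0@1'
      end
    end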
@@ -0,0 +1,14 @@
+ test_kit_id: bulk-data-test-kit
+
+ suites:
+   # - id: bulk_data_v101
+   #   class_name: BulkDataTestKit::BulkDataV101::BulkDataTestSuite
+   #   suite_actor: Server
+   - id: bulk_data_v200
+     class_name: BulkDataTestKit::BulkDataV200::BulkDataTestSuite
+     suite_actor: Server
+
+ requirement_sets:
+   - id: hl7.fhir.uv.bulkdata_2.0.0
+     actor_map:
+       - { spec: Server, test_kit: Server }
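
Given this config, the derivative constants in the tasks above resolve to the paths that appear in this release's file list. A quick runnable sketch of the same derivation, assuming it is executed from the repository root:

    require 'yaml'

    config = YAML.load_file(File.join('lib', 'requirements_config.yaml'))
    code_folder = config['test_kit_id'].gsub('-', '_') # => "bulk_data_test_kit"
    puts File.join('lib', code_folder, 'requirements', "#{config['test_kit_id']}_requirements.csv")
    # => "lib/bulk_data_test_kit/requirements/bulk-data-test-kit_requirements.csv"
    # (entry 9 in the file list above, under the gem's data/ directory)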
@@ -0,0 +1,11 @@
+ test_kit_id: # the name of the gem. E.g., onc_certification_g31_test_kit
+
+ suites:
+   - id: # The id of the Inferno test suite. E.g., bulk_data_v200
+     class_name: # The class name of the suite. E.g., OncCertificationG31TestKit::Suite
+     suite_actor: # The actor tested by the suite. Suite requirements will have this actor in the 'actors' column of this repository's requirements csv file.
+
+ requirement_sets:
+   - id: # "Id" in the metadata tab of the requirements Excel sheet. E.g., 170.315(g)(31)_hti-2-proposal
+     actor_map: # when collecting requirements from the source spreadsheets, this map will be used to translate the Actor column
+       - {spec: Provider, test_kit: Provider} # map of the actor in the source spec to the actor to use in this repository's requirements csv file. The "spec" string will be replaced by the "test_kit" string.