inferno_core 0.6.8 → 0.6.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. checksums.yaml +4 -4
  2. data/lib/inferno/apps/cli/evaluate/database.yml +15 -0
  3. data/lib/inferno/apps/cli/evaluate/docker-compose.evaluate.yml +16 -0
  4. data/lib/inferno/apps/cli/evaluate.rb +52 -4
  5. data/lib/inferno/apps/cli/main.rb +5 -1
  6. data/lib/inferno/apps/cli/requirements.rb +28 -0
  7. data/lib/inferno/apps/cli/requirements_exporter.rb +194 -0
  8. data/lib/inferno/apps/cli/suite.rb +21 -0
  9. data/lib/inferno/apps/cli/templates/lib/%library_name%/example_suite/patient_group.rb.tt +141 -0
  10. data/lib/inferno/apps/cli/templates/lib/%library_name%/example_suite.rb.tt +128 -0
  11. data/lib/inferno/apps/cli/templates/lib/%library_name%/metadata.rb.tt +65 -3
  12. data/lib/inferno/apps/cli/templates/lib/%library_name%/suite.rb.tt +2 -2
  13. data/lib/inferno/apps/cli/templates/lib/%library_name%/version.rb.tt +1 -0
  14. data/lib/inferno/apps/cli/templates/lib/%library_name%.rb.tt +1 -1
  15. data/lib/inferno/apps/web/controllers/requirements/show.rb +18 -0
  16. data/lib/inferno/apps/web/controllers/test_suites/requirements/index.rb +29 -0
  17. data/lib/inferno/apps/web/router.rb +7 -0
  18. data/lib/inferno/apps/web/serializers/input.rb +1 -0
  19. data/lib/inferno/apps/web/serializers/requirement.rb +18 -0
  20. data/lib/inferno/apps/web/serializers/requirement_set.rb +13 -0
  21. data/lib/inferno/apps/web/serializers/test.rb +1 -0
  22. data/lib/inferno/apps/web/serializers/test_group.rb +1 -0
  23. data/lib/inferno/apps/web/serializers/test_suite.rb +11 -0
  24. data/lib/inferno/config/boot/requirements.rb +40 -0
  25. data/lib/inferno/config/boot/suites.rb +3 -0
  26. data/lib/inferno/dsl/fhir_evaluation/default.yml +68 -0
  27. data/lib/inferno/dsl/fhir_evaluation/evaluator.rb +3 -5
  28. data/lib/inferno/dsl/fhir_evaluation/rules/all_defined_extensions_have_examples.rb +2 -2
  29. data/lib/inferno/dsl/fhir_evaluation/rules/all_extensions_used.rb +76 -0
  30. data/lib/inferno/dsl/fhir_evaluation/rules/all_must_supports_present.rb +1 -1
  31. data/lib/inferno/dsl/fhir_evaluation/rules/all_profiles_have_examples.rb +1 -1
  32. data/lib/inferno/dsl/fhir_evaluation/rules/all_references_resolve.rb +2 -2
  33. data/lib/inferno/dsl/fhir_evaluation/rules/all_resources_reachable.rb +2 -2
  34. data/lib/inferno/dsl/fhir_evaluation/rules/all_search_parameters_have_examples.rb +22 -11
  35. data/lib/inferno/dsl/fhir_evaluation/rules/differential_content_has_examples.rb +124 -0
  36. data/lib/inferno/dsl/fhir_evaluation/rules/value_sets_demonstrate.rb +233 -0
  37. data/lib/inferno/dsl/fhir_resource_navigation.rb +11 -2
  38. data/lib/inferno/dsl/fhir_resource_validation.rb +25 -3
  39. data/lib/inferno/dsl/fhirpath_evaluation.rb +25 -1
  40. data/lib/inferno/dsl/input_output_handling.rb +1 -0
  41. data/lib/inferno/dsl/must_support_assessment.rb +15 -3
  42. data/lib/inferno/dsl/requirement_set.rb +82 -0
  43. data/lib/inferno/dsl/runnable.rb +27 -0
  44. data/lib/inferno/dsl/short_id_manager.rb +55 -0
  45. data/lib/inferno/dsl/suite_requirements.rb +46 -0
  46. data/lib/inferno/entities/ig.rb +4 -0
  47. data/lib/inferno/entities/input.rb +14 -5
  48. data/lib/inferno/entities/requirement.rb +75 -0
  49. data/lib/inferno/entities/test.rb +3 -1
  50. data/lib/inferno/entities/test_group.rb +3 -1
  51. data/lib/inferno/entities/test_suite.rb +4 -0
  52. data/lib/inferno/exceptions.rb +6 -0
  53. data/lib/inferno/public/237.bundle.js +1 -1
  54. data/lib/inferno/public/bundle.js +54 -54
  55. data/lib/inferno/public/bundle.js.LICENSE.txt +3 -36
  56. data/lib/inferno/repositories/igs.rb +1 -2
  57. data/lib/inferno/repositories/requirements.rb +120 -0
  58. data/lib/inferno/version.rb +1 -1
  59. data/spec/shared/test_kit_examples.rb +32 -0
  60. metadata +36 -3
  61. data/lib/inferno/apps/cli/templates/lib/%library_name%/patient_group.rb.tt +0 -44
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 6daa4c1864a7e7004ca9a166f3e516fcddc157c3f6b6e24722965e44330cc450
-   data.tar.gz: b158c90f63eaa8a157a27b92af68b7a3be7798e18aca2243ecbd1071c9d9bfdf
+   metadata.gz: 2ff5c612e8052b2c2a817e5ad58d27d935b8838adc2d738a8924e61ba7ecc1a0
+   data.tar.gz: 4324693af54d9c83d4eb60f2a2b85dc8012c72d372d9858bda39693d2b1de8b4
  SHA512:
-   metadata.gz: 6298ba6c1f61a6ee3f5e50045ac58e1ebaa63a2ebc863755d1c610d3eed4b0e7ca76ea74ee12c506298624248c5f3b874582eaccc1c41600f731b33a601d72ce
-   data.tar.gz: 61f9b65616cf01dd42c838ab0b9be5c38490dea22bb2a3481a82c30a25c687e2aada97321498920f6219a62afe88445ad95e71b2300859fce5ed19af7cef1f78
+   metadata.gz: 2cbff301066bd9b295edc08790bbf8b6d658d2cd5c647a636a09c5918ff0024fc06eb566742d271bf6ea1a145745072f16ab157a674b6be3e8076a298d8110fd
+   data.tar.gz: 5a9281d55a608b9356d535c5e7fc0b9bc8afb6d23a5cc6f76192e074582d385358b81ff07dd8959286440634b08411d8866b306313c4826d81ca85eb02a4d225
data/lib/inferno/apps/cli/evaluate/database.yml ADDED
@@ -0,0 +1,15 @@
+ # Inferno is using `Psych::safe_load` so YAML anchors are disabled
+ development:
+   adapter: sqlite
+   database: ':memory:'
+   max_connections: 10
+
+ production:
+   adapter: sqlite
+   database: ':memory:'
+   max_connections: 10
+
+ test:
+   adapter: sqlite
+   database: ':memory:'
+   max_connections: 10
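
For context, a config in this shape corresponds to an in-memory SQLite connection in Sequel roughly as follows (an illustrative sketch, not code from the gem):

    require 'sequel'

    # Roughly what the keys above translate to when a connection is established:
    db = Sequel.connect(adapter: 'sqlite', database: ':memory:', max_connections: 10)
    db.test_connection # => true once the in-memory database is reachable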
data/lib/inferno/apps/cli/evaluate/docker-compose.evaluate.yml ADDED
@@ -0,0 +1,16 @@
+ name: inferno_evaluator_services
+
+ services:
+   hl7_validator_service:
+     image: infernocommunity/inferno-resource-validator
+     volumes:
+       - ${TMPDIR}/data/igs:/app/igs
+       # To let the service share your local FHIR package cache,
+       # uncomment the below line
+       # - ~/.fhir:/home/ktor/.fhir
+     ports:
+       - "3501:3500"
+   fhirpath:
+     image: infernocommunity/fhirpath-service
+     ports:
+       - "6790:6789"
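
The evaluate command (next file) points at these published ports: the validator at http://localhost:3501 and the FHIRPath service at http://localhost:6790. A hypothetical readiness check, not part of the gem, could poll them like this:

    require 'net/http'

    # Hypothetical helper: block until both evaluator services accept connections.
    def wait_for_evaluator_services(urls = ['http://localhost:3501', 'http://localhost:6790'], timeout: 30)
      deadline = Time.now + timeout
      urls.each do |url|
        begin
          Net::HTTP.get_response(URI(url))
        rescue SystemCallError, SocketError
          raise "#{url} is not reachable" if Time.now > deadline

          sleep 1
          retry
        end
      end
    end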
data/lib/inferno/apps/cli/evaluate.rb CHANGED
@@ -2,13 +2,62 @@ require_relative '../../dsl/fhir_evaluation/evaluator'
  require_relative '../../dsl/fhir_evaluation/config'
  require_relative '../../entities'
  require_relative '../../utils/ig_downloader'
+ require_relative 'migration'

+ require 'fileutils'
  require 'tempfile'

  module Inferno
    module CLI
-     class Evaluate < Thor::Group
-       def evaluate(ig_path, data_path, _log_level)
+     class Evaluate
+       # @see Inferno::CLI::Main#evaluate
+       def run(ig_path, data_path, options)
+         tmpdir = Dir.mktmpdir
+         Dir.mkdir("#{tmpdir}/data")
+         Dir.mkdir("#{tmpdir}/data/igs")
+         Dir.mkdir("#{tmpdir}/config")
+         FileUtils.cp(File.expand_path('evaluate/database.yml', __dir__), "#{tmpdir}/config/database.yml")
+
+         ENV['TMPDIR'] = tmpdir
+         ENV['FHIRPATH_URL'] = 'http://localhost:6790'
+         ENV['FHIR_RESOURCE_VALIDATOR_URL'] = 'http://localhost:3501'
+
+         puts 'Starting Inferno Evaluator Services...'
+         system("#{services_base_command} up -d #{services_names}")
+
+         ig_path = absolute_path_with_home_expansion(ig_path)
+         data_path = absolute_path_with_home_expansion(data_path) if data_path
+
+         Dir.chdir(tmpdir) do
+           Migration.new.run(Logger::FATAL) # Hide migration output for evaluator
+           evaluate(ig_path, data_path, options)
+         end
+       ensure
+         system("#{services_base_command} down #{services_names}")
+         puts 'Stopped Inferno Evaluator Services'
+
+         FileUtils.remove_entry_secure tmpdir
+       end
+
+       def services_base_command
+         "docker compose -f #{File.join(__dir__, 'evaluate', 'docker-compose.evaluate.yml')}"
+       end
+
+       def services_names
+         'hl7_validator_service fhirpath'
+       end
+
+       # @private
+       def absolute_path_with_home_expansion(path)
+         if path.starts_with? '~'
+           path.sub('~', Dir.home)
+         else
+           File.absolute_path(path)
+         end
+       end
+
+       # @see Inferno::CLI::Main#evaluate
+       def evaluate(ig_path, data_path, options)
          # NOTE: repositories is required here rather than at the top of the file because
          # the tree of requires means that this file and its requires get required by every CLI app.
          # Sequel::Model, used in some repositories, fetches the table schema at instantiation.
@@ -22,7 +71,7 @@ module Inferno

          data =
            if data_path
-             DatasetLoader.from_path(File.join(__dir__, data_path))
+             Inferno::DSL::FHIREvaluation::DatasetLoader.from_path(File.join(__dir__, data_path))
            else
              ig.examples
            end
@@ -55,7 +104,6 @@ module Inferno
        def setup_validator(ig_path)
          igs_directory = File.join(Dir.pwd, 'data', 'igs')
          if File.exist?(ig_path) && !File.realpath(ig_path).start_with?(igs_directory)
-           puts "Copying #{File.basename(ig_path)} to data/igs so it is accessible to validator"
            destination_file_path = File.join(igs_directory, File.basename(ig_path))
            FileUtils.copy_file(ig_path, destination_file_path, true)
            ig_path = "igs/#{File.basename(ig_path)}"
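
As wired up in main.rb below, the CLI entry point is now Evaluate#run rather than Evaluate#evaluate. A minimal sketch of calling it directly, with illustrative arguments (the values shown are not taken from the gem):

    require 'inferno/apps/cli/evaluate'

    # Illustrative invocation; Thor normally supplies these values from the command line.
    Inferno::CLI::Evaluate.new.run(
      '~/igs/package.tgz', # ig_path; a leading '~' is expanded by absolute_path_with_home_expansion
      nil,                 # data_path; nil falls back to the IG's own examples
      {}                   # options hash passed through from Thor
    )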
data/lib/inferno/apps/cli/main.rb CHANGED
@@ -1,6 +1,7 @@
  require_relative 'console'
  require_relative 'evaluate'
  require_relative 'migration'
+ require_relative 'requirements'
  require_relative 'services'
  require_relative 'suite'
  require_relative 'suites'
@@ -45,7 +46,7 @@ module Inferno
               type: :string,
               desc: 'Export evaluation result to outcome.json as an OperationOutcome'
        def evaluate(ig_path)
-         Evaluate.new.evaluate(ig_path, options[:data_path], Logger::INFO)
+         Evaluate.new.run(ig_path, options[:data_path], options)
        end

        desc 'console', 'Start an interactive console session with Inferno'
@@ -59,6 +60,9 @@ module Inferno
          Migration.new.run
        end

+       desc 'requirements SUBCOMMAND ...ARGS', 'Perform requirements operations'
+       subcommand 'requirements', Requirements
+
        desc 'start', 'Start Inferno'
        option :watch,
               default: false,
data/lib/inferno/apps/cli/requirements.rb ADDED
@@ -0,0 +1,28 @@
+ require_relative 'requirements_exporter'
+
+ module Inferno
+   module CLI
+     class Requirements < Thor
+       desc 'export_csv', 'Export a CSV represantation of requirements from an excel file'
+       long_desc <<~LONGDESC
+         Creates CSV files for tested requirements and requirements which are not
+         planned to be tested based on the excel files located in
+         "lib/test_kit_name/requirements"
+       LONGDESC
+       def export_csv
+         ENV['NO_DB'] = 'true'
+         RequirementsExporter.new.run
+       end
+
+       desc 'check', 'Check whether the current requirements CSV files are up to date'
+       long_desc <<~LONGDESC
+         Check whether the requirements CSV files are up to date with the excel
+         files in "lib/test_kit_name/requirements"
+       LONGDESC
+       def check
+         ENV['NO_DB'] = 'true'
+         RequirementsExporter.new.run_check
+       end
+     end
+   end
+ end
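
Both subcommands are thin wrappers around the exporter defined in the next file; run from a test kit's root directory, they amount to roughly this (sketch only):

    ENV['NO_DB'] = 'true'
    require 'inferno/apps/cli/requirements_exporter'

    exporter = Inferno::CLI::RequirementsExporter.new
    exporter.run        # same work as `inferno requirements export_csv`
    exporter.run_check  # same work as `inferno requirements check`; exits 1 when the CSV is stale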
data/lib/inferno/apps/cli/requirements_exporter.rb ADDED
@@ -0,0 +1,194 @@
+ require 'csv'
+ require 'roo'
+
+ module Inferno
+   module CLI
+     class RequirementsExporter
+       INPUT_HEADERS =
+         [
+           'ID*',
+           'URL*',
+           'Requirement*',
+           'Conformance*',
+           'Actor*',
+           'Sub-Requirement(s)',
+           'Conditionality',
+           'Verifiable?',
+           'Verifiability Details',
+           'Planning To Test?',
+           'Planning To Test Details'
+         ].freeze
+       REQUIREMENTS_OUTPUT_HEADERS =
+         [
+           'Req Set',
+           'ID',
+           'URL',
+           'Requirement',
+           'Conformance',
+           'Actor',
+           'Sub-Requirement(s)',
+           'Conditionality',
+           'Not Tested Reason',
+           'Not Tested Details'
+         ].freeze
+
+       def local_test_kit_gem
+         @local_test_kit_gem ||= Bundler.definition.specs.find { |spec| spec.full_gem_path == Dir.pwd }
+       end
+
+       def test_kit_name
+         local_test_kit_gem.name
+       end
+
+       def base_requirements_folder
+         @base_requirements_folder ||= Dir.glob(File.join(Dir.pwd, 'lib', '*', 'requirements')).first
+       end
+
+       def requirements_output_file_name
+         "#{test_kit_name}_requirements.csv"
+       end
+
+       def requirements_output_file_path
+         File.join(base_requirements_folder, requirements_output_file_name).freeze
+       end
+
+       def available_input_worksheets
+         @available_input_worksheets ||=
+           Dir.glob(File.join(base_requirements_folder, '*.xlsx'))
+             .reject { |f| f.include?('~$') }
+       end
+
+       def requirement_set_id(worksheet)
+         sheet = worksheet.sheet('Metadata')
+         id_row = sheet.column(1).find_index('Id') + 1
+         sheet.row(id_row)[1]
+       end
+
+       # Of the form:
+       # {
+       #   requirement_set_id_1: [row1, row2, row 3, ...],
+       #   requirement_set_id_2: [row1, row2, row 3, ...]
+       # }
+       def input_requirement_sets
+         requirement_set_hash = Hash.new { |hash, key| hash[key] = [] }
+         available_input_worksheets.each_with_object(requirement_set_hash) do |worksheet_file, requirement_sets|
+           worksheet = Roo::Spreadsheet.open(worksheet_file)
+           set_identifier = requirement_set_id(worksheet)
+
+           CSV.parse(
+             worksheet.sheet('Requirements').to_csv,
+             headers: true
+           ).each do |row|
+             row_hash = row.to_h.slice(*INPUT_HEADERS)
+             row_hash['Sub-Requirement(s)']&.delete_prefix!('mailto:')
+
+             requirement_sets[set_identifier] << row_hash
+           end
+         end
+       end
+
+       def new_requirements_csv # rubocop:disable Metrics/CyclomaticComplexity
+         @new_requirements_csv ||=
+           CSV.generate(+"\xEF\xBB\xBF") do |csv| # start with an unnecessary BOM to make viewing in excel easier
+             csv << REQUIREMENTS_OUTPUT_HEADERS
+
+             input_requirement_sets.each do |requirement_set_id, input_rows|
+               input_rows.each do |row| # NOTE: use row order from source file
+                 csv << REQUIREMENTS_OUTPUT_HEADERS.map do |header|
+                   (
+                     case header
+                     when 'Req Set'
+                       requirement_set_id
+                     when 'Not Tested Reason'
+                       if spreadsheet_value_falsy?(row['Verifiable?'])
+                         'Not Verifiable'
+                       elsif spreadsheet_value_falsy?(row['Planning To Test?'])
+                         'Not Tested'
+                       end
+                     when 'Not Tested Details'
+                       if spreadsheet_value_falsy?(row['Verifiable?'])
+                         row['Verifiability Details']
+                       elsif spreadsheet_value_falsy?(row['Planning To Test?'])
+                         row['Planning To Test Details']
+                       end
+                     else
+                       row[header] || row["#{header}*"]
+                     end
+                   )&.strip
+                 end
+               end
+             end
+           end
+       end
+
+       def old_requirements_csv
+         @old_requirements_csv ||= File.read(requirements_output_file_path)
+       end
+
+       def run
+         check_presence_of_input_files
+
+         update_requirements =
+           if File.exist?(requirements_output_file_path)
+             if old_requirements_csv == new_requirements_csv
+               puts "'#{requirements_output_file_name}' file is up to date."
+               false
+             else
+               puts 'Requirements set has changed.'
+               true
+             end
+           else
+             puts "No existing #{requirements_output_file_name}."
+             true
+           end
+
+         if update_requirements
+           puts "Writing to file #{requirements_output_file_name}..."
+           File.write(requirements_output_file_path, new_requirements_csv, encoding: Encoding::UTF_8)
+         end
+
+         puts 'Done.'
+       end
+
+       def run_check
+         check_presence_of_input_files
+
+         requirements_ok =
+           if File.exist?(requirements_output_file_path)
+             if old_requirements_csv == new_requirements_csv
+               puts "'#{requirements_output_file_name}' file is up to date."
+               true
+             else
+               puts "#{requirements_output_file_name} file is out of date."
+               false
+             end
+           else
+             puts "No existing #{requirements_output_file_name} file."
+             false
+           end
+
+         return if requirements_ok
+
+         puts <<~MESSAGE
+           Check Failed. To resolve, run:
+
+             bundle exec inferno requirements export_csv
+
+         MESSAGE
+         exit(1)
+       end
+
+       def check_presence_of_input_files
+         return if available_input_worksheets.present?
+
+         puts 'Could not find any input files in directory ' \
+              "#{base_requirements_folder}. Aborting requirements collection."
+         exit(1)
+       end
+
+       def spreadsheet_value_falsy?(string)
+         ['no', 'false'].include? string&.downcase
+       end
+     end
+   end
+ end
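
To illustrate the 'Not Tested Reason'/'Not Tested Details' mapping above, here is the same logic applied to a single made-up row (the values are invented for the example):

    falsy = ->(value) { ['no', 'false'].include?(value&.downcase) }

    row = {
      'Verifiable?' => 'No',
      'Verifiability Details' => 'Requires manual review',
      'Planning To Test?' => 'Yes'
    }

    reason =
      if falsy.call(row['Verifiable?'])
        'Not Verifiable'
      elsif falsy.call(row['Planning To Test?'])
        'Not Tested'
      end
    # reason == 'Not Verifiable'; 'Not Tested Details' would then carry row['Verifiability Details']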
data/lib/inferno/apps/cli/suite.rb CHANGED
@@ -52,6 +52,27 @@ module Inferno

          puts TTY::Markdown.parse(description)
        end
+
+       desc 'lock_short_ids SUITE_ID', 'Persist the current short_id map for a suite'
+       long_desc <<~LONGDESC
+         Loads the given suite and writes its current short_id map to its corresponding YAML file.
+       LONGDESC
+       def lock_short_ids(suite_id)
+         ENV['NO_DB'] = 'true'
+         Inferno::Application.start(:suites)
+
+         suite = Inferno::Repositories::TestSuites.new.find(suite_id)
+
+         if suite.blank?
+           message = "No suite found with id `#{suite_id}`. Run `inferno suites` to see a list of available suites"
+
+           puts TTY::Markdown.parse(message)
+           return
+         end
+
+         File.write(suite.short_id_file_path, suite.current_short_id_map.to_yaml)
+         puts "Short ID map saved to #{suite.short_id_file_path}"
+       end
      end
    end
  end
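
Outside of Thor, the new command reduces to the following sketch (the suite id is hypothetical; short_id_file_path and current_short_id_map presumably come from the new dsl/short_id_manager.rb listed above):

    ENV['NO_DB'] = 'true' # skip database setup, as the CLI command does
    require 'inferno'     # assumes the full Inferno environment is available

    Inferno::Application.start(:suites)

    suite = Inferno::Repositories::TestSuites.new.find('example_suite') # hypothetical suite id
    File.write(suite.short_id_file_path, suite.current_short_id_map.to_yaml) if suite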
data/lib/inferno/apps/cli/templates/lib/%library_name%/example_suite/patient_group.rb.tt ADDED
@@ -0,0 +1,141 @@
+ module <%= module_name %>
+   class PatientGroup < Inferno::TestGroup
+     title 'Patient'
+
+     description <<~DESCRIPTION
+       This scenario verifies the ability of a system to provide a Patient as described in the Example criterion.
+
+       *or*
+
+       The Example Patient sequence verifies that the system under test is able to provide correct responses for Patient queries.
+
+       ## Requirements
+
+       Patient queries must contain resources conforming to the Example Patient as specified in the Example Implementation Guide.
+
+       *or*
+
+       All Must Support elements must be seen before the test can pass, as well as Data Absent Reason to demonstrate that the server
+       can properly handle missing data. Note that Organization, Practitioner, and RelatedPerson resources must be accessible as
+       references in some Example profiles to satisfy must support requirements, and those references will be validated to their Example
+       profile. These resources will not be tested for FHIR search support.
+
+       ## <*If applicable*> Dependencies
+       Prior to running this scenario, systems must recieve a verified access token from one of the previous SMART App Launch scenarios.
+
+       *or*
+
+       Prior to running this scenario, first run the Single Patient API tests using resource-level scopes, as this scenario uses content
+       saved from that scenario as a baseline for comparison when finer-grained scopes are granted.
+
+       ## <*If applicable*> Methodology
+
+       *Only include if different from instructions included in a parent group or suite*
+
+       The test begins by searching by one or more patients, with the expectation that the Bearer token provided to the test grants
+       access to all Resources. It uses results returned from that query to generate other queries and checks that the results are
+       consistent with the provided search parameters. It then performs a read on each Resource returned and validates the response
+       against the relevant profile as currently defined in the Example Implementation Guide.
+
+       *or*
+
+       ### Searching
+
+       This test sequence will first perform each required search associated with this resource.
+       This sequence will perform searches with the following parameters:
+       - _id
+       - identifier
+       - name
+       - birthdate + name
+       - gender + name
+
+       #### Search Parameters
+
+       The first search uses the selected patient(s) from the prior launch sequence. Any subsequent searches will look for its parameter
+       values from the results of the first search. For example, the `identifier` search in the patient sequence is performed by looking
+       for an existing `Patient.identifier` from any of the resources returned in the `_id` search. If a value cannot be found this way,
+       the search is skipped.
+
+       #### Search Validation
+
+       Inferno will retrieve up to the first 20 bundle pages of the reply for Patient resources and save them for subsequent tests.
+       Each of these resources is then checked to see if it matches the searched parameters in accordance with [FHIR search guidelines](https://www.hl7.org/fhir/search.html).
+       The test will fail, for example, if a Patient search for gender=male returns a female patient.
+
+       ### Must Support
+
+       Each profile contains elements marked as "must support". This test sequence expects to see each of these elements at least once.
+       If at least one cannot be found, the test will fail. The test will look through the Patient resources found in the first test
+       for these elements.
+
+       ### Profile Validation
+
+       Each resource returned from the first search is expected to conform to the [Example Patient Profile](https://www.example.com/patient/profile).
+       Each element is checked against teminology binding and cardinality requirements.
+
+       Elements with a required binding are validated against their bound ValueSet. If the code/system in the element is not part of the
+       ValueSet, then the test will fail.
+
+       ### Reference Validation
+
+       At least one instance of each external reference in elements marked as "must support" within the resources provided by the
+       system must resolve. The test will attempt to read each reference found and will fail if no read succeeds.
+
+       ## <*If applicable*> Running the Tests
+
+       *Only include if different from instructions included in a parent group or suite*
+
+       Register Inferno as an EHR-launched application using patient-level scopes and the following URIs:
+       - Launch URI: https://inferno.healthit.gov/suites/custom/smart/launch
+       - Redirect URI: https://inferno.healthit.gov/suites/custom/smart/redirect
+
+       ## <*If top-level group for criteria*> Relevant Specifications
+
+       The following implementation specifications are relevant to this scenario:
+       - [Specification 1 v1](https://www.example.com/spec1/v1)
+       - [Specification 1 v2](https://www.example.com/spec1/v2)
+       - [Specification 2 v5](https://www.example.com/spec1/v1)
+
+     DESCRIPTION
+
+     id :patient_group
+
+     test do
+       title 'Server returns requested Patient resource from the Patient read interaction'
+       description %(
+         Verify that Patient resources can be read from the server. Expects a 200 response that includes a Patient
+         resource whose ID matches the requested patient ID.
+       )
+
+       input :patient_id,
+             title: 'Patient ID'
+
+       # Named requests can be used by other tests
+       makes_request :patient
+
+       run do
+         fhir_read(:patient, patient_id, name: :patient)
+
+         assert_response_status(200)
+         assert_resource_type(:patient)
+         assert resource.id == patient_id,
+                "Requested resource with id #{patient_id}, received resource with id #{resource.id}"
+       end
+     end
+
+     test do
+       title 'Patient resource is valid'
+       description %(
+         Verify that the Patient resource returned from the server is a valid FHIR resource.
+       )
+       # This test will use the response from the :patient request in the
+       # previous test
+       uses_request :patient
+
+       run do
+         assert_resource_type(:patient)
+         assert_valid_resource
+       end
+     end
+   end
+ end
data/lib/inferno/apps/cli/templates/lib/%library_name%/example_suite.rb.tt ADDED
@@ -0,0 +1,128 @@
+ require_relative 'metadata'
+ require_relative 'example_suite/patient_group'
+
+ module <%= module_name %>
+   class ExampleSuite < Inferno::TestSuite
+
+     id :<%= test_suite_id %>
+     title '<%= title_name %>'
+     short_title '<%= title_name %>'
+
+     # TODO: Update the description below to align with the test suite
+     description <<~DESCRIPTION
+       The Example Test Suite is a testing tool for Health Level 7 (HL7®) Fast Healthcare Interoperability Resources (FHIR®)
+       services seeking to meet the requirements of the API criterion within the Example Certification Program.
+
+       *or*
+
+       The Example Test Suite tests systems for their conformance to the [Example Implementation Guide](https://example.com/example).
+
+       ## Organization
+
+       This test suite is organized into testing scenarios that in sum cover all requirements within the Example criterion.
+       The scenarios are intended to be run in order during certification, but can be run out of order to support testing
+       during development or certification preparation. Some scenarios depend on data collected during previous scenarios
+       to function. In these cases, the scenario description describes these dependencies.
+
+       The first three scenarios require the system under test to demonstrate basic SMART App Launch functionality.
+       The fourth uses a valid token provided during earlier tests to verify support for the Single Patient API as
+       described in the criterion. The fifth verifies support for the Multi Patient API, including Backend Services
+       for authorization. Not all authorization-related requirements are verified in the first three scenarios,
+       and the 'Additional Authorization Tests' verify these additional requirements. The last scenario contains
+       a list of 'attestations' and 'visual inspections' for requirements that could not be verified through automated testing.
+
+       *or*
+
+       This test suite is split into three different categories:
+       - All Patients: FHIR Operation to obtain a detailed set of FHIR resources of diverse resource types pertaining to all patients
+       - Group of Patients: FHIR Operation to obtain a detailed set of FHIR resources of diverse resource types pertaining to all members of a specified Group
+       - System Level Export: FHIR Operation to export data from a FHIR server, whether or not it is associated with a patient
+
+       ## Getting Started
+
+       The best way to learn about how to use these tests is the [Example Test Kit walkthrough](https://example.com/Walkthrough),
+       which demonstrates the tests running against a simulated system.
+
+       To get started with the first group of scenarios, please first register the Inferno client as a SMART App with the following information:
+       - SMART Launch URI: https://example.com/smart/launch
+       - OAuth Redirect URI: https://example.com/smart/redirect
+
+       For the multi-patient API, register Inferno with the following JWK Set Url:
+       - https://example.com/suites/custom/example/.well-known/jwks.json
+
+       *or*
+
+       To get started, if your server supports SMART backend services authorization, please first register Inferno with the following JWK Set URL:
+       - https://example.com/suites/custom/example/.well-known/jwks.json
+
+       Then, run the full Example test suite containing both the SMART Backend Services test group and the Bulk Data Export Tests test group.
+       If your server does not support SMART Backend Services authorization, only run the second test group, Bulk Data Export Tests.
+
+       ## Limitations
+
+       Inferno is unable to determine what requests will result in specific kinds of responses from the server under test
+       (e.g., what will result in Instructions being returned vs. Coverage Information). As a result, the tester must
+       supply the request bodies which will cause the system under test to return the desired response types.
+
+       The ability of an Example Server to request additional FHIR resources is not tested.
+
+       Hook configuration is not tested.
+
+       ## *if applicable:* Certification Requirements
+
+       Systems must pass all tests to qualify for Example certification.
+
+     DESCRIPTION
+
+     # These inputs will be available to all tests in this suite
+     input :url,
+           title: 'FHIR Server Base Url'
+
+     input :credentials,
+           title: 'OAuth Credentials',
+           type: :auth_info,
+           optional: true
+
+     # All FHIR requests in this suite will use this FHIR client
+
+     # All FHIR requests in this suite will use this FHIR client
+     fhir_client do
+       url :url
+       auth_info :credentials
+     end
+
+     # All FHIR validation requests will use this FHIR validator
+     fhir_resource_validator do
+       # igs 'identifier#version' # Use this method for published IGs/versions
+       # igs 'igs/filename.tgz' # Use this otherwise
+
+       exclude_message do |message|
+         message.message.match?(/\A\S+: \S+: URL value '.*' does not resolve/)
+       end
+     end
+
+     # Tests and TestGroups can be defined inline
+     group do
+       id :capability_statement
+       title 'Capability Statement'
+       description 'See a sample description in the Patient Test Group'
+
+       test do
+         id :capability_statement_read
+         title 'Read CapabilityStatement'
+         description 'Read CapabilityStatement from /metadata endpoint'
+
+         run do
+           fhir_get_capability_statement
+
+           assert_response_status(200)
+           assert_resource_type(:capability_statement)
+         end
+       end
+     end
+
+     # Tests and TestGroups can be written in separate files and then included
+     # using their id
+     group from: :patient_group
+   end
+ end
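
In a generated test kit, the commented igs hints in the validator block above are typically filled in. An illustrative variant (the IG identifier is invented for the example, and this assumes the Inferno framework has already loaded the test kit):

    module ExampleTestKit
      class PinnedIgSuite < Inferno::TestSuite
        id :pinned_ig_suite
        title 'Pinned IG Example'

        fhir_resource_validator do
          igs 'hl7.fhir.us.core#6.1.0' # published IG pinned as 'identifier#version' (illustrative)
          # igs 'igs/package.tgz'      # or a package file shipped with the test kit
        end
      end
    end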