fhir-sheets 1.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of fhir-sheets might be problematic. Click here for more details.
- fhir_sheets-1.1.0/LICENSE +21 -0
- fhir_sheets-1.1.0/PKG-INFO +14 -0
- fhir_sheets-1.1.0/pyproject.toml +18 -0
- fhir_sheets-1.1.0/src/fhir_sheets/__init__.py +0 -0
- fhir_sheets-1.1.0/src/fhir_sheets/cli/__init__.py +0 -0
- fhir_sheets-1.1.0/src/fhir_sheets/cli/fhirsheets.py +64 -0
- fhir_sheets-1.1.0/src/fhir_sheets/core/__init__.py +0 -0
- fhir_sheets-1.1.0/src/fhir_sheets/core/conversion.py +308 -0
- fhir_sheets-1.1.0/src/fhir_sheets/core/fhir_formatting.py +279 -0
- fhir_sheets-1.1.0/src/fhir_sheets/core/read_input.py +99 -0
- fhir_sheets-1.1.0/src/fhir_sheets/core/special_values.py +360 -0
- fhir_sheets-1.1.0/src/fhir_sheets/core/util.py +1 -0
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) [year] [fullname]
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
Metadata-Version: 2.3
|
|
2
|
+
Name: fhir-sheets
|
|
3
|
+
Version: 1.1.0
|
|
4
|
+
Summary: FhirSheets is a command-line tool that reads an Excel file in FHIR cohort format and generates FHIR bundle JSON files from it. Each row in the template Excel file is used to create an individual JSON file, outputting them to a specified folder.
|
|
5
|
+
Author: Michael Riley
|
|
6
|
+
Author-email: Michael.Riley@gtri.gatech.edu
|
|
7
|
+
Requires-Python: >=3.13
|
|
8
|
+
Classifier: Programming Language :: Python :: 3
|
|
9
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
10
|
+
Requires-Dist: et-xmlfile (==1.1.0)
|
|
11
|
+
Requires-Dist: jsonpath-ng (==1.6.1)
|
|
12
|
+
Requires-Dist: openpyxl (==3.1.5)
|
|
13
|
+
Requires-Dist: orjson (==3.10.7)
|
|
14
|
+
Requires-Dist: ply (==3.11)
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
[tool.poetry]
|
|
2
|
+
name = "fhir-sheets"
|
|
3
|
+
version = "1.1.0"
|
|
4
|
+
description = "FhirSheets is a command-line tool that reads an Excel file in FHIR cohort format and generates FHIR bundle JSON files from it. Each row in the template Excel file is used to create an individual JSON file, outputting them to a specified folder."
|
|
5
|
+
authors = ["Michael Riley <Michael.Riley@gtri.gatech.edu>"]
|
|
6
|
+
packages = [{include = "fhir_sheets", from = "src"}]
|
|
7
|
+
|
|
8
|
+
[tool.poetry.dependencies]
|
|
9
|
+
python = ">=3.13" # Specify the compatible Python version here
|
|
10
|
+
et-xmlfile = "1.1.0"
|
|
11
|
+
jsonpath-ng = "1.6.1"
|
|
12
|
+
openpyxl = "3.1.5"
|
|
13
|
+
orjson = "3.10.7"
|
|
14
|
+
ply = "3.11"
|
|
15
|
+
|
|
16
|
+
[build-system]
|
|
17
|
+
requires = ["poetry-core"]
|
|
18
|
+
build-backend = "poetry.core.masonry.api"
|
|
File without changes
|
|
File without changes
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
from ..core import read_input
|
|
2
|
+
from ..core import conversion
|
|
3
|
+
|
|
4
|
+
import argparse
|
|
5
|
+
import orjson
|
|
6
|
+
import json
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
|
|
9
|
+
def find_sets(d, path=""):
    """Recursively walk a nested dict/list structure and report any `set` objects.

    Sets are not JSON-serializable, so this is a debugging aid that prints the
    dotted/indexed path of every set found before the bundle is dumped.
    """
    if isinstance(d, dict):
        for key, nested in d.items():
            find_sets(nested, f"{path}.{key}" if path else str(key))
    elif isinstance(d, list):
        # Lists may contain further dicts/lists; index into the path.
        for position, element in enumerate(d):
            find_sets(element, f"{path}[{position}]")
    elif isinstance(d, set):
        print(f"Set found at path: {path}")
|
|
19
|
+
|
|
20
|
+
def main(input_file, output_folder):
    """Read a FHIR cohort xlsx and write one pretty-printed bundle JSON per patient.

    input_file: path to the input workbook.
    output_folder: directory receiving one ``<index>.json`` file per patient
        entry; relative paths are resolved against the current working
        directory and the folder is created if missing.
    """
    # Resolve and ensure the output folder exists.
    output_folder_path = Path(output_folder)
    if not output_folder_path.is_absolute():
        output_folder_path = Path.cwd() / output_folder_path
    output_folder_path.mkdir(parents=True, exist_ok=True)

    # Step 1: Read the input workbook into entity/link/patient-data collections.
    data = read_input.read_xlsx_and_process(input_file)

    # Step 2: Build and write a transaction bundle for each patient index.
    for i in range(data['num_entries']):
        file_path = output_folder_path / f"{i}.json"
        fhir_bundle = conversion.create_transaction_bundle(
            data['resource_definition_entities'],
            data['resource_link_entities'],
            data['patient_data_entities'],
            i,
        )
        # Debug aid: sets are not JSON-serializable; report any before dumping.
        find_sets(fhir_bundle)
        # Serialize once with orjson (it handles datetime objects produced by
        # the formatter), then pretty-print in a single write instead of the
        # original write-bytes / re-read / re-write cycle.
        parsed = json.loads(orjson.dumps(fhir_bundle))
        with open(file_path, 'w') as json_file:
            json.dump(parsed, json_file, indent=4)
|
|
49
|
+
|
|
50
|
+
if __name__ == "__main__":
    # Create the argparse CLI
    parser = argparse.ArgumentParser(description="Process input, convert data, and write output.")

    # Define the input file argument.
    # NOTE(review): the default is a relative path, so it assumes the tool is
    # launched from the repository root — confirm for installed usage.
    parser.add_argument('--input_file', type=str, help="Path to the input xlsx ", default="src/resources/Synthetic_Input_Baseline.xlsx")

    # Define the output folder argument (created by main() if it does not exist)
    parser.add_argument('--output_folder', type=str, help="Path to save the output files", default="output/")

    # Parse the arguments
    args = parser.parse_args()

    # Call the main function with the provided arguments
    main(args.input_file, args.output_folder)
|
|
File without changes
|
|
@@ -0,0 +1,308 @@
|
|
|
1
|
+
import uuid
|
|
2
|
+
from jsonpath_ng.jsonpath import Fields, Slice, Where
|
|
3
|
+
from jsonpath_ng.ext import parse as parse_ext
|
|
4
|
+
from . import fhir_formatting
|
|
5
|
+
from . import special_values
|
|
6
|
+
|
|
7
|
+
#Main top level function
#Creates a full transaction bundle for a patient at index
def create_transaction_bundle(resource_definition_entities, resource_link_entities, patient_data, index = 0):
    """Build a FHIR transaction Bundle for the patient at `index`.

    Instantiates one resource per definition, wires references between them
    (implicit defaults plus explicit ResourceLinks rows), and packs everything
    into a transaction bundle, which is returned.
    """
    bundle = initialize_bundle()
    resources_by_entity = {}
    # Create every resource, keyed by its sheet entity name.
    for definition in resource_definition_entities:
        resources_by_entity[definition['Entity Name']] = create_fhir_resource(definition, patient_data, index)
    # References can only be wired once all resources (and their ids) exist.
    add_default_resource_links(resources_by_entity, resource_link_entities)
    create_resource_links(resources_by_entity, resource_link_entities)
    # Wrap each resource in a bundle entry.
    for resource in resources_by_entity.values():
        add_resource_to_transaction_bundle(bundle, resource)
    return bundle
|
|
24
|
+
|
|
25
|
+
#Initialize root bundle definition
def initialize_bundle():
    """Return a skeleton FHIR transaction Bundle with a fresh uuid and no entries."""
    return {
        'resourceType': 'Bundle',
        'id': str(uuid.uuid4()),
        'type': 'transaction',
        'entry': [],
    }
|
|
33
|
+
|
|
34
|
+
# Creates a fhir-json structure from a resource definition entity and the patient_data_sheet
def create_fhir_resource(resource_definition, patient_data, index = 0):
    """Create one FHIR resource dict from its definition plus patient-data columns.

    Missing entity columns produce a warning and a bare initialized resource.
    """
    resource = initialize_resource(resource_definition)
    entity_name = resource_definition['Entity Name']
    try:
        field_entries = patient_data[entity_name]
    except KeyError:
        print(f"WARNING: Patient index {index} - Create Fhir Resource Error - {entity_name} - No columns for entity '{entity_name}' found for resource in 'PatientData' sheet")
        return resource
    # Apply every field column that has a value for this patient index.
    for field_entry in field_entries.values():
        values = field_entry['values']
        if values and len(values) > index:
            create_structure_from_jsonpath(resource, field_entry['jsonpath'], resource_definition, field_entry, field_entry['valueType'], values[index])
    return resource
|
|
49
|
+
|
|
50
|
+
#Initialize a resource from a resource definition. Adding basic
def initialize_resource(resource_definition):
    """Start a resource dict with resourceType, a fresh uuid id, and meta.profile.

    Side effect: the generated id is written back into
    ``resource_definition['id']`` (preserved from the original implementation).
    """
    resource = {'resourceType': resource_definition['ResourceType'].strip()}
    resource_definition['id'] = str(uuid.uuid4())
    resource['id'] = resource_definition['id'].strip()
    profiles = resource_definition['Profile(s)']
    if profiles:
        resource['meta'] = {'profile': profiles}
    return resource
|
|
61
|
+
|
|
62
|
+
#Create a resource_link for default references in the cases where only 1 resourceType of the source and destination exist
def add_default_resource_links(created_resources, resource_link_entities):
    """Append implicit ResourceLinks for well-known FHIR reference fields.

    For each (source type, destination type, field) triple below, when exactly
    one resource of the source type and exactly one of the destination type
    exist, a link entry is appended to `resource_link_entities` (mutated in
    place) so the pair gets wired without an explicit ResourceLinks row.
    """
    default_references = [
        ('allergyintolerance', 'patient', 'patient'),
        ('allergyintolerance', 'practitioner', 'asserter'),
        ('careplan', 'goal', 'goal'),
        ('careplan', 'patient', 'subject'),
        ('careplan', 'practitioner', 'performer'),
        ('diagnosticreport', 'careteam', 'performer'),
        ('diagnosticreport', 'imagingStudy', 'imagingStudy'),
        ('diagnosticreport', 'observation', 'result'),
        ('diagnosticreport', 'organization', 'performer'),
        ('diagnosticreport', 'practitioner', 'performer'),
        ('diagnosticreport', 'practitionerrole', 'performer'),
        ('diagnosticreport', 'specimen', 'specimen'),
        ('encounter', 'location', 'location'),
        ('encounter', 'organization', 'serviceProvider'),
        ('encounter', 'patient', 'subject'),
        ('encounter', 'practitioner', 'participant'),
        ('goal', 'condition', 'addresses'),
        ('goal', 'patient', 'subject'),
        ('immunization', 'patient', 'patient'),
        ('immunization', 'practitioner', 'performer'),
        ('immunization', 'organization', 'manufacturer'),
        ('medicationrequest', 'medication', 'medicationReference'),
        ('medicationrequest', 'patient', 'subject'),
        ('medicationrequest', 'practitioner', 'requester'),
        ('observation', 'device', 'device'),
        ('observation', 'patient', 'subject'),
        ('observation', 'practitioner', 'performer'),
        ('observation', 'specimen', 'specimen'),
        ('procedure', 'device', 'usedReference'),
        ('procedure', 'location', 'location'),
        ('procedure', 'patient', 'subject'),
        ('procedure', 'practitioner', 'performer'),
    ]

    # Count resources per lower-cased resourceType, remembering the entity
    # name in case that type turns out to be a singleton.
    # Fixed: the original used inconsistent keys ('singleResource' vs
    # 'singletonResource') and redundantly reassigned the entity name for
    # non-singletons; only 'count' and 'singletonEntityName' are ever read.
    resource_counts = {}
    for entity_name, resource in created_resources.items():
        resource_type = resource['resourceType'].lower().strip()
        if resource_type not in resource_counts:
            resource_counts[resource_type] = {'count': 1, 'singletonEntityName': entity_name}
        else:
            resource_counts[resource_type]['count'] += 1

    for source_type, destination_type, field_name in default_references:
        source_info = resource_counts.get(source_type)
        destination_info = resource_counts.get(destination_type)
        if source_info and destination_info and \
                source_info['count'] == 1 and destination_info['count'] == 1:
            resource_link_entities.append(
                {
                    "OriginResource": source_info['singletonEntityName'],
                    "DestinationResource": destination_info['singletonEntityName'],
                    "ReferencePath": field_name
                }
            )
    return
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
#Create resource references/links with created entities
def create_resource_links(created_resources, resource_link_entites):
    """Wire each ResourceLinks row into a FHIR reference on the origin resource.

    Link triples listed in `arrayType_references` produce a list of reference
    objects on the origin resource (appended per row); every other path holds
    a single ``{"reference": "<Type>/<id>"}`` object. Rows naming unknown
    entities are skipped with a warning.
    """
    # Reference fields that are arrays in FHIR (DiagnosticReport mostly).
    # Fixed: 'imagingStudy' is lower-cased here because link tuples are built
    # from lower-cased values, so the original mixed-case entry never matched.
    arrayType_references = [
        ('diagnosticreport', 'specimen', 'specimen'),
        ('diagnosticreport', 'practitioner', 'performer'),
        ('diagnosticreport', 'practitionerrole', 'performer'),
        ('diagnosticreport', 'organization', 'performer'),
        ('diagnosticreport', 'careteam', 'performer'),
        ('diagnosticreport', 'observation', 'result'),
        ('diagnosticreport', 'imagingstudy', 'imagingstudy'),
    ]
    print("Building resource links")
    for resource_link_entity in resource_link_entites:
        try:
            origin_resource = created_resources[resource_link_entity['OriginResource']]
        except KeyError:
            # Fixed message grammar ("a Origin" -> "an Origin").
            print(f"WARNING: In ResourceLinks tab, found an Origin Resource of : {resource_link_entity['OriginResource']} but no such entity found in PatientData")
            continue
        try:
            destination_resource = created_resources[resource_link_entity['DestinationResource']]
        except KeyError:
            # Fixed message typo ("Desitnation" -> "Destination").
            print(f"WARNING: In ResourceLinks tab, found a Destination Resource of : {resource_link_entity['DestinationResource']} but no such entity found in PatientData")
            continue
        reference_path = resource_link_entity['ReferencePath'].strip().lower()
        reference_value = destination_resource['resourceType'] + "/" + destination_resource['id']
        link_tuple = (origin_resource['resourceType'].strip().lower(),
                      destination_resource['resourceType'].strip().lower(),
                      reference_path)
        if link_tuple in arrayType_references:
            # Multi-valued reference: append to a (possibly new) list.
            origin_resource.setdefault(reference_path, []).append({"reference": reference_value})
        else:
            origin_resource[reference_path] = {"reference": reference_value}
    return
|
|
170
|
+
|
|
171
|
+
def add_resource_to_transaction_bundle(root_bundle, fhir_resource):
    """Append `fhir_resource` to the bundle as a PUT transaction entry and return the bundle."""
    resource_url = fhir_resource['resourceType'] + "/" + fhir_resource['id']
    root_bundle['entry'].append({
        'fullUrl': "urn:uuid:" + fhir_resource['id'],
        'resource': fhir_resource,
        'request': {
            "method": "PUT",
            "url": resource_url,
        },
    })
    return root_bundle
|
|
181
|
+
|
|
182
|
+
#Drill down and create a structure from a json path with a simple recursive process
# Supports 2 major features:
# 1) dot notation such as $.codeableconcept.coding[0].value = 1234
# 2) simple qualifiers such as $.name[use=official].family = Dickerson
# rootStruct: top level structure to drill into
# json_path: dot-notation path to follow
# resource_definition: resource description model from import
# entity_definition: specific field entry information for this function
# value: Actual value to assign
def create_structure_from_jsonpath(root_struct, json_path, resource_definition, entity_definition, dataType, value):
    """Split `json_path` into parts and delegate to `build_structure` to place `value`.

    Returns `root_struct` (possibly mutated). A None value logs a warning and
    leaves the structure untouched.
    """
    # Force plain-string handling when the sheet declares the value a string.
    if dataType is not None and dataType.strip().lower() == 'string':
        value = str(value)

    # Fixed: identity comparison (`is None`) instead of `== None`.
    if value is None:
        print(f"WARNING: Full jsonpath: {json_path} - Expected to find a value but found None instead")
        return root_struct
    #Start of top-level function which calls the enclosed recursive function
    parts = json_path.split('.')
    return build_structure(root_struct, json_path, resource_definition, entity_definition, parts, value, [])
|
|
202
|
+
|
|
203
|
+
# main recursive function to drill into the json structure, assign paths, and create structure where needed
def build_structure(current_struct, json_path, resource_definition, entity_definition, parts, value, previous_parts):
    """Recursively consume `parts` (dot-notation components of `json_path`),
    creating intermediate dicts/lists inside `current_struct` as needed, and
    assign `value` at the final component via `fhir_formatting.assign_value`.

    `previous_parts` accumulates already-visited components for error messages.
    Returns the (possibly replaced) current level of structure.
    """
    if len(parts) == 0:
        return current_struct
    #Grab current part
    part = parts[0]
    #SPECIAL HANDLING CLAUSE: paths registered in special_values take over entirely.
    matching_handler = next((handler for handler in special_values.custom_handlers if (json_path.startswith(handler) or json_path == handler)), None)
    if matching_handler is not None:
        return special_values.custom_handlers[matching_handler].assign_value(json_path, resource_definition, entity_definition, current_struct, parts[-1], value)
    #Ignore dollar sign ($) and drill farther down
    if part == '$' or part == resource_definition['ResourceType'].strip():
        #Ignore the dollar sign and the resourcetype
        return build_structure_recurse(current_struct, json_path, resource_definition, entity_definition, parts, value, previous_parts, part)

    # If parts length is one then this is the final key to access and pair
    if len(parts) == 1:
        #Check for numeric qualifier '[0]' and '[1]'
        if '[' in part and ']' in part:
            #Separate the key from the qualifier
            key_part = part[:part.index('[')]
            qualifier = part[part.index('[')+1:part.index(']')]
            qualifier_condition = qualifier.split('=')

            #If there is no key part, aka '[0]', '[1]' etc, then it's a simple accessor
            if key_part is None or key_part == '':
                if not qualifier.isdigit():
                    raise TypeError(f"ERROR: Full jsonpath: {json_path} - current path - {'.'.join(previous_parts + parts[:1])} - qualifier - {qualifier} - standalone qualifier expected to be a single index numeric ([0], [1], etc)")
                # NOTE(review): rebinding current_struct to a list here is local
                # only — the caller's reference is not updated; confirm intended.
                if current_struct == {}:
                    current_struct = []
                if not isinstance(current_struct, list):
                    raise TypeError(f"ERROR: Full jsonpath: {json_path} - current path - {'.'.join(previous_parts + parts[:1])} - Expected a list, but got {type(current_struct).__name__} instead.")
                # Reuse `part` as the numeric index; pad the list up to it.
                part = int(qualifier)
                if part + 1 > len(current_struct):
                    current_struct.extend({} for x in range (part + 1 - len(current_struct)))
        #Actual assigning to the path (key or padded numeric index)
        fhir_formatting.assign_value(current_struct, part, value, entity_definition['valueType'])
        return current_struct

    # If there is a simple qualifier with '[' and ']' on a non-final part
    elif '[' in part and ']' in part:
        #Separate the key from the qualifier
        key_part = part[:part.index('[')]
        qualifier = part[part.index('[')+1:part.index(']')]
        qualifier_condition = qualifier.split('=')

        #If there is no key part, aka '[0]', '[1]' etc, then it's a simple accessor
        if key_part is None or key_part == '':
            if not qualifier.isdigit():
                raise TypeError(f"ERROR: Full jsonpath: {json_path} - current path - {'.'.join(previous_parts + parts[:1])} - qualifier - {qualifier} - standalone qualifier expected to be a single index numeric ([0], [1], etc)")
            if current_struct == {}:
                current_struct = []
            if not isinstance(current_struct, list):
                raise TypeError(f"ERROR: Full jsonpath: {json_path} - current path - {'.'.join(previous_parts + parts[:1])} - Expected a list, but got {type(current_struct).__name__} instead.")
            # Pad the list so the index exists, then recurse into that slot.
            qualifier_as_number = int(qualifier)
            if qualifier_as_number + 1 > len(current_struct):
                current_struct.extend({} for x in range (qualifier_as_number + 1 - len(current_struct)))
            inner_struct = current_struct[qualifier_as_number]
            inner_struct = build_structure_recurse(inner_struct, json_path, resource_definition, entity_definition, parts, value, previous_parts, part)
            current_struct[qualifier_as_number] = inner_struct
            return current_struct
        # Create the key part in the structure (lists replace pre-existing dicts)
        if (not key_part in current_struct) or (isinstance(current_struct[key_part], dict)):
            current_struct[key_part] = []
        #If there is a key_part and the qualifier condition is defined (key=value form)
        if len(qualifier_condition) == 2:
            #special handling for code
            if key_part != "coding" and (qualifier_condition[0] in ('code', 'system')):
                #Move into the coding section if a qualifier asks for 'code' or 'system'
                # NOTE(review): after this switch, current_struct is the coding
                # list but is still indexed by key_part below — confirm this
                # path is exercised as intended.
                if 'coding' not in current_struct:
                    current_struct['coding'] = []
                current_struct = current_struct['coding']
            qualifier_key, qualifier_value = qualifier_condition
            # Retrieve an inner structure if one already exists that matches the criteria
            inner_struct = next((innerElement for innerElement in current_struct[key_part] if isinstance(innerElement, dict) and innerElement.get(qualifier_key) == qualifier_value), None)
            #If no inner structure exists, create one instead
            if inner_struct is None:
                inner_struct = {qualifier_key: qualifier_value}
                current_struct[key_part].append(inner_struct)
            #Recurse into that inner structure where the qualifier matched to continue the part traversal
            inner_struct = build_structure_recurse(inner_struct, json_path, resource_definition, entity_definition, parts, value, previous_parts, part)
            return current_struct
        #If there's no qualifier condition, but an index aka '[0]', '[1]' etc, then it's a simple accessor
        elif qualifier.isdigit():
            if not isinstance(current_struct[key_part], list):
                # NOTE(review): `previous_parts + parts[0]` concatenates a list
                # with a string and would itself raise TypeError — likely meant
                # parts[:1]; left unchanged here.
                raise TypeError(f"ERROR: Full jsonpath: {json_path} - current path - {'.'.join(previous_parts + parts[0])} - Expected a list, but got {type(current_struct).__name__} instead.")
            qualifier_as_number = int(qualifier)
            # NOTE(review): this padding checks len(current_struct), not
            # len(current_struct[key_part]) — confirm the intended bound.
            if qualifier_as_number > len(current_struct):
                current_struct[key_part].extend({} for x in range (qualifier_as_number - len(current_struct)))
            inner_struct = current_struct[key_part][qualifier_as_number]
            inner_struct = build_structure_recurse(inner_struct, json_path, resource_definition, entity_definition, parts, value, previous_parts, part)
            current_struct[key_part][qualifier_as_number] = inner_struct
            return current_struct
    #No qualifier: plain dict accessor, created on demand
    else:
        if(part not in current_struct):
            current_struct[part] = {}
        inner_struct = build_structure_recurse(current_struct[part], json_path, resource_definition, entity_definition, parts, value, previous_parts, part)
        current_struct[part] = inner_struct
        return current_struct
|
|
303
|
+
|
|
304
|
+
#Helper function to quickly recurse and return the next level of structure. Used by main recursive function
def build_structure_recurse(current_struct, json_path, resource_definition, entity_definition, parts, value, previous_parts, part):
    """Record `part` as visited and recurse into the remaining path components."""
    previous_parts.append(part)
    return build_structure(current_struct, json_path, resource_definition, entity_definition, parts[1:], value, previous_parts)
|
|
@@ -0,0 +1,279 @@
|
|
|
1
|
+
import re
|
|
2
|
+
from datetime import datetime, time, timezone
|
|
3
|
+
|
|
4
|
+
#Dictionary of regexes validating primitive FHIR datatypes (per the FHIR R4 spec).
# Fixed: literals are now raw strings (the originals relied on ambiguous
# '\s'/'\.' escapes, which emit DeprecationWarnings on modern Python) and a
# stray '}' was removed from the decimal exponent quantifier ('{1,9}}').
type_regexes = {
    'code': r'[^\s]+( [^\s]+)*',
    'decimal': r'-?(0|[1-9][0-9]{0,17})(\.[0-9]{1,17})?([eE][+-]?[0-9]{1,9})?',
    'id': r'[A-Za-z0-9\-\.]{1,64}',
    'integer': r'[0]|[-+]?[1-9][0-9]*',
    'oid': r'urn:oid:[0-2](\.(0|[1-9][0-9]*))+',
    'positiveInt': r'[1-9][0-9]*',
    'unsignedInt': r'[0]|([1-9][0-9]*)',
    'uuid': r'urn:uuid:[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'
}
|
|
15
|
+
# Assign final_struct[key] to value; with formatting given the valueType
def assign_value(final_struct, key, value, valueType):
    """Format `value` per the FHIR `valueType` and store it at `final_struct[key]`.

    Blank values and a missing valueType are skipped, leaving `final_struct`
    unchanged. ValueErrors raised during parsing are printed, not propagated.
    Returns `final_struct`.
    """
    # Removing white space
    if isinstance(value, str):
        value = value.strip()
    # Checking for null or empty string values. If so; we do not construct the value
    if not value:
        return final_struct
    # If the valueType is not provided, do not construct the value.
    if valueType is None:
        return final_struct
    # Normalize once; every branch compares against lowercase literals.
    # (Fixed: the original compared against mixed-case 'positiveInt' and
    # 'unsignedInt', so those branches could never match.)
    value_type = valueType.lower()
    # Switch on valueType to construct the formatted value.
    try:
        if value_type == 'address':
            address_value = parse_flexible_address(value)
            if address_value:
                final_struct[key] = address_value
        elif value_type == 'base64binary':
            final_struct[key] = value
        elif value_type == 'boolean':
            # NOTE(review): bool() of any non-empty string (even "false") is
            # True — preserved from the original; confirm intended.
            final_struct[key] = bool(value)
        elif value_type == 'codeableconcept':
            final_struct[key] = caret_delimited_string_to_codeableconcept(value)
        elif value_type == 'code':
            match = re.search(type_regexes['code'], value)
            final_struct[key] = match.group(0) if match else ''
        elif value_type == 'coding':
            final_struct[key] = caret_delimited_string_to_coding(value)
        elif value_type == 'date':
            if isinstance(value, datetime):
                final_struct[key] = value.date()
            elif isinstance(value, str):
                final_struct[key] = parse_iso8601_date(value).replace(tzinfo=timezone.utc)
        elif value_type == 'datetime':
            if isinstance(value, datetime):
                final_struct[key] = value.replace(tzinfo=timezone.utc)
            else:
                final_struct[key] = parse_iso8601_datetime(value).replace(tzinfo=timezone.utc)
        elif value_type == 'decimal':
            final_struct[key] = value
        elif value_type == 'id':
            # Fixed: re.search(pattern, string) — the original swapped the
            # arguments here and in the integer/oid/positiveInt/unsignedInt/
            # uuid branches below.
            match = re.search(type_regexes['id'], value)
            final_struct[key] = match.group(0) if match else ''
        elif value_type == 'instant':
            if isinstance(value, datetime):
                final_struct[key] = value.replace(tzinfo=timezone.utc)
            else:
                # Fixed: removed the duplicated `final_struct[key] =` chain.
                final_struct[key] = parse_iso8601_instant(value).replace(tzinfo=timezone.utc)
        elif value_type == 'integer':
            match = re.search(type_regexes['integer'], value)
            final_struct[key] = int(match.group(0)) if match else 0
        elif value_type == 'oid':
            match = re.search(type_regexes['oid'], value)
            final_struct[key] = match.group(0) if match else ''
        elif value_type == 'positiveint':
            match = re.search(type_regexes['positiveInt'], value)
            final_struct[key] = int(match.group(0)) if match else 0
        elif value_type == 'quantity':
            final_struct[key] = string_to_quantity(value)
        elif value_type == 'string':
            final_struct[key] = value
        elif value_type == 'string[]':
            # Accumulate repeated string values into a list.
            if not key in final_struct:
                final_struct[key] = [value]
            else:
                final_struct[key].append(value)
        elif value_type == 'time':
            # Fixed: the original called isinstance(time) — a TypeError at runtime.
            if isinstance(value, time):
                final_struct[key] = value
            else:
                final_struct[key] = parse_iso8601_time(value)
        elif value_type == 'unsignedint':
            match = re.search(type_regexes['unsignedInt'], value)
            final_struct[key] = int(match.group(0)) if match else 0
        elif value_type == 'uri':
            final_struct[key] = value
        elif value_type == 'url':
            final_struct[key] = value
        elif value_type == 'uuid':
            match = re.search(type_regexes['uuid'], value)
            final_struct[key] = match.group(0) if match else ''
        # (A second, unreachable 'coding' branch in the original was removed —
        # the earlier 'coding' branch always matched first.)
        else:
            print(f"ERROR: - Rending Value - {key} - {value} - {valueType} - Saw a valueType of '{valueType}' unsupported in current formatting")
    except ValueError as e:
        print(e)
    return final_struct
|
|
105
|
+
|
|
106
|
+
def parse_iso8601_date(input_string):
    """Extract a YYYY-MM-DD date anywhere in `input_string` and return it as a datetime.

    Raises ValueError when no ISO 8601 date is present.
    """
    found = re.search(r'(\d{4}-\d{2}-\d{2})', input_string)
    if not found:
        raise ValueError(f"Input string '{input_string}' is not in the valid ISO 8601 date format")
    # The time component (if any) is deliberately ignored; midnight results.
    return datetime.strptime(found.group(1), '%Y-%m-%d')
|
|
115
|
+
|
|
116
|
+
def parse_iso8601_datetime(input_string):
    """Parse an ISO 8601 date or datetime embedded in *input_string*.

    Accepts 'YYYY-MM-DD' or 'YYYY-MM-DDTHH:MM:SS', optionally suffixed with
    'Z'. A trailing 'Z' produces a timezone-aware UTC datetime; otherwise the
    result is naive. A date-only match is returned at midnight.

    Raises:
        ValueError: when no date/datetime-shaped substring is found.
    """
    # Capture the date, optional time, and optional 'Z' as separate groups so
    # strptime never sees the 'Z'. The old single-group pattern folded the 'Z'
    # into the matched text, which made strptime raise on valid UTC inputs.
    pattern = r'(\d{4}-\d{2}-\d{2})(T\d{2}:\d{2}:\d{2})?(Z)?'
    match = re.search(pattern, input_string)
    if not match:
        raise ValueError(f"Input string '{input_string}' is not in the valid ISO 8601 format date or datetime format")
    date_part, time_part, zulu = match.groups()
    if time_part:
        parsed = datetime.strptime(date_part + time_part, '%Y-%m-%dT%H:%M:%S')
    else:
        # Date-only input: strptime leaves the time at midnight.
        parsed = datetime.strptime(date_part, '%Y-%m-%d')
    # Only an explicit 'Z' marks the value as UTC; otherwise leave it naive.
    # (The old code attached timezone.utc in both branches despite its own
    # "convert without timezone" comment.)
    if zulu:
        parsed = parsed.replace(tzinfo=timezone.utc)
    return parsed
|
|
147
|
+
|
|
148
|
+
def parse_iso8601_instant(input_string):
    """Parse an ISO 8601 instant (datetime, optional fractional seconds).

    Accepts 'YYYY-MM-DDTHH:MM:SS', optionally followed by a 1-3 digit
    fraction ('.sss') and/or 'Z'. A 'Z' suffix yields a UTC-aware datetime;
    otherwise the result is naive.

    Raises:
        ValueError: when no instant-shaped substring is found.
    """
    # Fraction and 'Z' are captured separately so strptime never sees the 'Z'.
    # The old pattern included the 'Z' in the matched text, making strptime
    # raise on valid UTC instants such as '...T10:20:30Z'.
    pattern = r'(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})(\.\d{1,3})?(Z)?'
    match = re.search(pattern, input_string)
    if not match:
        raise ValueError(f"Input string '{input_string}' is not in the valid ISO 8601 instant format")
    base, fraction, zulu = match.groups()
    # Decide on fractional seconds from the match itself, not from a '.'
    # appearing anywhere in the whole input string (old behavior).
    fmt = '%Y-%m-%dT%H:%M:%S.%f' if fraction else '%Y-%m-%dT%H:%M:%S'
    parsed = datetime.strptime(base + (fraction or ''), fmt)
    if zulu:
        parsed = parsed.replace(tzinfo=timezone.utc)
    return parsed
|
|
171
|
+
|
|
172
|
+
def parse_iso8601_time(input_string):
    """Parse a time-of-day string (HH:MM:SS, optional fractional seconds).

    Returns a ``datetime.time`` built from the first time-shaped substring.

    Raises:
        ValueError: when no time-shaped substring is found (or the matched
            seconds value, e.g. a leap-second 60, is out of range for
            ``time``).
    """
    # Local import: the module level only imports the `datetime` class.
    from datetime import time as time_of_day
    pattern = r'((?:[01][0-9]|2[0-3]):[0-5][0-9]:([0-5][0-9]|60)(\.[0-9]{1,9})?)'
    match = re.search(pattern, input_string)
    if not match:
        raise ValueError(f"Input string '{input_string}' is not in the valid time format")
    hours_text, minutes_text, seconds_text = match.group(1).split(':')
    seconds = float(seconds_text)  # float() absorbs the optional fraction
    # The old code called datetime.time(hour=...), i.e. the unbound instance
    # method on the datetime class, which raises TypeError on every call.
    # Construct a real time object instead.
    return time_of_day(
        hour=int(hours_text),
        minute=int(minutes_text),
        second=int(seconds),
        microsecond=int((seconds % 1) * 1_000_000),
    )
|
|
187
|
+
|
|
188
|
+
def parse_flexible_address(address):
    """Parse a caret-delimited address into a FHIR Address-shaped dict.

    Expected layout: line^city^district^state^postalCode^country, where any
    segment may be empty. Empty segments are dropped from the result; 'line'
    is wrapped in a one-element list to match FHIR's Address.line array.

    Returns:
        dict with any of 'line', 'city', 'district', 'state', 'postalCode',
        'country' — or None when nothing matched / every segment was empty.
    """
    # Postal code is typically numeric (5 digits, optional -4 extension).
    postal_code_pattern = r'(?P<postalCode>\d{5}(?:-\d{4})?|)'
    # State as a two-letter code (US-centric; may vary internationally).
    state_pattern = r'(?P<state>[A-Za-z]{2}|)'
    # Country is whatever follows the final caret, if anything.
    country_pattern = r'(?:\s*(?P<country>[\w\s]+|))?$'
    full_pattern = rf'^(?P<line>.*?)\^(?P<city>.*?)\^(?P<district>.*?)\^{state_pattern}\^{postal_code_pattern}\^{country_pattern}'

    match = re.search(full_pattern, address)
    if not match:
        return None  # Input does not follow the caret-delimited layout.

    # Keep only the segments that actually carried text.
    result = {k: v for k, v in match.groupdict().items() if v not in ("", None)}
    if not result:
        return None
    # Use .get(): an empty line segment is filtered out above, so a plain
    # result['line'] access raised KeyError for addresses with no street line.
    if isinstance(result.get('line'), str):
        result['line'] = [result['line']]  # FHIR Address.line is an array
    return result
|
|
214
|
+
|
|
215
|
+
def caret_delimited_string_to_codeableconcept(caret_delimited_str):
    """Convert 'system^code^display[^text]' into a FHIR CodeableConcept dict.

    Multiple codings are separated by '~'. Each coding contributes whichever
    of system/code/display it carries (empty segments become '').

    NOTE: only the *last* coding may carry a 4th segment, which becomes the
    concept-level 'text'.
    """
    codeable_concept = {"coding": []}
    segments = []
    for coding_text in caret_delimited_str.split('~'):
        segments = coding_text.split('^')
        entry = {}
        # Map positional segments onto the Coding fields that are present.
        for position, field in enumerate(('system', 'code', 'display')):
            if len(segments) > position:
                entry[field] = segments[position] or ''
        codeable_concept['coding'].append(entry)
    # A 4th segment on the final coding supplies the CodeableConcept.text.
    if len(segments) == 4:
        codeable_concept['text'] = segments[3]
    return codeable_concept
|
|
243
|
+
|
|
244
|
+
def caret_delimited_string_to_coding(caret_delimited_str):
    """Convert 'system^code^display' into a single FHIR Coding dict.

    Missing trailing segments are simply omitted; empty segments become ''.
    """
    fields = caret_delimited_str.split('^')
    coding = {}
    # Map positional segments onto the Coding fields that are present.
    for position, name in enumerate(('system', 'code', 'display')):
        if len(fields) > position:
            coding[name] = fields[position] or ''
    return coding
|
|
260
|
+
|
|
261
|
+
def string_to_quantity(quantity_str):
    """Convert 'value^unit' into a FHIR Quantity dict.

    The numeric value precedes the first caret; anything after it is treated
    as a UCUM unit and copied into both 'unit' and 'code', with 'system' set
    to the UCUM URI.

    Raises:
        ValueError: if the value portion is not parseable as a float.
    """
    pieces = quantity_str.split('^', maxsplit=1)
    quantity = {}
    if pieces:
        quantity['value'] = float(pieces[0])
    if len(pieces) > 1:
        unit = pieces[1]
        quantity['unit'] = unit
        quantity['system'] = 'http://unitsofmeasure.org'
        # assumes the unit text is already a valid UCUM code — TODO confirm
        quantity['code'] = unit
    return quantity
|
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
import openpyxl
|
|
2
|
+
|
|
3
|
+
# Function to read the xlsx file and access specific sheets
|
|
4
|
+
def read_xlsx_and_process(file_path):
    """Load a FHIR-Sheets workbook and process its three known sheets.

    Args:
        file_path: path to the .xlsx workbook.

    Returns:
        dict with 'resource_definition_entities', 'resource_link_entities',
        'patient_data_entities', and 'num_entries'. Missing sheets yield
        empty results rather than an error.
    """
    workbook = openpyxl.load_workbook(file_path)

    # Default every result so a workbook missing one of the expected sheets
    # returns empty data instead of raising NameError at the return below.
    resource_definition_entities = []
    resource_link_entities = []
    patient_data_entities = {}
    num_entries = 0

    if 'ResourceDefinitions' in workbook.sheetnames:
        resource_definition_entities = process_sheet_resource_definitions(workbook['ResourceDefinitions'])

    if 'ResourceLinks' in workbook.sheetnames:
        resource_link_entities = process_sheet_resource_links(workbook['ResourceLinks'])

    if 'PatientData' in workbook.sheetnames:
        # Patient data validation cross-references the resource definitions,
        # so this sheet is processed last.
        patient_data_entities, num_entries = process_sheet_patient_data(
            workbook['PatientData'], resource_definition_entities)

    return {
        "resource_definition_entities": resource_definition_entities,
        "resource_link_entities": resource_link_entities,
        "patient_data_entities": patient_data_entities,
        "num_entries": num_entries,
    }
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
# Function to process the specific sheet with 'Entity Name', 'ResourceType', and 'Profile(s)'
|
|
30
|
+
def process_sheet_resource_definitions(sheet):
    """Read the ResourceDefinitions sheet into a list of row dicts.

    Row 1 supplies the headers; row 2 is skipped; data starts at row 3.
    Fully blank rows are ignored. The 'Profile(s)' cell, when present, is
    split on commas into a list of trimmed URLs.
    """
    header_cells = next(sheet.iter_rows(min_row=1, max_row=1))
    headers = [cell.value for cell in header_cells]

    definitions = []
    for row in sheet.iter_rows(min_row=3, values_only=True):
        record = dict(zip(headers, row))
        # Skip rows where every cell is empty.
        if all(cell is None or cell == "" for cell in record.values()):
            continue
        profiles = record.get("Profile(s)")
        if profiles:
            record["Profile(s)"] = [url.strip() for url in profiles.split(",")]
        definitions.append(record)
    return definitions
|
|
45
|
+
|
|
46
|
+
# Function to process the specific sheet with 'OriginResource', 'ReferencePath', and 'DestinationResource'
|
|
47
|
+
def process_sheet_resource_links(sheet):
    """Read the ResourceLinks sheet into a list of row dicts.

    Row 1 supplies the headers ('OriginResource', 'ReferencePath',
    'DestinationResource'); row 2 is skipped; data starts at row 3.
    Fully blank rows are ignored.
    """
    headers = [cell.value for cell in next(sheet.iter_rows(min_row=1, max_row=1))]
    resource_links = []
    for row in sheet.iter_rows(min_row=3, values_only=True):
        row_data = dict(zip(headers, row))
        # Skip blank rows by checking the VALUES. The original iterated the
        # dict itself (its keys, i.e. the headers), so blank rows were never
        # actually skipped; this matches process_sheet_resource_definitions.
        if all(cell is None or cell == "" for cell in row_data.values()):
            continue
        resource_links.append(row_data)
    return resource_links
|
|
57
|
+
|
|
58
|
+
# Function to process the "PatientData" sheet
|
|
59
|
+
# Function to process the "PatientData" sheet
def process_sheet_patient_data(sheet, resource_definition_entities):
    """Build the patient data structure from the PatientData sheet.

    Sheet layout (columns 3 onward, one column per data element):
    row 1 = Entity To Query, row 2 = JsonPath, row 3 = Value Type,
    row 4 = Value Set, row 6 = Data Element name. Rows 7+ each hold one
    patient entry; every cell is appended to its column's 'values' list.

    Args:
        sheet: the openpyxl 'PatientData' worksheet.
        resource_definition_entities: rows from the ResourceDefinitions sheet,
            used only to warn about unknown 'Entity To Query' names.

    Returns:
        (patient_data, num_entries) where patient_data maps
        entity name -> data element name -> {jsonpath, valueType, valuesets,
        values}, and num_entries counts the non-blank data rows.
    """
    # Initialize the dictionary to store the processed data
    patient_data = {}
    # Pass 1: read the column metadata from the first 6 rows.
    for col in sheet.iter_cols(min_row=1, max_row=6, min_col=3, values_only=True): # Start from 3rd column
        if all(entry is None for entry in col):
            continue
        entity_name = col[0] # The entity name comes from row 1 (Entity To Query)
        field_name = col[5] # The "Data Element" name comes from row 6
        # Warn when a labelled column has no entity to attach it to.
        if (entity_name is None or entity_name == "") and (field_name is not None and field_name != ""):
            print(f"WARNING: - Reading Patient Data Issue - {field_name} - 'Entity To Query' cell missing for column labelled '{field_name}', please provide entity name from the ResourceDefinitions tab.")

        # Warn (but still process) when the entity is not declared in the
        # ResourceDefinitions tab.
        if entity_name not in [entry['Entity Name'] for entry in resource_definition_entities]:
            print(f"WARNING: - Reading Patient Data Issue - {field_name} - 'Entity To Query' cell has entity named '{entity_name}', however, the ResourceDefinition tab has no matching resource. Please provide a corresponding entry in the ResourceDefinition tab.")
        # Create structure for this entity if not already present
        if entity_name not in patient_data:
            patient_data[entity_name] = {}

        # Add jsonpath, valuesets, and initialize an empty list for 'values'
        if field_name not in patient_data[entity_name]:
            patient_data[entity_name][field_name] = {
                "jsonpath": col[1], # JsonPath from row 2
                "valueType": col[2], # Value Type from row 3
                "valuesets": col[3], # Value Set from row 4
                "values": [] # Initialize empty list for actual values
            }

    # Pass 2: read the actual data entries, one patient per row, from row 7 on.
    num_entries = 0
    for row in sheet.iter_rows(min_row=7, values_only=True): # Data rows start at row 7
        if all(cell is None for cell in row):
            continue
        num_entries = num_entries + 1
        entity_name = row[0] # The entity name comes from the first column of each row
        for i, value in enumerate(row[2:], start=1): # Iterate through the values in the columns
            # NOTE(review): entity_name from row[0] above is immediately
            # overwritten here; each column's entity/field come from the
            # header rows (column index i + 2 maps back to sheet columns 3+).
            entity_name = sheet.cell(row=1, column=i + 2).value
            field_name = sheet.cell(row=6, column=i + 2).value # Get the Data Element for this column
            # Silently ignore cells whose column was not registered in pass 1.
            if entity_name in patient_data and field_name in patient_data[entity_name]:
                # Append the actual data values to the 'values' array
                patient_data[entity_name][field_name]["values"].append(value)
    return patient_data, num_entries
|
|
@@ -0,0 +1,360 @@
|
|
|
1
|
+
|
|
2
|
+
from . import conversion
|
|
3
|
+
|
|
4
|
+
from abc import ABC, abstractmethod
|
|
5
|
+
|
|
6
|
+
# Define an abstract base class
|
|
7
|
+
class AbstractCustomValueHandler(ABC):
    """Interface for handlers that write specially-structured values.

    Each concrete handler knows how to place one kind of value (extension,
    identifier, component, ...) into the resource dict being built.
    """

    @abstractmethod
    def assign_value(self, json_path, resource_definition, entity_definition, final_struct, key, value):
        """Mutate *final_struct* in place, writing *value* for *key*.

        json_path is the spreadsheet's JsonPath for this element;
        resource_definition / entity_definition carry the sheet metadata.
        """
        pass
|
|
12
|
+
|
|
13
|
+
class PatientRaceExtensionValueHandler(AbstractCustomValueHandler):
    """Builds the US Core race extension (ombCategory + text) on a Patient."""

    # OMB race categories keyed by lower-cased display text. Codes come from
    # the CDC Race & Ethnicity code system (urn:oid:2.16.840.1.113883.6.238).
    omb_categories = {
        "american indian or alaska native" : {
            "url" : "ombCategory",
            "valueCoding" : {
                "system" : "urn:oid:2.16.840.1.113883.6.238",
                "code" : "1002-5",
                "display" : "American Indian or Alaska Native"
            }
        },
        "asian" : {
            "url" : "ombCategory",
            "valueCoding" : {
                "system" : "urn:oid:2.16.840.1.113883.6.238",
                "code" : "2028-9",
                "display" : "Asian"
            }
        },
        "black or african american" : {
            "url" : "ombCategory",
            "valueCoding" : {
                "system" : "urn:oid:2.16.840.1.113883.6.238",
                "code" : "2054-5",
                "display" : "Black or African American"
            }
        },
        "native hawaiian or other pacific islander" : {
            "url" : "ombCategory",
            "valueCoding" : {
                "system" : "urn:oid:2.16.840.1.113883.6.238",
                # Was "2054-5" (a duplicate of Black or African American);
                # the OMB category code for this display is 2076-8.
                "code" : "2076-8",
                "display" : "Native Hawaiian or Other Pacific Islander"
            }
        },
        "white" : {
            "url" : "ombCategory",
            "valueCoding" : {
                "system" : "urn:oid:2.16.840.1.113883.6.238",
                "code" : "2106-3",
                "display" : "White"
            }
        }
    }

    # Template for a fresh us-core-race extension. The one-element set
    # {"$ombCategory"} is a deliberate placeholder that assign_value swaps
    # for a real ombCategory sub-extension.
    initial_race_json = {
        "extension" : [
            {
                "$ombCategory"
            },
            {
                "url" : "text",
                "valueString" : "$text"
            }
        ],
        "url" : "http://hl7.org/fhir/us/core/StructureDefinition/us-core-race"
    }

    def assign_value(self, json_path, resource_definition, entity_definition, final_struct, key, value):
        """Create/update the us-core-race extension for *value* (a display
        string such as 'White'); returns final_struct on a successful match."""
        import copy  # local import keeps this block self-contained
        if 'extension' not in final_struct:
            final_struct['extension'] = []
        race_block = utilFindExtensionWithURL(final_struct['extension'], 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-race')
        if race_block is None:
            # Deep-copy the class-level template: appending it directly would
            # share (and mutate) a single dict across every patient processed,
            # destroying the $ombCategory placeholder after the first use.
            race_block = copy.deepcopy(self.initial_race_json)
            final_struct['extension'].append(race_block)
        for race_key, race_structure in self.omb_categories.items():
            # omb_categories keys are already lower-case.
            if value.strip().lower() == race_key:
                for i, item in enumerate(race_block["extension"]):
                    if isinstance(item, set) and "$ombCategory" in item:
                        # Swap the placeholder set for the real sub-extension
                        # (copied, so the class-level table stays pristine).
                        race_block["extension"][i] = copy.deepcopy(race_structure)
                    elif isinstance(item, dict) and item.get("valueString") == "$text":
                        item['valueString'] = race_key
                return final_struct
|
|
89
|
+
|
|
90
|
+
class PatientEthnicityExtensionValueHandler(AbstractCustomValueHandler):
    """Builds the US Core ethnicity extension (ombCategory + text) on a Patient."""

    # OMB ethnicity categories; codes from urn:oid:2.16.840.1.113883.6.238.
    # Both "Non ..." and "Not ..." spellings map to the same code.
    omb_categories = {
        "Hispanic or Latino" : {
            "url" : "ombCategory",
            "valueCoding" : {
                "system" : "urn:oid:2.16.840.1.113883.6.238",
                "code" : "2135-2",
                "display" : "Hispanic or Latino"
            }
        },
        "Non Hispanic or Latino" : {
            "url" : "ombCategory",
            "valueCoding" : {
                "system" : "urn:oid:2.16.840.1.113883.6.238",
                "code" : "2186-5",
                "display" : "Not Hispanic or Latino"
            }
        },
        "Not Hispanic or Latino" : {
            "url" : "ombCategory",
            "valueCoding" : {
                "system" : "urn:oid:2.16.840.1.113883.6.238",
                "code" : "2186-5",
                "display" : "Not Hispanic or Latino"
            }
        }
    }

    # Template for a fresh us-core-ethnicity extension; the one-element set
    # {"$ombCategory"} is a placeholder swapped out by assign_value.
    initial_ethnicity_json = {
        "extension" : [
            {
                "$ombCategory"
            },
            {
                "url" : "text",
                "valueString" : "$text"
            }
        ],
        "url" : "http://hl7.org/fhir/us/core/StructureDefinition/us-core-ethnicity"
    }

    def assign_value(self, json_path, resource_definition, entity_definition, final_struct, key, value):
        """Create/update the us-core-ethnicity extension for *value* (a display
        string such as 'Hispanic or Latino'); returns final_struct on a match."""
        import copy  # local import keeps this block self-contained
        if 'extension' not in final_struct:
            final_struct['extension'] = []
        ethnicity_block = utilFindExtensionWithURL(final_struct['extension'], 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-ethnicity')
        if ethnicity_block is None:
            # Deep-copy the class-level template: appending it directly would
            # share (and mutate) one dict across every patient processed,
            # destroying the $ombCategory placeholder after the first use.
            ethnicity_block = copy.deepcopy(self.initial_ethnicity_json)
            final_struct['extension'].append(ethnicity_block)
        for ethnicity_key, ethnicity_structure in self.omb_categories.items():
            # Case-insensitive comparison (keys here are mixed-case).
            if value.strip().lower() == ethnicity_key.strip().lower():
                for i, item in enumerate(ethnicity_block["extension"]):
                    if isinstance(item, set) and "$ombCategory" in item:
                        # Swap the placeholder set for the real sub-extension.
                        ethnicity_block["extension"][i] = copy.deepcopy(ethnicity_structure)
                    elif isinstance(item, dict) and item.get("valueString") == "$text":
                        item['valueString'] = ethnicity_key
                return final_struct
|
|
150
|
+
|
|
151
|
+
class PatientBirthSexExtensionValueHandler(AbstractCustomValueHandler):
    """Assigns the US Core birthsex extension on a Patient."""

    # Template for a new birthsex extension; valueCode is filled in per call.
    birth_sex_block = {
        "url" : "http://hl7.org/fhir/us/core/StructureDefinition/us-core-birthsex",
        "valueCode" : "$value"
    }

    def assign_value(self, json_path, resource_definition, entity_definition, final_struct, key, value):
        """Create/update the birthsex extension, setting its valueCode to *value*."""
        import copy  # local import keeps this block self-contained
        if 'extension' not in final_struct:
            final_struct['extension'] = []
        # Look up an existing birthsex extension by its OWN url. The original
        # searched for the ethnicity URL here, so an existing birthsex
        # extension was never found and duplicates could be appended.
        birthsex_block = utilFindExtensionWithURL(final_struct['extension'], 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-birthsex')
        if birthsex_block is None:
            # Deep-copy the class-level template so patients don't share one
            # mutable dict.
            birthsex_block = copy.deepcopy(self.birth_sex_block)
            final_struct['extension'].append(birthsex_block)
        # Always record the supplied code, whether the block is new or found.
        birthsex_block['valueCode'] = value
|
|
167
|
+
|
|
168
|
+
class PatientMRNIdentifierValueHandler(AbstractCustomValueHandler):
    """Writes Patient.identifier fields onto the MRN-typed identifier."""

    # Template for a new Medical Record Number identifier (v2-0203 type 'MR').
    patient_mrn_block = {
        "use" : "usual",
        "type" : {
            "coding" : [
                {
                    "system" : "http://terminology.hl7.org/CodeSystem/v2-0203",
                    "code" : "MR",
                    "display" : "Medical Record Number"
                }
            ],
            "text" : "Medical Record Number"
        },
        "system" : "$system",
        "value" : "$value"
    }

    def assign_value(self, json_path, resource_definition, entity_definition, final_struct, key, value):
        """Set *key* (e.g. 'system' or 'value') on the patient's MR identifier,
        creating that identifier when it does not exist yet."""
        import copy  # local import keeps this block self-contained
        if 'identifier' not in final_struct:
            final_struct['identifier'] = []
        # Find an existing MR-typed identifier (stop at the first match).
        target_identifier = None
        for identifier in final_struct['identifier']:
            for coding in identifier.get('type', {}).get('coding', []):
                if coding.get('code') == 'MR':
                    target_identifier = identifier
                    break
            if target_identifier is not None:
                break
        if target_identifier is None:
            # Deep-copy the class template: appending it directly would share
            # one mutable dict across every resource processed.
            target_identifier = copy.deepcopy(self.patient_mrn_block)
            final_struct['identifier'].append(target_identifier)
        target_identifier[key] = value
|
|
201
|
+
|
|
202
|
+
class PatientSSNIdentifierValueHandler(AbstractCustomValueHandler):
    """Writes Patient.identifier fields onto the SSN-typed identifier."""

    # Template for a new Social Security Number identifier (v2-0203 type 'SS').
    # NOTE: attribute keeps its historical (misnamed) 'patient_mrn_block' name
    # for backward compatibility; it is the SSN template.
    patient_mrn_block = {
        "use" : "usual",
        "type" : {
            "coding" : [
                {
                    "system" : "http://terminology.hl7.org/CodeSystem/v2-0203",
                    "code" : "SS"
                }
            ],
            "text" : "Social Security Number"
        },
        "system" : "$system",
        "value" : "$value"
    }

    def assign_value(self, json_path, resource_definition, entity_definition, final_struct, key, value):
        """Set *key* (e.g. 'system' or 'value') on the patient's SS identifier,
        creating that identifier when it does not exist yet."""
        import copy  # local import keeps this block self-contained
        if 'identifier' not in final_struct:
            final_struct['identifier'] = []
        # Find an existing SS-typed identifier (stop at the first match).
        target_identifier = None
        for identifier in final_struct['identifier']:
            for coding in identifier.get('type', {}).get('coding', []):
                if coding.get('code') == 'SS':
                    target_identifier = identifier
                    break
            if target_identifier is not None:
                break
        if target_identifier is None:
            # Deep-copy the class template: appending it directly would share
            # one mutable dict across every resource processed.
            target_identifier = copy.deepcopy(self.patient_mrn_block)
            final_struct['identifier'].append(target_identifier)
        target_identifier[key] = value
|
|
234
|
+
|
|
235
|
+
class OrganizationIdentiferNPIValueHandler(AbstractCustomValueHandler):
    """Writes an Organization's NPI into its identifier list."""

    # The official NPI naming-system URI is http://hl7.org/fhir/sid/us-npi;
    # the previous 'http://hl7.org.fhir/...' spelling was a typo.
    npi_identifier_block = {
        "system" : "http://hl7.org/fhir/sid/us-npi",
        "value" : "$value"
    }

    def assign_value(self, json_path, resource_definition, entity_definition, final_struct, key, value):
        """Create/update the NPI identifier and set its value (stringified)."""
        import copy  # local import keeps this block self-contained
        if 'identifier' not in final_struct:
            final_struct['identifier'] = []
        # .get() tolerates identifiers that carry no 'system' at all.
        identifier_block = next((entry for entry in final_struct['identifier'] if entry.get('system') == "http://hl7.org/fhir/sid/us-npi"), None)
        if identifier_block is None:
            # Deep-copy the class template: appending it directly would share
            # one mutable dict across every resource processed.
            identifier_block = copy.deepcopy(self.npi_identifier_block)
            final_struct['identifier'].append(identifier_block)
        identifier_block['value'] = str(value)
|
|
251
|
+
|
|
252
|
+
class OrganizationIdentiferCLIAValueHandler(AbstractCustomValueHandler):
    """Writes an Organization's CLIA number into its identifier list."""

    # CLIA numbers use the OID-based naming system below.
    clia_identifier_block = {
        "system" : "urn:oid:2.16.840.1.113883.4.7",
        "value" : "$value"
    }

    def assign_value(self, json_path, resource_definition, entity_definition, final_struct, key, value):
        """Create/update the CLIA identifier and set its value (stringified)."""
        import copy  # local import keeps this block self-contained
        if 'identifier' not in final_struct:
            final_struct['identifier'] = []
        # .get() tolerates identifiers that carry no 'system' at all.
        identifier_block = next((entry for entry in final_struct['identifier'] if entry.get('system') == "urn:oid:2.16.840.1.113883.4.7"), None)
        if identifier_block is None:
            # Deep-copy the class template: appending it directly would share
            # one mutable dict across every resource processed.
            identifier_block = copy.deepcopy(self.clia_identifier_block)
            final_struct['identifier'].append(identifier_block)
        identifier_block['value'] = str(value)
|
|
268
|
+
|
|
269
|
+
class PractitionerIdentiferNPIValueHandler(AbstractCustomValueHandler):
    """Writes a Practitioner's NPI into its identifier list."""

    # The official NPI naming-system URI is http://hl7.org/fhir/sid/us-npi;
    # the previous 'http://hl7.org.fhir/...' spelling was a typo.
    npi_identifier_block = {
        "system" : "http://hl7.org/fhir/sid/us-npi",
        "value" : "$value"
    }

    def assign_value(self, json_path, resource_definition, entity_definition, final_struct, key, value):
        """Create/update the NPI identifier and set its value (stringified,
        matching the Organization NPI handler; FHIR Identifier.value is a string)."""
        import copy  # local import keeps this block self-contained
        if 'identifier' not in final_struct:
            final_struct['identifier'] = []
        # .get() tolerates identifiers that carry no 'system' at all.
        identifier_block = next((entry for entry in final_struct['identifier'] if entry.get('system') == "http://hl7.org/fhir/sid/us-npi"), None)
        if identifier_block is None:
            # Deep-copy the class template: appending it directly would share
            # one mutable dict across every resource processed.
            identifier_block = copy.deepcopy(self.npi_identifier_block)
            final_struct['identifier'].append(identifier_block)
        identifier_block['value'] = str(value)
|
|
285
|
+
|
|
286
|
+
class ObservationComponentHandler(AbstractCustomValueHandler):
    """Routes pulse-oximetry component values into Observation.component.

    Recognizes the LOINC-coded components 3151-8 (inhaled oxygen flow rate)
    and 3150-0 (inhaled oxygen concentration), creating the component from a
    template when absent, then delegates the rest of the JsonPath to
    conversion.build_structure.
    """

    pulse_oximetry_oxygen_flow_rate = {
        "code" : {
            "coding" : [
                {
                    "system" : "http://loinc.org",
                    "code" : "3151-8",
                    "display" : "Inhaled oxygen flow rate"
                }
            ],
            "text" : "Inhaled oxygen flow rate"
        }
    }
    pulse_oximetry_oxygen_concentration = {
        "code" : {
            "coding" : [
                {
                    "system" : "http://loinc.org",
                    "code" : "3150-0",
                    "display" : "Inhaled oxygen concentration"
                }
            ],
            "text" : "Inhaled oxygen concentration"
        }
    }

    def assign_value(self, json_path, resource_definition, entity_definition, final_struct, key, value):
        """Locate (or create) the component named by the path qualifier and
        recurse into build_structure for the remainder of the path."""
        import copy  # local import keeps this block self-contained
        if 'component' not in final_struct:
            final_struct['component'] = []
        components = final_struct['component']
        # json_path looks like 'Observation.component[code=3151-8].valueQuantity...';
        # pull the '[...]' qualifier out of the second segment.
        parts = json_path.split('.')
        qualifier = parts[1][parts[1].index('[') + 1:parts[1].index(']')]
        qualifier_condition = qualifier.split('=')

        template_by_code = {
            '3151-8': self.pulse_oximetry_oxygen_flow_rate,
            '3150-0': self.pulse_oximetry_oxygen_concentration,
        }
        target_component = None
        if qualifier_condition[0] == 'code' and qualifier_condition[1] in template_by_code:
            code = qualifier_condition[1]
            target_component = findComponentWithCoding(components, code)
            if target_component is None:
                # Deep-copy the class-level template: appending it directly
                # would share (and mutate) one dict across every observation.
                target_component = copy.deepcopy(template_by_code[code])
                components.append(target_component)
        # Recurse to build the remainder of the path inside the component.
        # (target_component stays None for unrecognized qualifiers, matching
        # the original behavior.)
        return conversion.build_structure(target_component, '.'.join(parts[2:]), resource_definition, entity_definition, parts[2:], value, parts[:2])
|
|
335
|
+
|
|
336
|
+
def utilFindExtensionWithURL(extension_block, url):
    """Return the first entry of *extension_block* whose 'url' equals *url*.

    Entries without a 'url' key (including the placeholder sets used by the
    race/ethnicity templates) are skipped. Returns None when nothing matches.
    """
    return next(
        (entry for entry in extension_block if "url" in entry and entry["url"] == url),
        None,
    )
|
|
341
|
+
|
|
342
|
+
def findComponentWithCoding(components, code):
    """Return the first component whose code.coding list contains *code*.

    Returns None when no component carries the code.
    """
    for component in components:
        if any(coding['code'] == code for coding in component['code']['coding']):
            return component
    return None
|
|
344
|
+
|
|
345
|
+
# Dispatch table: JsonPath keys that need special handling, mapped to the
# handler instance that builds that structure. Conversion code matches an
# incoming path against these keys. Several spreadsheet spellings (MR/MRN,
# the ombCategory variants) intentionally share one handler instance; the
# "Observation.component[" entry is a bare prefix that matches any
# component qualifier.
custom_handlers = {
    "Patient.extension[Race].ombCategory": PatientRaceExtensionValueHandler(),
    "Patient.extension[Ethnicity].ombCategory": PatientEthnicityExtensionValueHandler(),
    "Patient.extension[Birthsex].value": PatientBirthSexExtensionValueHandler(),
    "Patient.identifier[type=MR].system": PatientMRNIdentifierValueHandler(),
    "Patient.identifier[type=MR].value": PatientMRNIdentifierValueHandler(),
    "Patient.identifier[type=MRN].system": PatientMRNIdentifierValueHandler(),
    "Patient.identifier[type=MRN].value": PatientMRNIdentifierValueHandler(),
    "Patient.identifier[type=SSN].system": PatientSSNIdentifierValueHandler(),
    "Patient.identifier[type=SSN].value": PatientSSNIdentifierValueHandler(),
    "Organization.identifier[system=NPI].value": OrganizationIdentiferNPIValueHandler(),
    "Organization.identifier[system=CLIA].value": OrganizationIdentiferCLIAValueHandler(),
    "Practitioner.identifier[system=NPI].value": PractitionerIdentiferNPIValueHandler(),
    "Observation.component[": ObservationComponentHandler()
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
resource_
|