medicafe 0.250822.2__py3-none-any.whl → 0.250909.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- MediBot/MediBot.py +11 -4
- MediBot/MediBot_Crosswalk_Library.py +16 -3
- MediBot/MediBot_Crosswalk_Utils.py +12 -2
- MediBot/MediBot_Preprocessor_lib.py +1821 -1728
- MediBot/MediBot_docx_decoder.py +14 -3
- MediBot/__init__.py +1 -1
- MediCafe/MediLink_ConfigLoader.py +12 -1
- MediCafe/__init__.py +1 -1
- MediCafe/api_core.py +116 -14
- MediCafe/core_utils.py +9 -4
- MediCafe/deductible_utils.py +1233 -0
- MediLink/MediLink_837p_encoder_library.py +123 -39
- MediLink/MediLink_Deductible.py +569 -555
- MediLink/MediLink_Deductible_Validator.py +9 -3
- MediLink/MediLink_Display_Utils.py +364 -2
- MediLink/MediLink_UI.py +20 -2
- MediLink/__init__.py +1 -1
- {medicafe-0.250822.2.dist-info → medicafe-0.250909.0.dist-info}/METADATA +1 -1
- {medicafe-0.250822.2.dist-info → medicafe-0.250909.0.dist-info}/RECORD +23 -27
- MediCafe/api_core_backup.py +0 -428
- MediLink/insurance_type_integration_test.py +0 -361
- MediLink/test_cob_library.py +0 -436
- MediLink/test_timing.py +0 -59
- MediLink/test_validation.py +0 -127
- {medicafe-0.250822.2.dist-info → medicafe-0.250909.0.dist-info}/LICENSE +0 -0
- {medicafe-0.250822.2.dist-info → medicafe-0.250909.0.dist-info}/WHEEL +0 -0
- {medicafe-0.250822.2.dist-info → medicafe-0.250909.0.dist-info}/entry_points.txt +0 -0
- {medicafe-0.250822.2.dist-info → medicafe-0.250909.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1233 @@
|
|
1
|
+
# MediCafe/deductible_utils.py
|
2
|
+
"""
|
3
|
+
Deductible utility functions for MediCafe
|
4
|
+
This module contains shared functionality for eligibility and deductible processing
|
5
|
+
to avoid code duplication and leverage existing MediCafe infrastructure.
|
6
|
+
|
7
|
+
COMPATIBILITY: Python 3.4.4 and Windows XP compatible
|
8
|
+
ASCII-only encoding required
|
9
|
+
|
10
|
+
API RESPONSE PARSER DEBUGGING STATUS:
|
11
|
+
This module addresses the TODO items from the original MediLink_Deductible.py commentary:
|
12
|
+
|
13
|
+
ADDRESSED:
|
14
|
+
Enhanced logging with log_api_response_structure() function
|
15
|
+
Schema validation with validate_api_response_schema() function
|
16
|
+
Response compatibility analysis with analyze_response_compatibility()
|
17
|
+
Robust null checking and fallback mechanisms
|
18
|
+
Detailed debugging logs for parser functions
|
19
|
+
Graceful handling of missing fields and schema mismatches
|
20
|
+
Centralized parser functions to eliminate duplication
|
21
|
+
|
22
|
+
PENDING (API Developer Fix Required):
|
23
|
+
Complete Super Connector API response schema (API developers working on fix)
|
24
|
+
Full response structure validation (depends on API fix)
|
25
|
+
Comprehensive test cases (requires stable API responses)
|
26
|
+
|
27
|
+
SEE ALSO: docs/API_RESPONSE_DEBUGGING_ROADMAP.md for detailed status and roadmap
|
28
|
+
|
29
|
+
IMPLEMENTATION NOTES:
|
30
|
+
- Primary path uses CSV/crosswalk as authoritative source (O(N) complexity)
|
31
|
+
- API probing retained behind DEBUG_MODE_PAYER_PROBE toggle for troubleshooting
|
32
|
+
- All parser functions include enhanced logging and error handling
|
33
|
+
- Schema validation helps identify API response format changes
|
34
|
+
- Compatibility analysis provides detailed debugging information
|
35
|
+
"""
|
36
|
+
|
37
|
+
import os, sys, json
|
38
|
+
from datetime import datetime
|
39
|
+
|
40
|
+
# Use core utilities for standardized imports
|
41
|
+
try:
|
42
|
+
from MediCafe.core_utils import get_shared_config_loader
|
43
|
+
MediLink_ConfigLoader = get_shared_config_loader()
|
44
|
+
except ImportError:
|
45
|
+
# Fallback for standalone usage
|
46
|
+
MediLink_ConfigLoader = None
|
47
|
+
|
48
|
+
# Import existing date utilities from MediBot
|
49
|
+
try:
|
50
|
+
from MediBot.MediBot_Preprocessor_lib import OptimizedDate
|
51
|
+
HAS_OPTIMIZED_DATE = True
|
52
|
+
except ImportError:
|
53
|
+
HAS_OPTIMIZED_DATE = False
|
54
|
+
|
55
|
+
# Import existing date utilities from MediLink
|
56
|
+
try:
|
57
|
+
from MediLink.MediLink_837p_utilities import convert_date_format
|
58
|
+
HAS_DATE_UTILS = True
|
59
|
+
except ImportError:
|
60
|
+
HAS_DATE_UTILS = False
|
61
|
+
|
62
|
+
# =============================================================================
|
63
|
+
# DATE VALIDATION UTILITIES
|
64
|
+
# =============================================================================
|
65
|
+
|
66
|
+
def validate_and_format_date(date_str):
    """
    Parse a date string in many formats and normalize it to 'YYYY-MM-DD'.

    Tries unambiguous formats first (4-digit years, month names, compact
    YYYYMMDD), then applies heuristics to ambiguous numeric dates:

    - If the first number is > 12 it must be DD/MM/YYYY.
    - If the second number is > 12 it must be MM/DD/YYYY.
    - Otherwise MM/DD/YYYY (US convention) is preferred, falling back to
      DD/MM/YYYY when the US reading is not a valid calendar date.

    Two-digit years matched by the regex branch are windowed at 50:
    00-49 -> 2000s, 50-99 -> 1900s.  (strptime applies its own %y
    windowing in the format-list branches.)

    Args:
        date_str: Candidate date string.  Leading/trailing whitespace is
            ignored.  None or a non-string input returns None (previously
            this raised TypeError).

    Returns:
        str: Date formatted as 'YYYY-MM-DD', or None when unparseable.

    COMPATIBILITY: Python 3.4.4 and Windows XP compatible
    ASCII-only encoding required
    """
    import re

    # Robustness fix: reject None / non-string input instead of letting
    # re.match() raise TypeError, and tolerate stray whitespace.
    if not isinstance(date_str, str):
        return None
    date_str = date_str.strip()
    if not date_str:
        return None

    def _try_formats(formats):
        # Return the normalized date for the first matching format, else None.
        for fmt in formats:
            try:
                parsed_date = datetime.strptime(date_str, fmt)
                if '%y' in fmt:
                    # NOTE: strptime already yields 4-digit years for %y
                    # (1969-2068), so these adjustments are defensive only.
                    if parsed_date.year < 50:
                        parsed_date = parsed_date.replace(year=parsed_date.year + 2000)
                    elif parsed_date.year < 100:
                        parsed_date = parsed_date.replace(year=parsed_date.year + 1900)
                return parsed_date.strftime('%Y-%m-%d')
            except ValueError:
                continue
        return None

    def _resolve_ambiguous(first_num, second_num, year):
        # Disambiguate day/month order; prefer MM/DD (US convention) when
        # both readings are plausible.
        if first_num > 12:
            candidates = [(second_num, first_num)]                    # must be DD/MM
        elif second_num > 12:
            candidates = [(first_num, second_num)]                    # must be MM/DD
        else:
            candidates = [(first_num, second_num), (second_num, first_num)]
        for month, day in candidates:
            try:
                return datetime(year, month, day).strftime('%Y-%m-%d')
            except ValueError:
                continue
        return None

    # First, try unambiguous formats (4-digit years, month names, etc.)
    result = _try_formats([
        '%Y-%m-%d',      # 1990-01-15
        '%d-%b-%Y',      # 15-Jan-1990
        '%d %b %Y',      # 15 Jan 1990
        '%b %d, %Y',     # Jan 15, 1990
        '%b %d %Y',      # Jan 15 1990
        '%B %d, %Y',     # January 15, 1990
        '%B %d %Y',      # January 15 1990
        '%Y/%m/%d',      # 1990/01/15
        '%Y%m%d',        # 19900115
        '%y%m%d',        # 900115 (compact format)
    ])
    if result:
        return result

    # Potentially ambiguous MM/DD/YYYY vs DD/MM/YYYY pattern.
    match_4digit = re.match(r'^(\d{1,2})[/-](\d{1,2})[/-](\d{4})$', date_str)
    if match_4digit:
        first_num, second_num, year = map(int, match_4digit.groups())
        return _resolve_ambiguous(first_num, second_num, year)

    # Same ambiguity with a 2-digit year; window the year at 50.
    match_2digit = re.match(r'^(\d{1,2})[/-](\d{1,2})[/-](\d{2})$', date_str)
    if match_2digit:
        first_num, second_num, year = map(int, match_2digit.groups())
        year = 2000 + year if year < 50 else 1900 + year
        return _resolve_ambiguous(first_num, second_num, year)

    # Remaining formats: only reachable for strings the regexes above did
    # not match (e.g. month-name forms with 2-digit years).
    return _try_formats([
        '%m-%d-%Y',      # 01-15-1990
        '%d-%m-%Y',      # 15-01-1990
        '%d/%m/%Y',      # 15/01/1990
        '%m-%d-%y',      # 01-15-90
        '%d-%m-%y',      # 15-01-90
        '%b %d, %y',     # Jan 15, 90
        '%b %d %y',      # Jan 15 90
        '%y/%m/%d',      # 90/01/15
        '%y-%m-%d',      # 90-01-15
    ])
|
196
|
+
|
197
|
+
# =============================================================================
|
198
|
+
# API RESPONSE DEBUGGING AND LOGGING UTILITIES
|
199
|
+
# =============================================================================
|
200
|
+
|
201
|
+
def log_api_response_structure(response, context="", level="DEBUG"):
    """
    Log a structural breakdown of an API response for debugging.

    Emits a banner, the response type and (for dicts) its top-level keys,
    detects Super Connector vs. Legacy formats, then logs a size-limited
    JSON dump of the payload.  All output goes through the shared
    MediLink_ConfigLoader logger; the function is a no-op when no logger
    is available and swallows its own errors so diagnostics can never
    break the caller.

    Args:
        response: API response data to analyze.
        context: Context string for logging (e.g., "Super Connector", "Legacy").
        level: Logging level (DEBUG, INFO, WARNING).
    """
    if not MediLink_ConfigLoader:
        return

    log = MediLink_ConfigLoader.log
    banner = "=" * 60
    try:
        log(banner, level=level)
        log("API RESPONSE STRUCTURE ANALYSIS - {}".format(context), level=level)
        log(banner, level=level)

        if response is None:
            log("Response is None", level=level)
            return

        log("Response type: {}".format(type(response)), level=level)
        if isinstance(response, dict):
            log("Top-level keys: {}".format(list(response.keys())), level=level)

            # Format fingerprints: which API shape did we receive?
            if "rawGraphQLResponse" in response:
                log("Detected Super Connector format (has rawGraphQLResponse)", level=level)
            if "memberPolicies" in response:
                log("Detected Legacy format (has memberPolicies)", level=level)

        serialized = json.dumps(response, indent=2)
        log("Response size: {} characters".format(len(serialized)), level=level)

        # Cap the dump at 1000 characters to keep log files readable.
        if len(serialized) > 1000:
            log("Response preview (first 1000 chars): {}".format(serialized[:1000]), level=level)
            log("... (truncated)", level=level)
        else:
            log("Full response: {}".format(serialized), level=level)

        log(banner, level=level)

    except Exception as e:
        if MediLink_ConfigLoader:
            MediLink_ConfigLoader.log("Error in log_api_response_structure: {}".format(str(e)), level="ERROR")
|
250
|
+
|
251
|
+
def validate_api_response_schema(response, expected_schema, context=""):
    """
    Validate API response against expected schema.

    Each (key, expected_type) pair in expected_schema must be present in
    response with a value of the expected type.  expected_type may be a
    single type or a tuple of types -- anything isinstance() accepts.

    NOTE: An empty/None response or schema is treated as trivially valid
    and returns (True, []); callers that require data must check for an
    empty response themselves.

    Args:
        response: API response data to validate (expected to be a dict).
        expected_schema: Dictionary mapping required keys to expected types.
        context: Context string used in the warning log on failure.

    Returns:
        tuple: (is_valid, validation_errors) where validation_errors is a
        list of human-readable problem descriptions.
    """
    if not response or not expected_schema:
        return True, []

    validation_errors = []

    try:
        for key, expected_type in expected_schema.items():
            if key not in response:
                validation_errors.append("Missing required key: {}".format(key))
            elif not isinstance(response[key], expected_type):
                # Fix: a tuple of types has no __name__, which previously
                # raised AttributeError here and turned a simple type
                # mismatch into a generic "Validation error".
                expected_name = getattr(expected_type, '__name__', str(expected_type))
                validation_errors.append("Key '{}' has wrong type. Expected {}, got {}".format(
                    key, expected_name, type(response[key]).__name__))

        if validation_errors and MediLink_ConfigLoader:
            MediLink_ConfigLoader.log("Schema validation failed for {}: {}".format(context, validation_errors), level="WARNING")

        return len(validation_errors) == 0, validation_errors

    except Exception as e:
        if MediLink_ConfigLoader:
            MediLink_ConfigLoader.log("Error in validate_api_response_schema: {}".format(str(e)), level="ERROR")
        return False, ["Validation error: {}".format(str(e))]
|
286
|
+
|
287
|
+
# Define expected schemas for different API response formats.
# These are consumed by validate_api_response_schema(); each value is the
# type the corresponding top-level key must have.

# Minimal shape of a legacy eligibility response: a list of member
# policies plus a top-level status string.
LEGACY_RESPONSE_SCHEMA = {
    "memberPolicies": list,
    "status": str
}

# TODO (API DEVELOPER FIX REQUIRED):
# This schema is incomplete and needs to be updated once API developers
# complete the Super Connector response format fixes.
#
# CURRENT ISSUES:
# - Super Connector API responses are not consistently structured
# - Some required fields may be missing or have different names
# - Response format may vary between different payer IDs
#
# FUTURE ENHANCEMENTS NEEDED:
# - Complete schema definition once API is stable
# - Add field-level validation for all required data
# - Implement schema version detection for API updates
# - Add support for payer-specific response variations
SUPER_CONNECTOR_RESPONSE_SCHEMA = {
    "rawGraphQLResponse": dict,
    "statuscode": str
    # Additional fields will be added once API response format is finalized
}
|
312
|
+
|
313
|
+
def analyze_response_compatibility(response, context=""):
    """
    Analyze response compatibility with our parser functions.

    Detects which API format the response uses (legacy vs. Super
    Connector), probes the first record for the sub-structures the
    extract_* parsers rely on, and records every problem found in
    "compatibility_issues".  Never raises: analysis errors are appended
    to the issue list instead.

    Args:
        response: API response to analyze (dict, or None).
        context: Context string included in logs and in the result.

    Returns:
        dict: Analysis results with keys: context, is_legacy_format,
        is_super_connector_format, has_patient_info, has_insurance_info,
        has_deductible_info, compatibility_issues (list of str).
    """
    # Start pessimistic: every flag False until proven present.
    analysis = {
        "context": context,
        "is_legacy_format": False,
        "is_super_connector_format": False,
        "has_patient_info": False,
        "has_insurance_info": False,
        "has_deductible_info": False,
        "compatibility_issues": []
    }

    try:
        if not response:
            analysis["compatibility_issues"].append("Response is None or empty")
            return analysis

        # Check format detection (delegates to the shared detectors).
        analysis["is_legacy_format"] = is_legacy_response_format(response)
        analysis["is_super_connector_format"] = is_super_connector_response_format(response)

        if not analysis["is_legacy_format"] and not analysis["is_super_connector_format"]:
            analysis["compatibility_issues"].append("Unknown response format")

        # Check for required data structures.
        # NOTE: only the FIRST policy / eligibility record is inspected;
        # later records are assumed to share the same shape.
        if analysis["is_legacy_format"]:
            member_policies = response.get("memberPolicies", [])
            if member_policies:
                first_policy = member_policies[0]
                analysis["has_patient_info"] = "patientInfo" in first_policy
                analysis["has_insurance_info"] = "insuranceInfo" in first_policy
                analysis["has_deductible_info"] = "deductibleInfo" in first_policy

            if not analysis["has_patient_info"]:
                analysis["compatibility_issues"].append("Legacy format missing patientInfo")
            if not analysis["has_insurance_info"]:
                analysis["compatibility_issues"].append("Legacy format missing insuranceInfo")
            if not analysis["has_deductible_info"]:
                analysis["compatibility_issues"].append("Legacy format missing deductibleInfo")

        elif analysis["is_super_connector_format"]:
            # Drill into rawGraphQLResponse.data.checkEligibility.eligibility;
            # every .get() defaults to an empty container so a partial
            # response degrades to "missing" rather than raising.
            raw_response = response.get("rawGraphQLResponse", {})
            data = raw_response.get("data", {})
            check_eligibility = data.get("checkEligibility", {})
            eligibility_list = check_eligibility.get("eligibility", [])

            if eligibility_list:
                first_eligibility = eligibility_list[0]
                eligibility_info = first_eligibility.get("eligibilityInfo", {})
                analysis["has_patient_info"] = "member" in eligibility_info
                analysis["has_insurance_info"] = "insuranceInfo" in eligibility_info
                analysis["has_deductible_info"] = "planLevels" in eligibility_info

            if not analysis["has_patient_info"]:
                analysis["compatibility_issues"].append("Super Connector format missing member info")
            if not analysis["has_insurance_info"]:
                analysis["compatibility_issues"].append("Super Connector format missing insuranceInfo")
            if not analysis["has_deductible_info"]:
                analysis["compatibility_issues"].append("Super Connector format missing planLevels")

        # Log analysis results (best effort; logger may be absent).
        if MediLink_ConfigLoader:
            MediLink_ConfigLoader.log("Response compatibility analysis for {}: {}".format(context, analysis), level="DEBUG")

        return analysis

    except Exception as e:
        if MediLink_ConfigLoader:
            MediLink_ConfigLoader.log("Error in analyze_response_compatibility: {}".format(str(e)), level="ERROR")
        analysis["compatibility_issues"].append("Analysis error: {}".format(str(e)))
        return analysis
|
394
|
+
|
395
|
+
# =============================================================================
|
396
|
+
# API RESPONSE PARSING UTILITIES
|
397
|
+
# =============================================================================
|
398
|
+
|
399
|
+
def extract_legacy_patient_info(policy):
    """
    Extract patient name fields from a legacy API response policy.

    Args:
        policy: One entry of the legacy response's "memberPolicies" list.

    Returns:
        dict: {'lastName', 'firstName', 'middleName'}, each defaulting
        to "" when the data is absent.
    """
    # Robustness fix: an explicit-but-empty "patientInfo" list previously
    # raised IndexError on [0]; treat it the same as a missing key.
    patient_records = policy.get("patientInfo") or [{}]
    patient_info = patient_records[0]
    return {
        'lastName': patient_info.get("lastName", ""),
        'firstName': patient_info.get("firstName", ""),
        'middleName': patient_info.get("middleName", "")
    }
|
407
|
+
|
408
|
+
def extract_super_connector_patient_info(eligibility_data):
    """
    Extract patient name fields from a Super Connector API response.

    Search order:
      1. eligibilityInfo.member of the first record under
         rawGraphQLResponse.data.checkEligibility.eligibility;
      2. error extensions' "details" records (some APIs return data there
         despite reporting errors);
      3. top-level lastName/firstName/middleName fields as a last resort.

    Side effects: logs the response structure and a compatibility
    analysis before parsing (debug aids; no-ops without a logger).

    Args:
        eligibility_data: Parsed Super Connector API response (dict).

    Returns:
        dict: {'lastName', 'firstName', 'middleName'} with "" defaults.
    """
    if not eligibility_data:
        return {'lastName': '', 'firstName': '', 'middleName': ''}

    # ENHANCED DEBUGGING: Log response structure before parsing
    log_api_response_structure(eligibility_data, "Super Connector Patient Info", "DEBUG")

    # Analyze compatibility with our parser; issues are logged but do not
    # abort parsing -- the fallbacks below may still find usable data.
    compatibility = analyze_response_compatibility(eligibility_data, "Super Connector Patient Info")
    if compatibility["compatibility_issues"]:
        if MediLink_ConfigLoader:
            MediLink_ConfigLoader.log("Compatibility issues detected: {}".format(compatibility["compatibility_issues"]), level="WARNING")

    # Handle multiple eligibility records - use the first one with valid data
    if "rawGraphQLResponse" in eligibility_data:
        raw_response = eligibility_data.get('rawGraphQLResponse', {})
        data = raw_response.get('data', {})
        check_eligibility = data.get('checkEligibility', {})
        eligibility_list = check_eligibility.get('eligibility', [])

        # Try to get from the first eligibility record
        if eligibility_list:
            first_eligibility = eligibility_list[0]
            member_info = first_eligibility.get('eligibilityInfo', {}).get('member', {})
            if member_info:
                result = {
                    'lastName': member_info.get("lastName", ""),
                    'firstName': member_info.get("firstName", ""),
                    'middleName': member_info.get("middleName", "")
                }
                if MediLink_ConfigLoader:
                    MediLink_ConfigLoader.log("Successfully extracted patient info from member: {}".format(result), level="DEBUG")
                return result

        # Check for data in error extensions (some APIs return data here despite errors)
        errors = raw_response.get('errors', [])
        for error in errors:
            extensions = error.get('extensions', {})
            if extensions and 'details' in extensions:
                details = extensions.get('details', [])
                if details:
                    # Use the first detail record that has patient info
                    for detail in details:
                        if detail.get('lastName') or detail.get('firstName'):
                            result = {
                                'lastName': detail.get("lastName", ""),
                                'firstName': detail.get("firstName", ""),
                                'middleName': detail.get("middleName", "")
                            }
                            if MediLink_ConfigLoader:
                                MediLink_ConfigLoader.log("Extracted patient info from error extensions: {}".format(result), level="DEBUG")
                            return result

    # Fallback to top-level fields.
    # NOTE(review): assumes a flattened response may carry the name fields
    # at the top level -- confirm against actual Super Connector payloads.
    result = {
        'lastName': eligibility_data.get("lastName", ""),
        'firstName': eligibility_data.get("firstName", ""),
        'middleName': eligibility_data.get("middleName", "")
    }
    if MediLink_ConfigLoader:
        MediLink_ConfigLoader.log("Using fallback top-level fields for patient info: {}".format(result), level="DEBUG")
    return result
|
471
|
+
|
472
|
+
def extract_legacy_remaining_amount(policy):
    """
    Extract the remaining deductible amount from a legacy response policy.

    Prefers the individual in-network deductible; falls back to the
    family in-network deductible.

    Args:
        policy: One entry of the legacy response's "memberPolicies" list.

    Returns:
        str: The remaining amount, or "Not Found" when absent or empty.
    """
    deductible_info = policy.get("deductibleInfo", {})
    # Prefer individual coverage over family coverage.
    for coverage in ('individual', 'family'):
        if coverage in deductible_info:
            # Robustness fix: a coverage entry without an "inNetwork" dict
            # previously raised KeyError; use .get() chains instead.
            in_network = deductible_info[coverage].get('inNetwork') or {}
            remaining = in_network.get("remainingAmount", "")
            return remaining if remaining else "Not Found"
    return "Not Found"
|
483
|
+
|
484
|
+
def extract_super_connector_remaining_amount(eligibility_data):
    """
    Extract the remaining deductible amount from a Super Connector response.

    Search order:
      1. Top-level 'metYearToDateAmount' (returned as-is when present).
      2. 'deductibleInfo' plan levels at the top level of the response.
      3. 'deductibleInfo' plan levels nested under
         rawGraphQLResponse.data.checkEligibility.eligibility[*].eligibilityInfo.

    Selection strategy among all collected amounts: highest non-zero
    individual amount, then highest non-zero family amount, then the
    first individual amount, then the first family amount.

    Args:
        eligibility_data: Parsed Super Connector API response (dict).

    Returns:
        str: The chosen remaining amount (str of a float), or "Not Found".
    """
    if not eligibility_data:
        return "Not Found"

    # A top-level metYearToDateAmount may already indicate the deductible met.
    met_amount = eligibility_data.get('metYearToDateAmount')
    if met_amount is not None:
        return str(met_amount)

    # Collect (type, amount) pairs so the selection strategy can pick the
    # most relevant one.  Collection order matters for the "first amount"
    # fallbacks below.
    all_deductible_amounts = []

    def _collect_from_plan_levels(plan_levels):
        # Gather remaining amounts from 'deductibleInfo' plan levels.
        # (This helper replaces four copies of the same loop in the
        # original implementation; behavior and ordering are unchanged.)
        for plan_level in plan_levels:
            if plan_level.get('level') != 'deductibleInfo':
                continue
            for amount_type in ('individual', 'family'):
                for entry in plan_level.get(amount_type, []) or []:
                    remaining = entry.get('remainingAmount')
                    if remaining is not None:
                        try:
                            all_deductible_amounts.append((amount_type, float(remaining)))
                        except (ValueError, TypeError):
                            # Non-numeric amounts are silently skipped.
                            pass

    # Primary location: planLevels at the top level of the response.
    _collect_from_plan_levels(eligibility_data.get('planLevels', []))

    # Fallback location: planLevels nested inside the raw GraphQL response.
    raw_response = eligibility_data.get('rawGraphQLResponse', {})
    if raw_response:
        eligibility_list = (raw_response.get('data', {})
                            .get('checkEligibility', {})
                            .get('eligibility', []))
        for eligibility in eligibility_list:
            _collect_from_plan_levels(eligibility.get('eligibilityInfo', {}).get('planLevels', []))

    def _pick(amount_type, non_zero_only):
        # Return the selected amount for one (type, non-zero) rule, or None.
        amounts = [amt for typ, amt in all_deductible_amounts if typ == amount_type]
        if non_zero_only:
            amounts = [amt for amt in amounts if amt > 0]
            return str(max(amounts)) if amounts else None
        return str(amounts[0]) if amounts else None

    # Strategy: prefer individual over family, and non-zero over zero.
    for amount_type, non_zero_only in (('individual', True), ('family', True),
                                       ('individual', False), ('family', False)):
        chosen = _pick(amount_type, non_zero_only)
        if chosen is not None:
            return chosen

    return "Not Found"
|
585
|
+
|
586
|
+
def extract_legacy_insurance_info(policy):
    """
    Pull the insurance identification fields out of a legacy response policy.

    Args:
        policy: One entry of the legacy response's "memberPolicies" list.

    Returns:
        dict: insuranceType, insuranceTypeCode, memberId and payerId,
        each defaulting to "" when absent.
    """
    details = policy.get("insuranceInfo", {})
    # The output keys mirror the source field names one-to-one.
    wanted = ('insuranceType', 'insuranceTypeCode', 'memberId', 'payerId')
    return {name: details.get(name, "") for name in wanted}
|
595
|
+
|
596
|
+
def extract_super_connector_insurance_info(eligibility_data):
    """
    Extract insurance identification fields from a Super Connector response.

    Search order:
      1. eligibilityInfo.insuranceInfo of the first record under
         rawGraphQLResponse.data.checkEligibility.eligibility;
      2. error extensions' "details" records (some APIs return data there
         despite reporting errors);
      3. top-level fields, with long plan descriptions normalized to a
         short PPO/HMO/EPO/POS label.

    Args:
        eligibility_data: Parsed Super Connector API response (dict).

    Returns:
        dict: {'insuranceType', 'insuranceTypeCode', 'memberId', 'payerId'}
        with "" defaults.
    """
    if not eligibility_data:
        return {'insuranceType': '', 'insuranceTypeCode': '', 'memberId': '', 'payerId': ''}

    # Handle multiple eligibility records - use the first one with valid data
    if "rawGraphQLResponse" in eligibility_data:
        raw_response = eligibility_data.get('rawGraphQLResponse', {})
        data = raw_response.get('data', {})
        check_eligibility = data.get('checkEligibility', {})
        eligibility_list = check_eligibility.get('eligibility', [])

        # Try to get from the first eligibility record
        if eligibility_list:
            first_eligibility = eligibility_list[0]
            insurance_info = first_eligibility.get('eligibilityInfo', {}).get('insuranceInfo', {})
            if insurance_info:
                return {
                    'insuranceType': insurance_info.get("planTypeDescription", ""),
                    'insuranceTypeCode': insurance_info.get("productServiceCode", ""),
                    'memberId': insurance_info.get("memberId", ""),
                    'payerId': insurance_info.get("payerId", "")
                }

        # Check for data in error extensions (some APIs return data here despite errors)
        errors = raw_response.get('errors', [])
        for error in errors:
            extensions = error.get('extensions', {})
            if extensions and 'details' in extensions:
                details = extensions.get('details', [])
                if details:
                    # Use the first detail record that has insurance info
                    for detail in details:
                        if detail.get('memberId') or detail.get('payerId'):
                            # Try to determine insurance type from available data
                            insurance_type = detail.get('planType', '')
                            if not insurance_type:
                                insurance_type = detail.get('productType', '')

                            return {
                                'insuranceType': insurance_type,
                                'insuranceTypeCode': detail.get("productServiceCode", ""),
                                'memberId': detail.get("memberId", ""),
                                'payerId': detail.get("payerId", "")
                            }

    # Fallback to top-level fields
    insurance_type = eligibility_data.get("planTypeDescription", "")
    if not insurance_type:
        insurance_type = eligibility_data.get("productType", "")

    # Clean up the insurance type if it's too long (like the LPPO description)
    if insurance_type and len(insurance_type) > 50:
        # Extract just the plan type part.  Order matters: the first
        # matching substring wins.
        if "PPO" in insurance_type:
            insurance_type = "Preferred Provider Organization (PPO)"
        elif "HMO" in insurance_type:
            insurance_type = "Health Maintenance Organization (HMO)"
        elif "EPO" in insurance_type:
            insurance_type = "Exclusive Provider Organization (EPO)"
        elif "POS" in insurance_type:
            insurance_type = "Point of Service (POS)"

    # Get insurance type code from multiple possible locations
    insurance_type_code = eligibility_data.get("productServiceCode", "")
    if not insurance_type_code:
        # Try to get from coverageTypes (only the first entry is consulted)
        coverage_types = eligibility_data.get("coverageTypes", [])
        if coverage_types:
            insurance_type_code = coverage_types[0].get("typeCode", "")

    # NOTE(review): the top-level fallback reads "subscriberId" for
    # memberId while the nested paths read "memberId" -- presumably the
    # flattened payload uses a different field name; confirm with the API.
    return {
        'insuranceType': insurance_type,
        'insuranceTypeCode': insurance_type_code,
        'memberId': eligibility_data.get("subscriberId", ""),
        'payerId': eligibility_data.get("payerId", "")
    }
|
673
|
+
|
674
|
+
def extract_legacy_policy_status(policy):
    """Extract policy status from legacy API response format.

    Reads policyInfo.policyStatus; returns "" when either level is absent.
    """
    return policy.get("policyInfo", {}).get("policyStatus", "")
|
678
|
+
|
679
|
+
def extract_super_connector_policy_status(eligibility_data):
    """Extract policy status from Super Connector API response format.

    Prefers the nested GraphQL payload (first eligibility record's
    insuranceInfo.policyStatus); falls back to a top-level 'policyStatus'
    field. Returns "" when nothing is available.
    """
    if not eligibility_data:
        return ""

    # Handle multiple eligibility records - use the first one with valid data.
    if "rawGraphQLResponse" in eligibility_data:
        records = (eligibility_data.get('rawGraphQLResponse', {})
                   .get('data', {})
                   .get('checkEligibility', {})
                   .get('eligibility', []))
        if records:
            info = records[0].get('eligibilityInfo', {}).get('insuranceInfo', {})
            if info:
                return info.get("policyStatus", "")

    # Fallback to top-level field.
    return eligibility_data.get("policyStatus", "")
|
700
|
+
|
701
|
+
def is_legacy_response_format(data):
    """Determine if the response is in legacy format (has memberPolicies)"""
    if data is None:
        return False
    return "memberPolicies" in data
|
704
|
+
|
705
|
+
def is_super_connector_response_format(data):
    """Determine if the response is in Super Connector format (has rawGraphQLResponse)"""
    if data is None:
        return False
    return "rawGraphQLResponse" in data
|
708
|
+
|
709
|
+
# =============================================================================
|
710
|
+
# ELIGIBILITY DATA CONVERSION UTILITIES
|
711
|
+
# =============================================================================
|
712
|
+
|
713
|
+
def convert_eligibility_to_enhanced_format(data, dob, member_id, patient_id="", service_date=""):
    """Convert an API eligibility response to the enhanced display format.

    Supports the Legacy response shape (keyed by 'memberPolicies'), the
    Super Connector/OptumAI shape (keyed by 'rawGraphQLResponse'), and dicts
    that were already processed by merge_responses().

    Args:
        data (dict or None): Raw or pre-processed eligibility response.
        dob (str): Patient date of birth, passed through to the result.
        member_id (str): Patient member ID, passed through to the result.
        patient_id (str): Optional patient identifier for display.
        service_date (str): Optional service date string for display.

    Returns:
        dict or None: Enhanced display record, or None when the input is
        missing, unrecognized, or (Legacy) contains no Medical policy.
    """
    if data is None:
        return None

    # Already processed by merge_responses() - just fill in missing fields.
    if isinstance(data, dict) and 'patient_name' in data and 'data_source' in data:
        data.setdefault('patient_id', patient_id)
        data.setdefault('service_date_display', service_date)
        data.setdefault('service_date_sort', datetime.min)
        data.setdefault('status', 'Processed')
        return data

    # ENHANCED DEBUGGING: Log response structure and analyze compatibility
    log_api_response_structure(data, "Eligibility Conversion", "DEBUG")
    compatibility = analyze_response_compatibility(data, "Eligibility Conversion")

    if compatibility["compatibility_issues"] and MediLink_ConfigLoader:
        MediLink_ConfigLoader.log("Compatibility issues in eligibility conversion: {}".format(compatibility["compatibility_issues"]), level="WARNING")

    def _build_result(patient_info, remaining_amount, insurance_info, policy_status, data_source):
        # Assemble the display record shared by both response formats.
        patient_name = "{} {} {}".format(
            patient_info['firstName'],
            patient_info['middleName'],
            patient_info['lastName']
        ).strip()
        return {
            'patient_id': patient_id,
            'patient_name': patient_name,
            'dob': dob,
            'member_id': member_id,
            'payer_id': insurance_info['payerId'],
            'service_date_display': service_date,
            'service_date_sort': datetime.min,  # Will be enhanced later
            'status': 'Processed',
            'insurance_type': insurance_info['insuranceType'],
            'policy_status': policy_status,
            'remaining_amount': remaining_amount,
            'data_source': data_source,
            'is_successful': bool(patient_name and remaining_amount != 'Not Found')
        }

    # Determine which API response format we're dealing with
    if is_legacy_response_format(data):
        if MediLink_ConfigLoader:
            MediLink_ConfigLoader.log("Processing Legacy API response format", level="DEBUG")

        # Return a record for the first Medical policy; skip all others.
        for policy in data.get("memberPolicies", []):
            if policy.get("policyInfo", {}).get("coverageType", "") != "Medical":
                continue

            patient_info = extract_legacy_patient_info(policy)
            remaining_amount = extract_legacy_remaining_amount(policy)
            insurance_info = extract_legacy_insurance_info(policy)
            policy_status = extract_legacy_policy_status(policy)

            result = _build_result(patient_info, remaining_amount, insurance_info, policy_status, 'Legacy')
            if MediLink_ConfigLoader:
                MediLink_ConfigLoader.log("Successfully converted Legacy API response: {}".format(result), level="DEBUG")
            return result
        # No Medical policy found (previously an implicit fall-through).
        return None

    elif is_super_connector_response_format(data):
        if MediLink_ConfigLoader:
            MediLink_ConfigLoader.log("Processing Super Connector API response format", level="DEBUG")

        patient_info = extract_super_connector_patient_info(data)
        remaining_amount = extract_super_connector_remaining_amount(data)
        insurance_info = extract_super_connector_insurance_info(data)
        policy_status = extract_super_connector_policy_status(data)

        result = _build_result(patient_info, remaining_amount, insurance_info, policy_status, 'OptumAI')
        if MediLink_ConfigLoader:
            MediLink_ConfigLoader.log("Successfully converted Super Connector API response: {}".format(result), level="DEBUG")
        return result

    else:
        # Unknown response format - log diagnostics. All log calls are kept
        # under the guard so a missing logger cannot mask the real problem
        # with an AttributeError on None.
        if MediLink_ConfigLoader:
            MediLink_ConfigLoader.log("Unknown response format in convert_eligibility_to_enhanced_format", level="WARNING")
            # default=str keeps the diagnostic dump from raising TypeError on
            # non-JSON-serializable values (e.g. datetime objects).
            MediLink_ConfigLoader.log("Response structure: {}".format(json.dumps(data, indent=2, default=str)), level="DEBUG")

            # Additional debugging information
            MediLink_ConfigLoader.log("Response type: {}".format(type(data)), level="DEBUG")
            if isinstance(data, dict):
                MediLink_ConfigLoader.log("Available keys: {}".format(list(data.keys())), level="DEBUG")
        return None
|
828
|
+
|
829
|
+
# =============================================================================
|
830
|
+
# PAYER ID RESOLUTION UTILITIES
|
831
|
+
# =============================================================================
|
832
|
+
|
833
|
+
def resolve_payer_ids_from_csv(csv_data, config, crosswalk, payer_ids):
    """
    Resolve payer IDs for each patient from CSV data using crosswalk mapping.
    This eliminates the need for multi-payer probing and reduces complexity from O(PxN) to O(N).

    Args:
        csv_data (list): CSV data containing patient information
        config (dict): Configuration object (currently unused; kept for interface stability)
        crosswalk (dict): Crosswalk data containing payer mappings (currently
            unused here; kept for interface stability)
        payer_ids (list): List of supported payer IDs

    Returns:
        dict: Mapping of (dob, member_id) tuples to resolved payer_id
    """
    if MediLink_ConfigLoader:
        MediLink_ConfigLoader.log("Resolving payer IDs from CSV data using crosswalk...", level="INFO")

    # Mapping of (formatted_dob, member_id) -> payer id taken from the CSV.
    payer_id_cache = {}

    # NOTE: a payer->endpoint map was previously built from 'crosswalk' here
    # but was never read; that dead code has been removed. The parameter is
    # retained so existing callers keep working.

    # Process each CSV row to resolve payer IDs
    for row in csv_data:
        ins1_payer_id = row.get('Ins1 Payer ID', '').strip()
        dob = row.get('Patient DOB', row.get('DOB', ''))
        member_id = row.get('Primary Policy Number', row.get('Ins1 Member ID', '')).strip()

        # Skip rows without required data
        if not ins1_payer_id or not dob or not member_id:
            continue

        # Validate and format DOB; skip rows whose date cannot be parsed.
        formatted_dob = validate_and_format_date(dob)
        if not formatted_dob:
            continue

        # Check if this payer ID is in our supported list
        if ins1_payer_id in payer_ids:
            # Use the payer ID from CSV as authoritative source
            payer_id_cache[(formatted_dob, member_id)] = ins1_payer_id
            if MediLink_ConfigLoader:
                MediLink_ConfigLoader.log("Resolved payer ID {} for patient {} (DOB: {})".format(
                    ins1_payer_id, member_id, formatted_dob), level="DEBUG")
        else:
            # Payer ID not in supported list - log for review
            if MediLink_ConfigLoader:
                MediLink_ConfigLoader.log("Payer ID {} not in supported list for patient {} (DOB: {})".format(
                    ins1_payer_id, member_id, formatted_dob), level="WARNING")

    if MediLink_ConfigLoader:
        MediLink_ConfigLoader.log("Payer ID resolution complete. Resolved {} patient-payer mappings.".format(
            len(payer_id_cache)), level="INFO")

    return payer_id_cache
|
893
|
+
|
894
|
+
def get_payer_id_for_patient(dob, member_id, payer_id_cache):
    """
    Get the appropriate payer ID for a specific patient.

    Args:
        dob (str): Patient date of birth
        member_id (str): Patient member ID
        payer_id_cache (dict): Cached payer ID mappings keyed by (dob, member_id)

    Returns:
        str: Payer ID for the patient, or None if not found
    """
    cache_key = (dob, member_id)
    return payer_id_cache.get(cache_key)
|
907
|
+
|
908
|
+
def merge_responses(optumai_data, legacy_data, dob, member_id):
    """
    Intelligently merge OptumAI and Legacy API responses.
    Prioritizes OptumAI data but backfills missing fields from Legacy API.
    Adds [*] flag to indicate when data comes from Legacy API.

    Args:
        optumai_data (dict or None): Super Connector response ('rawGraphQLResponse' shape).
        legacy_data (dict or None): Legacy API response ('memberPolicies' shape).
        dob (str): Fallback date of birth for the merged record.
        member_id (str): Fallback member ID for the merged record.

    Returns:
        dict: Merged display record; always a dict, never None.
    """
    merged = {}

    # Handle None inputs gracefully: return a fully-populated placeholder.
    if optumai_data is None and legacy_data is None:
        return {
            'patient_name': 'Unknown Patient',
            'dob': dob,
            'member_id': member_id,
            'insurance_type': 'Not Available',
            'policy_status': 'Not Available',
            'remaining_amount': 'Not Found',
            'data_source': 'None',
            'is_successful': False
        }

    # Helper to check if data is valid (not None, has required fields)
    def is_valid_data(data):
        if not data:
            return False
        # Basic check for key structures
        if "rawGraphQLResponse" in data:
            raw = data.get('rawGraphQLResponse', {})
            errors = raw.get('errors', [])
            if errors and all(e.get('code') != 'SUCCESS' for e in errors):
                return False  # All errors, no data
            return bool(raw.get('data', {}).get('checkEligibility', {}).get('eligibility'))
        elif "memberPolicies" in data:
            return bool(data.get("memberPolicies"))
        return False

    # Handle PARTIAL_DATA_RECEIVED errors - these still contain usable data
    # inside the error's 'extensions.details' payload.
    def has_partial_data(data):
        if not data or "rawGraphQLResponse" not in data:
            return False
        raw = data.get('rawGraphQLResponse', {})
        errors = raw.get('errors', [])
        for error in errors:
            if error.get('code') == 'PARTIAL_DATA_RECEIVED':
                extensions = error.get('extensions', {})
                if extensions and 'details' in extensions:
                    return bool(extensions.get('details'))
        return False

    # ALWAYS prioritize OptumAI as primary source for real patient data
    # Legacy API is sandbox data and should only be used for backfilling missing fields
    primary = optumai_data
    secondary = legacy_data

    # If primary is OptumAI with errors but extensions, extract from there
    # NOTE(review): 'primary == optumai_data' is always True here since primary
    # was just assigned from optumai_data (also True when both are None).
    if primary == optumai_data and has_partial_data(optumai_data):
        raw = optumai_data.get('rawGraphQLResponse', {})
        errors = raw.get('errors', [])
        for error in errors:
            if error.get('code') in ['INFORMATIONAL', 'PARTIAL_DATA_RECEIVED']:
                extensions = error.get('extensions', {})
                details = extensions.get('details', [])
                if details:
                    # Select most recent plan
                    try:
                        sorted_details = sorted(details, key=lambda d: datetime.strptime(d.get('planStartDate', '1900-01-01'), '%Y-%m-%d'), reverse=True)
                        selected = sorted_details[0]
                        merged['patient_name'] = "{} {}".format(selected.get('firstName', ''), selected.get('lastName', '')).strip()
                        merged['dob'] = selected.get('dateOfBirth', dob)
                        merged['member_id'] = selected.get('memberId', member_id)
                        merged['insurance_type'] = selected.get('insuranceType', selected.get('coverageType', ''))
                        merged['policy_status'] = selected.get('policyStatus', 'Active')
                        merged['payer_id'] = selected.get('payerId', '')
                        # Extract deductible from plan levels if available
                        plan_levels = selected.get('planLevels', [])
                        deductible_found = False
                        for plan_level in plan_levels:
                            if plan_level.get('level') == 'deductibleInfo':
                                individual = plan_level.get('individual', [])
                                if individual and individual[0].get('remainingAmount'):
                                    merged['remaining_amount'] = str(individual[0]['remainingAmount'])
                                    deductible_found = True
                                    break
                        if not deductible_found:
                            merged['remaining_amount'] = 'Not Found'
                        merged['data_source'] = 'OptumAI-Extensions'
                        break  # Use first error with valid extensions
                    except Exception as e:
                        # If extraction fails, continue to normal processing
                        pass

    # Extract from OptumAI (primary) first - this contains real patient data
    # NOTE(review): when the extensions path above populated 'merged', this
    # branch runs again for the same response and overwrites those fields
    # with top-level gets - confirm that is intended.
    if is_super_connector_response_format(primary) and primary:
        # Extract real patient data from OptumAI
        # NOTE(review): reads 'firstName'/'policyStatus' etc. off the response
        # envelope itself; confirm the payload exposes them at the top level.
        merged['patient_name'] = "{} {} {}".format(
            primary.get('firstName', ""),
            primary.get('middleName', ""),
            primary.get('lastName', "")
        ).strip()
        merged['payer_id'] = primary.get('payerId', '')
        merged['remaining_amount'] = extract_super_connector_remaining_amount(primary)
        merged['policy_status'] = primary.get('policyStatus', '')

        # OptumAI doesn't have insurance_type yet, so we'll backfill from Legacy
        merged['insurance_type'] = ''
        merged['data_source'] = 'OptumAI'

    elif is_legacy_response_format(primary) and primary:
        # Only use Legacy as primary if OptumAI is completely unavailable
        policy = primary.get("memberPolicies", [{}])[0]
        patient_info = policy.get("patientInfo", [{}])[0]
        merged['patient_name'] = "{} {} {}".format(
            patient_info.get("firstName", ""),
            patient_info.get("middleName", ""),
            patient_info.get("lastName", "")
        ).strip()
        merged['insurance_type'] = policy.get("insuranceInfo", {}).get("insuranceType", "")
        merged['policy_status'] = policy.get("policyInfo", {}).get("policyStatus", "")
        merged['remaining_amount'] = extract_legacy_remaining_amount(policy)
        merged['payer_id'] = policy.get("insuranceInfo", {}).get("payerId", "")
        merged['data_source'] = 'Legacy'

    # Set fallback data if still empty
    if not merged:
        merged = {
            'patient_name': 'Unknown Patient',
            'insurance_type': 'Not Available',
            'policy_status': 'Not Available',
            'remaining_amount': 'Not Found',
            'payer_id': '',
            'data_source': 'None'
        }

    # Intelligently backfill missing fields from Legacy API (secondary)
    # This is where we enrich OptumAI data with missing fields from Legacy
    if secondary and is_valid_data(secondary) and is_legacy_response_format(secondary):
        legacy_policy = secondary.get("memberPolicies", [{}])[0]
        legacy_insurance_info = legacy_policy.get("insuranceInfo", {})

        # Track which fields are backfilled from Legacy API
        backfilled_fields = []

        # Backfill insurance_type from Legacy API (OptumAI doesn't have this yet)
        if not merged.get('insurance_type') or merged['insurance_type'].strip() == '':
            legacy_insurance_type = legacy_insurance_info.get("insuranceType", "")
            if legacy_insurance_type:
                merged['insurance_type'] = legacy_insurance_type + " [*]"
                backfilled_fields.append('insurance_type')

        # Backfill policy_status if missing
        if not merged.get('policy_status') or merged['policy_status'].strip() == '':
            legacy_policy_status = legacy_policy.get("policyInfo", {}).get("policyStatus", "")
            if legacy_policy_status:
                merged['policy_status'] = legacy_policy_status + " [*]"
                backfilled_fields.append('policy_status')

        # Backfill remaining_amount if missing or "Not Found"
        if not merged.get('remaining_amount') or merged['remaining_amount'] == 'Not Found':
            legacy_remaining = extract_legacy_remaining_amount(legacy_policy)
            if legacy_remaining and legacy_remaining != 'Not Found':
                merged['remaining_amount'] = legacy_remaining + " [*]"
                backfilled_fields.append('remaining_amount')

        # Backfill payer_id if missing (should be preserved from CSV anyway)
        if not merged.get('payer_id') or merged['payer_id'].strip() == '':
            legacy_payer_id = legacy_insurance_info.get("payerId", "")
            if legacy_payer_id:
                merged['payer_id'] = legacy_payer_id + " [*]"
                backfilled_fields.append('payer_id')

        # Update data source to indicate backfilling occurred
        if backfilled_fields:
            if merged.get('data_source') == 'OptumAI':
                merged['data_source'] = 'OptumAI+Legacy'
            else:
                merged['data_source'] = 'Legacy'

            if MediLink_ConfigLoader:
                MediLink_ConfigLoader.log("Backfilled fields from Legacy API: {}".format(backfilled_fields), level="DEBUG")

    # For multi-plan, select most recent
    # NOTE(review): nothing in this function ever sets 'multiple_plans', and
    # 'selected' is computed but never applied, so this block is currently a no-op.
    if 'multiple_plans' in merged: # Flag if multi
        plans = merged['multiple_plans']
        if plans:
            # Sort by planStartDate descending
            sorted_plans = sorted(plans, key=lambda p: datetime.strptime(p.get('planStartDate', '1900-01-01'), '%Y-%m-%d'), reverse=True)
            selected = sorted_plans[0]
            # Update merged with selected plan data

    # Search for deductible in primary/secondary (only if not already set by backfill)
    # NOTE(review): when optumai_data is None, 'primary == optumai_data' is still
    # True (None == None), so the Super Connector extractor is called with None;
    # confirm extract_super_connector_remaining_amount tolerates None input.
    if 'remaining_amount' not in merged or merged['remaining_amount'] == 'Not Found':
        if primary == optumai_data:
            merged['remaining_amount'] = extract_super_connector_remaining_amount(primary)
        else:
            merged['remaining_amount'] = extract_legacy_remaining_amount(primary)

    # Add required fields if missing
    if 'dob' not in merged:
        merged['dob'] = dob
    if 'member_id' not in merged:
        merged['member_id'] = member_id
    if 'patient_id' not in merged:
        merged['patient_id'] = ''
    if 'service_date_display' not in merged:
        merged['service_date_display'] = ''
    if 'status' not in merged:
        merged['status'] = 'Processed'

    # Update data source if not already set
    if 'data_source' not in merged or not merged['data_source']:
        merged['data_source'] = 'OptumAI' if primary == optumai_data else 'Legacy' if primary else 'None'

    # Set final success status
    merged['is_successful'] = bool(merged.get('patient_name') and
                                   merged.get('patient_name') != 'Unknown Patient' and
                                   merged.get('remaining_amount') != 'Not Found')
    return merged
|
1125
|
+
|
1126
|
+
def _format_patient_name_from_csv_row(row):
|
1127
|
+
"""Local CSV name formatter to avoid cross-package imports"""
|
1128
|
+
try:
|
1129
|
+
if row is None:
|
1130
|
+
return ""
|
1131
|
+
if 'Patient Name' in row and row['Patient Name']:
|
1132
|
+
return str(row['Patient Name'])[:25]
|
1133
|
+
first_name = str(row.get('Patient First', '')).strip()
|
1134
|
+
last_name = str(row.get('Patient Last', '')).strip()
|
1135
|
+
middle_name = str(row.get('Patient Middle', '')).strip()
|
1136
|
+
if last_name or first_name:
|
1137
|
+
name_parts = []
|
1138
|
+
if last_name:
|
1139
|
+
name_parts.append(last_name)
|
1140
|
+
if first_name:
|
1141
|
+
if name_parts:
|
1142
|
+
name_parts.append(", {}".format(first_name))
|
1143
|
+
else:
|
1144
|
+
name_parts.append(first_name)
|
1145
|
+
if middle_name:
|
1146
|
+
name_parts.append(" {}".format(middle_name[:1]))
|
1147
|
+
return ''.join(name_parts)[:25]
|
1148
|
+
# Try alternative fields
|
1149
|
+
alt_first = str(row.get('First Name', row.get('First', row.get('FirstName', '')))).strip()
|
1150
|
+
alt_last = str(row.get('Last Name', row.get('Last', row.get('LastName', '')))).strip()
|
1151
|
+
if alt_first or alt_last:
|
1152
|
+
combined = (alt_first + ' ' + alt_last).strip()
|
1153
|
+
return combined[:25]
|
1154
|
+
return ""
|
1155
|
+
except Exception:
|
1156
|
+
return ""
|
1157
|
+
|
1158
|
+
|
1159
|
+
def _extract_service_date_from_csv_row(row):
|
1160
|
+
"""Extract service_date_display and service_date_sort (datetime) from CSV row"""
|
1161
|
+
try:
|
1162
|
+
if row is None:
|
1163
|
+
return '', datetime.min
|
1164
|
+
# Preferred fields
|
1165
|
+
candidates = [row.get('Service Date'), row.get('Surgery Date'), row.get('Date of Service'), row.get('DOS'), row.get('Date')]
|
1166
|
+
for val in candidates:
|
1167
|
+
if not val:
|
1168
|
+
continue
|
1169
|
+
if isinstance(val, datetime):
|
1170
|
+
if val != datetime.min:
|
1171
|
+
return val.strftime('%m-%d'), val
|
1172
|
+
elif isinstance(val, str) and val.strip() and val != 'MISSING':
|
1173
|
+
for fmt in ['%m-%d-%Y', '%m/%d/%Y', '%Y-%m-%d', '%m-%d-%y', '%m/%d/%y']:
|
1174
|
+
try:
|
1175
|
+
parsed = datetime.strptime(val.strip(), fmt)
|
1176
|
+
return parsed.strftime('%m-%d'), parsed
|
1177
|
+
except ValueError:
|
1178
|
+
continue
|
1179
|
+
return '', datetime.min
|
1180
|
+
except Exception:
|
1181
|
+
return '', datetime.min
|
1182
|
+
|
1183
|
+
|
1184
|
+
def backfill_enhanced_result(enhanced_result, csv_row=None):
    """
    Ensure enhanced eligibility result has required fields populated using CSV fallbacks.
    - Fills patient_name if missing using CSV fields
    - Fills patient_id from CSV if missing
    - Fills service_date_display and service_date_sort from CSV if missing
    - Fills payer_id from CSV if missing
    - Keeps existing values intact; only fills blanks

    Args:
        enhanced_result (dict or None): Record produced by the eligibility
            conversion/merge step; returned unchanged when None.
        csv_row (dict or None): Source CSV row used for fallback values.

    Returns:
        dict or None: A shallow-copied, backfilled record; the original
        input on error; None when enhanced_result is None.
    """
    if enhanced_result is None:
        return None
    try:
        # Shallow copy to avoid mutating caller dict
        result = dict(enhanced_result)

        # Backfill patient_name
        # Missing = falsy, whitespace-only, or the 'Unknown Patient' placeholder.
        name_missing = (not result.get('patient_name')) or (result.get('patient_name', '').strip() == '') or (result.get('patient_name') == 'Unknown Patient')
        if name_missing:
            csv_name = _format_patient_name_from_csv_row(csv_row)
            if csv_name:
                result['patient_name'] = csv_name

        # Backfill patient_id
        if not result.get('patient_id') and csv_row is not None:
            result['patient_id'] = str(csv_row.get('Patient ID #2', csv_row.get('Patient ID', '')))

        # Backfill payer_id
        current_payer_id = result.get('payer_id', '')
        if (not current_payer_id or not current_payer_id.strip()) and csv_row is not None:
            payer = csv_row.get('Ins1 Payer ID', '')
            if payer:
                result['payer_id'] = str(payer)

        # Backfill service dates
        if (not result.get('service_date_display')) or (result.get('service_date_sort') is None):
            display, sort_val = _extract_service_date_from_csv_row(csv_row)
            if display:
                result['service_date_display'] = display
            # Only replace the sort key when it is absent, None, or the
            # datetime.min sentinel.
            if 'service_date_sort' not in result or result['service_date_sort'] is None or result['service_date_sort'] == datetime.min:
                result['service_date_sort'] = sort_val

        # Normalize status
        if not result.get('status'):
            result['status'] = 'Processed'

        return result
    except Exception as e:
        # Best-effort: on any failure, log and hand back the original record.
        if MediLink_ConfigLoader:
            MediLink_ConfigLoader.log("Error in backfill_enhanced_result: {}".format(str(e)), level="WARNING")
        return enhanced_result
|