medicafe 0.250711.1__py3-none-any.whl → 0.250720.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of medicafe has been flagged as potentially problematic; see the registry's advisory page for details.

@@ -0,0 +1,484 @@
1
+ """
2
+ MediLink_Deductible_Validator.py
3
+ Validation helper functions to compare legacy API responses with Super Connector API responses
4
+ Compatible with Python 3.4.4
5
+ """
6
+
7
+ import json
8
+ import os
9
+ import sys
10
+ from datetime import datetime
11
+
12
+ # Python 3.4.4 compatibility imports
13
+ try:
14
+ from io import open
15
+ except ImportError:
16
+ pass
17
+
18
def deep_search_for_value(data, target_value, path="", max_depth=10, current_depth=0):
    """
    Recursively locate every occurrence of *target_value* inside nested
    dicts and lists, comparing values by their string form.

    Returns a list of dotted/indexed paths (e.g. "a.b[2].c") where the
    value was found.  Recursion stops once *max_depth* is exceeded.
    """
    if current_depth > max_depth:
        return []

    target_str = str(target_value)
    matches = []

    # Normalize dict and list children into (path, value) pairs so a
    # single loop handles both container kinds.
    if isinstance(data, dict):
        children = [("{}.{}".format(path, key) if path else key, value)
                    for key, value in data.items()]
    elif isinstance(data, list):
        children = [("{}[{}]".format(path, index), item)
                    for index, item in enumerate(data)]
    else:
        return matches

    for child_path, child in children:
        # Direct match check happens before descending, mirroring a
        # pre-order traversal.
        if str(child) == target_str:
            matches.append(child_path)
        if isinstance(child, (dict, list)):
            matches.extend(deep_search_for_value(
                child, target_value, child_path, max_depth, current_depth + 1))

    return matches
53
+
54
def extract_legacy_values_for_comparison(legacy_data):
    """
    Extract key values from a legacy API response for comparison.

    Only the first policy whose coverageType is "Medical" is examined.

    :param legacy_data: parsed legacy response (dict) or None
    :return: dict of flat key -> value pairs; empty if no usable policy
    """
    comparison_values = {}

    if not legacy_data or "memberPolicies" not in legacy_data:
        return comparison_values

    for policy in legacy_data.get("memberPolicies", []):
        # Skip non-medical policies
        if policy.get("policyInfo", {}).get("coverageType", "") != "Medical":
            continue

        # Patient info: guard against an empty patientInfo list
        # (indexing [0] directly would raise IndexError).
        patient_list = policy.get("patientInfo") or [{}]
        patient_info = patient_list[0]
        comparison_values["patient_lastName"] = patient_info.get("lastName", "")
        comparison_values["patient_firstName"] = patient_info.get("firstName", "")
        comparison_values["patient_middleName"] = patient_info.get("middleName", "")

        # Insurance info
        insurance_info = policy.get("insuranceInfo", {})
        comparison_values["insurance_type"] = insurance_info.get("insuranceType", "")
        comparison_values["insurance_typeCode"] = insurance_info.get("insuranceTypeCode", "")
        comparison_values["insurance_memberId"] = insurance_info.get("memberId", "")
        comparison_values["insurance_payerId"] = insurance_info.get("payerId", "")

        # Policy info
        policy_info = policy.get("policyInfo", {})
        comparison_values["policy_status"] = policy_info.get("policyStatus", "")

        # Deductible info: prefer individual, then family.  Guard the
        # 'inNetwork' sub-dict (direct ['inNetwork'] access would raise
        # KeyError when the payer omits it).
        deductible_info = policy.get("deductibleInfo", {})
        if 'individual' in deductible_info:
            in_network = deductible_info['individual'].get('inNetwork', {})
            comparison_values["deductible_remaining"] = in_network.get("remainingAmount", "")
        elif 'family' in deductible_info:
            in_network = deductible_info['family'].get('inNetwork', {})
            comparison_values["deductible_remaining"] = in_network.get("remainingAmount", "")
        else:
            comparison_values["deductible_remaining"] = "Not Found"

        # Only process the first medical policy
        break

    return comparison_values
98
+
99
def validate_super_connector_response(legacy_values, super_connector_data):
    """
    Compare values extracted from the legacy API against a Super
    Connector response and build a structured validation report.
    """
    report = {
        "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        "legacy_values": legacy_values,
        "super_connector_analysis": {},
        "missing_values": [],
        "found_values": [],
        "data_quality_issues": []
    }

    if not super_connector_data:
        report["super_connector_analysis"]["error"] = "No Super Connector data provided"
        return report

    # Look up every meaningful legacy value inside the new response.
    for legacy_key, legacy_value in legacy_values.items():
        if not legacy_value or legacy_value == "Not Found":
            continue
        paths = deep_search_for_value(super_connector_data, legacy_value)
        if paths:
            report["found_values"].append({
                "legacy_key": legacy_key,
                "legacy_value": legacy_value,
                "found_paths": paths
            })
        else:
            report["missing_values"].append({
                "legacy_key": legacy_key,
                "legacy_value": legacy_value,
                "status": "Not Found"
            })

    # Data-quality scan and structural snapshot of the new response.
    report["data_quality_issues"] = check_data_quality_issues(super_connector_data)
    report["super_connector_analysis"]["structure"] = analyze_response_structure(super_connector_data)

    return report
142
+
143
def _collect_deductible_amounts(plan_levels):
    """Collect (level_type, amount) pairs for every numeric
    remainingAmount found under 'deductibleInfo' plan levels
    ('individual' first, then 'family'); non-numeric values are skipped."""
    amounts = []
    for plan_level in plan_levels:
        if plan_level.get('level') != 'deductibleInfo':
            continue
        for level_type in ('individual', 'family'):
            for entry in plan_level.get(level_type, []):
                remaining = entry.get('remainingAmount')
                if remaining is None:
                    continue
                try:
                    amounts.append((level_type, float(remaining)))
                except (ValueError, TypeError):
                    pass
    return amounts

def check_data_quality_issues(super_connector_data):
    """
    Inspect a Super Connector response for known data-quality problems.

    :param super_connector_data: parsed Super Connector response dict
    :return: list of issue dicts, each with 'type', 'field', 'issue' and
             'recommendation' keys (plus 'value' where applicable)
    """
    issues = []

    # Check for non-standard plan descriptions
    plan_type_desc = super_connector_data.get("planTypeDescription", "")
    if plan_type_desc and len(plan_type_desc) > 50:
        issues.append({
            "type": "Non-Standard Plan Description",
            "field": "planTypeDescription",
            "value": plan_type_desc,
            "issue": "Using vendor-specific description instead of CMS standard name",
            "recommendation": "Use standard CMS plan type names (e.g., 'Preferred Provider Organization (PPO)')"
        })

    # Check for generic type codes
    coverage_types = super_connector_data.get("coverageTypes", [])
    if coverage_types:
        type_code = coverage_types[0].get("typeCode", "")
        if type_code == "M":
            issues.append({
                "type": "Generic Type Code",
                "field": "coverageTypes[0].typeCode",
                "value": type_code,
                "issue": "Using generic 'Medical' code instead of specific plan type code",
                "recommendation": "Use CMS standard codes (e.g., '12' for PPO, 'HM' for HMO)"
            })

    # Check for missing standard fields
    if not super_connector_data.get("productType"):
        issues.append({
            "type": "Missing Standard Field",
            "field": "productType",
            "issue": "Missing standard product type field",
            "recommendation": "Include standard product type (PPO, HMO, etc.)"
        })

    # Gather deductible amounts from both the nested GraphQL payload and
    # any top-level planLevels (previously two duplicated scan blocks).
    raw_response = super_connector_data.get('rawGraphQLResponse') or {}
    graphql_data = raw_response.get('data', {})
    eligibility_list = graphql_data.get('checkEligibility', {}).get('eligibility', [])

    deductible_amounts = []
    for eligibility in eligibility_list:
        deductible_amounts.extend(_collect_deductible_amounts(
            eligibility.get('eligibilityInfo', {}).get('planLevels', [])))
    deductible_amounts.extend(_collect_deductible_amounts(
        super_connector_data.get('planLevels', [])))

    if len(deductible_amounts) > 1:
        # Group by type and summarize the observed ranges.
        individual_amounts = [amt for level_type, amt in deductible_amounts if level_type == 'individual']
        family_amounts = [amt for level_type, amt in deductible_amounts if level_type == 'family']

        issue_details = []
        if individual_amounts:
            issue_details.append("Individual: {} values (range: {}-{})".format(
                len(individual_amounts), min(individual_amounts), max(individual_amounts)))
        if family_amounts:
            issue_details.append("Family: {} values (range: {}-{})".format(
                len(family_amounts), min(family_amounts), max(family_amounts)))

        issues.append({
            "type": "Multiple Deductible Amounts",
            "field": "planLevels[].deductibleInfo",
            "value": "{} total amounts found".format(len(deductible_amounts)),
            "issue": "Multiple deductible amounts found: {}".format("; ".join(issue_details)),
            "recommendation": "System will select highest non-zero individual amount, then family amount"
        })

    # Check for API errors.  An INFORMATIONAL error that carries data in
    # extensions.details is distinguished; every other combination is a
    # plain API error (the original had three identical else-branches).
    for error in raw_response.get('errors', []):
        error_code = error.get('code', 'UNKNOWN')
        error_desc = error.get('description', 'No description')
        details = (error.get('extensions') or {}).get('details')
        if error_code == 'INFORMATIONAL' and details:
            issues.append({
                "type": "Informational Error with Data",
                "field": "rawGraphQLResponse.errors",
                "value": error_code,
                "issue": "API returned informational error but provided data in extensions: {}".format(error_desc),
                "recommendation": "Data available in error extensions - system will attempt to extract"
            })
        else:
            issues.append({
                "type": "API Error",
                "field": "rawGraphQLResponse.errors",
                "value": error_code,
                "issue": "Super Connector API returned error: {}".format(error_desc),
                "recommendation": "Review API implementation and error handling"
            })

    # Check status code.  Compare as text so an integer 200 is not
    # falsely flagged (the original compared against the string '200').
    status_code = super_connector_data.get('statuscode')
    if status_code and str(status_code) != '200':
        issues.append({
            "type": "Non-200 Status Code",
            "field": "statuscode",
            "value": status_code,
            "issue": "API returned status code {} instead of 200".format(status_code),
            "recommendation": "Check API health and error handling"
        })

    # Multiple eligibility records are normal but worth surfacing.
    if len(eligibility_list) > 1:
        issues.append({
            "type": "Multiple Eligibility Records",
            "field": "rawGraphQLResponse.data.checkEligibility.eligibility",
            "value": "{} records found".format(len(eligibility_list)),
            "issue": "Multiple eligibility records returned - this is normal but may need special handling",
            "recommendation": "Ensure parsing logic handles multiple records correctly"
        })

    return issues
337
+
338
def analyze_response_structure(data, max_depth=5):
    """
    Walk a (possibly nested) response object and produce a flat map of
    path -> short description: container nodes report their type, leaf
    nodes report type plus a value preview truncated to 50 characters.
    Traversal stops below *max_depth*.
    """
    structure = {}

    def describe_leaf(value):
        # Safe string conversion (tolerates odd unicode payloads).
        try:
            preview = str(value)[:50]
        except (UnicodeEncodeError, UnicodeDecodeError):
            preview = repr(value)[:50]
        return "Type: {}, Value: {}".format(type(value).__name__, preview)

    def walk(node, prefix, depth):
        if depth > max_depth:
            return
        if isinstance(node, dict):
            children = [("{}.{}".format(prefix, key) if prefix else key, value)
                        for key, value in node.items()]
        elif isinstance(node, list):
            children = [("{}[{}]".format(prefix, index), item)
                        for index, item in enumerate(node)]
        else:
            return
        for child_path, child in children:
            if isinstance(child, (dict, list)):
                structure[child_path] = "Type: {}".format(type(child).__name__)
                walk(child, child_path, depth + 1)
            else:
                structure[child_path] = describe_leaf(child)

    walk(data, "", 0)
    return structure
377
+
378
def _write_report_header(f, validation_report):
    """Write the report banner and generation timestamp."""
    f.write("=" * 80 + "\n")
    f.write("SUPER CONNECTOR API VALIDATION REPORT\n")
    f.write("=" * 80 + "\n")
    f.write("Generated: {}\n\n".format(validation_report['timestamp']))

def _write_legacy_values(f, validation_report):
    """Write the legacy key/value section."""
    f.write("LEGACY API VALUES:\n")
    f.write("-" * 40 + "\n")
    for key, value in validation_report["legacy_values"].items():
        f.write("{}: {}\n".format(key, value))
    f.write("\n")

def _write_summary(f, validation_report):
    """Write the closing counts block."""
    f.write("\n" + "=" * 80 + "\n")
    f.write("SUMMARY:\n")
    f.write("Total legacy values: {}\n".format(len(validation_report['legacy_values'])))
    f.write("Found in Super Connector: {}\n".format(len(validation_report['found_values'])))
    f.write("Missing from Super Connector: {}\n".format(len(validation_report['missing_values'])))
    f.write("Data quality issues: {}\n".format(len(validation_report['data_quality_issues'])))
    f.write("=" * 80 + "\n")

def generate_validation_report(validation_report, output_file_path):
    """
    Write a human-readable validation report to *output_file_path*.

    The report is written as UTF-8; if an encoding error occurs, the file
    is rewritten as a shortened latin-1 report (header, legacy values and
    summary only).  Shared sections live in the _write_* helpers so the
    two paths cannot drift apart.
    """
    # Use explicit encoding for Python 3.4.4 compatibility
    try:
        with open(output_file_path, 'w', encoding='utf-8') as f:
            _write_report_header(f, validation_report)
            _write_legacy_values(f, validation_report)

            # Found values section
            if validation_report["found_values"]:
                f.write("FOUND VALUES IN SUPER CONNECTOR RESPONSE:\n")
                f.write("-" * 50 + "\n")
                for item in validation_report["found_values"]:
                    f.write("Legacy Key: {}\n".format(item['legacy_key']))
                    f.write("Legacy Value: {}\n".format(item['legacy_value']))
                    f.write("Found at paths:\n")
                    for path in item['found_paths']:
                        f.write("  - {}\n".format(path))
                    f.write("\n")

            # Missing values section
            if validation_report["missing_values"]:
                f.write("MISSING VALUES IN SUPER CONNECTOR RESPONSE:\n")
                f.write("-" * 50 + "\n")
                for item in validation_report["missing_values"]:
                    f.write("Legacy Key: {}\n".format(item['legacy_key']))
                    f.write("Legacy Value: {}\n".format(item['legacy_value']))
                    f.write("Status: {}\n\n".format(item['status']))

            # Data Quality Issues section
            if validation_report["data_quality_issues"]:
                f.write("\nDATA QUALITY ISSUES:\n")
                f.write("-" * 50 + "\n")
                for issue in validation_report["data_quality_issues"]:
                    f.write("Type: {}\n".format(issue['type']))
                    f.write("Field: {}\n".format(issue['field']))
                    if 'value' in issue:
                        f.write("Value: {}\n".format(issue['value']))
                    f.write("Issue: {}\n".format(issue['issue']))
                    f.write("Recommendation: {}\n".format(issue['recommendation']))
                    f.write("\n")

            # Super Connector structure analysis
            if "structure" in validation_report["super_connector_analysis"]:
                f.write("SUPER CONNECTOR RESPONSE STRUCTURE:\n")
                f.write("-" * 50 + "\n")
                for path, description in validation_report["super_connector_analysis"]["structure"].items():
                    f.write("{}: {}\n".format(path, description))

            _write_summary(f, validation_report)
    except (UnicodeEncodeError, UnicodeDecodeError):
        # Fallback for encoding issues: reopening in 'w' truncates any
        # partial UTF-8 output before the shortened report is written.
        with open(output_file_path, 'w', encoding='latin-1') as f:
            _write_report_header(f, validation_report)
            f.write("Note: Some characters may not display correctly due to encoding limitations.\n\n")
            _write_legacy_values(f, validation_report)
            _write_summary(f, validation_report)
470
+
471
def run_validation_comparison(legacy_data, super_connector_data, output_file_path):
    """
    Orchestrate the full comparison: extract legacy values, validate them
    against the Super Connector response, then write the text report.

    :param legacy_data: parsed legacy API response (dict) or None
    :param super_connector_data: parsed Super Connector response (dict)
    :param output_file_path: where the human-readable report is written
    :return: the validation report dict
    """
    extracted_values = extract_legacy_values_for_comparison(legacy_data)
    report = validate_super_connector_response(extracted_values, super_connector_data)
    generate_validation_report(report, output_file_path)
    return report