medicafe 0.250711.1__py3-none-any.whl → 0.250720.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of medicafe might be problematic. Click here for more details.

@@ -0,0 +1,440 @@
1
+ """
2
+ MediLink_Deductible_Validator.py
3
+ Validation helper functions to compare legacy API responses with Super Connector API responses
4
+ Compatible with Python 3.4.4
5
+ """
6
+
7
+ import json
8
+ import os
9
+ import sys
10
+ from datetime import datetime
11
+
12
+ # Python 3.4.4 compatibility imports
13
+ try:
14
+ from io import open
15
+ except ImportError:
16
+ pass
17
+
18
def deep_search_for_value(data, target_value, path="", max_depth=10, current_depth=0):
    """
    Recursively search nested dicts/lists for a value (compared via str()).

    Args:
        data: Nested dict/list structure to search.
        target_value: Value to look for; both sides are str()-converted before comparing.
        path: Dotted/indexed path prefix accumulated during recursion.
        max_depth: Recursion cutoff; deeper levels are not searched.
        current_depth: Internal recursion counter.

    Returns:
        List of path strings (e.g. "a.b[0].c") where the value was found.
        The top-level object itself is never reported, only its descendants.
    """
    if current_depth > max_depth:
        return []

    matches = []
    target_str = str(target_value)

    # Normalize dicts and lists into a single (child_path, child) stream so the
    # match-then-recurse logic is written once.
    if isinstance(data, dict):
        children = [("{}.{}".format(path, key) if path else key, val)
                    for key, val in data.items()]
    elif isinstance(data, list):
        children = [("{}[{}]".format(path, idx), item)
                    for idx, item in enumerate(data)]
    else:
        return matches

    for child_path, child in children:
        if str(child) == target_str:
            matches.append(child_path)
        if isinstance(child, (dict, list)):
            matches.extend(deep_search_for_value(
                child, target_value, child_path, max_depth, current_depth + 1))

    return matches
53
+
54
def extract_legacy_values_for_comparison(legacy_data):
    """
    Extract key values from a legacy API response for comparison.

    Only the first policy whose policyInfo.coverageType == "Medical" is used;
    non-medical policies are skipped.

    Args:
        legacy_data: Parsed legacy API response dict (may be None).

    Returns:
        Dict of flat comparison keys (patient_*, insurance_*, policy_status,
        deductible_remaining). Empty dict when legacy_data is falsy or has no
        "memberPolicies" key. deductible_remaining is "Not Found" when neither
        an individual nor a family deductible section exists.
    """
    comparison_values = {}

    if not legacy_data or "memberPolicies" not in legacy_data:
        return comparison_values

    for policy in legacy_data.get("memberPolicies", []):
        # Skip non-medical policies
        if policy.get("policyInfo", {}).get("coverageType", "") != "Medical":
            continue

        # Guard against a missing OR empty patientInfo list (the original
        # indexed [0] unconditionally and raised IndexError on []).
        patient_info = (policy.get("patientInfo") or [{}])[0]
        comparison_values["patient_lastName"] = patient_info.get("lastName", "")
        comparison_values["patient_firstName"] = patient_info.get("firstName", "")
        comparison_values["patient_middleName"] = patient_info.get("middleName", "")

        # Extract insurance info
        insurance_info = policy.get("insuranceInfo", {})
        comparison_values["insurance_type"] = insurance_info.get("insuranceType", "")
        comparison_values["insurance_typeCode"] = insurance_info.get("insuranceTypeCode", "")
        comparison_values["insurance_memberId"] = insurance_info.get("memberId", "")
        comparison_values["insurance_payerId"] = insurance_info.get("payerId", "")

        # Extract policy info
        policy_info = policy.get("policyInfo", {})
        comparison_values["policy_status"] = policy_info.get("policyStatus", "")

        # Extract deductible info. Use .get chains so a missing "inNetwork"
        # section yields "" instead of raising KeyError (original bug).
        deductible_info = policy.get("deductibleInfo", {})
        if 'individual' in deductible_info:
            comparison_values["deductible_remaining"] = (
                deductible_info['individual'].get('inNetwork', {}).get("remainingAmount", ""))
        elif 'family' in deductible_info:
            comparison_values["deductible_remaining"] = (
                deductible_info['family'].get('inNetwork', {}).get("remainingAmount", ""))
        else:
            comparison_values["deductible_remaining"] = "Not Found"

        # Only process the first medical policy
        break

    return comparison_values
98
+
99
def validate_super_connector_response(legacy_values, super_connector_data):
    """
    Compare legacy API values against a Super Connector API response.

    Args:
        legacy_values: Flat dict produced by extract_legacy_values_for_comparison.
        super_connector_data: Parsed Super Connector response (may be falsy).

    Returns:
        Report dict with keys: timestamp, legacy_values,
        super_connector_analysis, missing_values, found_values,
        data_quality_issues. When super_connector_data is falsy, only an
        "error" entry is filled in under super_connector_analysis.
    """
    report = {
        "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        "legacy_values": legacy_values,
        "super_connector_analysis": {},
        "missing_values": [],
        "found_values": [],
        "data_quality_issues": []
    }

    if not super_connector_data:
        report["super_connector_analysis"]["error"] = "No Super Connector data provided"
        return report

    # Locate each usable legacy value somewhere inside the Super Connector payload.
    for legacy_key, legacy_value in legacy_values.items():
        if not legacy_value or legacy_value == "Not Found":
            continue
        paths = deep_search_for_value(super_connector_data, legacy_value)
        if paths:
            report["found_values"].append({
                "legacy_key": legacy_key,
                "legacy_value": legacy_value,
                "found_paths": paths
            })
        else:
            report["missing_values"].append({
                "legacy_key": legacy_key,
                "legacy_value": legacy_value,
                "status": "Not Found"
            })

    # Data-quality scan and structural overview of the response.
    report["data_quality_issues"] = check_data_quality_issues(super_connector_data)
    report["super_connector_analysis"]["structure"] = analyze_response_structure(super_connector_data)

    return report
142
+
143
def _collect_deductible_amounts(plan_levels):
    """Collect ('individual'|'family', float) remainingAmount pairs from
    deductibleInfo plan levels; non-numeric amounts are skipped."""
    amounts = []
    for plan_level in plan_levels:
        if plan_level.get('level') != 'deductibleInfo':
            continue
        for group in ('individual', 'family'):
            for entry in plan_level.get(group, []):
                remaining = entry.get('remainingAmount')
                if remaining is None:
                    continue
                try:
                    amounts.append((group, float(remaining)))
                except (ValueError, TypeError):
                    pass
    return amounts


def _get_eligibility_list(super_connector_data):
    """Return the eligibility record list nested inside rawGraphQLResponse, or []."""
    raw_response = super_connector_data.get('rawGraphQLResponse', {})
    return raw_response.get('data', {}).get('checkEligibility', {}).get('eligibility', [])


def check_data_quality_issues(super_connector_data):
    """
    Check for data quality issues in a Super Connector response.

    Args:
        super_connector_data: Parsed Super Connector response dict.

    Returns:
        List of issue dicts, each with "type", "field", "issue",
        "recommendation" and (usually) "value" keys, in a fixed order:
        plan description, type code, missing productType, multiple
        deductible amounts, API errors, multiple eligibility records.
    """
    issues = []

    # Check for non-standard (overly long, vendor-specific) plan descriptions.
    plan_type_desc = super_connector_data.get("planTypeDescription", "")
    if plan_type_desc and len(plan_type_desc) > 50:
        issues.append({
            "type": "Non-Standard Plan Description",
            "field": "planTypeDescription",
            "value": plan_type_desc,
            "issue": "Using vendor-specific description instead of CMS standard name",
            "recommendation": "Use standard CMS plan type names (e.g., 'Preferred Provider Organization (PPO)')"
        })

    # Check for generic type codes.
    coverage_types = super_connector_data.get("coverageTypes", [])
    if coverage_types:
        type_code = coverage_types[0].get("typeCode", "")
        if type_code == "M":
            issues.append({
                "type": "Generic Type Code",
                "field": "coverageTypes[0].typeCode",
                "value": type_code,
                "issue": "Using generic 'Medical' code instead of specific plan type code",
                "recommendation": "Use CMS standard codes (e.g., '12' for PPO, 'HM' for HMO)"
            })

    # Check for missing standard fields.
    if not super_connector_data.get("productType"):
        issues.append({
            "type": "Missing Standard Field",
            "field": "productType",
            "issue": "Missing standard product type field",
            "recommendation": "Include standard product type (PPO, HMO, etc.)"
        })

    # Collect deductible amounts from both the nested GraphQL eligibility
    # records and the top-level planLevels (the original duplicated this
    # extraction loop verbatim; it is factored into _collect_deductible_amounts).
    deductible_amounts = []
    if "rawGraphQLResponse" in super_connector_data:
        for eligibility in _get_eligibility_list(super_connector_data):
            plan_levels = eligibility.get('eligibilityInfo', {}).get('planLevels', [])
            deductible_amounts.extend(_collect_deductible_amounts(plan_levels))
    deductible_amounts.extend(
        _collect_deductible_amounts(super_connector_data.get('planLevels', [])))

    if len(deductible_amounts) > 1:
        # Group by type and report value counts and ranges.
        individual_amounts = [amt for kind, amt in deductible_amounts if kind == 'individual']
        family_amounts = [amt for kind, amt in deductible_amounts if kind == 'family']

        issue_details = []
        if individual_amounts:
            issue_details.append("Individual: {} values (range: {}-{})".format(
                len(individual_amounts), min(individual_amounts), max(individual_amounts)))
        if family_amounts:
            issue_details.append("Family: {} values (range: {}-{})".format(
                len(family_amounts), min(family_amounts), max(family_amounts)))

        issues.append({
            "type": "Multiple Deductible Amounts",
            "field": "planLevels[].deductibleInfo",
            "value": "{} total amounts found".format(len(deductible_amounts)),
            "issue": "Multiple deductible amounts found: {}".format("; ".join(issue_details)),
            "recommendation": "System will select highest non-zero individual amount, then family amount"
        })

    # Check for API errors reported by the GraphQL layer.
    if "rawGraphQLResponse" in super_connector_data:
        raw_response = super_connector_data.get('rawGraphQLResponse', {})
        for error in raw_response.get('errors', []):
            issues.append({
                "type": "API Error",
                "field": "rawGraphQLResponse.errors",
                "value": error.get('code', 'UNKNOWN'),
                "issue": "Super Connector API returned error: {}".format(error.get('description', 'No description')),
                "recommendation": "Review API implementation and error handling"
            })

    # Check for multiple eligibility records (this is actually good, but worth noting).
    if "rawGraphQLResponse" in super_connector_data:
        eligibility_list = _get_eligibility_list(super_connector_data)
        if len(eligibility_list) > 1:
            issues.append({
                "type": "Multiple Eligibility Records",
                "field": "rawGraphQLResponse.data.checkEligibility.eligibility",
                "value": "{} records found".format(len(eligibility_list)),
                "issue": "Multiple eligibility records returned - this is normal but may need special handling",
                "recommendation": "Ensure parsing logic handles multiple records correctly"
            })

    return issues
293
+
294
def analyze_response_structure(data, max_depth=5):
    """
    Build a flat path -> description map of a nested response structure.

    Args:
        data: Nested dict/list response to inspect.
        max_depth: Recursion cutoff; deeper levels are not described.

    Returns:
        Dict mapping each child path (e.g. "a.b[0]") to either
        "Type: <name>" for containers or "Type: <name>, Value: <first 50
        chars>" for leaves. A non-container top-level value yields {}.
    """
    structure = {}

    def describe_leaf(value):
        # Safe string conversion for Python 3.4.4: fall back to repr() when
        # str() trips over encoding problems.
        try:
            text = str(value)[:50]
        except (UnicodeEncodeError, UnicodeDecodeError):
            text = repr(value)[:50]
        return "Type: {}, Value: {}".format(type(value).__name__, text)

    def walk(node, path="", depth=0):
        if depth > max_depth:
            return

        if isinstance(node, dict):
            children = [("{}.{}".format(path, key) if path else key, val)
                        for key, val in node.items()]
        elif isinstance(node, list):
            children = [("{}[{}]".format(path, idx), item)
                        for idx, item in enumerate(node)]
        else:
            return

        for child_path, child in children:
            if isinstance(child, (dict, list)):
                structure[child_path] = "Type: {}".format(type(child).__name__)
                walk(child, child_path, depth + 1)
            else:
                structure[child_path] = describe_leaf(child)

    walk(data)
    return structure
333
+
334
def _write_report_header(f, timestamp):
    """Write the report banner and generation timestamp."""
    f.write("=" * 80 + "\n")
    f.write("SUPER CONNECTOR API VALIDATION REPORT\n")
    f.write("=" * 80 + "\n")
    f.write("Generated: {}\n\n".format(timestamp))


def _write_legacy_values_section(f, validation_report):
    """Write the LEGACY API VALUES section."""
    f.write("LEGACY API VALUES:\n")
    f.write("-" * 40 + "\n")
    for key, value in validation_report["legacy_values"].items():
        f.write("{}: {}\n".format(key, value))
    f.write("\n")


def _write_summary_section(f, validation_report):
    """Write the closing SUMMARY counters."""
    f.write("\n" + "=" * 80 + "\n")
    f.write("SUMMARY:\n")
    f.write("Total legacy values: {}\n".format(len(validation_report['legacy_values'])))
    f.write("Found in Super Connector: {}\n".format(len(validation_report['found_values'])))
    f.write("Missing from Super Connector: {}\n".format(len(validation_report['missing_values'])))
    f.write("Data quality issues: {}\n".format(len(validation_report['data_quality_issues'])))
    f.write("=" * 80 + "\n")


def generate_validation_report(validation_report, output_file_path):
    """
    Write a human-readable validation report to output_file_path.

    Args:
        validation_report: Report dict from validate_super_connector_response.
        output_file_path: Destination path; the file is overwritten.

    The file is written as UTF-8. If a Unicode encode/decode error occurs,
    the file is rewritten from scratch as latin-1 with an abbreviated report
    (header, legacy values and summary only), matching the original fallback.
    """
    # Use explicit encoding for Python 3.4.4 compatibility.
    try:
        with open(output_file_path, 'w', encoding='utf-8') as f:
            _write_report_header(f, validation_report['timestamp'])
            _write_legacy_values_section(f, validation_report)

            # Found values section
            if validation_report["found_values"]:
                f.write("FOUND VALUES IN SUPER CONNECTOR RESPONSE:\n")
                f.write("-" * 50 + "\n")
                for item in validation_report["found_values"]:
                    f.write("Legacy Key: {}\n".format(item['legacy_key']))
                    f.write("Legacy Value: {}\n".format(item['legacy_value']))
                    f.write("Found at paths:\n")
                    for path in item['found_paths']:
                        f.write(" - {}\n".format(path))
                    f.write("\n")

            # Missing values section
            if validation_report["missing_values"]:
                f.write("MISSING VALUES IN SUPER CONNECTOR RESPONSE:\n")
                f.write("-" * 50 + "\n")
                for item in validation_report["missing_values"]:
                    f.write("Legacy Key: {}\n".format(item['legacy_key']))
                    f.write("Legacy Value: {}\n".format(item['legacy_value']))
                    f.write("Status: {}\n\n".format(item['status']))

            # Data quality issues section
            if validation_report["data_quality_issues"]:
                f.write("\nDATA QUALITY ISSUES:\n")
                f.write("-" * 50 + "\n")
                for issue in validation_report["data_quality_issues"]:
                    f.write("Type: {}\n".format(issue['type']))
                    f.write("Field: {}\n".format(issue['field']))
                    if 'value' in issue:
                        f.write("Value: {}\n".format(issue['value']))
                    f.write("Issue: {}\n".format(issue['issue']))
                    f.write("Recommendation: {}\n".format(issue['recommendation']))
                    f.write("\n")

            # Super Connector structure analysis
            if "structure" in validation_report["super_connector_analysis"]:
                f.write("SUPER CONNECTOR RESPONSE STRUCTURE:\n")
                f.write("-" * 50 + "\n")
                for path, description in validation_report["super_connector_analysis"]["structure"].items():
                    f.write("{}: {}\n".format(path, description))

            _write_summary_section(f, validation_report)
    except (UnicodeEncodeError, UnicodeDecodeError):
        # Fallback for encoding issues: rewrite the file with latin-1 and an
        # abbreviated report (same subset as the original fallback path).
        with open(output_file_path, 'w', encoding='latin-1') as f:
            _write_report_header(f, validation_report['timestamp'])
            f.write("Note: Some characters may not display correctly due to encoding limitations.\n\n")
            _write_legacy_values_section(f, validation_report)
            _write_summary_section(f, validation_report)
426
+
427
def run_validation_comparison(legacy_data, super_connector_data, output_file_path):
    """
    End-to-end validation: extract legacy values, compare them against the
    Super Connector response, and write the report file.

    Args:
        legacy_data: Parsed legacy API response.
        super_connector_data: Parsed Super Connector API response.
        output_file_path: Where the text report is written.

    Returns:
        The validation report dict (same object that was written out).
    """
    legacy_values = extract_legacy_values_for_comparison(legacy_data)
    report = validate_super_connector_response(legacy_values, super_connector_data)
    generate_validation_report(report, output_file_path)
    return report