medicafe 0.250728.8-py3-none-any.whl → 0.250805.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of medicafe might be problematic.
- MediBot/MediBot.bat +233 -19
- MediBot/MediBot.py +138 -46
- MediBot/MediBot_Crosswalk_Library.py +127 -623
- MediBot/MediBot_Crosswalk_Utils.py +618 -0
- MediBot/MediBot_Preprocessor.py +72 -17
- MediBot/MediBot_Preprocessor_lib.py +470 -76
- MediBot/MediBot_UI.py +32 -17
- MediBot/MediBot_dataformat_library.py +68 -20
- MediBot/MediBot_docx_decoder.py +120 -19
- MediBot/MediBot_smart_import.py +180 -0
- MediBot/__init__.py +89 -0
- MediBot/get_medicafe_version.py +25 -0
- MediBot/update_json.py +35 -6
- MediBot/update_medicafe.py +19 -1
- MediCafe/MediLink_ConfigLoader.py +160 -0
- MediCafe/__init__.py +171 -0
- MediCafe/__main__.py +222 -0
- MediCafe/api_core.py +1098 -0
- MediCafe/api_core_backup.py +427 -0
- MediCafe/api_factory.py +306 -0
- MediCafe/api_utils.py +356 -0
- MediCafe/core_utils.py +450 -0
- MediCafe/graphql_utils.py +445 -0
- MediCafe/logging_config.py +123 -0
- MediCafe/logging_demo.py +61 -0
- MediCafe/migration_helpers.py +463 -0
- MediCafe/smart_import.py +436 -0
- MediLink/MediLink.py +66 -26
- MediLink/MediLink_837p_cob_library.py +28 -28
- MediLink/MediLink_837p_encoder.py +33 -34
- MediLink/MediLink_837p_encoder_library.py +243 -151
- MediLink/MediLink_837p_utilities.py +129 -5
- MediLink/MediLink_API_Generator.py +83 -60
- MediLink/MediLink_API_v3.py +1 -1
- MediLink/MediLink_ClaimStatus.py +177 -31
- MediLink/MediLink_DataMgmt.py +405 -72
- MediLink/MediLink_Decoder.py +20 -1
- MediLink/MediLink_Deductible.py +155 -28
- MediLink/MediLink_Display_Utils.py +72 -0
- MediLink/MediLink_Down.py +127 -5
- MediLink/MediLink_Gmail.py +712 -653
- MediLink/MediLink_PatientProcessor.py +257 -0
- MediLink/MediLink_UI.py +85 -61
- MediLink/MediLink_Up.py +28 -4
- MediLink/MediLink_insurance_utils.py +227 -264
- MediLink/MediLink_main.py +248 -0
- MediLink/MediLink_smart_import.py +264 -0
- MediLink/__init__.py +93 -0
- MediLink/insurance_type_integration_test.py +66 -76
- MediLink/test.py +1 -1
- MediLink/test_timing.py +59 -0
- {medicafe-0.250728.8.dist-info → medicafe-0.250805.0.dist-info}/METADATA +1 -1
- medicafe-0.250805.0.dist-info/RECORD +81 -0
- medicafe-0.250805.0.dist-info/entry_points.txt +2 -0
- {medicafe-0.250728.8.dist-info → medicafe-0.250805.0.dist-info}/top_level.txt +1 -0
- medicafe-0.250728.8.dist-info/RECORD +0 -59
- {medicafe-0.250728.8.dist-info → medicafe-0.250805.0.dist-info}/LICENSE +0 -0
- {medicafe-0.250728.8.dist-info → medicafe-0.250805.0.dist-info}/WHEEL +0 -0
MediLink/MediLink_Deductible.py
CHANGED
@@ -41,29 +41,77 @@ Features Added:
 2. Supports multiple manual requests, each generating its own Notepad file.
 3. Validates user inputs and provides feedback on required formats.
 4. Displays available Payer IDs as a note after manual entries.
+
+UPGRADED TO LATEST CORE_UTILS:
+- Uses setup_project_path() for standardized path management
+- Uses get_api_core_client() for improved API client handling
+- Uses create_config_cache() for better performance
+- Uses log_import_error() for enhanced error logging
+- Improved import error handling with fallbacks
 """
 # MediLink_Deductible.py
-import
-import os, sys, requests, json
+import os, sys, json
 from datetime import datetime

+# Add parent directory to Python path to access MediCafe module
+current_dir = os.path.dirname(os.path.abspath(__file__))
+parent_dir = os.path.dirname(current_dir)
+if parent_dir not in sys.path:
+    sys.path.insert(0, parent_dir)
+
+# Use latest core_utils for standardized setup and imports
+try:
+    from MediCafe.core_utils import (
+        setup_project_path,
+        get_shared_config_loader,
+        get_api_core_client,
+        log_import_error,
+        create_config_cache
+    )
+    # Set up project paths using latest core_utils
+    project_dir = setup_project_path(__file__)
+    MediLink_ConfigLoader = get_shared_config_loader()
+
+    # Import api_core for eligibility functions
+    try:
+        from MediCafe import api_core
+    except ImportError:
+        api_core = None
+
+    # Import api_core for eligibility functions
+    try:
+        from MediCafe import api_core
+    except ImportError:
+        api_core = None
+
+    # Import api_core for eligibility functions
+    try:
+        from MediCafe import api_core
+    except ImportError:
+        api_core = None
+except ImportError as e:
+    print("Error: Unable to import MediCafe.core_utils. Please ensure MediCafe package is properly installed.")
+    # Don't call log_import_error here since it's not available yet
+    print("Import error: {}".format(e))
+    sys.exit(1)
+
+# Safe import for requests with fallback
 try:
-
+    import requests
 except ImportError:
-
+    requests = None
+    print("Warning: requests module not available. Some API functionality may be limited.")

 try:
     from MediLink import MediLink_Deductible_Validator
-except ImportError:
+except ImportError as e:
+    print("Warning: Unable to import MediLink_Deductible_Validator: {}".format(e))
     import MediLink_Deductible_Validator

-project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
-if project_dir not in sys.path:
-    sys.path.append(project_dir)
-
 try:
     from MediBot import MediBot_Preprocessor_lib
-except ImportError:
+except ImportError as e:
+    print("Warning: Unable to import MediBot_Preprocessor_lib: {}".format(e))
     import MediBot_Preprocessor_lib

 # Function to check if the date format is correct
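The net effect of this hunk is a "bootstrap, then degrade gracefully" import section: the script makes the package root importable, pulls shared helpers from MediCafe.core_utils, and treats requests and the sibling modules as optional. A minimal standalone sketch of that pattern, assuming only the core_utils names shown in the hunk above; everything else here is illustrative, not the shipped module:

# Sketch of the bootstrap-with-fallback import pattern (illustrative only).
import os
import sys

# Make the package root importable when the script is run directly from MediLink/.
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if parent_dir not in sys.path:
    sys.path.insert(0, parent_dir)

try:
    # Names taken from the diff above; treated here as given.
    from MediCafe.core_utils import setup_project_path, get_shared_config_loader
    project_dir = setup_project_path(__file__)
    config_loader = get_shared_config_loader()
except ImportError as e:
    print("Error: MediCafe.core_utils is required: {}".format(e))
    sys.exit(1)

try:
    import requests  # optional; API calls are skipped when it is missing
except ImportError:
    requests = None
    print("Warning: requests not available; API functionality may be limited.")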
@@ -76,11 +124,25 @@ def validate_and_format_date(date_str):
             continue
     return None

-#
-
+# Use latest core_utils configuration cache for better performance
+_get_config, (_config_cache, _crosswalk_cache) = create_config_cache()

-#
-
+# Load configuration using latest core_utils pattern
+config, _ = _get_config()
+
+# Initialize the API client using latest core_utils
+client = get_api_core_client()
+if client is None:
+    print("Warning: API client not available via core_utils")
+    # Fallback to direct instantiation
+    try:
+        if api_core:
+            client = api_core.APIClient()
+        else:
+            raise ImportError("api_core not available")
+    except ImportError as e:
+        print("Error: Unable to create API client: {}".format(e))
+        client = None

 # Get provider_last_name and npi from configuration
 provider_last_name = config['MediLink_Config'].get('default_billing_provider_last_name', 'Unknown')
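This hunk replaces ad hoc module-level setup with the cached-configuration and client helpers. A short sketch of the fallback chain, assuming create_config_cache() returns a loader plus its cache objects and that api_core exposes an APIClient class, as the diff implies:

# Sketch: cached configuration plus API-client fallback (names from the diff, assumed here).
from MediCafe.core_utils import create_config_cache, get_api_core_client
try:
    from MediCafe import api_core
except ImportError:
    api_core = None

_get_config, (_config_cache, _crosswalk_cache) = create_config_cache()
config, _ = _get_config()            # repeated calls reuse the cache

client = get_api_core_client()       # preferred: shared client from core_utils
if client is None and api_core is not None:
    client = api_core.APIClient()    # fallback: construct the client directly
if client is None:
    print("Warning: no API client available; eligibility checks will be skipped.")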
@@ -97,14 +159,14 @@ payer_ids = ['87726', '03432', '96385', '95467', '86050', '86047', '95378', '061
 CSV_FILE_PATH = config.get('CSV_FILE_PATH', "")
 csv_data = MediBot_Preprocessor_lib.load_csv_data(CSV_FILE_PATH)

-# Only keep rows that
-valid_rows = [row for row in csv_data if str(row.get('Ins1 Payer ID', '')) in payer_ids]
+# Only keep rows that have an exact match with a payer ID from the payer_ids list
+valid_rows = [row for row in csv_data if str(row.get('Ins1 Payer ID', '')).strip() in payer_ids]

 # Extract important columns for summary with fallback
 summary_valid_rows = [
     {
         'DOB': row.get('Patient DOB', row.get('DOB', '')), # Try 'Patient DOB' first, then 'DOB'
-        'Ins1 Member ID': row.get('Primary Policy Number', row.get('Ins1 Member ID', '')), # Try 'Primary Policy Number' first, then 'Ins1 Member ID'
+        'Ins1 Member ID': row.get('Primary Policy Number', row.get('Ins1 Member ID', '')).strip(), # Try 'Primary Policy Number' first, then 'Ins1 Member ID'
         'Ins1 Payer ID': row.get('Ins1 Payer ID', '')
     }
     for row in valid_rows
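The substantive change in this hunk is the added .strip() calls: payer IDs and member IDs padded with whitespace in the CSV no longer fall out of the exact-match filter. A self-contained illustration with made-up rows:

# Illustration of the .strip() fix with invented CSV rows.
payer_ids = ['87726', '03432']
csv_data = [
    {'Ins1 Payer ID': '87726 ', 'Primary Policy Number': ' ABC123 '},  # padded with spaces
    {'Ins1 Payer ID': '99999', 'Primary Policy Number': 'XYZ789'},     # payer not in the list
]

valid_rows = [row for row in csv_data
              if str(row.get('Ins1 Payer ID', '')).strip() in payer_ids]
member_ids = [row.get('Primary Policy Number', '').strip() for row in valid_rows]

print(len(valid_rows))  # 1 -- the padded '87726 ' row is now kept
print(member_ids)       # ['ABC123']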
@@ -210,7 +272,7 @@ def get_eligibility_info(client, payer_id, provider_last_name, date_of_birth, me

         # Get legacy response
         MediLink_ConfigLoader.log("Getting legacy get_eligibility_v3 API response", level="INFO")
-        legacy_eligibility =
+        legacy_eligibility = api_core.get_eligibility_v3(
             client, payer_id, provider_last_name, 'MemberIDDateOfBirth', date_of_birth, member_id, npi
         )

@@ -218,7 +280,7 @@ def get_eligibility_info(client, payer_id, provider_last_name, date_of_birth, me
         MediLink_ConfigLoader.log("Getting new get_eligibility_super_connector API response", level="INFO")
         super_connector_eligibility = None
         try:
-            super_connector_eligibility =
+            super_connector_eligibility = api_core.get_eligibility_super_connector(
                 client, payer_id, provider_last_name, 'MemberIDDateOfBirth', date_of_birth, member_id, npi
             )
         except Exception as e:
@@ -271,7 +333,7 @@ def get_eligibility_info(client, payer_id, provider_last_name, date_of_birth, me

         # Only get legacy response
         MediLink_ConfigLoader.log("Getting legacy get_eligibility_v3 API response", level="INFO")
-        eligibility =
+        eligibility = api_core.get_eligibility_v3(
             client, payer_id, provider_last_name, 'MemberIDDateOfBirth', date_of_birth, member_id, npi
         )

@@ -279,17 +341,82 @@ def get_eligibility_info(client, payer_id, provider_last_name, date_of_birth, me
         MediLink_ConfigLoader.log("Eligibility response: {}".format(json.dumps(eligibility, indent=4)), level="DEBUG")

         return eligibility
-    except requests.exceptions.HTTPError as e:
-        # Log the HTTP error response
-        MediLink_ConfigLoader.log("HTTPError: {}".format(e), level="ERROR")
-        MediLink_ConfigLoader.log("Response content: {}".format(e.response.content), level="ERROR")
     except Exception as e:
-        #
-
+        # Handle HTTP errors if requests is available
+        if requests and hasattr(requests, 'exceptions') and isinstance(e, requests.exceptions.HTTPError):
+            # Log the HTTP error response
+            print("API Request Error: {}".format(e))
+            if hasattr(e, 'response') and hasattr(e.response, 'content'):
+                MediLink_ConfigLoader.log("Response content: {}".format(e.response.content), level="ERROR")
+        else:
+            # Log any other exceptions
+            print("Eligibility Check Error: {}".format(e))
         return None

 # Helper functions to extract data from different API response formats
-#
+# TODO (HIGH PRIORITY - API Response Parser Debugging):
+# PROBLEM: API responses are returning correctly but the parser functions below
+# are not successfully extracting the super_connector variables (likely eligibility data).
+# This suggests a schema mismatch between expected and actual API response format.
+#
+# DEBUGGING STEPS:
+# 1. Response Structure Analysis:
+#    - Add comprehensive logging of raw API responses before parsing
+#    - Compare current response format vs expected format in parser functions
+#    - Check if API endpoint has changed response schema recently
+#    - Verify if different endpoints return different response structures
+#
+# 2. Parser Function Validation:
+#    - Test each extract_*_patient_info() function with sample responses
+#    - Check if field names/paths have changed (e.g., 'patientInfo' vs 'patient_info')
+#    - Verify array indexing logic (e.g., [0] access on empty arrays)
+#    - Check case sensitivity in field access
+#
+# 3. Super Connector Variable Mapping:
+#    - Document what "super_connector variables" should contain
+#    - Identify which fields from API response map to these variables
+#    - Verify the expected format vs actual format
+#    - Check if variable names have changed in the application
+#
+# IMPLEMENTATION PLAN:
+# 1. Enhanced Logging:
+#    - Add log_api_response_structure(response) function
+#    - Log raw JSON before each parser function call
+#    - Add field-by-field parsing logs with null checks
+#
+# 2. Parser Robustness:
+#    - Add null/empty checks for all field accesses
+#    - Implement graceful fallbacks for missing fields
+#    - Add validation for expected data types
+#    - Handle both old and new response formats if schema changed
+#
+# 3. Schema Validation:
+#    - Create validate_api_response_schema(response, expected_schema) function
+#    - Define expected schemas for each API endpoint
+#    - Alert when response doesn't match expected schema
+#    - Suggest schema updates when mismatches occur
+#
+# 4. Testing Framework:
+#    - Create test cases with known good API responses
+#    - Test parser functions independently of API calls
+#    - Add integration tests for end-to-end parsing workflow
+#    - Create mock responses for development testing
+#
+# IMMEDIATE ACTIONS:
+# 1. Add detailed logging before each extract_*_patient_info() call
+# 2. Log the structure of the 'policy' object being passed to parsers
+# 3. Check if the issue is in extract_legacy_patient_info() vs extract_super_connector_patient_info()
+# 4. Verify which API endpoint is being called and if it matches expected parser
+#
+# FILES TO EXAMINE:
+# - This file: all extract_*_patient_info() functions
+# - MediCafe/api_core.py: API call implementation and response handling
+# - Config files: Check if API endpoints or credentials have changed
+#
+# RELATED ISSUES:
+# - May be connected to authentication or endpoint configuration problems
+# - Could indicate API version updates that changed response format
+# - Might be related to different payer-specific response formats

 def extract_legacy_patient_info(policy):
     """Extract patient information from legacy API response format"""
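With requests now optional, the handler above catches a broad Exception and only narrows to requests.exceptions.HTTPError when the module actually imported. A minimal sketch of that guard on its own; the helper name and messages are illustrative:

# Sketch: classify an error as an HTTP error only when requests is importable.
try:
    import requests
except ImportError:
    requests = None

def describe_error(e):
    """Label an exception without assuming requests is installed."""
    if requests is not None and isinstance(e, requests.exceptions.HTTPError):
        content = getattr(getattr(e, 'response', None), 'content', b'')
        return "API request error: {} (body: {})".format(e, content)
    return "Eligibility check error: {}".format(e)

print(describe_error(ValueError("unexpected response shape")))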
@@ -725,7 +852,7 @@ if __name__ == "__main__":
     print(table_header)
     print("-" * len(table_header))

-    # PERFORMANCE FIX: Optimize patient-payer processing to avoid O(
+    # PERFORMANCE FIX: Optimize patient-payer processing to avoid O(PxN) complexity
     # Instead of nested loops, process each patient once and try payer_ids until success
     # TODO: We should be able to determine the correct payer_id for each patient ahead of time
     # by looking up their insurance information from the CSV data or crosswalk mapping.
MediLink/MediLink_Display_Utils.py
ADDED
@@ -0,0 +1,72 @@
+# MediLink_Display_Utils.py
+# Display utility functions extracted from MediLink_UI.py to eliminate circular dependencies
+# Provides centralized display functions for insurance options and patient summaries
+
+from datetime import datetime
+
+# Use core utilities for standardized imports
+from MediCafe.core_utils import get_shared_config_loader
+MediLink_ConfigLoader = get_shared_config_loader()
+
+def display_insurance_options(insurance_options=None):
+    """Display insurance options, loading from config if not provided"""
+
+    if insurance_options is None:
+        config, _ = MediLink_ConfigLoader.load_configuration()
+        insurance_options = config.get('MediLink_Config', {}).get('insurance_options', {})
+
+    print("\nInsurance Type Options (SBR09 Codes):")
+    print("-" * 50)
+    for code, description in sorted(insurance_options.items()):
+        print("{:>3}: {}".format(code, description))
+    print("-" * 50)
+    print("Note: '12' (PPO) is the default if no selection is made.")
+    print() # Add a blank line for better readability
+
+def display_patient_summaries(detailed_patient_data):
+    """
+    Displays summaries of all patients and their suggested endpoints.
+    """
+    print("\nSummary of patient details and suggested endpoint:")
+    for index, summary in enumerate(detailed_patient_data, start=1):
+        try:
+            display_file_summary(index, summary)
+        except KeyError as e:
+            print("Summary at index {} is missing key: {}".format(index, e))
+    print() # add blank line for improved readability.
+
+def display_file_summary(index, summary):
+    # Ensure surgery_date is converted to a datetime object
+    surgery_date = datetime.strptime(summary['surgery_date'], "%m-%d-%y")
+
+    # Add header row if it's the first index
+    if index == 1:
+        print("{:<3} {:5} {:<10} {:20} {:15} {:3} {:20}".format(
+            "No.", "Date", "ID", "Name", "Primary Ins.", "IT", "Current Endpoint"
+        ))
+        print("-"*82)
+
+    # Check if insurance_type is available; if not, set a default placeholder (this should already be '12' at this point)
+    insurance_type = summary.get('insurance_type', '--')
+
+    # Get the effective endpoint (confirmed > user preference > suggestion > default)
+    effective_endpoint = (summary.get('confirmed_endpoint') or
+                          summary.get('user_preferred_endpoint') or
+                          summary.get('suggested_endpoint', 'AVAILITY'))
+
+    # Format insurance type for display - handle both 2 and 3 character codes
+    if insurance_type and len(insurance_type) <= 3:
+        insurance_display = insurance_type
+    else:
+        insurance_display = insurance_type[:3] if insurance_type else '--'
+
+    # Displays the summary of a file.
+    print("{:02d}. {:5} ({:<8}) {:20} {:15} {:3} {:20}".format(
+        index,
+        surgery_date.strftime("%m-%d"),
+        summary['patient_id'],
+        summary['patient_name'][:20],
+        summary['primary_insurance'][:15],
+        insurance_display,
+        effective_endpoint[:20])
+    )
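For orientation, the new module is a set of plain console formatters; display_file_summary() expects each summary dict to provide surgery_date in MM-DD-YY form plus patient_id, patient_name, and primary_insurance, with insurance_type and the endpoint keys optional. A hedged usage sketch with invented sample data (the import path assumes the package is installed as shown in the file list):

# Usage sketch for the new display helpers (sample values are invented).
from MediLink.MediLink_Display_Utils import display_patient_summaries

sample = [{
    'surgery_date': '08-05-25',        # parsed with "%m-%d-%y"
    'patient_id': 'P0001',
    'patient_name': 'DOE, JANE',
    'primary_insurance': 'UHC',
    'insurance_type': '12',            # SBR09 code; '12' (PPO) is the documented default
    'suggested_endpoint': 'AVAILITY',  # used when no confirmed or user-preferred endpoint is set
}]

display_patient_summaries(sample)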
MediLink/MediLink_Down.py
CHANGED
@@ -1,8 +1,49 @@
 # MediLink_Down.py
-import os, shutil
-
-
-
+import os, shutil, sys
+
+# Add paths
+project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+current_dir = os.path.abspath(os.path.dirname(__file__))
+if project_dir not in sys.path:
+    sys.path.append(project_dir)
+if current_dir not in sys.path:
+    sys.path.append(current_dir)
+
+# Use core utilities for imports
+try:
+    from MediCafe.core_utils import get_shared_config_loader
+    MediLink_ConfigLoader = get_shared_config_loader()
+    if MediLink_ConfigLoader is not None:
+        log = MediLink_ConfigLoader.log
+        load_configuration = MediLink_ConfigLoader.load_configuration
+    else:
+        raise ImportError("MediLink_ConfigLoader not available")
+except ImportError:
+    # Fallback for when core_utils is not available
+    def log(message, level="INFO"):
+        print("[{}] {}".format(level, message))
+    def load_configuration():
+        return {}, {}
+
+try:
+    from MediLink_Decoder import process_decoded_file, display_consolidated_records, write_records_to_csv
+except ImportError:
+    # Fallback if decoder not available
+    process_decoded_file = None
+    display_consolidated_records = None
+    write_records_to_csv = None
+
+try:
+    from MediLink_DataMgmt import operate_winscp
+except ImportError:
+    operate_winscp = None
+
+try:
+    from tqdm import tqdm
+except ImportError:
+    # Fallback for when tqdm is not available
+    def tqdm(iterable, **kwargs):
+        return iterable

 def handle_files(local_storage_path, downloaded_files):
     """
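The fallback definitions in this hunk keep the module importable on minimal installs: log() degrades to print(), load_configuration() to empty dicts, and tqdm to a pass-through iterator. The pass-through shim, shown by itself:

# Stand-alone version of the tqdm shim used above: same call shape, no progress bar.
try:
    from tqdm import tqdm
except ImportError:
    def tqdm(iterable, **kwargs):
        # Accepts tqdm-style keyword arguments (e.g. desc=) and ignores them.
        return iterable

for name in tqdm(['ENDPOINT_A', 'ENDPOINT_B'], desc="Processing endpoints"):
    print("processing", name)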
@@ -139,10 +180,91 @@ def main(desired_endpoint=None):
         else:
             log("No files were downloaded for endpoint: {}. Exiting...".format(desired_endpoint), level="WARNING")
             return None, None
-
+
     except Exception as e:
         log("An error occurred in MediLink_Down.main: {}".format(e), level="ERROR")
         return None, None

+
+def check_for_new_remittances(config=None):
+    """
+    Function to check for new remittance files across all configured endpoints.
+    Loads the configuration, validates it, and processes each endpoint to download and handle files.
+    Accumulates results from all endpoints and processes them together at the end.
+    """
+    # Start the process and log the initiation
+    log("Starting check_for_new_remittances function")
+    print("\nChecking for new files across all endpoints...")
+    log("Checking for new files across all endpoints...")
+
+    # Step 1: Load and validate the configuration
+    if config is None:
+        config, _ = load_configuration()
+
+    if not config or 'MediLink_Config' not in config or 'endpoints' not in config['MediLink_Config']:
+        log("Error: Config is missing necessary sections. Aborting...", level="ERROR")
+        return
+
+    endpoints = config['MediLink_Config'].get('endpoints')
+    if not isinstance(endpoints, dict):
+        log("Error: 'endpoints' is not a dictionary. Aborting...", level="ERROR")
+        return
+
+    # Lists to accumulate all consolidated records and translated files across all endpoints
+    all_consolidated_records = []
+    all_translated_files = []
+
+    # Step 2: Process each endpoint and accumulate results
+    for endpoint_key, endpoint_info in tqdm(endpoints.items(), desc="Processing endpoints"):
+        # Validate endpoint structure
+        if not endpoint_info or not isinstance(endpoint_info, dict):
+            log("Error: Invalid endpoint structure for {}. Skipping...".format(endpoint_key), level="ERROR")
+            continue
+
+        if 'remote_directory_down' in endpoint_info:
+            # Process the endpoint and handle the files
+            log("Processing endpoint: {}".format(endpoint_key))
+            consolidated_records, translated_files = process_endpoint(endpoint_key, endpoint_info, config)
+
+            # Accumulate the results for later processing
+            if consolidated_records:
+                all_consolidated_records.extend(consolidated_records)
+            if translated_files:
+                all_translated_files.extend(translated_files)
+        else:
+            log("Skipping endpoint '{}'. 'remote_directory_down' not configured.".format(endpoint_info.get('name', 'Unknown')), level="WARNING")
+
+    # Step 3: After processing all endpoints, handle the accumulated results
+    if all_consolidated_records:
+        display_consolidated_records(all_consolidated_records) # Ensure this is called only once
+        prompt_csv_export(all_consolidated_records, config['MediLink_Config']['local_storage_path'])
+    else:
+        log("No records to display after processing all endpoints.", level="WARNING")
+        print("No records to display after processing all endpoints.")
+
+
+def process_endpoint(endpoint_key, endpoint_info, config):
+    """
+    Helper function to process a single endpoint.
+    Downloads files from the endpoint, processes them, and returns the consolidated records and translated files.
+    """
+    try:
+        # Process the files for the given endpoint
+        local_storage_path = config['MediLink_Config']['local_storage_path']
+        log("[Process Endpoint] Local storage path set to {}".format(local_storage_path))
+        downloaded_files = operate_winscp("download", None, endpoint_info, local_storage_path, config)
+
+        if downloaded_files:
+            log("[Process Endpoint] WinSCP Downloaded the following files: \n{}".format(downloaded_files))
+            return handle_files(local_storage_path, downloaded_files)
+        else:
+            log("[Process Endpoint]No files were downloaded for endpoint: {}.".format(endpoint_key), level="WARNING")
+            return [], []
+
+    except Exception as e:
+        # Handle any exceptions that occur during the processing
+        log("Error processing endpoint {}: {}".format(endpoint_key, e), level="ERROR")
+        return [], []
+
 if __name__ == "__main__":
     main()