medicafe-0.240613.0-py3-none-any.whl → medicafe-0.240716.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of medicafe might be problematic.
- MediBot/MediBot.bat +37 -5
- MediBot/MediBot_Crosswalk_Library.py +15 -8
- MediBot/MediBot_docx_decoder.py +13 -5
- MediLink/MediLink.py +38 -73
- MediLink/MediLink_837p_encoder_library.py +4 -27
- MediLink/MediLink_API_Generator.py +246 -0
- MediLink/MediLink_API_v2.py +2 -0
- MediLink/MediLink_API_v3.py +325 -0
- MediLink/MediLink_ClaimStatus.py +144 -0
- MediLink/MediLink_ConfigLoader.py +13 -7
- MediLink/MediLink_Decoder.py +122 -20
- MediLink/MediLink_Deductible.py +203 -0
- MediLink/MediLink_Down.py +84 -52
- MediLink/MediLink_Parser.py +106 -24
- MediLink/MediLink_UI.py +4 -26
- MediLink/MediLink_Up.py +2 -1
- {medicafe-0.240613.0.dist-info → medicafe-0.240716.2.dist-info}/METADATA +2 -1
- {medicafe-0.240613.0.dist-info → medicafe-0.240716.2.dist-info}/RECORD +21 -17
- {medicafe-0.240613.0.dist-info → medicafe-0.240716.2.dist-info}/WHEEL +1 -1
- {medicafe-0.240613.0.dist-info → medicafe-0.240716.2.dist-info}/LICENSE +0 -0
- {medicafe-0.240613.0.dist-info → medicafe-0.240716.2.dist-info}/top_level.txt +0 -0
MediLink/MediLink_Decoder.py
CHANGED
@@ -3,9 +3,9 @@ import os
 import sys
 import csv
 from MediLink_ConfigLoader import load_configuration, log
-from MediLink_Parser import parse_era_content, parse_277_content
+from MediLink_Parser import parse_era_content, parse_277_content, parse_277IBR_content, parse_277EBR_content, parse_dpt_content, parse_ebt_content, parse_ibt_content
 
-def process_file(file_path, output_directory):
+def process_file(file_path, output_directory, return_records=False):
     if not os.path.exists(output_directory):
         os.makedirs(output_directory)
 
@@ -14,50 +14,152 @@ def process_file(file_path, output_directory):
 
     if file_type == 'ERA':
         records = parse_era_content(content)
-
-
-    elif file_type == '277':
-        records = parse_277_content(content)
-
-
+    elif file_type in ['277', '277IBR', '277EBR']:
+        records = parse_277_content(content) if file_type == '277' else parse_277IBR_content(content) if file_type == '277IBR' else parse_277EBR_content(content)
+    elif file_type == 'DPT':
+        records = parse_dpt_content(content)
+    elif file_type == 'EBT':
+        records = parse_ebt_content(content)
+    elif file_type == 'IBT':
+        records = parse_ibt_content(content)
     else:
-
+        log("Unsupported file type: {}".format(file_type))
+        return []
 
-
-
-
+    formatted_records = format_records(records, file_type)
+    if not return_records:
+        display_table(formatted_records)
+    output_file_path = os.path.join(output_directory, os.path.basename(file_path) + '_decoded.csv')
+    write_records_to_csv(formatted_records, output_file_path)
+    log("Decoded data written to {}".format(output_file_path))
+    return formatted_records
 
 def determine_file_type(file_path):
     if file_path.endswith('.era'):
         return 'ERA'
     elif file_path.endswith('.277'):
         return '277'
+    elif file_path.endswith('.277ibr'):
+        return '277IBR'
+    elif file_path.endswith('.277ebr'):
+        return '277EBR'
+    elif file_path.endswith('.dpt'):
+        return 'DPT'
+    elif file_path.endswith('.ebt'):
+        return 'EBT'
+    elif file_path.endswith('.ibt'):
+        return 'IBT'
     else:
-
-
+        log("Unsupported file type for file: {}".format(file_path))
+        return None
+
 def read_file(file_path):
     with open(file_path, 'r') as file:
-        content = file.read()
+        content = file.read()
     return content
 
-def write_records_to_csv(records, output_file_path
+def write_records_to_csv(records, output_file_path):
+    fieldnames = ['Claim #', 'Status', 'Patient', 'Proc.', 'Serv.', 'Allowed', 'Paid', 'Pt Resp', 'Charged']
     with open(output_file_path, 'w', newline='') as csvfile:
         writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
         writer.writeheader()
         for record in records:
             writer.writerow(record)
 
+def format_records(records, file_type):
+    formatted_records = []
+    for record in records:
+        if file_type == 'IBT':
+            formatted_record = {
+                'Claim #': record.get('Patient Control Number', ''),
+                'Status': record.get('Status', ''),
+                'Patient': record.get('Patient Name', ''),
+                'Proc.': format_date(record.get('To Date', '')),
+                'Serv.': format_date(record.get('From Date', '')),
+                'Allowed': '',
+                'Paid': '',
+                'Pt Resp': '',
+                'Charged': record.get('Charge', '')
+            }
+        else:
+            formatted_record = {
+                'Claim #': record.get('Chart Number', record.get('Claim Status Tracking #', record.get('Claim #', ''))),
+                'Status': record.get('claimStatus', record.get('Status', '')),
+                'Patient': record.get('memberInfo', {}).get('ptntFn', '') + ' ' + record.get('memberInfo', {}).get('ptntLn', '') if 'memberInfo' in record else record.get('Patient', ''),
+                'Proc.': format_date(record.get('processed_date', record.get('Received Date', ''))),
+                'Serv.': format_date(record.get('firstSrvcDt', record.get('Date of Service', ''))),
+                'Allowed': record.get('totalAllowdAmt', record.get('Allowed Amount', '')),
+                'Paid': record.get('totalPaidAmt', record.get('Amount Paid', '')),
+                'Pt Resp': record.get('totalPtntRespAmt', record.get('Patient Responsibility', '')),
+                'Charged': record.get('totalChargedAmt', record.get('Charge', ''))
+            }
+        formatted_records.append(formatted_record)
+    return formatted_records
+
+def format_date(date_str):
+    if date_str and len(date_str) >= 10:
+        return date_str[5:7] + '-' + date_str[8:10]  # Assuming date format is YYYY-MM-DD, this returns MM-DD
+    return ''
+
+def display_table(records):
+    # Define the new fieldnames and their respective widths
+    new_fieldnames = ['Claim #', 'Status', 'Patient', 'Proc.', 'Serv.', 'Allowed', 'Paid', 'Pt Resp', 'Charged']
+    col_widths = {field: len(field) for field in new_fieldnames}
+
+    # Update column widths based on records
+    for record in records:
+        for field in new_fieldnames:
+            col_widths[field] = max(col_widths[field], len(str(record.get(field, ''))))
+
+    # Create table header
+    header = " | ".join("{:<{}}".format(field, col_widths[field]) for field in new_fieldnames)
+    print(header)
+    print("-" * len(header))
+
+    # Create table rows
+    for record in records:
+        row = " | ".join("{:<{}}".format(str(record.get(field, '')), col_widths[field]) for field in new_fieldnames)
+        print(row)
+
+def display_consolidated_records(records):
+    if not records:
+        return
+
+    new_fieldnames = ['Claim #', 'Status', 'Patient', 'Proc.', 'Serv.', 'Allowed', 'Paid', 'Pt Resp', 'Charged']
+    col_widths = {field: len(field) for field in new_fieldnames}
+
+    for record in records:
+        for field in new_fieldnames:
+            col_widths[field] = max(col_widths[field], len(str(record.get(field, ''))))
+
+    header = " | ".join("{:<{}}".format(field, col_widths[field]) for field in new_fieldnames)
+    print(header)
+    print("-" * len(header))
+
+    for record in records:
+        row = " | ".join("{:<{}}".format(str(record.get(field, '')), col_widths[field]) for field in new_fieldnames)
+        print(row)
+
 if __name__ == "__main__":
-    config = load_configuration()
+    config, _ = load_configuration()
 
     files = sys.argv[1:]
     if not files:
         log("No files provided as arguments.", 'error')
         sys.exit(1)
 
-    output_directory = config['
+    output_directory = config['MediLink_Config'].get('local_storage_path')
+    all_records = []
     for file_path in files:
         try:
-            process_file(file_path, output_directory)
+            records = process_file(file_path, output_directory, return_records=True)
+            all_records.extend(records)
         except Exception as e:
-            log("Failed to process {}: {}".format(file_path, e), 'error')
+            log("Failed to process {}: {}".format(file_path, e), 'error')
+
+    display_consolidated_records(all_records)
+
+    if input("Do you want to export the consolidated records to a CSV file? (y/n): ").strip().lower() == 'y':
+        consolidated_csv_path = os.path.join(output_directory, "Consolidated_Records.csv")
+        write_records_to_csv(all_records, consolidated_csv_path)
+        log("Consolidated records written to {}".format(consolidated_csv_path))
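The decoder's new return_records parameter is what lets callers consolidate output across many response files instead of printing one table per file. A minimal sketch of that consumer flow, using only the functions added in this diff (the input paths and output directory are assumptions for illustration):

# Sketch only: drives the consolidated flow added in this release.
# The file paths and output directory below are assumed for the example.
from MediLink_Decoder import process_file, display_consolidated_records, write_records_to_csv

files = ['responses/claims.era', 'responses/status.277', 'responses/batch.ibt']
output_directory = 'translated_csvs'

all_records = []
for file_path in files:
    # return_records=True suppresses the per-file table; the formatted
    # records come back to the caller (a per-file CSV is still written).
    all_records.extend(process_file(file_path, output_directory, return_records=True))

# One combined, width-padded table across every file type.
display_consolidated_records(all_records)
write_records_to_csv(all_records, 'translated_csvs/Consolidated_Records.csv')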
MediLink/MediLink_Deductible.py
ADDED
@@ -0,0 +1,203 @@
+"""
+# Create a summary JSON
+summary = {
+    "Payer ID": ins_payerID,
+    "Provider": provider_last_name,
+    "Member ID": ins_memberID,
+    "Date of Birth": dob,
+    "Patient Name": patient_name,
+    "Patient Info": {
+        "DOB": dob,
+        "Address": "{} {}".format(patient_info.get("addressLine1", ""), patient_info.get("addressLine2", "")).strip(),
+        "City": patient_info.get("city", ""),
+        "State": patient_info.get("state", ""),
+        "ZIP": patient_info.get("zip", ""),
+        "Relationship": patient_info.get("relationship", "")
+    },
+    "Insurance Info": {
+        "Payer Name": insurance_info.get("payerName", ""),
+        "Payer ID": ins_payerID,
+        "Member ID": ins_memberID,
+        "Group Number": insurance_info.get("groupNumber", ""),
+        "Insurance Type": ins_insuranceType,
+        "Type Code": ins_insuranceTypeCode,
+        "Address": "{} {}".format(insurance_info.get("addressLine1", ""), insurance_info.get("addressLine2", "")).strip(),
+        "City": insurance_info.get("city", ""),
+        "State": insurance_info.get("state", ""),
+        "ZIP": insurance_info.get("zip", "")
+    },
+    "Policy Info": {
+        "Eligibility Dates": eligibilityDates,
+        "Policy Member ID": policy_info.get("memberId", ""),
+        "Policy Status": policy_status
+    },
+    "Deductible Info": {
+        "Remaining Amount": remaining_amount
+    }
+}
+
+# Print debug JSON
+# Uncomment below if you need to debug later
+# print("\nDebug JSON Summary:")
+# print(json.dumps(summary, indent=2))
+"""
+import MediLink_API_v3
+import os
+import sys
+from datetime import datetime
+import requests
+import json
+
+try:
+    from MediLink import MediLink_ConfigLoader
+except ImportError:
+    import MediLink_ConfigLoader
+
+project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+sys.path.append(project_dir)
+
+try:
+    from MediBot import MediBot_Preprocessor_lib
+except ImportError:
+    import MediBot_Preprocessor_lib
+
+# Load configuration
+config, _ = MediLink_ConfigLoader.load_configuration()
+
+# Initialize the API client
+client = MediLink_API_v3.APIClient()
+
+# Get provider_last_name and npi from configuration
+provider_last_name = config['MediLink_Config'].get('default_billing_provider_last_name')
+npi = config['MediLink_Config'].get('default_billing_provider_npi')
+
+# Define the list of payer_id's to iterate over
+payer_ids = ['87726', '03432', '96385', '95467', '86050', '86047', '95378', '06111', '37602']
+
+# Get the latest CSV
+CSV_FILE_PATH = config.get('CSV_FILE_PATH', "")
+csv_data = MediBot_Preprocessor_lib.load_csv_data(CSV_FILE_PATH)
+
+# Only keep rows that contain a valid number from the payer_ids list
+valid_rows = [row for row in csv_data if str(row['Ins1 Payer ID']) in payer_ids]
+
+# Function to check if the date format is correct
+def validate_and_format_date(date_str):
+    for fmt in ('%Y-%m-%d', '%m/%d/%Y', '%d-%b-%Y', '%d-%m-%Y'):
+        try:
+            formatted_date = datetime.strptime(date_str, fmt).strftime('%Y-%m-%d')
+            return formatted_date
+        except ValueError:
+            continue
+    return None
+
+# List of patients with DOB and MemberID from CSV data
+patients = [
+    (validate_and_format_date(row['Patient DOB']), row['Primary Policy Number'])
+    for row in valid_rows if validate_and_format_date(row['Patient DOB']) is not None
+]
+
+# Function to get eligibility information
+def get_eligibility_info(client, payer_id, provider_last_name, date_of_birth, member_id, npi):
+    try:
+        # Log the parameters being sent to the function
+        MediLink_ConfigLoader.log("Calling get_eligibility_v3 with parameters:", level="INFO")
+        MediLink_ConfigLoader.log("payer_id: {}".format(payer_id), level="INFO")
+        MediLink_ConfigLoader.log("provider_last_name: {}".format(provider_last_name), level="INFO")
+        MediLink_ConfigLoader.log("date_of_birth: {}".format(date_of_birth), level="INFO")
+        MediLink_ConfigLoader.log("member_id: {}".format(member_id), level="INFO")
+        MediLink_ConfigLoader.log("npi: {}".format(npi), level="INFO")
+
+        # Call the get_eligibility_v3 function
+        eligibility = MediLink_API_v3.get_eligibility_v3(
+            client, payer_id, provider_last_name, 'MemberIDDateOfBirth', date_of_birth, member_id, npi
+        )
+
+        # Log the response
+        MediLink_ConfigLoader.log("Eligibility response: {}".format(json.dumps(eligibility, indent=4)), level="INFO")
+
+        return eligibility
+    except requests.exceptions.HTTPError as e:
+        # Log the HTTP error response
+        MediLink_ConfigLoader.log("HTTPError: {}".format(e), level="ERROR")
+        MediLink_ConfigLoader.log("Response content: {}".format(e.response.content), level="ERROR")
+    except Exception as e:
+        # Log any other exceptions
+        MediLink_ConfigLoader.log("Error: {}".format(e), level="ERROR")
+    return None
+
+# Function to extract required fields and display in a tabular format
+def display_eligibility_info(data, dob, member_id):
+    if data is None:
+        return
+
+    for policy in data["memberPolicies"]:
+        # Skip non-medical policies
+        if policy["policyInfo"]["coverageType"] != "Medical":
+            continue
+
+        patient_info = policy["patientInfo"][0]
+        lastName = patient_info.get("lastName", "")
+        firstName = patient_info.get("firstName", "")
+        middleName = patient_info.get("middleName", "")
+
+        # Check if the remaining amount is per individual first, then fallback to family
+        if 'individual' in policy["deductibleInfo"]:
+            remaining_amount = policy["deductibleInfo"]["individual"]["inNetwork"].get("remainingAmount", "")
+        else:
+            remaining_amount = policy["deductibleInfo"]["family"]["inNetwork"].get("remainingAmount", "")
+
+        insurance_info = policy["insuranceInfo"]
+        ins_insuranceType = insurance_info.get("insuranceType", "")
+        ins_insuranceTypeCode = insurance_info.get("insuranceTypeCode", "")
+        ins_memberID = insurance_info.get("memberId", "")
+        ins_payerID = insurance_info.get("payerId", "")
+
+        policy_info = policy["policyInfo"]
+        eligibilityDates = policy_info.get("eligibilityDates", "")
+        policy_status = policy_info.get("policyStatus", "")
+
+        patient_name = "{} {} {}".format(firstName, middleName, lastName).strip()[:20]
+
+        # Display patient information in a table row format
+        eligibility_end_date = eligibilityDates.get("endDate", "")
+        table_row = "{:<20} | {:<10} | {:<5} | {:<30}".format(
+            patient_name, dob, ins_payerID, ins_insuranceType)
+        print(table_row)
+        table_row_details = "{:<20} | {:<10} | {:<5} | {:<15} | {:<8} | {:<15} | {:<20}".format(
+            "", "", "", ins_insuranceTypeCode, eligibility_end_date[-10:], policy_status, remaining_amount)
+        print(table_row_details)
+
+# Print the table header once before entering the loop
+table_header = "{:<20} | {:<10} | {:<5} | {:<30}".format(
+    "Patient Name", "DOB", "Payer ID", "Insurance Type")
+print(table_header)
+print("-" * len(table_header))
+sub_header = "{:<20} | {:<10} | {:<5} | {:<15} | {:<8} | {:<15} | {:<20}".format(
+    "", "", "", "Type Code", "End Date", "Policy Status", "Remaining Amount")
+print(sub_header)
+print("-" * len(sub_header))
+
+# Set to keep track of processed patients
+processed_patients = set()
+
+# Loop through each payer_id and patient to call the API, then display the eligibility information
+errors = []
+for payer_id in payer_ids:
+    for dob, member_id in patients:
+        # Skip if this patient has already been processed
+        if (dob, member_id) in processed_patients:
+            continue
+        try:
+            eligibility_data = get_eligibility_info(client, payer_id, provider_last_name, dob, member_id, npi)
+            if eligibility_data is not None:
+                display_eligibility_info(eligibility_data, dob, member_id)  # Display as we get the result
+                processed_patients.add((dob, member_id))  # Mark this patient as processed
+        except Exception as e:
+            errors.append((dob, member_id, str(e)))
+
+# Display errors if any
+if errors:
+    print("\nErrors encountered during API calls:")
+    for error in errors:
+        print("DOB: {}, Member ID: {}, Error: {}".format(error[0], error[1], error[2]))
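One detail worth noting in the new deductible script: display_eligibility_info reads the remaining deductible from the individual in-network bucket first and only falls back to the family bucket. A small sketch that exercises just that fallback with fabricated payloads (the dict shapes mirror the fields read above; the amounts are made up for illustration):

# Sketch: the individual-then-family deductible fallback in isolation.
def remaining_deductible(policy):
    # Mirrors the lookup order used in display_eligibility_info above.
    deductible = policy["deductibleInfo"]
    bucket = deductible["individual"] if "individual" in deductible else deductible["family"]
    return bucket["inNetwork"].get("remainingAmount", "")

# Fabricated example policies; the amounts are illustrative only.
with_individual = {"deductibleInfo": {"individual": {"inNetwork": {"remainingAmount": "250.00"}}}}
family_only = {"deductibleInfo": {"family": {"inNetwork": {"remainingAmount": "1200.00"}}}}

print(remaining_deductible(with_individual))  # 250.00
print(remaining_deductible(family_only))      # 1200.00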
MediLink/MediLink_Down.py
CHANGED
@@ -3,22 +3,11 @@ import os
 import argparse
 import shutil
 import glob
+import csv
 from MediLink_Decoder import process_file
-from
-import
-# Import decoders for other file types
+from MediLink_ConfigLoader import load_configuration, log
+from MediLink_DataMgmt import operate_winscp
 
-"""
-Main triaging function for handling report downloads and processing from various endpoints. This function
-handles downloading reports, moving files, and decoding them into a readable format. The goal is to
-provide detailed receipt and troubleshooting information for the claims.
-
-Key Enhancements:
-- Handle multiple file types (ERA, 277, etc.) and integrate respective decoders.
-- Support multi-endpoint processing.
-- Implement progress tracking for long-running operations.
-- Provide both consolidated CSV output and in-memory parsed data for real-time display.
-"""
 def move_downloaded_files(local_storage_path, config):
     local_response_directory = os.path.join(local_storage_path, "responses")
 
@@ -26,7 +15,7 @@ def move_downloaded_files(local_storage_path, config):
         os.makedirs(local_response_directory)
 
     download_dir = config['MediLink_Config']['local_storage_path']
-    file_extensions = ['.era', '.277']  # Extendable list of file extensions
+    file_extensions = ['.era', '.277', '.277ibr', '.277ebr', '.dpt', '.ebt', '.ibt']  # Extendable list of file extensions
 
     for ext in file_extensions:
         downloaded_files = [f for f in os.listdir(download_dir) if f.endswith(ext)]
@@ -34,12 +23,13 @@ def move_downloaded_files(local_storage_path, config):
             source_path = os.path.join(download_dir, file)
             destination_path = os.path.join(local_response_directory, file)
             shutil.move(source_path, destination_path)
-
+            log("Moved '{}' to '{}'".format(file, local_response_directory))
 
 def find_files(file_path_pattern):
     normalized_path = os.path.normpath(file_path_pattern)
-
-
+    if os.path.isdir(normalized_path):
+        return [os.path.join(normalized_path, f) for f in os.listdir(normalized_path) if os.path.isfile(os.path.join(normalized_path, f))]
+    elif "*" in normalized_path:
         matching_files = glob.glob(normalized_path)
         return [os.path.normpath(file) for file in matching_files]
     else:
@@ -47,53 +37,66 @@ def find_files(file_path_pattern):
 
 def translate_files(files, output_directory):
     translated_files = []
+    consolidated_records = []
+    file_counts = {'.era': 0, '.277': 0, '.277ibr': 0, '.277ebr': 0, '.dpt': 0, '.ebt': 0, '.ibt': 0}
+
     for file in files:
+        ext = os.path.splitext(file)[1]
+        if ext in file_counts:
+            file_counts[ext] += 1
+
        try:
-            process_file(file, output_directory)
+            records = process_file(file, output_directory, return_records=True)
+            consolidated_records.extend(records)
             csv_file_path = os.path.join(output_directory, os.path.basename(file) + '_decoded.csv')
-
+            log("Translated file to CSV: {}".format(csv_file_path), level="INFO")
             translated_files.append(csv_file_path)
         except ValueError as ve:
-
+            log("Unsupported file type: {}".format(file), level="WARNING")
         except Exception as e:
-
+            log("Error processing file {}: {}".format(file, e), level="ERROR")
 
-
-
-
+    print("Detected and processed file counts by type:")
+    for ext, count in file_counts.items():
+        print("{}: {} files detected".format(ext, count))
+
+    return consolidated_records, translated_files
 
 def display_translated_files(translated_files):
     print("\nTranslated Files Summary:")
     for file in translated_files:
-        print(" - {}"
+        print(" - {}".format(file))
 
-def main(desired_endpoint='AVAILITY'):
+def main():
     parser = argparse.ArgumentParser(description="Process files and convert them to CSV format.")
     parser.add_argument('--config_path', type=str, help='Path to the configuration JSON file', default="json/config.json")
-    parser.add_argument('--
-    parser.add_argument('--file_path_pattern', type=str, help='Optional: Specify a path pattern for files for direct translation.', default=None)
+    parser.add_argument('--file_path_pattern', type=str, help='Path pattern or directory for files to process.', default=None)
     args = parser.parse_args()
 
-    config, _ =
+    config, _ = load_configuration(args.config_path)
     local_storage_path = config['MediLink_Config']['local_storage_path']
     output_directory = os.path.join(local_storage_path, "translated_csvs")
-
+
+    # If a file path pattern is provided, process those files directly
     if args.file_path_pattern:
         files = find_files(args.file_path_pattern)
         if files:
             files_str = ', '.join(files)
-
-
-
-
-
+            log("Translating files: {}".format(files_str), level="INFO")
+            consolidated_records, translated_files = translate_files(files, output_directory)
+            log("Translation completed.", level="INFO")
+            if consolidated_records:
+                display_consolidated_records(consolidated_records)
+                prompt_csv_export(consolidated_records, output_directory)
+            return
         else:
-
+            log("No files found matching: {}".format(args.file_path_pattern), level="WARNING")
             return
-
-
+
+    # Handle downloading files via WinSCP
+    endpoint_key = 'AVAILITY'
     if endpoint_key not in config['MediLink_Config']['endpoints']:
-
+        log("Endpoint '{}' not found in configuration. Using default 'AVAILITY'.".format(endpoint_key), level="WARNING")
         endpoint_key = 'AVAILITY'
 
     endpoint_configs = [config['MediLink_Config']['endpoints'][key] for key in config['MediLink_Config']['endpoints']]
@@ -104,19 +107,48 @@ def main(desired_endpoint='AVAILITY'):
 
     move_downloaded_files(local_storage_path, config)
 
-
-
-
-
+    consolidated_records, translated_files = translate_files(downloaded_files, output_directory)
+    if consolidated_records:
+        display_consolidated_records(consolidated_records)
+        prompt_csv_export(consolidated_records, output_directory)
+
+def display_consolidated_records(records):
+    # Define the new fieldnames and their respective widths
+    new_fieldnames = ['Claim #', 'Status', 'Patient', 'Proc.', 'Serv.', 'Allowed', 'Paid', 'Pt Resp', 'Charged']
+    col_widths = {field: len(field) for field in new_fieldnames}
+
+    # Update column widths based on records
+    for record in records:
+        for field in new_fieldnames:
+            col_widths[field] = max(col_widths[field], len(str(record.get(field, ''))))
 
-
-
+    # Create table header
+    header = " | ".join("{:<{}}".format(field, col_widths[field]) for field in new_fieldnames)
+    print(header)
+    print("-" * len(header))
 
-
+    # Create table rows
+    for record in records:
+        row = " | ".join("{:<{}}".format(str(record.get(field, '')), col_widths[field]) for field in new_fieldnames)
+        print(row)
+
+def prompt_csv_export(records, output_directory):
+    if records:
+        user_input = input("Do you want to export the consolidated records to a CSV file? (y/n): ")
+        if user_input.lower() == 'y':
+            output_file_path = os.path.join(output_directory, "Consolidated_Records.csv")
+            write_records_to_csv(records, output_file_path)
+            log("Consolidated CSV file created at: {}".format(output_file_path), level="INFO")
+        else:
+            log("CSV export skipped by user.", level="INFO")
+
+def write_records_to_csv(records, output_file_path):
+    fieldnames = ['Claim #', 'Status', 'Patient', 'Proc.', 'Serv.', 'Allowed', 'Paid', 'Pt Resp', 'Charged']
+    with open(output_file_path, 'w', newline='') as csvfile:
+        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
+        writer.writeheader()
+        for record in records:
+            writer.writerow(record)
 
 if __name__ == "__main__":
-
-    if consolidate_csv_path:
-        print("CSV File Created: {}".format(consolidate_csv_path))
-    else:
-        print("No CSV file was created.")
+    main()