medicafe 0.240415.1__py3-none-any.whl → 0.240419.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of medicafe might be problematic. Click here for more details.
- MediBot/MediBot.bat +70 -0
- MediBot/MediBot.py +316 -0
- MediBot/MediBot_Charges.py +28 -0
- MediBot/MediBot_Preprocessor.py +283 -0
- MediBot/MediBot_UI.py +190 -0
- MediBot/MediBot_dataformat_library.py +145 -0
- MediBot/MediPost.py +5 -0
- MediBot/PDF_to_CSV_Cleaner.py +211 -0
- MediBot/__init__.py +0 -0
- MediBot/update_json.py +43 -0
- MediBot/update_medicafe.py +19 -0
- MediLink/MediLink.py +277 -0
- MediLink/MediLink_277_decoder.py +92 -0
- MediLink/MediLink_837p_encoder.py +392 -0
- MediLink/MediLink_837p_encoder_library.py +679 -0
- MediLink/MediLink_ConfigLoader.py +69 -0
- MediLink/MediLink_DataMgmt.py +206 -0
- MediLink/MediLink_Down.py +151 -0
- MediLink/MediLink_ERA_decoder.py +192 -0
- MediLink/MediLink_Gmail.py +4 -0
- MediLink/MediLink_Scheduler.py +132 -0
- MediLink/MediLink_StatusCheck.py +4 -0
- MediLink/MediLink_UI.py +116 -0
- MediLink/MediLink_Up.py +117 -0
- MediLink/MediLink_batch.bat +7 -0
- MediLink/Soumit_api.py +22 -0
- MediLink/__init__.py +0 -0
- {medicafe-0.240415.1.dist-info → medicafe-0.240419.2.dist-info}/METADATA +19 -17
- medicafe-0.240419.2.dist-info/RECORD +32 -0
- {medicafe-0.240415.1.dist-info → medicafe-0.240419.2.dist-info}/WHEEL +5 -5
- medicafe-0.240419.2.dist-info/top_level.txt +2 -0
- medicafe-0.240415.1.dist-info/RECORD +0 -5
- medicafe-0.240415.1.dist-info/top_level.txt +0 -1
- {medicafe-0.240415.1.dist-info → medicafe-0.240419.2.dist-info}/LICENSE +0 -0
|
@@ -0,0 +1,211 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
import re
|
|
3
|
+
|
|
4
|
+
def load_csv(file_path):
    """Read a CSV file with no header row into a pandas DataFrame."""
    return pd.read_csv(file_path, header=None)
|
|
7
|
+
|
|
8
|
+
def segment_data(data):
    """Split the raw CSV rows into per-patient record lists.

    A row whose first non-null cell contains the literal marker
    'PATIENT INFORMATION' starts a new patient record; every non-null
    cell (including the marker cell itself) is appended to the current
    record.

    :param data: DataFrame as returned by load_csv (no header row).
    :return: list of patient records, each a flat list of cell values.
    """
    patient_records = []
    current_record = []

    for row in data.itertuples(index=False):
        # Drop NaN cells so each row becomes a flat list of real values.
        cells = [cell for cell in row if pd.notna(cell)]
        if not cells:
            continue

        # Only string cells can hold the record delimiter. The previous
        # code applied 'in' directly, which raised TypeError whenever the
        # first cell was numeric.
        if isinstance(cells[0], str) and 'PATIENT INFORMATION' in cells[0]:
            if current_record:
                # A new delimiter means the previous record is complete.
                patient_records.append(current_record)
                current_record = []

        current_record.extend(cells)

    # The final record has no trailing delimiter; flush it explicitly.
    if current_record:
        patient_records.append(current_record)

    return patient_records
|
|
36
|
+
|
|
37
|
+
# Function to extract key-value pairs from a patient record segment
|
|
38
|
+
def extract_patient_data(patient_record):
    """Pull the basic demographic fields out of one patient record.

    Each field value is expected in the same cell as its label, separated
    by an embedded newline (e.g. "Name\nJane Doe"). Fields that cannot be
    found remain None.
    """

    def _value_after(cells, label):
        # Scan for a cell containing the label and return the stripped
        # text following the embedded newline, if present.
        for cell in cells:
            if label in cell:
                try:
                    parts = cell.split('\n')
                except AttributeError:
                    # 'cell' supported 'in' but is not a string.
                    print("Error extracting value after key:", cell)
                    continue
                if len(parts) > 1:
                    return parts[1].strip()
        return None

    fields = {
        "Name": None,
        "Patient ID": None,
        "Address": None,
        "Home Phone": None,
        "DOB": None,
        "Gender": None
    }
    for label in fields:
        fields[label] = _value_after(patient_record, label)
    return fields
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def parse_insurance_info(patient_record):
    """
    Extract primary and secondary insurance fields from one patient record.

    Cells are label/value pairs separated by an embedded newline
    (e.g. "Policy Number\nABC123"). Parsing only begins after a cell
    containing 'INSURANCE INFORMATION' is seen.

    :param patient_record: flat list of cell values for one patient.
    :return: dict with primary/secondary insurance, policy and group keys;
             values are None when not found.
    """
    insurance_data = {
        "Primary Insurance": None,
        "Primary Policy Number": None,
        "Primary Group Number": None,
        "Secondary Insurance": None,
        "Secondary Policy Number": None,
        "Secondary Group Number": None
    }

    # State flags:
    # - insurance_section_started: gates all parsing until the section marker.
    # - secondary_insurance_detected: set once a non-empty secondary insurer
    #   is seen; gates secondary policy/group assignment.
    # - group_header_detected: toggled on every 'Group Number' cell to proxy
    #   whether it is the first (primary) or second (secondary) occurrence.
    #   NOTE(review): this toggle assumes exactly two 'Group Number' cells in
    #   order; a missing primary group cell would misattribute the secondary.
    insurance_section_started = False
    secondary_insurance_detected = False
    group_header_detected = False

    for element in patient_record:
        if 'INSURANCE INFORMATION' in element:
            insurance_section_started = True
            secondary_insurance_detected = False
            continue

        if insurance_section_started:
            split_element = element.split('\n')
            if 'Primary Insurance' in element:
                insurance_data["Primary Insurance"] = element.split('\n')[1].strip() if len(element.split('\n')) > 1 else None
            elif 'Secondary Insurance' in element and len(split_element) > 1 and split_element[1].strip():
                # Only a non-empty value counts as having a secondary insurer.
                insurance_data["Secondary Insurance"] = element.split('\n')[1].strip() if len(element.split('\n')) > 1 else None
                secondary_insurance_detected = True
            elif 'Policy Number' in element:
                split_element = element.split('\n')
                if len(split_element) > 1:
                    # First policy number fills the primary slot; later ones
                    # fill the secondary slot only if a secondary insurer exists.
                    if not insurance_data["Primary Policy Number"]:
                        insurance_data["Primary Policy Number"] = split_element[1].strip()
                    elif secondary_insurance_detected and not insurance_data["Secondary Policy Number"]:
                        insurance_data["Secondary Policy Number"] = split_element[1].strip()
            elif 'Group Number' in element:
                #print("Group Detected: ", element, secondary_insurance_detected)
                group_header_detected = not group_header_detected # toggle between T/F to proxy as first or second position.
                split_element = element.split('\n')
                if len(split_element) > 1:
                    if not insurance_data["Primary Group Number"] and group_header_detected:
                        insurance_data["Primary Group Number"] = split_element[1].strip()
                    elif secondary_insurance_detected and not insurance_data["Secondary Group Number"] and not group_header_detected:
                        insurance_data["Secondary Group Number"] = split_element[1].strip()

    return insurance_data
|
|
113
|
+
|
|
114
|
+
def structure_data(patient_data_list):
    """
    Build a DataFrame of structured patient records.

    Each raw record is passed through extract_patient_data() and
    parse_insurance_info(); the two result dicts are merged into one row.

    :param patient_data_list: list of raw patient records (flat cell lists).
    :return: DataFrame with one row per patient and a fixed column order.
    """
    # Fixed output column order expected by downstream consumers.
    column_headers = [
        "Name",
        "Patient ID",
        "Address",
        "Home Phone",
        "DOB",
        "Gender",
        "Primary Insurance",
        "Primary Policy Number",
        "Primary Group Number",
        "Secondary Insurance",
        "Secondary Policy Number",
        "Secondary Group Number"
    ]

    structured_patient_records = []
    for record in patient_data_list:
        # Merge demographics with insurance details for this patient.
        row = dict(extract_patient_data(record))
        row.update(parse_insurance_info(record))
        structured_patient_records.append(row)

    return pd.DataFrame(structured_patient_records, columns=column_headers)
|
|
151
|
+
|
|
152
|
+
def validate_data(data_frame):
    """
    Run QA/validation checks on the structured patient DataFrame.

    Checks: missing values per column, DOB format (YYYY-MM-DD), home phone
    format (+N-NNN-NNN-NNNN), and a basic DOB describe() for anomaly review.
    Results are printed; the DataFrame is returned unchanged.

    :param data_frame: structured DataFrame with 'DOB' and 'Home Phone' columns.
    :return: the same DataFrame (unmodified).
    """
    # Completeness: count missing values per column.
    missing_values_check = data_frame.isnull().sum()

    def _matches(pattern, value):
        # Missing values are treated as valid (nothing to check). Non-string
        # values cannot match a text format; previously they raised TypeError
        # inside re.match instead of being flagged as inconsistent.
        if pd.isnull(value):
            return True
        return isinstance(value, str) and bool(re.match(pattern, value))

    # Consistency: verify text formats are uniform.
    date_format_check = data_frame['DOB'].apply(lambda x: _matches(r'\d{4}-\d{2}-\d{2}', x))
    phone_format_check = data_frame['Home Phone'].apply(lambda x: _matches(r'\+\d-\d{3}-\d{3}-\d{4}', x))

    # Anomaly detection: basic summary statistics of DOB for manual review.
    dob_anomalies_check = data_frame['DOB'].describe()

    validation_results = {
        "Missing Values Check": missing_values_check,
        "Date Format Consistency": all(date_format_check),
        "Phone Format Consistency": all(phone_format_check),
        "DOB Anomalies Check": dob_anomalies_check
    }

    print(validation_results)  # Display validation results
    return data_frame  # Return the validated DataFrame
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
# Main function to orchestrate the cleaning process
|
|
178
|
+
def clean_patient_data(file_path):
    """
    End-to-end cleaning pipeline: load -> segment -> structure -> validate.

    :param file_path: path to the raw exported CSV.
    :return: validated DataFrame of structured patient records.
    """
    raw_rows = load_csv(file_path)
    segmented_records = segment_data(raw_rows)
    structured_frame = structure_data(segmented_records)
    return validate_data(structured_frame)
|
|
192
|
+
|
|
193
|
+
# Path to the CSV file with escaped backslashes (hard-coded for the
# author's machine — TODO: accept these via command-line arguments).
file_path_sxpatient = 'C:\\Users\\danie\\OneDrive\\Desktop\\CSV02012024.CSV'
# Define the file path for the output CSV file
output_file_path = 'G:\\My Drive\\CocoWave\\XP typing bot\\cleaned_FEB01SXcsv_group.csv'

if __name__ == "__main__":
    # Run the pipeline only when executed as a script, so importing this
    # module no longer touches the filesystem as a side effect.
    cleaned_patient_data = clean_patient_data(file_path_sxpatient)

    # Display the first few rows of the cleaned and validated data to verify the output
    print(cleaned_patient_data.head())

    # Save the processed data to a CSV file
    cleaned_patient_data.to_csv(output_file_path, index=False)

    print(f"Processed data saved to {output_file_path}")

# Development Roadmap

# Do not delete leading zeros from insurance numbers
|
MediBot/__init__.py
ADDED
|
File without changes
|
MediBot/update_json.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import sys
|
|
3
|
+
from collections import OrderedDict
|
|
4
|
+
|
|
5
|
+
def update_csv_path(json_file, new_path):
    """
    Set the CSV_FILE_PATH key in a JSON config file, preserving key order.

    Exits the process with status 1 on any read, parse, or write failure
    (this function is used as a command-line helper, see __main__).

    :param json_file: path to the JSON configuration file to update.
    :param new_path: new CSV path to store (stored verbatim).
    """
    try:
        with open(json_file, 'r', encoding='utf-8') as file:
            try:
                # OrderedDict keeps the original key order in the rewritten file.
                data = json.load(file, object_pairs_hook=OrderedDict)
            except ValueError as decode_err:
                print("Error decoding JSON file '{}': {}".format(json_file, decode_err))
                sys.exit(1)

        # Store the path verbatim: json.dump escapes backslashes during
        # serialization, so the previous pre-doubling ("\\" -> "\\\\")
        # corrupted the value — it round-tripped with doubled backslashes.
        data['CSV_FILE_PATH'] = new_path

        with open(json_file, 'w', encoding='utf-8') as file:
            try:
                json.dump(data, file, ensure_ascii=False, indent=4)
            except ValueError as encode_err:
                print("Error encoding JSON data to file '{}': {}".format(json_file, encode_err))
                sys.exit(1)

    except IOError as io_err:
        print("Error accessing file '{}': {}".format(json_file, io_err))
        sys.exit(1)
    except Exception as e:
        print("An unexpected error occurred: {}".format(e))
        sys.exit(1)
|
|
35
|
+
|
|
36
|
+
if __name__ == "__main__":
    # Expect exactly two arguments: the JSON config and the new CSV path.
    if len(sys.argv) != 3:
        print("Usage: update_json.py <path_to_json_file> <new_csv_path>")
        sys.exit(1)
    update_csv_path(sys.argv[1], sys.argv[2])
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
import subprocess
|
|
2
|
+
import sys
|
|
3
|
+
from tqdm import tqdm
|
|
4
|
+
|
|
5
|
+
def upgrade_medicafe(package):
    """
    Upgrade *package* in place via pip, showing a simple progress bar.

    Exits the process with status 1 if pip fails.
    """
    pip_command = [
        sys.executable, '-m', 'pip', 'install', '--upgrade', package,
        '--no-deps', '--disable-pip-version-check'
    ]
    try:
        with tqdm(total=100, desc="Upgrading %s" % package, unit="%") as progress_bar:
            # pip's output is suppressed; the bar only signals completion.
            subprocess.check_call(pip_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            # pip gives no incremental feedback, so jump straight to done.
            progress_bar.update(100 - progress_bar.n)
            print("Update successful.")
    except subprocess.CalledProcessError:
        print("Update failed. Please check your internet connection and try again later.")
        sys.exit(1)
|
|
16
|
+
|
|
17
|
+
if __name__ == "__main__":
    # This helper only ever updates the medicafe package itself.
    upgrade_medicafe("medicafe")
|
MediLink/MediLink.py
ADDED
|
@@ -0,0 +1,277 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import MediLink_ConfigLoader
|
|
3
|
+
import MediLink_837p_encoder
|
|
4
|
+
import logging
|
|
5
|
+
import MediLink_Down
|
|
6
|
+
import MediLink_Up
|
|
7
|
+
|
|
8
|
+
# For UI Functions
|
|
9
|
+
import os
|
|
10
|
+
import MediLink_UI # Import UI module for handling all user interfaces
|
|
11
|
+
|
|
12
|
+
"""
|
|
13
|
+
Development Tasks for Backend Enhancement in MediSoft Claims Submittal (MediLink) Script:
|
|
14
|
+
|
|
15
|
+
Implement dynamic configurations for multiple endpoints (Availity, Optum, PNT Data) with environmental settings support.
|
|
16
|
+
Enhance file detection with detailed logging and introduce integrity checks for pre-processing validation.
|
|
17
|
+
Verify file transmissions via WinSCP log analysis for successful endpoint acknowledgments and secure data transfer.
|
|
18
|
+
Automate response file handling from endpoints and integrate feedback into MediSoft with exception alerts.
|
|
19
|
+
De-persisting Intermediate Files.
|
|
20
|
+
When transmissions fail, there is some retaining of patient data in memory or something that seems to default
|
|
21
|
+
any new endpoint changes to Optum. May need to "de-confirm" patients, but leave the suggested endpoints as the previously
|
|
22
|
+
confirmed endpoints. This should be similar logic to if the user made a mistake and wants to go back and fix it.
|
|
23
|
+
These tasks involve backend enhancements such as dynamic configurations, file detection improvements, file transmission verification, automation of response file handling, and management of intermediate files and transmission failures.
|
|
24
|
+
|
|
25
|
+
TODO Crosswalk should be to PayerID key vs Medisoft:Endpoint.
|
|
26
|
+
TODO Availity has a response file that says "File was received at TIME. File was sent for processing." as a confirmation
|
|
27
|
+
that sits in the SendFiles folder after a submittal.
|
|
28
|
+
|
|
29
|
+
BUG Suggested Endpoint when you say 'n' to proceed with transmission is not getting updated with the endpoint
|
|
30
|
+
that was selected previously by the user. However, when we go back to the confirmation list, we do have a persist of the assignment.
|
|
31
|
+
This can be confusing for the user.
|
|
32
|
+
"""
|
|
33
|
+
|
|
34
|
+
# Setup basic logging
# NOTE(review): the trailing '\n' in the format string double-spaces every
# emitted log line — confirm this is intentional.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s\n')
|
|
36
|
+
|
|
37
|
+
def detect_and_display_file_summaries(directory_path, config, crosswalk):
    """
    Detects new claim files in the specified directory and prepares detailed
    patient data for processing, including a suggested endpoint per patient
    derived from the insurance crosswalk.

    :param directory_path: Path to the directory containing files to be detected.
    :param config: Configuration settings loaded from a JSON file.
    :param crosswalk: Crosswalk dictionary mapping insurances to endpoints.
    :return: A tuple (new file paths, detailed patient data). Both elements
        are empty lists when nothing is detected.
    """
    new_files = detect_new_files(directory_path)
    if not new_files:
        print(" No new claims detected. Check Medisoft claims output.\n")
        # Previously returned (False, []); an empty list is equally falsy
        # for callers but keeps the first element's type consistent.
        return [], []

    detailed_patient_data = []  # Accumulates per-patient detail across files
    for file_path in new_files:
        detailed_data = extract_and_suggest_endpoint(file_path, config, crosswalk)
        detailed_patient_data.extend(detailed_data)

    return new_files, detailed_patient_data
|
|
58
|
+
|
|
59
|
+
def detect_new_files(directory_path, file_extension='.DAT'):
    """
    Scan the specified directory (non-recursively) for files with a given
    extension.

    :param directory_path: Path to the directory containing files to be detected.
    :param file_extension: Filename suffix to match. Defaults to '.DAT'
        (the previous docstring incorrectly claimed '.csv'). Matching is
        case-sensitive, as before.
    :return: A list of full paths to matching files.
    """
    return [os.path.join(directory_path, filename)
            for filename in os.listdir(directory_path)
            if filename.endswith(file_extension)]
|
|
73
|
+
|
|
74
|
+
def extract_and_suggest_endpoint(file_path, config, crosswalk):
    """
    Reads a fixed-width claims file and builds an enriched per-patient dict
    for each record: the parsed claim fields plus file path, surgery date,
    patient id/name, billed amount, primary insurance, and a suggested
    submission endpoint looked up in the crosswalk.

    Parameters:
    - file_path: Path to the fixed-width file.
    - config: Full configuration dict; the 'MediLink_Config' sub-dict is used.
    - crosswalk: Crosswalk dictionary loaded from a JSON file.

    Returns:
    - A list of dicts retaining detailed patient claim data, including
      'suggested_endpoint' for downstream confirmation.
    """
    detailed_patient_data = []
    medilink_config = config.get('MediLink_Config', {})  # hoisted: used twice per record

    for personal_info, insurance_info, service_info in MediLink_837p_encoder.read_fixed_width_data(file_path, medilink_config):
        parsed_data = MediLink_837p_encoder.parse_fixed_width_data(personal_info, insurance_info, service_info, medilink_config)

        primary_insurance = parsed_data.get('INAME')

        # TODO This suggested endpoint should be a payerid_to_endpoint_mapping.
        # .get() guards against a crosswalk missing the mapping table entirely
        # (previously a KeyError); 'AVAILITY' remains the fallback endpoint.
        suggested_endpoint = crosswalk.get('insurance_to_endpoint_mapping', {}).get(primary_insurance, 'AVAILITY')

        # Enrich a copy so the original parsed record is not mutated.
        detailed_data = parsed_data.copy()
        detailed_data.update({
            'file_path': file_path,
            'patient_id': parsed_data.get('CHART'),
            'surgery_date': parsed_data.get('DATE'),
            'patient_name': ' '.join([parsed_data.get(key, '') for key in ['FIRST', 'MIDDLE', 'LAST']]),
            'amount': parsed_data.get('AMOUNT'),
            'primary_insurance': primary_insurance,
            'suggested_endpoint': suggested_endpoint
        })
        detailed_patient_data.append(detailed_data)

    return detailed_patient_data
|
|
114
|
+
|
|
115
|
+
def organize_patient_data_by_endpoint(detailed_patient_data):
    """
    Group detailed patient entries by endpoint for per-endpoint processing.

    A patient's confirmed endpoint wins; the suggested endpoint is used as
    a fallback when no confirmation has been recorded.

    :param detailed_patient_data: list of per-patient dicts.
    :return: dict mapping endpoint name -> list of patient dicts.
    """
    grouped = {}
    for entry in detailed_patient_data:
        # Prefer the user-confirmed endpoint over the suggestion.
        endpoint = entry['confirmed_endpoint'] if 'confirmed_endpoint' in entry else entry['suggested_endpoint']
        grouped.setdefault(endpoint, []).append(entry)
    return grouped
|
|
133
|
+
|
|
134
|
+
def check_for_new_remittances(config):
    """
    Poll every configured endpoint for new remittance files and report
    the per-endpoint outcome to the console.
    """
    print("\nChecking for new files across all endpoints...")
    for endpoint_key, endpoint_info in config['MediLink_Config']['endpoints'].items():
        try:
            # Delegate the actual download to MediLink_Down for this endpoint.
            era_path = MediLink_Down.main(desired_endpoint=endpoint_key)
            # BUG (carried over): success is assumed here; the WinSCP log is
            # not inspected to confirm the transfer actually completed.
            print("New remittances for {} completed successfully.".format(endpoint_info['name']))
            print("Results saved to: {}\n".format(era_path))
        except Exception as e:
            print("An error occurred while checking remittances for {}: {}".format(endpoint_info['name'], e))
|
|
146
|
+
|
|
147
|
+
def user_decision_on_suggestions(detailed_patient_data, config):
    """
    Show all patient summaries with their suggested endpoints, then either
    confirm every suggestion (user accepts) or hand control to the manual
    adjustment flow (user declines).
    """
    MediLink_UI.display_patient_summaries(detailed_patient_data)

    # Accepting applies every suggestion; declining opens manual adjustment.
    if MediLink_UI.ask_for_proceeding_with_endpoints():
        return confirm_all_suggested_endpoints(detailed_patient_data)
    return select_and_adjust_files(detailed_patient_data, config)
|
|
164
|
+
|
|
165
|
+
def confirm_all_suggested_endpoints(detailed_patient_data):
    """
    Promote each patient's suggested endpoint to the confirmed endpoint,
    leaving already-confirmed entries untouched. Mutates the dicts in place
    and returns the same list.
    """
    for entry in detailed_patient_data:
        if 'confirmed_endpoint' not in entry:
            entry['confirmed_endpoint'] = entry['suggested_endpoint']
    return detailed_patient_data
|
|
173
|
+
|
|
174
|
+
def select_and_adjust_files(detailed_patient_data, config):
    """
    Let the user pick individual patients and override their suggested
    endpoints via the UI module. Mutates and returns the same list.
    """
    MediLink_UI.display_patient_options(detailed_patient_data)
    selected_indices = MediLink_UI.get_selected_indices(len(detailed_patient_data))

    # Menu number (as string) -> endpoint display name, from the live config.
    endpoints = config['MediLink_Config']['endpoints']
    endpoint_mapping = {}
    for position, endpoint in enumerate(endpoints, start=1):
        endpoint_mapping[str(position)] = endpoints[endpoint]['name']

    for index in selected_indices:
        patient = detailed_patient_data[index]
        MediLink_UI.display_patient_for_adjustment(patient['patient_name'], patient.get('suggested_endpoint', 'N/A'))

        if MediLink_UI.get_endpoint_decision() == 'y':
            MediLink_UI.display_endpoint_options(endpoint_mapping)
            new_endpoint_choice = MediLink_UI.get_new_endpoint_choice()
            if new_endpoint_choice in endpoint_mapping:
                patient['confirmed_endpoint'] = endpoint_mapping[new_endpoint_choice]
                print("Endpoint changed to {0} for patient {1}.".format(patient['confirmed_endpoint'], patient['patient_name']))
            else:
                print("Invalid selection. Keeping the suggested endpoint.")
        else:
            # User kept the suggestion; record it as confirmed.
            patient['confirmed_endpoint'] = patient.get('suggested_endpoint', 'N/A')

    return detailed_patient_data
|
|
208
|
+
|
|
209
|
+
def main_menu():
    """
    Initializes the main menu loop and handles the overall program flow,
    including loading configurations and managing user input for menu selections.
    """
    # Load configuration settings and display the initial welcome message.
    config, crosswalk = MediLink_ConfigLoader.load_configuration() # BUG does this need an argument?

    # Display Welcome Message
    MediLink_UI.display_welcome()

    # Normalize the directory path for file operations.
    directory_path = os.path.normpath(config['MediLink_Config']['inputFilePath'])

    # Detect new files and collect detailed patient data if available.
    # File detection happens once, before the loop; later-arriving files are
    # not picked up without restarting the program.
    new_files, detailed_patient_data = detect_and_display_file_summaries(directory_path, config, crosswalk)

    while True:
        # Define the menu options. Base options include checking remittances and exiting the program.
        options = ["Check for new remittances", "Exit"]
        # If new files are detected, add the option to submit claims.
        if new_files:
            options.insert(1, "Submit claims")
        # NOTE(review): the choice handling below depends on this exact
        # ordering — '2' means "Submit claims" only when new_files is truthy,
        # otherwise '2' means "Exit".

        # Display the dynamically adjusted menu options.
        MediLink_UI.display_menu(options)
        # Retrieve user choice and handle it.
        choice = MediLink_UI.get_user_choice()

        if choice == '1':
            # Handle remittance checking.
            check_for_new_remittances(config)
        elif choice == '2' and new_files:
            # Handle the claims submission flow if new files are present.
            handle_submission(detailed_patient_data, config)
        elif choice == '3' or (choice == '2' and not new_files):
            # Exit the program if the user chooses to exit or if no new files are present.
            MediLink_UI.display_exit_message()
            break
        else:
            # Display an error message if the user's choice does not match any valid option.
            MediLink_UI.display_invalid_choice()
|
|
251
|
+
|
|
252
|
+
def handle_submission(detailed_patient_data, config):
    """
    Drive the claim submission flow: confirm/adjust endpoints with the user,
    organize claims per endpoint, then transmit after a final confirmation
    and a connectivity check.
    """
    # Let the user confirm or adjust the suggested endpoints.
    adjusted_data = user_decision_on_suggestions(detailed_patient_data, config)
    # Any entries still unconfirmed fall back to their suggestions.
    confirmed_data = confirm_all_suggested_endpoints(adjusted_data)

    if not confirmed_data:
        return

    # Group claims by their (now confirmed) endpoint for submission.
    organized_data = organize_patient_data_by_endpoint(confirmed_data)

    if not MediLink_Up.confirm_transmission(organized_data):
        print("Submission cancelled. No changes were made.")
        return

    if MediLink_Up.check_internet_connection():
        MediLink_Up.submit_claims(organized_data, config)
    else:
        print("Internet connection error. Please ensure you're connected and try again.")
|
|
275
|
+
|
|
276
|
+
# Run the interactive menu only when executed directly (not on import).
if __name__ == "__main__":
    main_menu()
|
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
import csv
|
|
2
|
+
import os
|
|
3
|
+
import sys
|
|
4
|
+
|
|
5
|
+
"""
|
|
6
|
+
This script processes a 277 healthcare claim status response file, extracting and structuring key information
|
|
7
|
+
about each claim into a CSV format. The goal is to interpret acknowledgment returns and provide a readable receipt
|
|
8
|
+
of claim statuses for further analysis or record-keeping.
|
|
9
|
+
|
|
10
|
+
Extracted fields and their sources from the 277 transaction set include:
|
|
11
|
+
- Clearing House: Extracted from 'NM1' segment where entity identifier code is '41' (payer) as the clearinghouse or payer name.
|
|
12
|
+
- Received Date: Extracted from the 'DTP' segment with qualifier '050' indicating the date claim information was received.
|
|
13
|
+
- Claim Status Tracking #: Extracted from the 'TRN' segment, representing a unique identifier used to track the claim.
|
|
14
|
+
- Billed Amount: Extracted from the 'AMT' segment with qualifier 'YU' representing the total billed amount.
|
|
15
|
+
- Date of Service: Extracted from the 'DTP' segment with qualifier '472', indicating the date services were rendered.
|
|
16
|
+
- Last and First Name: Extracted from 'NM1' segment where entity identifier is 'QC' (patient) to obtain patient's last and first names.
|
|
17
|
+
- Acknowledged Amount: Extracted from the 'STC' segment, specifically the monetary amount acknowledged.
|
|
18
|
+
- Status: Extracted from the 'STC' segment, indicating the processing status of the claim.
|
|
19
|
+
|
|
20
|
+
Each record corresponds to a single claim, and the script consolidates these records from the raw 277 file into a structured CSV file.
|
|
21
|
+
The CSV output contains headers corresponding to the above fields for ease of review and use in subsequent processes.
|
|
22
|
+
|
|
23
|
+
Prerequisites:
|
|
24
|
+
- Python 3.x
|
|
25
|
+
- Access to a filesystem for reading input files and writing output CSV files.
|
|
26
|
+
|
|
27
|
+
Usage:
|
|
28
|
+
The script requires the path to the 277 file as input and specifies an output directory for the CSV files.
|
|
29
|
+
Example command-line usage:
|
|
30
|
+
python3 MediLink_277_decoder.py input_file.txt output_directory
|
|
31
|
+
"""
|
|
32
|
+
|
|
33
|
+
def parse_277_content(content):
    """
    Parse the '~'-delimited segments of a 277 claim-status file into a list
    of per-claim dicts.

    Each 'HL' segment starts a new hierarchical level and flushes the claim
    collected so far. Segment fields are '*'-delimited; all field accesses
    are length-guarded so truncated/malformed segments are skipped instead
    of raising IndexError (as the previous version did).

    :param content: full 277 file content with newlines already removed.
    :return: list of dicts keyed by the CSV column names used downstream.
    """
    records = []
    current_record = {}

    for segment in content.split('~'):
        parts = segment.split('*')
        tag = parts[0]

        if tag == 'HL':
            # New hierarchical level -> flush the claim collected so far.
            if current_record:
                records.append(current_record)
                current_record = {}
        elif tag == 'NM1' and len(parts) > 3:
            if parts[1] == 'QC' and len(parts) > 4:  # Patient information
                current_record['Last'] = parts[3]
                current_record['First'] = parts[4]
            elif parts[1] == '41':  # Payer / clearinghouse information
                current_record['Clearing House'] = parts[3]
        elif tag == 'TRN' and len(parts) > 2:
            current_record['Claim Status Tracking #'] = parts[2]
        elif tag == 'STC' and len(parts) > 4:
            current_record['Status'] = parts[1]
            current_record['Acknowledged Amt'] = parts[4]
        elif tag == 'DTP' and len(parts) > 3:
            if parts[1] == '472':  # Service date
                current_record['Date of Service'] = parts[3]
            elif parts[1] == '050':  # Received date
                current_record['Received Date'] = parts[3]
        elif tag == 'AMT' and len(parts) > 2:
            if parts[1] == 'YU':
                current_record['Billed Amt'] = parts[2]

    # Flush the final claim (no trailing HL segment follows it).
    if current_record:
        records.append(current_record)

    return records
|
|
67
|
+
|
|
68
|
+
def write_records_to_csv(records, output_file_path):
    """
    Write the decoded claim-status records to a CSV file with a fixed
    header order; missing fields are left blank.
    """
    fieldnames = ['Clearing House', 'Received Date', 'Claim Status Tracking #',
                  'Billed Amt', 'Date of Service', 'Last', 'First',
                  'Acknowledged Amt', 'Status']
    with open(output_file_path, 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(records)
|
|
75
|
+
|
|
76
|
+
def main(file_path, output_directory):
    """
    Decode one 277 file into a CSV placed in *output_directory*.

    The output filename is the input's basename with '_decoded.csv' appended.
    """
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)

    output_file_path = os.path.join(output_directory, os.path.basename(file_path) + '_decoded.csv')

    # Segments may be wrapped across lines; strip newlines before parsing.
    with open(file_path, 'r') as source:
        content = source.read().replace('\n', '')

    write_records_to_csv(parse_277_content(content), output_file_path)
    print("Decoded data written to {}".format(output_file_path))
|
|
88
|
+
|
|
89
|
+
if __name__ == "__main__":
    # Require the input file path; previously a missing argument raised an
    # uncaught IndexError. An optional second argument overrides the output
    # directory (default 'output', as before).
    if len(sys.argv) < 2:
        print("Usage: python MediLink_277_decoder.py <277_file> [output_directory]")
        sys.exit(1)
    file_path = sys.argv[1]
    output_directory = sys.argv[2] if len(sys.argv) > 2 else 'output'
    main(file_path, output_directory)
|