medicafe 0.240419.2__zip
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of medicafe might be problematic. Click here for more details.
- medicafe-0.240419.2/LICENSE +21 -0
- medicafe-0.240419.2/MANIFEST.in +2 -0
- medicafe-0.240419.2/MediBot/MediBot.bat +70 -0
- medicafe-0.240419.2/MediBot/MediBot.py +316 -0
- medicafe-0.240419.2/MediBot/MediBot_Charges.py +28 -0
- medicafe-0.240419.2/MediBot/MediBot_Preprocessor.py +283 -0
- medicafe-0.240419.2/MediBot/MediBot_UI.py +190 -0
- medicafe-0.240419.2/MediBot/MediBot_dataformat_library.py +145 -0
- medicafe-0.240419.2/MediBot/MediPost.py +5 -0
- medicafe-0.240419.2/MediBot/PDF_to_CSV_Cleaner.py +211 -0
- medicafe-0.240419.2/MediBot/__init__.py +0 -0
- medicafe-0.240419.2/MediBot/update_json.py +43 -0
- medicafe-0.240419.2/MediBot/update_medicafe.py +19 -0
- medicafe-0.240419.2/MediLink/MediLink.py +277 -0
- medicafe-0.240419.2/MediLink/MediLink_277_decoder.py +92 -0
- medicafe-0.240419.2/MediLink/MediLink_837p_encoder.py +392 -0
- medicafe-0.240419.2/MediLink/MediLink_837p_encoder_library.py +679 -0
- medicafe-0.240419.2/MediLink/MediLink_ConfigLoader.py +69 -0
- medicafe-0.240419.2/MediLink/MediLink_DataMgmt.py +206 -0
- medicafe-0.240419.2/MediLink/MediLink_Down.py +151 -0
- medicafe-0.240419.2/MediLink/MediLink_ERA_decoder.py +192 -0
- medicafe-0.240419.2/MediLink/MediLink_Gmail.py +4 -0
- medicafe-0.240419.2/MediLink/MediLink_Scheduler.py +132 -0
- medicafe-0.240419.2/MediLink/MediLink_StatusCheck.py +4 -0
- medicafe-0.240419.2/MediLink/MediLink_UI.py +116 -0
- medicafe-0.240419.2/MediLink/MediLink_Up.py +117 -0
- medicafe-0.240419.2/MediLink/MediLink_batch.bat +7 -0
- medicafe-0.240419.2/MediLink/Soumit_api.py +22 -0
- medicafe-0.240419.2/MediLink/__init__.py +0 -0
- medicafe-0.240419.2/PKG-INFO +11 -0
- medicafe-0.240419.2/README.md +28 -0
- medicafe-0.240419.2/medicafe.egg-info/PKG-INFO +11 -0
- medicafe-0.240419.2/medicafe.egg-info/SOURCES.txt +37 -0
- medicafe-0.240419.2/medicafe.egg-info/dependency_links.txt +1 -0
- medicafe-0.240419.2/medicafe.egg-info/not-zip-safe +1 -0
- medicafe-0.240419.2/medicafe.egg-info/requires.txt +5 -0
- medicafe-0.240419.2/medicafe.egg-info/top_level.txt +2 -0
- medicafe-0.240419.2/setup.cfg +5 -0
- medicafe-0.240419.2/setup.py +28 -0
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import json
|
|
3
|
+
import logging
|
|
4
|
+
from datetime import datetime
|
|
5
|
+
from collections import OrderedDict
|
|
6
|
+
import sys
|
|
7
|
+
|
|
8
|
+
"""
|
|
9
|
+
This function should be generalizable to have an initialization script over all the Medi* functions
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
# Setup basic logging.
|
|
13
|
+
# BUG Consolidate this with MediLink_837p_encoder_library.log
|
|
14
|
+
def setup_logger(local_storage_path):
    """Configure root logging to a date-stamped file under local_storage_path.

    Any handlers already attached to the root logger are removed first so
    that basicConfig is guaranteed to install the new file handler.
    """
    # e.g. "MediLink_Down_Process_04192024.log" for April 19, 2024
    log_name = datetime.now().strftime("MediLink_Down_Process_%m%d%Y.log")
    log_path = os.path.join(local_storage_path, log_name)

    # basicConfig is a no-op when the root logger already has handlers,
    # so clear them out before reconfiguring.
    while logging.root.handlers:
        logging.root.removeHandler(logging.root.handlers[0])

    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        filename=log_path,   # direct logging to a file in local_storage_path
        filemode='a',        # append so repeated runs on one day share a log
    )
|
|
34
|
+
|
|
35
|
+
def load_configuration(config_path=os.path.join(os.path.dirname(__file__), '..', 'json', 'config.json'), crosswalk_path=os.path.join(os.path.dirname(__file__), '..', 'json', 'crosswalk.json')):
    """
    Loads endpoint configuration, credentials, and other settings from JSON files.

    Args:
        config_path: Path to the main configuration JSON file. Defaults to
            ../json/config.json relative to this module.
        crosswalk_path: Path to the crosswalk JSON file. Defaults to
            ../json/crosswalk.json relative to this module.

    Returns: A tuple containing dictionaries with configuration settings for the main config and crosswalk.

    Exits the process (sys.exit(1)) when either file is missing, unparsable,
    or the config lacks the required 'MediLink_Config' section.

    FIX: previously both paths were overwritten with hard-coded
    "G:\\My Drive\\..." values (marked "BUG HARDCODE FOR NOW"), which made
    the parameters dead and broke every caller that passed its own path.
    The caller-supplied / default paths are now honored.
    """
    try:
        # OrderedDict preserves the on-disk key order, which downstream code
        # may rely on when iterating the configuration.
        with open(config_path, 'r') as config_file:
            config = json.load(config_file, object_pairs_hook=OrderedDict)
        if 'MediLink_Config' not in config:
            raise KeyError("MediLink_Config key is missing from the loaded configuration.")

        with open(crosswalk_path, 'r') as crosswalk_file:
            crosswalk = json.load(crosswalk_file)

        return config, crosswalk
    except json.JSONDecodeError as e:
        print("Error parsing JSON file: {}".format(e))
        sys.exit(1)  # Exit the script due to a critical error in configuration loading
    except FileNotFoundError:
        print("One or both JSON files not found. Config: {}, Crosswalk: {}".format(config_path, crosswalk_path))
        sys.exit(1)  # Exit the script due to a critical error in configuration loading
    except KeyError as e:
        print("Critical configuration is missing: {}".format(e))
        sys.exit(1)  # Exit the script due to a critical error in configuration loading
    except Exception as e:
        print("An unexpected error occurred while loading the configuration: {}".format(e))
        sys.exit(1)  # Exit the script due to a critical error in configuration loading
|
|
@@ -0,0 +1,206 @@
|
|
|
1
|
+
import csv
|
|
2
|
+
import os
|
|
3
|
+
from datetime import datetime, timedelta
|
|
4
|
+
import logging
|
|
5
|
+
import MediLink_837p_encoder_library
|
|
6
|
+
import subprocess # BUG Currently disabled for testing.
|
|
7
|
+
import logging
|
|
8
|
+
|
|
9
|
+
# Helper function to slice and strip values
|
|
10
|
+
def slice_data(data, slices):
    """Extract and whitespace-strip named fields from a fixed-width record.

    Args:
        data: The fixed-width source string.
        slices: Mapping of field name -> slice bounds (start, stop[, step]).

    Returns:
        Dict mapping each field name to its stripped substring of data.
    """
    extracted = {}
    for field, bounds in slices.items():
        extracted[field] = data[slice(*bounds)].strip()
    return extracted
|
|
13
|
+
|
|
14
|
+
# Function to parse fixed-width Medisoft output and extract claim data
|
|
15
|
+
def parse_fixed_width_data(personal_info, insurance_info, service_info, config):
    """Parse one patient's three fixed-width Medisoft lines into a flat dict.

    Args:
        personal_info: Fixed-width line with the patient's personal fields.
        insurance_info: Fixed-width line with the insurance fields.
        service_info: Fixed-width line with the service fields.
        config: Loaded configuration containing 'fixedWidthSlices' slice maps.

    Returns:
        Dict merging all three segments' sliced-and-stripped fields.
    """
    slice_config = config['fixedWidthSlices']
    segment_pairs = (
        (personal_info, slice_config['personal_slices']),
        (insurance_info, slice_config['insurance_slices']),
        (service_info, slice_config['service_slices']),
    )

    parsed_data = {}
    for raw_line, slice_map in segment_pairs:
        parsed_data.update(slice_data(raw_line, slice_map))

    MediLink_837p_encoder_library.log("Successfully parsed data from segments", config, level="INFO")

    return parsed_data
|
|
30
|
+
|
|
31
|
+
# Function to read fixed-width Medisoft output and extract claim data
|
|
32
|
+
def read_fixed_width_data(file_path, config):
    """Yield (personal, insurance, service) line triples from a fixed-width
    Medisoft export file.

    Every three consecutive non-blank lines form one patient record. Blank
    lines are skipped without resetting a partially collected record.
    """
    pending = []  # lines collected for the current patient record
    with open(file_path, 'r') as source:
        for raw_line in source:
            text = raw_line.strip()
            if not text:
                # Blank separator line: keep any partial record intact.
                continue
            pending.append(text)
            if len(pending) < 3:
                continue
            MediLink_837p_encoder_library.log("Successfully read data from file: {}".format(file_path), config, level="INFO")
            yield tuple(pending)
            del pending[:]  # start collecting the next patient record
|
|
49
|
+
|
|
50
|
+
def consolidate_csvs(source_directory):
    """
    Consolidate all CSV files modified within the last day into one
    day-stamped CSV (ERA_MMDDYY.csv) inside source_directory, deleting each
    source CSV after its rows are absorbed.

    This default overwrites any existing CSV for the same day. We want this for the automated runs but want to switch through
    the user interaction option if we're running interactive. This has not been implemented, but the helper function exists.

    Args:
        source_directory: Directory scanned for recent .csv files.

    Returns:
        The path of the consolidated CSV file.
    """
    today = datetime.now()
    consolidated_filename = today.strftime("ERA_%m%d%y.csv")
    consolidated_filepath = os.path.join(source_directory, consolidated_filename)

    consolidated_data = []
    header_saved = False

    # Check if the file already exists and log the action
    if os.path.exists(consolidated_filepath):
        MediLink_837p_encoder_library.log("The file {} already exists. It will be overwritten.".format(consolidated_filename))

    for filename in os.listdir(source_directory):
        filepath = os.path.join(source_directory, filename)
        if not filepath.endswith('.csv') or os.path.isdir(filepath) or filepath == consolidated_filepath:
            continue  # Skip non-CSV files, directories, and the target consolidated file itself

        # Skip files not modified within the last day
        modification_time = datetime.fromtimestamp(os.path.getmtime(filepath))
        if modification_time < today - timedelta(days=1):
            continue

        # Read and append data from each CSV
        with open(filepath, 'r', newline='') as csvfile:
            reader = csv.reader(csvfile)
            # FIX: next(reader) on an empty CSV raised StopIteration and
            # aborted the whole consolidation; use the default instead.
            header = next(reader, None)  # Assumes all CSV files have the same header
            if header is not None:
                if not header_saved:  # Save header from the first file
                    consolidated_data.append(header)
                    header_saved = True
                consolidated_data.extend(reader)

        # Delete the source file after its contents have been added to the consolidation list
        os.remove(filepath)

    # Write consolidated data to a new or existing CSV file, overwriting it if it exists
    with open(consolidated_filepath, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerows(consolidated_data)

    MediLink_837p_encoder_library.log("Consolidated CSVs into {}".format(consolidated_filepath))

    return consolidated_filepath
|
|
96
|
+
|
|
97
|
+
def operate_winscp(operation_type, files, endpoint_config, local_storage_path):
    """
    General function to operate WinSCP for uploading or downloading files.

    :param operation_type: 'upload' or 'download'
    :param files: List of files to upload or pattern for files to download.
    :param endpoint_config: Dictionary containing endpoint configuration.
    :param local_storage_path: Base local storage path for logs and files.
    :return: True on success, False when WinSCP.com cannot be located or the
        transfer fails. NOTE: the actual WinSCP invocation is currently
        disabled for testing, so a located executable always yields True.

    # Example of how to call this function for uploads
    upload_files = ['path/to/local/file1.txt', 'path/to/local/file2.txt']
    upload_config = {
        'session_name': 'MySession',
        'remote_directory_up': '/remote/upload/path'
    }

    operate_winscp('upload', upload_files, upload_config, 'path/to/local/storage')

    # Example of how to call this function for downloads
    download_config = {
        'session_name': 'MySession',
        'remote_directory_down': '/remote/download/path'
    }

    operate_winscp('download', None, download_config, 'path/to/local/storage')
    """
    # Resolve the WinSCP executable: prefer the configured path, fall back to
    # the portable install bundled under the current working directory.
    try:
        winscp_path = endpoint_config['winscp_path']
    except KeyError:
        winscp_path = os.path.join(os.getcwd(), "Installers", "WinSCP-Portable", "WinSCP.com")
    except Exception as e:
        # Handle any other exceptions here
        print("An error occurred:", e)
        winscp_path = None

    # FIX: also guard against winscp_path being None (the generic-exception
    # fallback above) — os.path.isfile(None) raises TypeError.
    if not winscp_path or not os.path.isfile(winscp_path):
        logging.error("WinSCP.com not found at {}".format(winscp_path))
        return False

    # Setup logging
    log_filename = "winscp_upload.log" if operation_type == "upload" else "winscp_download.log"
    winscp_log_path = os.path.join(local_storage_path, log_filename)

    # Session and directory setup
    session_name = endpoint_config.get('session_name', '')
    remote_directory = endpoint_config['remote_directory_up'] if operation_type == "upload" else endpoint_config['remote_directory_down']

    # Command building
    command = [
        winscp_path,
        '/log=' + winscp_log_path,
        '/loglevel=1',
        '/command',
        'open {}'.format(session_name),
        'cd /',
        'cd {}'.format(remote_directory)
    ]

    # Add commands to WinSCP script
    # BUG We really need to fix this path situation.
    # Unfortunately, this just needs to be a non-spaced path because WinSCP can't
    # handle the spaces. Also, Windows won't let me use shutil to move the files out of G:\ into C:\ and it it wants an administrator security
    # check or verification thing for me to even move the file by hand so that doesn't work either.
    # command.append("put {}".format("C:\\Z_optumedi_04161742.txt"))
    if operation_type == "upload":
        for file_path in files:
            normalized_path = os.path.normpath(file_path)
            command.append("put \"{}\"".format(normalized_path))
    else:
        command.append('get *')  # Adjust pattern as needed

    command += ['close', 'exit']

    # Execute command
    # BUG: WinSCP execution is currently disabled for testing; restore the
    # subprocess call below to re-enable real transfers.
    # process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
    # stdout, stderr = process.communicate()

    if True:  # process.returncode == 0:
        print("WINSCP IS CURRENTLY DISABLED FOR TESTING.")  # BUG
        logging.info("Files {}ed successfully.".format(operation_type))
        # BUG This return code is a little trigger happy.
        # BUG If the WinSCP command specifies the correct download path, this might not be necessary
        # move_downloaded_files(local_storage_path)
        return True
    else:
        # FIX: the original formatted an undefined 'stderr' here (the
        # subprocess call that produced it is commented out), which would
        # raise NameError instead of reporting the failure.
        logging.error("Failed to {} files.".format(operation_type))
        return False
|
|
185
|
+
|
|
186
|
+
# UNUSED CSV Functions
|
|
187
|
+
def remove_blank_rows_from_csv(csv_file_path):
    """Rewrite a CSV file in place, dropping rows whose cells are all blank."""
    with open(csv_file_path, 'r') as source:
        kept_rows = []
        for row in csv.reader(source):
            # Keep the row if at least one cell has non-whitespace content.
            if any(cell.strip() for cell in row):
                kept_rows.append(row)

    with open(csv_file_path, 'w', newline='') as target:
        csv.writer(target).writerows(kept_rows)
|
|
196
|
+
|
|
197
|
+
def list_chart_numbers_in_existing_file(filepath):
    """Lists the Chart Numbers contained in an existing CSV file."""
    numbers = []
    with open(filepath, 'r', newline='') as handle:
        rows = csv.reader(handle)
        next(rows)  # the header row carries no chart numbers
        for fields in rows:
            # Chart Number is assumed to be in the 3rd column; shorter rows
            # are skipped rather than raising.
            if len(fields) > 2:
                numbers.append(fields[2])
    return numbers
|
|
@@ -0,0 +1,151 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import logging
|
|
3
|
+
import argparse
|
|
4
|
+
import shutil
|
|
5
|
+
from datetime import datetime
|
|
6
|
+
import glob
|
|
7
|
+
import MediLink_ERA_decoder
|
|
8
|
+
from MediLink_DataMgmt import operate_winscp
|
|
9
|
+
import MediLink_ConfigLoader
|
|
10
|
+
|
|
11
|
+
"""
|
|
12
|
+
We need to make another function that figures out claim rejections and tries to solve them.
|
|
13
|
+
|
|
14
|
+
1. Config File Path Adjustment: Ensure the configuration file's path is adaptable for various environments, or clearly document the process for setting this path.
|
|
15
|
+
2. Logging Enhancements: Improve the logging mechanism to offer comprehensive insights through both file and console outputs, aiding in troubleshooting and operational monitoring.
|
|
16
|
+
3. CSV Output Refinement: Update the CSV output structure to include essential ERA data such as Payer Address, ensuring completeness and accuracy of information.
|
|
17
|
+
4. CSV Consolidation Logic: Develop logic for intelligently consolidating CSV outputs from batch-processed ERA files, ensuring coherent and comprehensive data aggregation.
|
|
18
|
+
5. Secure Endpoint Authentication: Establish a secure method for inputting and storing endpoint authentication details, enhancing script security.
|
|
19
|
+
6. Automated Endpoint Processing: Integrate automated looping through configured endpoints for ERA file retrieval, maximizing efficiency and reducing manual oversight.
|
|
20
|
+
7. Configuration Key Accuracy: Audit the script to correct any inaccuracies in configuration key references, ensuring seamless configuration data retrieval.
|
|
21
|
+
"""
|
|
22
|
+
|
|
23
|
+
# Setup basic logging
|
|
24
|
+
def setup_logger(local_storage_path):
    """Configure root logging to a date-stamped file under local_storage_path.

    Existing root handlers are removed first so basicConfig can install the
    new file handler (basicConfig is otherwise a no-op).
    """
    # e.g. "MediLink_Down_Process_04192024.log"
    log_name = datetime.now().strftime("MediLink_Down_Process_%m%d%Y.log")
    log_path = os.path.join(local_storage_path, log_name)

    while logging.root.handlers:
        logging.root.removeHandler(logging.root.handlers[0])

    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        filename=log_path,   # direct logging to a file in local_storage_path
        filemode='a',        # append so repeated runs on one day share a log
    )
|
|
44
|
+
|
|
45
|
+
# Because I can't figure out how to get it to work directly in the WinSCP command.
|
|
46
|
+
# And on the Windows XP machine apparently the default path is C:\\ ...
|
|
47
|
+
# This needs to get fixed. Ugh.
|
|
48
|
+
def move_downloaded_files(local_storage_path, source_directory='C:\\Users\\danie\\OneDrive\\Documents'):
    """Move downloaded .era files into <local_storage_path>/responses.

    Args:
        local_storage_path: Base storage path; files land in its "responses"
            subdirectory (created if missing).
        source_directory: Directory where the downloads were dropped.
            Defaults to the previously hard-coded machine-specific path for
            backward compatibility. BUG: the default is a developer-machine
            path — the WinSCP download target should be configured instead
            and passed explicitly.
    """
    # Define the target directory for storing downloaded files
    local_response_directory = os.path.join(local_storage_path, "responses")

    if not os.path.exists(local_response_directory):
        os.makedirs(local_response_directory)

    # Identify all downloaded .era files in the source directory
    downloaded_files = [f for f in os.listdir(source_directory) if f.endswith('.era')]

    # Move each file to the local_response_directory
    for file in downloaded_files:
        source_path = os.path.join(source_directory, file)
        destination_path = os.path.join(local_response_directory, file)
        shutil.move(source_path, destination_path)
        logging.info("Moved '{}' to '{}'".format(file, local_response_directory))
|
|
66
|
+
|
|
67
|
+
def find_era_files(era_file_path):
    """
    Find all files matching the era_file_path pattern.
    This function normalizes the path and supports wildcard patterns.
    """
    candidate = os.path.normpath(era_file_path)

    if "*" not in candidate:
        # Literal path: return it as a one-element list only when it exists.
        return [candidate] if os.path.exists(candidate) else []

    # Wildcard pattern: expand with glob and normalize every match.
    return [os.path.normpath(match) for match in glob.glob(candidate)]
|
|
84
|
+
|
|
85
|
+
def main(desired_endpoint='AVAILITY'):
    """Download ERA files from an endpoint (or translate a user-supplied ERA
    file directly) and consolidate the translated CSVs.

    Returns the consolidated CSV path, or None when a directly supplied
    --era_file_path matched no files.
    """
    parser = argparse.ArgumentParser(description="Process ERA files and convert them to CSV format.")
    parser.add_argument('--config_path', type=str, help='Path to the configuration JSON file', default="json\\config.json") # Default handling of json path
    parser.add_argument('--desired_endpoint', type=str, help='The desired endpoint key from the configuration.', default=desired_endpoint)
    parser.add_argument('--era_file_path', type=str, help='Optional: Specify a path to an ERA file for direct translation.', default=None)
    args = parser.parse_args()

    # Setup Logger, Load configuration and output directory
    config, _ = MediLink_ConfigLoader.load_configuration(args.config_path)
    local_storage_path = config['MediLink_Config']['local_storage_path']
    setup_logger(local_storage_path)
    output_directory = os.path.join(local_storage_path, "translated_csvs")

    # Direct ERA file translation if a file path is provided
    if args.era_file_path:
        era_files = find_era_files(args.era_file_path)
        if era_files:
            era_files_str = ', '.join(era_files)
            logging.info("Translating ERA files: {}".format(era_files_str))
            MediLink_ERA_decoder.translate_era_to_csv(era_files, output_directory)
            # Instead of returning a single CSV file path, consolidate here
            consolidate_csv_path = MediLink_ERA_decoder.consolidate_csvs(output_directory)
            logging.info("Translation and consolidation completed.")
            return consolidate_csv_path
        else:
            logging.error("No ERA files found matching: {}".format(args.era_file_path))
            return

    # TODO This probably needs to be built into a loop that cycles through all 3 endpoints.
    # The loop should use the tdqa or whatever the progress bar is called.
    print("Please wait...\n")

    # Validate endpoint key; fall back to AVAILITY when unknown
    endpoint_key = args.desired_endpoint
    if endpoint_key not in config['MediLink_Config']['endpoints']:
        logging.error("Endpoint '{}' not found in configuration. Using default 'AVAILITY'.".format(endpoint_key))
        endpoint_key = 'AVAILITY'

    # Retrieve endpoint configuration and local storage path
    endpoint_config = config['MediLink_Config']['endpoints'][endpoint_key]
    local_storage_path = config['MediLink_Config']['local_storage_path']

    # Download ERA files from the configured endpoint
    # NOTE(review): operate_winscp returns a bool success flag (True/False),
    # not a list of file paths, so the for-loop below iterates a bool and
    # will raise TypeError at runtime — confirm the intended contract and
    # return the downloaded file list (or scan local_storage_path) instead.
    downloaded_files = operate_winscp("download", None, endpoint_config, local_storage_path)

    # Translate downloaded ERA files to CSV format
    translated_csv_paths = []
    for file in downloaded_files:
        # TODO This needs to add functionality for differentiating between ERA and 277 or
        # whatever else might be included in the download folders.
        MediLink_ERA_decoder.translate_era_to_csv([file], output_directory)
        csv_file_path = os.path.join(output_directory, os.path.basename(file) + '.csv')
        translated_csv_paths.append(csv_file_path)
        logging.info("Translated ERA to CSV: {}".format(csv_file_path))

    # Consolidate new CSVs
    consolidate_csv_path = MediLink_ERA_decoder.consolidate_csvs(output_directory)

    # Return the consolidated CSV path (not the per-file list above)
    return consolidate_csv_path
|
|
145
|
+
|
|
146
|
+
if __name__ == "__main__":
    # Script entry point: run the full download/translate/consolidate
    # pipeline and report the resulting CSV path (if any) to the console.
    consolidate_csv_path = main()
    if consolidate_csv_path:
        print("CSV File Created: {}".format(consolidate_csv_path))
    else:
        print("No CSV file was created.")
|
|
@@ -0,0 +1,192 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import logging
|
|
3
|
+
import sys
|
|
4
|
+
from MediLink_ConfigLoader import setup_logger, load_configuration
|
|
5
|
+
from MediLink_DataMgmt import consolidate_csvs
|
|
6
|
+
|
|
7
|
+
"""
|
|
8
|
+
1. ERA File Processing: Implement robust mechanisms for reading and parsing ERA files, addressing potential file integrity issues and accommodating scenarios with multiple payer addresses within a single ERA.
|
|
9
|
+
2. Wildcard File Processing: Enable effective batch processing of ERA files using wildcard patterns in the `--era_file_path` argument, resulting in a unified CSV output.
|
|
10
|
+
3. Date of Service Parsing: Enhance the parsing logic for 'Date of Service' to accommodate different segment identifiers, improving data extraction reliability.
|
|
11
|
+
4. Payer Address Extraction: Fine-tune the logic for extracting payer and provider addresses from ERA files, ensuring only relevant information is captured.
|
|
12
|
+
5. De-persisting Intermediate Files.
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
# ERA Parser
|
|
16
|
+
def parse_era_content(content):
    """Parse the text of an 835 (ERA) remittance into a list of claim dicts.

    Each CLP segment starts a new claim record; subsequent CAS/AMT/DTM
    segments accumulate write-off, patient-responsibility, adjustment,
    allowed-amount and service-date details onto it. TRN supplies the
    check/EFT trace number and the N1*PR...N3 pair supplies the payer
    address; both apply to every claim in the file.

    Args:
        content: Raw ERA file content with '~' segment terminators.

    Returns:
        List of dicts with keys 'Check EFT', 'Chart Number', 'Payer Address',
        'Amount Paid', 'Charge', 'Allowed Amount', 'Write Off',
        'Patient Responsibility', 'Adjustment Amount' and, when a DTM 232/472
        segment is present, 'Date of Service'.
    """
    extracted_data = []
    # Segments may be terminated by '~' alone or '~\n'; normalize first.
    normalized_content = content.replace('~\n', '~')
    lines = normalized_content.split('~')

    # Reset these values for each new CLP segment
    record = {}
    check_eft, payer_address = None, None
    allowed_amount, write_off, patient_responsibility, adjustment_amount = 0, 0, 0, 0
    is_payer_section = False  # Flag to identify payer section for accurate address capture

    for line in lines:
        segments = line.split('*')

        if segments[0] == 'TRN' and len(segments) > 2:
            check_eft = segments[2]

        # Determine the start and end of the payer section to correctly capture the payer's address
        # FIX: length guard — a bare 'N1' segment previously raised IndexError.
        if segments[0] == 'N1' and len(segments) > 1:
            if segments[1] == 'PR':  # Payer information starts
                is_payer_section = True
                # payer_name = segments[2] # Can capture payer name here if needed
            elif segments[1] == 'PE':  # Provider information starts, ending payer section
                is_payer_section = False

        # Correctly capture payer address only within payer section
        if is_payer_section and segments[0] == 'N3' and len(segments) > 1:
            payer_address = segments[1]

        if segments[0] == 'CLP' and len(segments) >= 5:
            if record:
                if adjustment_amount == 0 and (write_off > 0 or patient_responsibility > 0):
                    adjustment_amount = write_off + patient_responsibility

                # Finalize and append the current record before starting a new one
                record.update({
                    'Payer Address': payer_address,
                    'Allowed Amount': allowed_amount,
                    'Write Off': write_off,
                    'Patient Responsibility': patient_responsibility,
                    'Adjustment Amount': adjustment_amount,
                })
                extracted_data.append(record)

                # Reset variables for the next record
                allowed_amount, write_off, patient_responsibility, adjustment_amount = 0, 0, 0, 0
                # payer_address is deliberately NOT reset: it applies file-wide

            # Initialize a new record
            record = {
                'Check EFT': check_eft,
                'Chart Number': segments[1],
                'Payer Address': payer_address,
                'Amount Paid': segments[4],
                'Charge': segments[3],  # Total submitted charges for the claim
            }

        # FIX: length guards below — truncated CAS/AMT/DTM segments previously
        # raised IndexError and aborted the whole parse.
        elif segments[0] == 'CAS' and len(segments) > 3:
            # Parsing CAS segments for Write Off and Patient Responsibility
            if segments[1] == 'CO':  # Write Off
                write_off += float(segments[3])
            elif segments[1] == 'PR':  # Patient Responsibility
                patient_responsibility += float(segments[3])
            elif segments[1] == 'OA':  # Capture Adjustment Amount from CAS*OA segment
                adjustment_amount += float(segments[3])

        elif segments[0] == 'AMT' and len(segments) > 2 and segments[1] == 'B6':
            # Allowed Amount from AMT segment
            allowed_amount += float(segments[2])

        elif segments[0] == 'DTM' and len(segments) > 2 and (segments[1] == '232' or segments[1] == '472'):
            record['Date of Service'] = segments[2]

    if record:
        # Final record handling
        if adjustment_amount == 0 and (write_off > 0 or patient_responsibility > 0):
            adjustment_amount = write_off + patient_responsibility
        # Append the last record
        record.update({
            'Allowed Amount': allowed_amount,
            'Write Off': write_off,
            'Patient Responsibility': patient_responsibility,
            'Adjustment Amount': adjustment_amount,
        })
        extracted_data.append(record)

    return extracted_data
|
|
105
|
+
|
|
106
|
+
def translate_era_to_csv(files, output_directory):
    """Translate each ERA file into a CSV of the same base name.

    Args:
        files: Iterable of ERA file paths.
        output_directory: Directory for the generated CSVs (created if missing).

    Each output CSV is named <era basename>.csv and holds one row per claim
    record produced by parse_era_content; missing fields serialize as ''.
    """
    # FIX: this module never imports csv at top level (only os/logging/sys
    # and the two project modules), so csv.DictWriter below raised NameError
    # at runtime. Import locally to keep this fix self-contained.
    import csv

    if not os.path.exists(output_directory):
        os.makedirs(output_directory)

    for file_path in files:
        # Ensure the file is read correctly
        with open(file_path, 'r') as era_file:
            era_content = era_file.read()

        data = parse_era_content(era_content)

        csv_file_path = os.path.join(output_directory, os.path.basename(file_path) + '.csv')

        try:
            # Open the CSV file with explicit newline handling
            with open(csv_file_path, 'w', newline='') as csv_file:
                fieldnames = ['Date of Service',
                              'Check EFT',
                              'Chart Number',
                              'Payer Address',
                              'Amount Paid',
                              'Adjustment Amount',
                              'Allowed Amount',
                              'Write Off',
                              'Patient Responsibility',
                              'Charge'
                              ]
                writer = csv.DictWriter(csv_file, fieldnames=fieldnames)

                writer.writeheader()
                for record in data:
                    # Missing keys default to '' so partially parsed claims still serialize.
                    writer.writerow({name: record.get(name, '') for name in fieldnames})
                # Explicitly flush data to ensure it's written
                csv_file.flush()
        except Exception as e:
            print("Error writing CSV: ", e)
|
|
157
|
+
|
|
158
|
+
# User Interface
|
|
159
|
+
def user_confirm_overwrite(chart_numbers):
    """Asks the user for confirmation to overwrite an existing file, showing Chart Numbers."""
    print("The following Chart Numbers are in the existing file:")
    for chart_number in chart_numbers:
        print(chart_number)
    answer = input("The file already exists. Do you want to overwrite it? (y/n): ")
    return answer.strip().lower() == 'y'
|
|
165
|
+
|
|
166
|
+
if __name__ == "__main__":
    # Standalone mode: translate the ERA files named on the command line and
    # consolidate the resulting CSVs into one day-stamped file.
    # Load configuration

    config, _ = load_configuration()

    # Setup logger
    local_storage_path = config['MediLink_Config']['local_storage_path']
    setup_logger(local_storage_path)

    # Define output directory
    output_directory = os.path.join(local_storage_path, "translated_csvs")

    # Retrieve ERA files from command line arguments
    files = sys.argv[1:]  # Exclude the script name
    if not files:
        logging.error("No ERA files provided as arguments.")
        sys.exit(1)

    # Translate ERA files to CSV format
    translate_era_to_csv(files, output_directory)

    # Consolidate CSVs
    consolidate_csv_path = consolidate_csvs(output_directory)
    if consolidate_csv_path:
        print("Consolidated CSV File Created: {}".format(consolidate_csv_path))
    else:
        print("No CSV file was created.")
|