medicafe 0.240517.0__py3-none-any.whl → 0.240716.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of medicafe might be problematic. Click here for more details.

Files changed (37)
  1. MediBot/MediBot.bat +46 -6
  2. MediBot/MediBot.py +9 -36
  3. MediBot/MediBot_Charges.py +0 -28
  4. MediBot/MediBot_Crosswalk_Library.py +16 -8
  5. MediBot/MediBot_Post.py +0 -0
  6. MediBot/MediBot_Preprocessor.py +26 -63
  7. MediBot/MediBot_Preprocessor_lib.py +182 -43
  8. MediBot/MediBot_UI.py +2 -7
  9. MediBot/MediBot_dataformat_library.py +0 -9
  10. MediBot/MediBot_docx_decoder.py +283 -60
  11. MediLink/MediLink.py +80 -120
  12. MediLink/MediLink_837p_encoder.py +3 -28
  13. MediLink/MediLink_837p_encoder_library.py +19 -53
  14. MediLink/MediLink_API_Generator.py +246 -0
  15. MediLink/MediLink_API_v2.py +2 -0
  16. MediLink/MediLink_API_v3.py +325 -0
  17. MediLink/MediLink_APIs.py +2 -0
  18. MediLink/MediLink_ClaimStatus.py +144 -0
  19. MediLink/MediLink_ConfigLoader.py +13 -7
  20. MediLink/MediLink_DataMgmt.py +224 -68
  21. MediLink/MediLink_Decoder.py +165 -0
  22. MediLink/MediLink_Deductible.py +203 -0
  23. MediLink/MediLink_Down.py +122 -96
  24. MediLink/MediLink_Gmail.py +453 -74
  25. MediLink/MediLink_Mailer.py +0 -7
  26. MediLink/MediLink_Parser.py +193 -0
  27. MediLink/MediLink_Scan.py +0 -0
  28. MediLink/MediLink_Scheduler.py +2 -172
  29. MediLink/MediLink_StatusCheck.py +0 -4
  30. MediLink/MediLink_UI.py +54 -18
  31. MediLink/MediLink_Up.py +6 -15
  32. {medicafe-0.240517.0.dist-info → medicafe-0.240716.2.dist-info}/METADATA +4 -1
  33. medicafe-0.240716.2.dist-info/RECORD +47 -0
  34. {medicafe-0.240517.0.dist-info → medicafe-0.240716.2.dist-info}/WHEEL +1 -1
  35. medicafe-0.240517.0.dist-info/RECORD +0 -39
  36. {medicafe-0.240517.0.dist-info → medicafe-0.240716.2.dist-info}/LICENSE +0 -0
  37. {medicafe-0.240517.0.dist-info → medicafe-0.240716.2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,203 @@
1
+ """
2
+ # Create a summary JSON
3
+ summary = {
4
+ "Payer ID": ins_payerID,
5
+ "Provider": provider_last_name,
6
+ "Member ID": ins_memberID,
7
+ "Date of Birth": dob,
8
+ "Patient Name": patient_name,
9
+ "Patient Info": {
10
+ "DOB": dob,
11
+ "Address": "{} {}".format(patient_info.get("addressLine1", ""), patient_info.get("addressLine2", "")).strip(),
12
+ "City": patient_info.get("city", ""),
13
+ "State": patient_info.get("state", ""),
14
+ "ZIP": patient_info.get("zip", ""),
15
+ "Relationship": patient_info.get("relationship", "")
16
+ },
17
+ "Insurance Info": {
18
+ "Payer Name": insurance_info.get("payerName", ""),
19
+ "Payer ID": ins_payerID,
20
+ "Member ID": ins_memberID,
21
+ "Group Number": insurance_info.get("groupNumber", ""),
22
+ "Insurance Type": ins_insuranceType,
23
+ "Type Code": ins_insuranceTypeCode,
24
+ "Address": "{} {}".format(insurance_info.get("addressLine1", ""), insurance_info.get("addressLine2", "")).strip(),
25
+ "City": insurance_info.get("city", ""),
26
+ "State": insurance_info.get("state", ""),
27
+ "ZIP": insurance_info.get("zip", "")
28
+ },
29
+ "Policy Info": {
30
+ "Eligibility Dates": eligibilityDates,
31
+ "Policy Member ID": policy_info.get("memberId", ""),
32
+ "Policy Status": policy_status
33
+ },
34
+ "Deductible Info": {
35
+ "Remaining Amount": remaining_amount
36
+ }
37
+ }
38
+
39
+ # Print debug JSON
40
+ # Uncomment below if you need to debug later
41
+ # print("\nDebug JSON Summary:")
42
+ # print(json.dumps(summary, indent=2))
43
+ """
44
+ import MediLink_API_v3
45
+ import os
46
+ import sys
47
+ from datetime import datetime
48
+ import requests
49
+ import json
50
+
51
+ try:
52
+ from MediLink import MediLink_ConfigLoader
53
+ except ImportError:
54
+ import MediLink_ConfigLoader
55
+
56
+ project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
57
+ sys.path.append(project_dir)
58
+
59
+ try:
60
+ from MediBot import MediBot_Preprocessor_lib
61
+ except ImportError:
62
+ import MediBot_Preprocessor_lib
63
+
64
+ # Load configuration
65
+ config, _ = MediLink_ConfigLoader.load_configuration()
66
+
67
+ # Initialize the API client
68
+ client = MediLink_API_v3.APIClient()
69
+
70
+ # Get provider_last_name and npi from configuration
71
+ provider_last_name = config['MediLink_Config'].get('default_billing_provider_last_name')
72
+ npi = config['MediLink_Config'].get('default_billing_provider_npi')
73
+
74
+ # Define the list of payer_id's to iterate over
75
+ payer_ids = ['87726', '03432', '96385', '95467', '86050', '86047', '95378', '06111', '37602']
76
+
77
+ # Get the latest CSV
78
+ CSV_FILE_PATH = config.get('CSV_FILE_PATH', "")
79
+ csv_data = MediBot_Preprocessor_lib.load_csv_data(CSV_FILE_PATH)
80
+
81
+ # Only keep rows that contain a valid number from the payer_ids list
82
+ valid_rows = [row for row in csv_data if str(row['Ins1 Payer ID']) in payer_ids]
83
+
84
+ # Function to check if the date format is correct
85
+ def validate_and_format_date(date_str):
86
+ for fmt in ('%Y-%m-%d', '%m/%d/%Y', '%d-%b-%Y', '%d-%m-%Y'):
87
+ try:
88
+ formatted_date = datetime.strptime(date_str, fmt).strftime('%Y-%m-%d')
89
+ return formatted_date
90
+ except ValueError:
91
+ continue
92
+ return None
93
+
94
+ # List of patients with DOB and MemberID from CSV data
95
+ patients = [
96
+ (validate_and_format_date(row['Patient DOB']), row['Primary Policy Number'])
97
+ for row in valid_rows if validate_and_format_date(row['Patient DOB']) is not None
98
+ ]
99
+
100
+ # Function to get eligibility information
101
+ def get_eligibility_info(client, payer_id, provider_last_name, date_of_birth, member_id, npi):
102
+ try:
103
+ # Log the parameters being sent to the function
104
+ MediLink_ConfigLoader.log("Calling get_eligibility_v3 with parameters:", level="INFO")
105
+ MediLink_ConfigLoader.log("payer_id: {}".format(payer_id), level="INFO")
106
+ MediLink_ConfigLoader.log("provider_last_name: {}".format(provider_last_name), level="INFO")
107
+ MediLink_ConfigLoader.log("date_of_birth: {}".format(date_of_birth), level="INFO")
108
+ MediLink_ConfigLoader.log("member_id: {}".format(member_id), level="INFO")
109
+ MediLink_ConfigLoader.log("npi: {}".format(npi), level="INFO")
110
+
111
+ # Call the get_eligibility_v3 function
112
+ eligibility = MediLink_API_v3.get_eligibility_v3(
113
+ client, payer_id, provider_last_name, 'MemberIDDateOfBirth', date_of_birth, member_id, npi
114
+ )
115
+
116
+ # Log the response
117
+ MediLink_ConfigLoader.log("Eligibility response: {}".format(json.dumps(eligibility, indent=4)), level="INFO")
118
+
119
+ return eligibility
120
+ except requests.exceptions.HTTPError as e:
121
+ # Log the HTTP error response
122
+ MediLink_ConfigLoader.log("HTTPError: {}".format(e), level="ERROR")
123
+ MediLink_ConfigLoader.log("Response content: {}".format(e.response.content), level="ERROR")
124
+ except Exception as e:
125
+ # Log any other exceptions
126
+ MediLink_ConfigLoader.log("Error: {}".format(e), level="ERROR")
127
+ return None
128
+
129
+ # Function to extract required fields and display in a tabular format
130
+ def display_eligibility_info(data, dob, member_id):
131
+ if data is None:
132
+ return
133
+
134
+ for policy in data["memberPolicies"]:
135
+ # Skip non-medical policies
136
+ if policy["policyInfo"]["coverageType"] != "Medical":
137
+ continue
138
+
139
+ patient_info = policy["patientInfo"][0]
140
+ lastName = patient_info.get("lastName", "")
141
+ firstName = patient_info.get("firstName", "")
142
+ middleName = patient_info.get("middleName", "")
143
+
144
+ # Check if the remaining amount is per individual first, then fallback to family
145
+ if 'individual' in policy["deductibleInfo"]:
146
+ remaining_amount = policy["deductibleInfo"]["individual"]["inNetwork"].get("remainingAmount", "")
147
+ else:
148
+ remaining_amount = policy["deductibleInfo"]["family"]["inNetwork"].get("remainingAmount", "")
149
+
150
+ insurance_info = policy["insuranceInfo"]
151
+ ins_insuranceType = insurance_info.get("insuranceType", "")
152
+ ins_insuranceTypeCode = insurance_info.get("insuranceTypeCode", "")
153
+ ins_memberID = insurance_info.get("memberId", "")
154
+ ins_payerID = insurance_info.get("payerId", "")
155
+
156
+ policy_info = policy["policyInfo"]
157
+ eligibilityDates = policy_info.get("eligibilityDates", "")
158
+ policy_status = policy_info.get("policyStatus", "")
159
+
160
+ patient_name = "{} {} {}".format(firstName, middleName, lastName).strip()[:20]
161
+
162
+ # Display patient information in a table row format
163
+ eligibility_end_date = eligibilityDates.get("endDate", "")
164
+ table_row = "{:<20} | {:<10} | {:<5} | {:<30}".format(
165
+ patient_name, dob, ins_payerID, ins_insuranceType)
166
+ print(table_row)
167
+ table_row_details = "{:<20} | {:<10} | {:<5} | {:<15} | {:<8} | {:<15} | {:<20}".format(
168
+ "", "", "", ins_insuranceTypeCode, eligibility_end_date[-10:], policy_status, remaining_amount)
169
+ print(table_row_details)
170
+
171
+ # Print the table header once before entering the loop
172
+ table_header = "{:<20} | {:<10} | {:<5} | {:<30}".format(
173
+ "Patient Name", "DOB", "Payer ID", "Insurance Type")
174
+ print(table_header)
175
+ print("-" * len(table_header))
176
+ sub_header = "{:<20} | {:<10} | {:<5} | {:<15} | {:<8} | {:<15} | {:<20}".format(
177
+ "", "", "", "Type Code", "End Date", "Policy Status", "Remaining Amount")
178
+ print(sub_header)
179
+ print("-" * len(sub_header))
180
+
181
+ # Set to keep track of processed patients
182
+ processed_patients = set()
183
+
184
+ # Loop through each payer_id and patient to call the API, then display the eligibility information
185
+ errors = []
186
+ for payer_id in payer_ids:
187
+ for dob, member_id in patients:
188
+ # Skip if this patient has already been processed
189
+ if (dob, member_id) in processed_patients:
190
+ continue
191
+ try:
192
+ eligibility_data = get_eligibility_info(client, payer_id, provider_last_name, dob, member_id, npi)
193
+ if eligibility_data is not None:
194
+ display_eligibility_info(eligibility_data, dob, member_id) # Display as we get the result
195
+ processed_patients.add((dob, member_id)) # Mark this patient as processed
196
+ except Exception as e:
197
+ errors.append((dob, member_id, str(e)))
198
+
199
+ # Display errors if any
200
+ if errors:
201
+ print("\nErrors encountered during API calls:")
202
+ for error in errors:
203
+ print("DOB: {}, Member ID: {}, Error: {}".format(error[0], error[1], error[2]))
MediLink/MediLink_Down.py CHANGED
@@ -1,128 +1,154 @@
1
+ # MediLink_Down.py
1
2
  import os
2
3
  import argparse
3
4
  import shutil
4
- from datetime import datetime
5
5
  import glob
6
- import MediLink_ERA_decoder
6
+ import csv
7
+ from MediLink_Decoder import process_file
8
+ from MediLink_ConfigLoader import load_configuration, log
7
9
  from MediLink_DataMgmt import operate_winscp
8
- import MediLink_ConfigLoader
9
10
 
10
- """
11
- We need to make another function that figures out claim rejections and tries to solve them.
12
-
13
- 1. Config File Path Adjustment: Ensure the configuration file's path is adaptable for various environments, or clearly document the process for setting this path.
14
- 2. Logging Enhancements: Improve the logging mechanism to offer comprehensive insights through both file and console outputs, aiding in troubleshooting and operational monitoring.
15
- 3. CSV Output Refinement: Update the CSV output structure to include essential ERA data such as Payer Address, ensuring completeness and accuracy of information.
16
- 4. CSV Consolidation Logic: Develop logic for intelligently consolidating CSV outputs from batch-processed ERA files, ensuring coherent and comprehensive data aggregation.
17
- 5. Secure Endpoint Authentication: Establish a secure method for inputting and storing endpoint authentication details, enhancing script security.
18
- 6. Automated Endpoint Processing: Integrate automated looping through configured endpoints for ERA file retrieval, maximizing efficiency and reducing manual oversight.
19
- 7. Configuration Key Accuracy: Audit the script to correct any inaccuracies in configuration key references, ensuring seamless configuration data retrieval.
20
- """
21
-
22
- # Because I can't figure out how to get it to work directly in the WinSCP command.
23
- # And on the Windows XP machine apparently the default path is C:\\ ...
24
- # This needs to get fixed. Ugh.
25
- def move_downloaded_files(local_storage_path):
26
- # Define the target directory for storing downloaded files
11
+ def move_downloaded_files(local_storage_path, config):
27
12
  local_response_directory = os.path.join(local_storage_path, "responses")
28
13
 
29
14
  if not os.path.exists(local_response_directory):
30
15
  os.makedirs(local_response_directory)
31
16
 
32
- # Identify all downloaded .era files in the current directory
33
- # downloaded_files = [f for f in os.listdir('.') if f.endswith('.era')]
34
- downloaded_files = [f for f in os.listdir('C:\\Users\\danie\\OneDrive\\Documents') if f.endswith('.era')]
17
+ download_dir = config['MediLink_Config']['local_storage_path']
18
+ file_extensions = ['.era', '.277', '.277ibr', '.277ebr', '.dpt', '.ebt', '.ibt'] # Extendable list of file extensions
35
19
 
36
- # Move each file to the local_response_directory
37
- for file in downloaded_files:
38
- source_path = os.path.join('C:\\Users\\danie\\OneDrive\\Documents', file)
39
- # source_path = os.path.join('.', file) for the XP machine? -- This whole thing needs repaired.
40
- destination_path = os.path.join(local_response_directory, file)
41
- shutil.move(source_path, destination_path)
42
- MediLink_ConfigLoader.log("Moved '{}' to '{}'".format(file, local_response_directory))
43
-
44
- def find_era_files(era_file_path):
45
- """
46
- Find all files matching the era_file_path pattern.
47
- This function normalizes the path and supports wildcard patterns.
48
- """
49
- # Normalize the path to handle slashes correctly
50
- normalized_path = os.path.normpath(era_file_path)
20
+ for ext in file_extensions:
21
+ downloaded_files = [f for f in os.listdir(download_dir) if f.endswith(ext)]
22
+ for file in downloaded_files:
23
+ source_path = os.path.join(download_dir, file)
24
+ destination_path = os.path.join(local_response_directory, file)
25
+ shutil.move(source_path, destination_path)
26
+ log("Moved '{}' to '{}'".format(file, local_response_directory))
51
27
 
52
- # Handling different wildcard scenarios
53
- if "*" in normalized_path:
54
- # Use glob to find all files matching the pattern
28
+ def find_files(file_path_pattern):
29
+ normalized_path = os.path.normpath(file_path_pattern)
30
+ if os.path.isdir(normalized_path):
31
+ return [os.path.join(normalized_path, f) for f in os.listdir(normalized_path) if os.path.isfile(os.path.join(normalized_path, f))]
32
+ elif "*" in normalized_path:
55
33
  matching_files = glob.glob(normalized_path)
56
- # Normalize paths in the resulting list
57
34
  return [os.path.normpath(file) for file in matching_files]
58
35
  else:
59
- # Single file specified, return it in a list if it exists
60
36
  return [normalized_path] if os.path.exists(normalized_path) else []
61
37
 
62
- def main(desired_endpoint='AVAILITY'):
63
- parser = argparse.ArgumentParser(description="Process ERA files and convert them to CSV format.")
64
- parser.add_argument('--config_path', type=str, help='Path to the configuration JSON file', default="json\\config.json") # Default handling of json path
65
- parser.add_argument('--desired_endpoint', type=str, help='The desired endpoint key from the configuration.', default=desired_endpoint)
66
- parser.add_argument('--era_file_path', type=str, help='Optional: Specify a path to an ERA file for direct translation.', default=None)
38
+ def translate_files(files, output_directory):
39
+ translated_files = []
40
+ consolidated_records = []
41
+ file_counts = {'.era': 0, '.277': 0, '.277ibr': 0, '.277ebr': 0, '.dpt': 0, '.ebt': 0, '.ibt': 0}
42
+
43
+ for file in files:
44
+ ext = os.path.splitext(file)[1]
45
+ if ext in file_counts:
46
+ file_counts[ext] += 1
47
+
48
+ try:
49
+ records = process_file(file, output_directory, return_records=True)
50
+ consolidated_records.extend(records)
51
+ csv_file_path = os.path.join(output_directory, os.path.basename(file) + '_decoded.csv')
52
+ log("Translated file to CSV: {}".format(csv_file_path), level="INFO")
53
+ translated_files.append(csv_file_path)
54
+ except ValueError as ve:
55
+ log("Unsupported file type: {}".format(file), level="WARNING")
56
+ except Exception as e:
57
+ log("Error processing file {}: {}".format(file, e), level="ERROR")
58
+
59
+ print("Detected and processed file counts by type:")
60
+ for ext, count in file_counts.items():
61
+ print("{}: {} files detected".format(ext, count))
62
+
63
+ return consolidated_records, translated_files
64
+
65
+ def display_translated_files(translated_files):
66
+ print("\nTranslated Files Summary:")
67
+ for file in translated_files:
68
+ print(" - {}".format(file))
69
+
70
+ def main():
71
+ parser = argparse.ArgumentParser(description="Process files and convert them to CSV format.")
72
+ parser.add_argument('--config_path', type=str, help='Path to the configuration JSON file', default="json/config.json")
73
+ parser.add_argument('--file_path_pattern', type=str, help='Path pattern or directory for files to process.', default=None)
67
74
  args = parser.parse_args()
68
75
 
69
- # Setup Logger, Load configuration and output directory
70
- config, _ = MediLink_ConfigLoader.load_configuration(args.config_path)
76
+ config, _ = load_configuration(args.config_path)
71
77
  local_storage_path = config['MediLink_Config']['local_storage_path']
72
78
  output_directory = os.path.join(local_storage_path, "translated_csvs")
73
-
74
- # Direct ERA file translation if a file path is provided
75
- if args.era_file_path:
76
- era_files = find_era_files(args.era_file_path)
77
- if era_files:
78
- era_files_str = ', '.join(era_files)
79
- MediLink_ConfigLoader.log("Translating ERA files: {}".format(era_files_str))
80
- MediLink_ERA_decoder.translate_era_to_csv(era_files, output_directory)
81
- # Instead of returning a single CSV file path, consolidate here
82
- consolidate_csv_path = MediLink_ERA_decoder.consolidate_csvs(output_directory)
83
- MediLink_ConfigLoader.log("Translation and consolidation completed.")
84
- return consolidate_csv_path
79
+
80
+ # If a file path pattern is provided, process those files directly
81
+ if args.file_path_pattern:
82
+ files = find_files(args.file_path_pattern)
83
+ if files:
84
+ files_str = ', '.join(files)
85
+ log("Translating files: {}".format(files_str), level="INFO")
86
+ consolidated_records, translated_files = translate_files(files, output_directory)
87
+ log("Translation completed.", level="INFO")
88
+ if consolidated_records:
89
+ display_consolidated_records(consolidated_records)
90
+ prompt_csv_export(consolidated_records, output_directory)
91
+ return
85
92
  else:
86
- MediLink_ConfigLoader.log("No ERA files found matching: {}".format(args.era_file_path))
93
+ log("No files found matching: {}".format(args.file_path_pattern), level="WARNING")
87
94
  return
88
-
89
- # TODO (Low Remit) This probably needs to be built into a loop that cycles through all 3 endpoints.
90
- # I think the uploader has something like this implemented already since it sends to all the endpoints.
91
- # The loop should use the tdqa or whatever the progress bar is called.
92
- # print("Please wait...\n")
93
-
94
- # Validate endpoint key
95
- endpoint_key = args.desired_endpoint
95
+
96
+ # Handle downloading files via WinSCP
97
+ endpoint_key = 'AVAILITY'
96
98
  if endpoint_key not in config['MediLink_Config']['endpoints']:
97
- MediLink_ConfigLoader.log("Endpoint '{}' not found in configuration. Using default 'AVAILITY'.".format(endpoint_key))
99
+ log("Endpoint '{}' not found in configuration. Using default 'AVAILITY'.".format(endpoint_key), level="WARNING")
98
100
  endpoint_key = 'AVAILITY'
99
101
 
100
- # Retrieve endpoint configuration and local storage path
101
- endpoint_config = config['MediLink_Config']['endpoints'][endpoint_key]
102
- local_storage_path = config['MediLink_Config']['local_storage_path']
103
-
104
- # Download ERA files from the configured endpoint
105
- downloaded_files = operate_winscp("download", None, endpoint_config, local_storage_path, config)
102
+ endpoint_configs = [config['MediLink_Config']['endpoints'][key] for key in config['MediLink_Config']['endpoints']]
103
+ downloaded_files = []
106
104
 
107
- # Translate downloaded ERA files to CSV format
108
- translated_csv_paths = []
109
- for file in downloaded_files:
110
- # TODO (Low Remit) This needs to add functionality for differentiating between ERA, 277, IBT or
111
- # whatever else might be included in the download folders.
112
- MediLink_ERA_decoder.translate_era_to_csv([file], output_directory)
113
- csv_file_path = os.path.join(output_directory, os.path.basename(file) + '.csv')
114
- translated_csv_paths.append(csv_file_path)
115
- MediLink_ConfigLoader.log("Translated ERA to CSV: {}".format(csv_file_path))
105
+ for endpoint_config in endpoint_configs:
106
+ downloaded_files += operate_winscp("download", None, endpoint_config, local_storage_path, config)
116
107
 
117
- # Consolidate new CSVs
118
- consolidate_csv_path = MediLink_ERA_decoder.consolidate_csvs(output_directory)
108
+ move_downloaded_files(local_storage_path, config)
119
109
 
120
- # Return the list of translated CSV file paths
121
- return consolidate_csv_path
110
+ consolidated_records, translated_files = translate_files(downloaded_files, output_directory)
111
+ if consolidated_records:
112
+ display_consolidated_records(consolidated_records)
113
+ prompt_csv_export(consolidated_records, output_directory)
114
+
115
+ def display_consolidated_records(records):
116
+ # Define the new fieldnames and their respective widths
117
+ new_fieldnames = ['Claim #', 'Status', 'Patient', 'Proc.', 'Serv.', 'Allowed', 'Paid', 'Pt Resp', 'Charged']
118
+ col_widths = {field: len(field) for field in new_fieldnames}
119
+
120
+ # Update column widths based on records
121
+ for record in records:
122
+ for field in new_fieldnames:
123
+ col_widths[field] = max(col_widths[field], len(str(record.get(field, ''))))
124
+
125
+ # Create table header
126
+ header = " | ".join("{:<{}}".format(field, col_widths[field]) for field in new_fieldnames)
127
+ print(header)
128
+ print("-" * len(header))
129
+
130
+ # Create table rows
131
+ for record in records:
132
+ row = " | ".join("{:<{}}".format(str(record.get(field, '')), col_widths[field]) for field in new_fieldnames)
133
+ print(row)
134
+
135
+ def prompt_csv_export(records, output_directory):
136
+ if records:
137
+ user_input = input("Do you want to export the consolidated records to a CSV file? (y/n): ")
138
+ if user_input.lower() == 'y':
139
+ output_file_path = os.path.join(output_directory, "Consolidated_Records.csv")
140
+ write_records_to_csv(records, output_file_path)
141
+ log("Consolidated CSV file created at: {}".format(output_file_path), level="INFO")
142
+ else:
143
+ log("CSV export skipped by user.", level="INFO")
144
+
145
+ def write_records_to_csv(records, output_file_path):
146
+ fieldnames = ['Claim #', 'Status', 'Patient', 'Proc.', 'Serv.', 'Allowed', 'Paid', 'Pt Resp', 'Charged']
147
+ with open(output_file_path, 'w', newline='') as csvfile:
148
+ writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
149
+ writer.writeheader()
150
+ for record in records:
151
+ writer.writerow(record)
122
152
 
123
153
  if __name__ == "__main__":
124
- consolidate_csv_path = main()
125
- if consolidate_csv_path:
126
- print("CSV File Created: {}".format(consolidate_csv_path))
127
- else:
128
- print("No CSV file was created.")
154
+ main()