medicafe 0.240809.0-py3-none-any.whl → 0.240925.9-py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.



@@ -1,57 +1,93 @@
  # MediLink_Decoder.py
- import os
- import sys
- import csv
+ import os, sys, csv
  from MediLink_ConfigLoader import load_configuration, log
  from MediLink_Parser import parse_era_content, parse_277_content, parse_277IBR_content, parse_277EBR_content, parse_dpt_content, parse_ebt_content, parse_ibt_content

- def process_file(file_path, output_directory, return_records=False):
-     if not os.path.exists(output_directory):
-         os.makedirs(output_directory)
+ # Define new_fieldnames globally
+ new_fieldnames = ['Claim #', 'Payer', 'Status', 'Patient', 'Proc.', 'Serv.', 'Allowed', 'Paid', 'Pt Resp', 'Charged']
+
+ class UnifiedRecord:
+     def __init__(self, claim_number='', status='', patient='', payer='', proc_date='', serv_date='', allowed='', paid='', pt_resp='', charged=''):
+         self.claim_number = claim_number
+         self.payer = payer # Added payer to the constructor
+         self.status = status
+         self.patient = patient
+         self.proc_date = proc_date
+         self.serv_date = serv_date
+         self.allowed = allowed
+         self.paid = paid
+         self.pt_resp = pt_resp
+         self.charged = charged
+
+     def to_dict(self):
+         return {
+             'Claim #': self.claim_number,
+             'Payer': self.payer, # Added payer to the dictionary representation
+             'Status': self.status,
+             'Patient': self.patient,
+             'Proc.': self.proc_date,
+             'Serv.': self.serv_date,
+             'Allowed': self.allowed,
+             'Paid': self.paid,
+             'Pt Resp': self.pt_resp,
+             'Charged': self.charged
+         }
+
+     def __repr__(self):
+         return ("UnifiedRecord(claim_number='{0}', status='{1}', patient='{2}', payer='{3}', proc_date='{4}', serv_date='{5}', "
+                 "allowed='{6}', paid='{7}', pt_resp='{8}', charged='{9}')").format(
+             self.claim_number, self.status, self.patient, self.payer, self.proc_date,
+             self.serv_date, self.allowed, self.paid, self.pt_resp, self.charged) # Added payer to the repr
+
+ def process_decoded_file(file_path, output_directory, return_records=False, debug=False): # Renamed from process_file
+     os.makedirs(output_directory, exist_ok=True)

      file_type = determine_file_type(file_path)
      content = read_file(file_path)
-
-     if file_type == 'ERA':
-         records = parse_era_content(content)
-     elif file_type in ['277', '277IBR', '277EBR']:
-         records = parse_277_content(content) if file_type == '277' else parse_277IBR_content(content) if file_type == '277IBR' else parse_277EBR_content(content)
-     elif file_type == 'DPT':
-         records = parse_dpt_content(content)
-     elif file_type == 'EBT':
-         records = parse_ebt_content(content)
-     elif file_type == 'IBT':
-         records = parse_ibt_content(content)
-     else:
+
+     parse_functions = {
+         'ERA': parse_era_content,
+         '277': parse_277_content,
+         '277IBR': parse_277IBR_content,
+         '277EBR': parse_277EBR_content,
+         'DPT': parse_dpt_content,
+         'EBT': parse_ebt_content,
+         'IBT': parse_ibt_content
+     }
+
+     parse_function = parse_functions.get(file_type)
+     if parse_function is None:
          log("Unsupported file type: {}".format(file_type))
          return []

+     records = parse_function(content, debug=debug)
      formatted_records = format_records(records, file_type)
+
      if not return_records:
-         display_table(formatted_records)
-         output_file_path = os.path.join(output_directory, os.path.basename(file_path) + '_decoded.csv')
+         display_table([record.to_dict() for record in formatted_records])
+         output_file_path = os.path.join(output_directory, "{}_decoded.csv".format(os.path.basename(file_path)))
          write_records_to_csv(formatted_records, output_file_path)
          log("Decoded data written to {}".format(output_file_path))
-     return formatted_records
+
+     return formatted_records # Returns list of UnifiedRecord instances

  def determine_file_type(file_path):
-     if file_path.endswith('.era'):
-         return 'ERA'
-     elif file_path.endswith('.277'):
-         return '277'
-     elif file_path.endswith('.277ibr'):
-         return '277IBR'
-     elif file_path.endswith('.277ebr'):
-         return '277EBR'
-     elif file_path.endswith('.dpt'):
-         return 'DPT'
-     elif file_path.endswith('.ebt'):
-         return 'EBT'
-     elif file_path.endswith('.ibt'):
-         return 'IBT'
-     else:
-         log("Unsupported file type for file: {}".format(file_path))
-         return None
+     file_extensions = {
+         '.era': 'ERA',
+         '.277': '277',
+         '.277ibr': '277IBR',
+         '.277ebr': '277EBR',
+         '.dpt': 'DPT',
+         '.ebt': 'EBT',
+         '.ibt': 'IBT'
+     }
+
+     for ext, file_type in file_extensions.items():
+         if file_path.endswith(ext):
+             return file_type
+
+     log("Unsupported file type for file: {}".format(file_path))
+     return None

  def read_file(file_path):
      with open(file_path, 'r') as file:
@@ -59,86 +95,209 @@ def read_file(file_path):
      return content

  def write_records_to_csv(records, output_file_path):
-     fieldnames = ['Claim #', 'Status', 'Patient', 'Proc.', 'Serv.', 'Allowed', 'Paid', 'Pt Resp', 'Charged']
+     if not records:
+         log("No records to write.", 'error')
+         return
+
+     # Use the global variable for fieldnames
+     fieldnames = new_fieldnames
+
      with open(output_file_path, 'w', newline='') as csvfile:
          writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
          writer.writeheader()
          for record in records:
-             writer.writerow(record)
+             writer.writerow(record.to_dict())

  def format_records(records, file_type):
      formatted_records = []
-     for record in records:
+     seen_claim_numbers = set() # Set to track unique claim numbers
+
+     for i, record in enumerate(records):
+         # Determine the claim number based on the file type
          if file_type == 'IBT':
-             formatted_record = {
-                 'Claim #': record.get('Patient Control Number', ''),
-                 'Status': record.get('Status', ''),
-                 'Patient': record.get('Patient Name', ''),
-                 'Proc.': format_date(record.get('To Date', '')),
-                 'Serv.': format_date(record.get('From Date', '')),
-                 'Allowed': '',
-                 'Paid': '',
-                 'Pt Resp': '',
-                 'Charged': record.get('Charge', '')
-             }
+             claim_number = record.get('Patient Control Number', '')
+         elif file_type == 'ERA':
+             claim_number = record.get('Chart Number', '')
+         elif file_type == 'EBT':
+             claim_number = record.get('Patient Control Number', '')
          else:
-             formatted_record = {
-                 'Claim #': record.get('Chart Number', record.get('Claim Status Tracking #', record.get('Claim #', ''))),
-                 'Status': record.get('claimStatus', record.get('Status', '')),
-                 'Patient': record.get('memberInfo', {}).get('ptntFn', '') + ' ' + record.get('memberInfo', {}).get('ptntLn', '') if 'memberInfo' in record else record.get('Patient', ''),
-                 'Proc.': format_date(record.get('processed_date', record.get('Received Date', ''))),
-                 'Serv.': format_date(record.get('firstSrvcDt', record.get('Date of Service', ''))),
-                 'Allowed': record.get('totalAllowdAmt', record.get('Allowed Amount', '')),
-                 'Paid': record.get('totalPaidAmt', record.get('Amount Paid', '')),
-                 'Pt Resp': record.get('totalPtntRespAmt', record.get('Patient Responsibility', '')),
-                 'Charged': record.get('totalChargedAmt', record.get('Charge', ''))
-             }
-         formatted_records.append(formatted_record)
+             claim_number = '' # Default to empty if file type is not recognized
+
+         # Skip records without a claim number
+         if not claim_number:
+             log("Record {} missing claim_number. Skipping.".format(i + 1), level="WARNING")
+             continue
+
+         # Check for duplicates
+         if claim_number in seen_claim_numbers:
+             log("Duplicate claim_number {} found at record {}. Skipping.".format(claim_number, i + 1), level="DEBUG")
+             continue
+
+         seen_claim_numbers.add(claim_number) # Add claim number to the set. BUG This isn't successfully removing duplicate records. -- it might be across files and not within them.
+
+         unified_record = UnifiedRecord()
+
+         # Populate the unified_record based on the file type
+         if file_type == 'IBT':
+             unified_record.claim_number = claim_number
+             unified_record.status = record.get('Status', '')
+             unified_record.patient = record.get('Patient Name', '')
+             unified_record.proc_date = format_date(record.get('To Date', ''))
+             unified_record.serv_date = format_date(record.get('From Date', ''))
+             unified_record.charged = record.get('Charge', '')
+
+         elif file_type == 'ERA':
+             unified_record.claim_number = claim_number
+             unified_record.status = record.get('claimStatus', '')
+             unified_record.patient = record.get('Patient', '')
+             unified_record.proc_date = format_date(record.get('processed_date', ''))
+             unified_record.serv_date = format_date(record.get('Date of Service', ''))
+             unified_record.allowed = record.get('Allowed Amount', '')
+             unified_record.paid = record.get('Amount Paid', '')
+             unified_record.pt_resp = record.get('Patient Responsibility', '')
+             unified_record.charged = record.get('Charge', '')
+
+         elif file_type == 'EBT':
+             if 'Patient Control Number' in record:
+                 unified_record.claim_number = claim_number
+                 message_type = record.get('Message Type', '').upper()
+                 status_mapping = {
+                     'A': 'Accepted',
+                     'R': 'Rejected',
+                 }
+                 # unified_record.status = status_mapping.get(message_type, message_type)
+                 unified_record.status = record.get('Message', '')
+                 unified_record.payer = record.get('Message Initiator', '')
+                 unified_record.patient = record.get('Patient Name', '')
+                 unified_record.proc_date = format_date(record.get('To Date', ''))
+                 unified_record.serv_date = format_date(record.get('From Date', ''))
+                 unified_record.allowed = ''
+                 unified_record.paid = ''
+                 unified_record.pt_resp = ''
+                 unified_record.charged = record.get('Charge', '')
+                 log("Formatted EBT Record {}: {}".format(i + 1, unified_record), level="DEBUG")
+             else:
+                 log("Skipped non-claim EBT Record {}: {}".format(i + 1, record), level="DEBUG")
+                 continue
+
+         # Append the unified record to the list
+         formatted_records.append(unified_record)
+
      return formatted_records

  def format_date(date_str):
-     if date_str and len(date_str) >= 10:
-         return date_str[5:7] + '-' + date_str[8:10] # Assuming date format is YYYY-MM-DD, this returns MM-DD
+     if date_str and len(date_str) >= 8:
+         return date_str[4:6] + '-' + date_str[6:8] # Adjusted to match sample date format 'YYYYMMDD'
      return ''

  def display_table(records):
-     # Define the new fieldnames and their respective widths
-     new_fieldnames = ['Claim #', 'Status', 'Patient', 'Proc.', 'Serv.', 'Allowed', 'Paid', 'Pt Resp', 'Charged']
-     col_widths = {field: len(field) for field in new_fieldnames}
+     """
+     Display records in a formatted table after deduplication.

-     # Update column widths based on records
+     Args:
+         records (list): List of UnifiedRecord instances.
+     """
+     # Deduplicate records before displaying
+     records = deduplicate_records(records)
+
+     if not records:
+         print("No records to display.")
+         return
+
+     # Determine which fields have at least one non-empty value
+     used_fields = [field for field in new_fieldnames if any(str(record.get(field, '')).strip() for record in records)]
+
+     if not used_fields:
+         print("No data to display.")
+         return
+
+     # Calculate column widths based on the longest item in each used column
+     col_widths = {field: len(field) for field in used_fields}
+
      for record in records:
-         for field in new_fieldnames:
+         for field in used_fields:
              col_widths[field] = max(col_widths[field], len(str(record.get(field, ''))))
-
+
      # Create table header
-     header = " | ".join("{:<{}}".format(field, col_widths[field]) for field in new_fieldnames)
+     header = " | ".join("{:<{}}".format(field, col_widths[field]) for field in used_fields)
      print(header)
      print("-" * len(header))
-
+
      # Create table rows
      for record in records:
-         row = " | ".join("{:<{}}".format(str(record.get(field, '')), col_widths[field]) for field in new_fieldnames)
+         row = " | ".join("{:<{}}".format(str(record.get(field, '')), col_widths[field]) for field in used_fields)
          print(row)

  def display_consolidated_records(records):
-     if not records:
+     """
+     Display the consolidated records in a formatted table.
+     Removes any records that are completely empty or only contain whitespace.
+     If no valid records are found, displays a message to that effect.
+     """
+     # Deduplicate records before displaying
+     records = deduplicate_records(records)
+
+     # If records are UnifiedRecord instances, convert them to dictionaries
+     if records and isinstance(records[0], UnifiedRecord):
+         dict_records = [record.to_dict() for record in records]
+     elif records and isinstance(records[0], dict):
+         dict_records = records
+     else:
+         log("Invalid record format for display.", level="ERROR")
+         return
+
+     # Filter out records that are completely empty or only contain whitespace
+     filtered_records = [
+         record for record in dict_records
+         if any(str(record.get(field, '')).strip() for field in new_fieldnames)
+     ]
+
+     if not filtered_records:
+         print("No valid records to display after filtering empty rows.")
          return

-     new_fieldnames = ['Claim #', 'Status', 'Patient', 'Proc.', 'Serv.', 'Allowed', 'Paid', 'Pt Resp', 'Charged']
-     col_widths = {field: len(field) for field in new_fieldnames}
+     # Determine which fields have at least one non-empty value
+     used_fields = [field for field in new_fieldnames if any(str(record.get(field, '')).strip() for record in filtered_records)]

-     for record in records:
-         for field in new_fieldnames:
+     if not used_fields:
+         print("No data to display.")
+         return
+
+     # Calculate column widths based on the longest item in each used column
+     col_widths = {field: len(field) for field in used_fields}
+
+     for record in filtered_records:
+         for field in used_fields:
              col_widths[field] = max(col_widths[field], len(str(record.get(field, ''))))
-
-     header = " | ".join("{:<{}}".format(field, col_widths[field]) for field in new_fieldnames)
+
+     # Print header
+     header = " | ".join("{:<{}}".format(field, col_widths[field]) for field in used_fields)
      print(header)
      print("-" * len(header))
+
+     # Print each row
+     for record in filtered_records:
+         row = " | ".join("{:<{}}".format(str(record.get(field, '')), col_widths[field]) for field in used_fields)
+         print(row)

+ def deduplicate_records(records):
+     """
+     Remove duplicate records based on claim_number.
+
+     Args:
+         records (list): List of UnifiedRecord instances.
+
+     Returns:
+         list: List of unique UnifiedRecord instances.
+     """
+     unique_records_dict = {}
      for record in records:
-         row = " | ".join("{:<{}}".format(str(record.get(field, '')), col_widths[field]) for field in new_fieldnames)
-         print(row)
+         if record.claim_number not in unique_records_dict:
+             unique_records_dict[record.claim_number] = record
+         else:
+             log("Duplicate record found for claim_number {}. Skipping.".format(record.claim_number), "DEBUG")
+
+     return list(unique_records_dict.values())

  if __name__ == "__main__":
      config, _ = load_configuration()
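
Worth noting from the hunk above: format_date now slices positions 4:6 and 6:8, so it expects compact 'YYYYMMDD' input rather than the 'YYYY-MM-DD' form the old slice assumed. A quick illustration (the sample values are made up; the function body is copied from the diff):

    def format_date(date_str):
        # New behaviour from the diff: compact YYYYMMDD in, MM-DD out
        if date_str and len(date_str) >= 8:
            return date_str[4:6] + '-' + date_str[6:8]
        return ''

    print(format_date('20240925'))    # -> 09-25
    print(format_date('2024-09-25'))  # -> -0-9- (dashed ISO input is no longer handled)

So callers that still pass ISO-style dates will silently get garbage rather than an error.
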
@@ -152,14 +311,17 @@ if __name__ == "__main__":
      all_records = []
      for file_path in files:
          try:
-             records = process_file(file_path, output_directory, return_records=True)
+             records = process_decoded_file(file_path, output_directory, return_records=True)
              all_records.extend(records)
          except Exception as e:
              log("Failed to process {}: {}".format(file_path, e), 'error')

-     display_consolidated_records(all_records)
+     # Call the deduplication function
+     unique_records = deduplicate_records(all_records)
+
+     display_consolidated_records([record.to_dict() for record in unique_records])

      if input("Do you want to export the consolidated records to a CSV file? (y/n): ").strip().lower() == 'y':
          consolidated_csv_path = os.path.join(output_directory, "Consolidated_Records.csv")
-         write_records_to_csv(all_records, consolidated_csv_path)
+         write_records_to_csv(unique_records, consolidated_csv_path)
          log("Consolidated records written to {}".format(consolidated_csv_path))
MediLink_Deductible.py
@@ -41,12 +41,10 @@ summary = {
  # print("\nDebug JSON Summary:")
  # print(json.dumps(summary, indent=2))
  """
+ # MediLink_Deductible.py
  import MediLink_API_v3
- import os
- import sys
+ import os, sys, requests, json
  from datetime import datetime
- import requests
- import json

  try:
      from MediLink import MediLink_ConfigLoader
@@ -54,7 +52,8 @@ except ImportError:
      import MediLink_ConfigLoader

      project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
-     sys.path.append(project_dir)
+     if project_dir not in sys.path:
+         sys.path.append(project_dir)

      try:
          from MediBot import MediBot_Preprocessor_lib
@@ -76,7 +75,7 @@ if provider_last_name == 'Unknown':
      MediLink_ConfigLoader.log("Warning: provider_last_name was not found in the configuration.", level="WARNING")

  # Define the list of payer_id's to iterate over
- payer_ids = ['87726', '03432', '96385', '95467', '86050', '86047', '95378', '06111', '37602']
+ payer_ids = ['87726', '03432', '96385', '95467', '86050', '86047', '95378', '06111', '37602'] # United Healthcare.
  # Get the latest CSV
  CSV_FILE_PATH = config.get('CSV_FILE_PATH', "")
@@ -163,7 +162,7 @@ def display_eligibility_info(data, dob, member_id, output_file):
      patient_name = "{} {} {}".format(firstName, middleName, lastName).strip()[:20]

      # Display patient information in a table row format
-     table_row = "{:<20} | {:<10} | {:<40} | {:<5} | {:<15} | {:<15}".format(
+     table_row = "{:<20} | {:<10} | {:<40} | {:<5} | {:<14} | {:<14}".format(
          patient_name, dob, ins_insuranceType, ins_payerID, policy_status, remaining_amount)
      output_file.write(table_row + "\n")
      print(table_row) # Print to console for progressive display
@@ -171,7 +170,7 @@ def display_eligibility_info(data, dob, member_id, output_file):
  # Print the table header once before entering the loop
  output_file_path = os.path.join(os.getenv('TEMP'), 'eligibility_report.txt')
  with open(output_file_path, 'w') as output_file:
-     table_header = "{:<20} | {:<10} | {:<40} | {:<5} | {:<15} | {:<15}".format(
+     table_header = "{:<20} | {:<10} | {:<40} | {:<5} | {:<14} | {:<14}".format(
          "Patient Name", "DOB", "Insurance Type", "PayID", "Policy Status", "Remaining Amt")
      output_file.write(table_header + "\n")
      output_file.write("-" * len(table_header) + "\n")