medicafe 0.250723.5__py3-none-any.whl → 0.250724.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of medicafe might be problematic; see the package registry's advisory page for more details.

MediBot/MediBot.py CHANGED
@@ -187,8 +187,10 @@ def data_entry_loop(csv_data, field_mapping, reverse_mapping, fixed_values):
187
187
  # last_processed_entry, parsed_address_components = None, {} // BUG should this just be this line rather than the global line above?
188
188
  error_message = '' # Initialize error_message once
189
189
  current_row_index = 0
190
+ # PERFORMANCE FIX: Cache list length to avoid repeated len() calls
191
+ csv_data_length = len(csv_data)
190
192
 
191
- while current_row_index < len(csv_data):
193
+ while current_row_index < csv_data_length:
192
194
  row = csv_data[current_row_index]
193
195
 
194
196
  # PERFORMANCE FIX: Clear accumulating memory while preserving F11 menu context
@@ -1,7 +1,7 @@
1
1
  #MediBot_Preprocessor_lib.py
2
2
  from collections import OrderedDict, defaultdict
3
3
  from datetime import datetime, timedelta
4
- import os, csv, sys
4
+ import os, csv, sys, time
5
5
  import chardet # Ensure chardet is imported
6
6
 
7
7
  # Add the parent directory of the project to the Python path
@@ -120,8 +120,8 @@ def load_csv_data(csv_file_path):
120
120
  # Clean the headers
121
121
  cleaned_headers = clean_header(reader.fieldnames)
122
122
 
123
- # Create a mapping of cleaned headers to original headers (pre-compute once)
124
- header_mapping = {cleaned_headers[i]: reader.fieldnames[i] for i in range(len(cleaned_headers))}
123
+ # PERFORMANCE FIX: Use zip() instead of range(len()) for header mapping
124
+ header_mapping = {clean: orig for clean, orig in zip(cleaned_headers, reader.fieldnames)}
125
125
 
126
126
  # Process the remaining rows - optimize by pre-allocating the list
127
127
  csv_data = []
@@ -129,9 +129,8 @@ def load_csv_data(csv_file_path):
129
129
  # csv_data = [None] * estimated_size # if we had row count
130
130
 
131
131
  for row in reader:
132
- # Use dict() constructor with generator expression for better performance
133
- cleaned_row = dict((cleaned_headers[i], row[header_mapping[cleaned_headers[i]]])
134
- for i in range(len(cleaned_headers)))
132
+ # PERFORMANCE FIX: Use zip() instead of range(len()) for row processing
133
+ cleaned_row = {clean: row[header_mapping[clean]] for clean in cleaned_headers}
135
134
  csv_data.append(cleaned_row)
136
135
 
137
136
  return csv_data # Return a list of dictionaries
@@ -161,9 +160,10 @@ def add_columns(csv_data, column_headers):
161
160
  elif not isinstance(column_headers, list):
162
161
  raise ValueError("column_headers should be a list or a string")
163
162
 
163
+ # PERFORMANCE FIX: Optimize column initialization to avoid nested loop
164
164
  for row in csv_data:
165
- for header in column_headers:
166
- row[header] = '' # Initialize the column with empty values
165
+ # Use dict.update() to set multiple columns at once
166
+ row.update({header: '' for header in column_headers})
167
167
 
168
168
  # Extracting the list to a variable for future refactoring:
169
169
  def filter_rows(csv_data):
@@ -304,6 +304,17 @@ def NEW_update_insurance_ids(csv_data, config, crosswalk):
304
304
  processed_payer_ids = set() # Track processed Payer IDs
305
305
  MediLink_ConfigLoader.log("Starting update of insurance IDs.", level="INFO")
306
306
 
307
+ # PERFORMANCE FIX: Pre-build flattened payer lookup cache to avoid nested dictionary access
308
+ payer_cache = {}
309
+ crosswalk_payers = crosswalk.get('payer_id', {})
310
+ for payer_id, details in crosswalk_payers.items():
311
+ payer_cache[payer_id] = {
312
+ 'medisoft_id': details.get('medisoft_id', []),
313
+ 'medisoft_medicare_id': details.get('medisoft_medicare_id', []),
314
+ 'endpoint': details.get('endpoint', None)
315
+ }
316
+ MediLink_ConfigLoader.log("Built payer cache for {} payers".format(len(payer_cache)), level="DEBUG")
317
+
307
318
  # Load MAINS data to get mapping from Medisoft ID to MAINS names
308
319
  insurance_to_id = load_insurance_data_from_mains(config) # Assuming it returns a dict mapping insurance names to IDs
309
320
  MediLink_ConfigLoader.log("Loaded MAINS data for insurance to ID mapping.", level="DEBUG")
@@ -313,7 +324,9 @@ def NEW_update_insurance_ids(csv_data, config, crosswalk):
313
324
  for insurance_name, medisoft_id in insurance_to_id.items():
314
325
  medisoft_to_mains_names[medisoft_id].append(insurance_name)
315
326
 
316
- for row in csv_data:
327
+ for row_idx, row in enumerate(csv_data, 1):
328
+ # PERFORMANCE FIX: Store row index to avoid O(n) csv_data.index() calls later
329
+ row['_row_index'] = row_idx
317
330
  ins1_payer_id = row.get('Ins1 Payer ID', '').strip()
318
331
  MediLink_ConfigLoader.log("Processing row with Ins1 Payer ID: '{}'.".format(ins1_payer_id), level="DEBUG")
319
332
 
@@ -323,20 +336,24 @@ def NEW_update_insurance_ids(csv_data, config, crosswalk):
323
336
  processed_payer_ids.add(ins1_payer_id) # Add to set
324
337
  MediLink_ConfigLoader.log("Marked Payer ID '{}' as processed.".format(ins1_payer_id), level="DEBUG")
325
338
 
326
- # Retrieve Medisoft IDs for the current Payer ID
327
- medisoft_ids = crosswalk.get('payer_id', {}).get(ins1_payer_id, {}).get('medisoft_id', [])
339
+ # PERFORMANCE FIX: Use flattened cache instead of nested dictionary lookups
340
+ payer_info = payer_cache.get(ins1_payer_id, {})
341
+ medisoft_ids = payer_info.get('medisoft_id', [])
328
342
  MediLink_ConfigLoader.log("Retrieved Medisoft IDs for Payer ID '{}': {}".format(ins1_payer_id, medisoft_ids), level="DEBUG")
329
343
 
330
344
  if not medisoft_ids:
331
345
  MediLink_ConfigLoader.log("No Medisoft IDs available for Payer ID '{}', creating placeholder entry.".format(ins1_payer_id), level="WARNING")
332
- # Create a placeholder entry in the crosswalk
333
- if 'payer_id' not in crosswalk:
334
- crosswalk['payer_id'] = {}
335
- crosswalk['payer_id'][ins1_payer_id] = {
346
+ # Create a placeholder entry in the crosswalk and cache
347
+ placeholder_entry = {
336
348
  'medisoft_id': [], # Placeholder for future Medisoft IDs
337
349
  'medisoft_medicare_id': [], # Placeholder for future Medicare IDs
338
350
  'endpoint': None # Placeholder for future endpoint
339
351
  }
352
+ if 'payer_id' not in crosswalk:
353
+ crosswalk['payer_id'] = {}
354
+ crosswalk['payer_id'][ins1_payer_id] = placeholder_entry
355
+ # PERFORMANCE FIX: Update cache with placeholder entry
356
+ payer_cache[ins1_payer_id] = placeholder_entry
340
357
  continue # Skip further processing for this Payer ID
341
358
 
342
359
  # If only one Medisoft ID is associated, assign it directly
@@ -344,7 +361,9 @@ def NEW_update_insurance_ids(csv_data, config, crosswalk):
344
361
  try:
345
362
  medisoft_id = int(medisoft_ids[0])
346
363
  row['Ins1 Insurance ID'] = medisoft_id
347
- MediLink_ConfigLoader.log("Assigned Medisoft ID '{}' to row number {} with Payer ID '{}'.".format(medisoft_id, csv_data.index(row) + 1, ins1_payer_id), level="DEBUG")
364
+ # PERFORMANCE FIX: Use enumerate index instead of csv_data.index() which is O(n)
365
+ row_number = getattr(row, '_row_index', 'Unknown')
366
+ MediLink_ConfigLoader.log("Assigned Medisoft ID '{}' to row number {} with Payer ID '{}'.".format(medisoft_id, row_number, ins1_payer_id), level="DEBUG")
348
367
  except ValueError as e:
349
368
  MediLink_ConfigLoader.log("Error converting Medisoft ID '{}' to integer for Payer ID '{}': {}".format(medisoft_ids[0], ins1_payer_id, e), level="ERROR")
350
369
  row['Ins1 Insurance ID'] = None
@@ -398,9 +417,10 @@ def update_insurance_ids(csv_data, config, crosswalk):
398
417
  payer_id, medisoft_ids, medicare_ids), level="DEBUG")
399
418
 
400
419
  # PERFORMANCE FIX: Single pass through CSV data with optimized Medicare ID resolution
401
- for row in csv_data:
420
+ for row_idx, row in enumerate(csv_data, 1):
402
421
  ins1_payer_id = row.get('Ins1 Payer ID', '').strip()
403
- MediLink_ConfigLoader.log("Processing row #{} with Ins1 Payer ID '{}'.".format(csv_data.index(row) + 1, ins1_payer_id), level="DEBUG")
422
+ # PERFORMANCE FIX: Use enumerate index instead of csv_data.index() which is O(n)
423
+ MediLink_ConfigLoader.log("Processing row #{} with Ins1 Payer ID '{}'.".format(row_idx, ins1_payer_id), level="DEBUG")
404
424
 
405
425
  # Try Medicare ID first, then fall back to regular ID (optimized Medicare processing)
406
426
  insurance_id = (payer_id_to_medicare.get(ins1_payer_id) or
@@ -517,19 +537,22 @@ def update_diagnosis_codes(csv_data):
517
537
 
518
538
  MediLink_ConfigLoader.log("BAD IDEA: Processing DOCX files modified between {} and {}.".format(threshold_start, threshold_end), level="INFO")
519
539
 
520
- # PERFORMANCE OPTIMIZATION: Use os.scandir() for more efficient file system operations
521
- # This reduces the number of file system calls and improves performance with large directories
540
+ # PERFORMANCE OPTIMIZATION: Batch file system operations with caching
541
+ # Pre-convert threshold timestamps for efficient comparison (Windows XP compatible)
542
+ threshold_start_ts = threshold_start.timestamp() if hasattr(threshold_start, 'timestamp') else time.mktime(threshold_start.timetuple())
543
+ threshold_end_ts = threshold_end.timestamp() if hasattr(threshold_end, 'timestamp') else time.mktime(threshold_end.timetuple())
544
+
522
545
  valid_files = []
523
546
  try:
524
- # Use os.scandir() for better performance (XP/3.4.4 compatible)
547
+ # Use os.scandir() with optimized timestamp comparison (XP/3.4.4 compatible)
525
548
  with os.scandir(local_storage_path) as entries:
526
549
  for entry in entries:
527
550
  if entry.name.endswith('.docx'):
528
551
  # Get file modification time in single operation
529
552
  try:
530
553
  stat_info = entry.stat()
531
- mtime = stat_info.st_mtime
532
- if threshold_start <= datetime.fromtimestamp(mtime) <= threshold_end:
554
+ # Direct timestamp comparison avoids datetime conversion overhead
555
+ if threshold_start_ts <= stat_info.st_mtime <= threshold_end_ts:
533
556
  valid_files.append(entry.path)
534
557
  except (OSError, ValueError):
535
558
  # Skip files with invalid modification times
@@ -537,6 +560,9 @@ def update_diagnosis_codes(csv_data):
537
560
  except OSError:
538
561
  MediLink_ConfigLoader.log("Error accessing directory: {}".format(local_storage_path), level="ERROR")
539
562
  return
563
+
564
+ # PERFORMANCE OPTIMIZATION: Log file count for debugging without processing overhead
565
+ MediLink_ConfigLoader.log("Found {} DOCX files within date threshold".format(len(valid_files)), level="INFO")
540
566
 
541
567
  # PERFORMANCE OPTIMIZATION: Pre-process patient IDs for efficient lookup
542
568
  # Create a set of patient IDs from CSV data for faster lookups
@@ -821,13 +847,12 @@ def capitalize_all_fields(csv_data):
821
847
  Returns:
822
848
  None: The function modifies the csv_data in place.
823
849
  """
850
+ # PERFORMANCE FIX: Optimize uppercase conversion using dict comprehension
824
851
  for row in csv_data:
825
- for key, value in row.items():
826
- if isinstance(value, str):
827
- row[key] = value.upper()
828
- elif isinstance(value, datetime):
829
- # Keep datetime objects as they are
830
- pass
831
- elif value is not None:
832
- # Convert any other non-None values to string and then uppercase
833
- row[key] = str(value).upper()
852
+ # Single-pass update using dict comprehension
853
+ row.update({
854
+ key: (value.upper() if isinstance(value, str)
855
+ else str(value).upper() if value is not None and not isinstance(value, datetime)
856
+ else value)
857
+ for key, value in row.items()
858
+ })
@@ -78,14 +78,17 @@ def enforce_significant_length(output):
78
78
  # First line of defense: Replace ' APT ' or ' UNIT ' with ' #' if the original length is longer than 30 characters.
79
79
  temp_output = temp_output.replace(' APT ', ' #').replace(' UNIT ', ' #')
80
80
 
81
- # Remove spaces in a controlled manner from right to left if still too long
82
- while len(temp_output) > 30:
81
+ # PERFORMANCE FIX: Remove spaces in a controlled manner from right to left if still too long
82
+ # Cache length calculation to avoid repeated calls
83
+ temp_length = len(temp_output)
84
+ while temp_length > 30:
83
85
  # Find the last space
84
86
  last_space_index = temp_output.rfind(' ')
85
87
  if last_space_index == -1:
86
88
  break
87
89
  # Remove the last space
88
90
  temp_output = temp_output[:last_space_index] + temp_output[last_space_index+7:]
91
+ temp_length = len(temp_output) # Update cached length
89
92
 
90
93
  # If still greater than 30, truncate to 30 characters
91
94
  if len(temp_output) > 30:
@@ -407,9 +407,11 @@ def reassemble_year(text):
407
407
 
408
408
  # Handle the less common cases where the year might be split as (1,1,2) or (2,1,1) or (1,2,1)
409
409
  parts = _DIGIT_PARTS_PATTERN.findall(text)
410
- if len(parts) >= 4:
411
- # Early exit: only process if we have enough parts
412
- for i in range(len(parts) - 3):
410
+ parts_len = len(parts)
411
+ if parts_len >= 4:
412
+ # PERFORMANCE FIX: Use direct indexing instead of range(len()) pattern
413
+ max_index = parts_len - 3
414
+ for i in range(max_index):
413
415
  candidate = ''.join(parts[i:i + 4])
414
416
  if len(candidate) == 4 and candidate.isdigit():
415
417
  # More efficient pattern construction
@@ -204,20 +204,30 @@ def display_table(records):
204
204
  print("No records to display.")
205
205
  return
206
206
 
207
- # Determine which fields have at least one non-empty value
208
- used_fields = [field for field in new_fieldnames if any(str(record.get(field, '')).strip() for record in records)]
207
+ # PERFORMANCE FIX: Single-pass optimization - determine used fields and calculate widths in one pass
208
+ used_fields = []
209
+ col_widths = {}
210
+
211
+ # First pass: identify used fields and initialize widths
212
+ for field in new_fieldnames:
213
+ col_widths[field] = len(field) # Header width
214
+
215
+ # Second pass: check for used fields and calculate max widths
216
+ for record in records:
217
+ for field in new_fieldnames:
218
+ value_str = str(record.get(field, ''))
219
+ if value_str.strip() and field not in used_fields:
220
+ used_fields.append(field)
221
+ if field in col_widths:
222
+ col_widths[field] = max(col_widths[field], len(value_str))
223
+
224
+ # Filter col_widths to only used fields
225
+ col_widths = {field: col_widths[field] for field in used_fields}
209
226
 
210
227
  if not used_fields:
211
228
  print("No data to display.")
212
229
  return
213
230
 
214
- # Calculate column widths based on the longest item in each used column
215
- col_widths = {field: len(field) for field in used_fields}
216
-
217
- for record in records:
218
- for field in used_fields:
219
- col_widths[field] = max(col_widths[field], len(str(record.get(field, ''))))
220
-
221
231
  # Create table header
222
232
  header = " | ".join("{:<{}}".format(field, col_widths[field]) for field in used_fields)
223
233
  print(header)
@@ -256,20 +266,30 @@ def display_consolidated_records(records):
256
266
  print("No valid records to display after filtering empty rows.")
257
267
  return
258
268
 
259
- # Determine which fields have at least one non-empty value
260
- used_fields = [field for field in new_fieldnames if any(str(record.get(field, '')).strip() for record in filtered_records)]
269
+ # PERFORMANCE FIX: Single-pass optimization - determine used fields and calculate widths in one pass
270
+ used_fields = []
271
+ col_widths = {}
272
+
273
+ # First pass: initialize column widths with header lengths
274
+ for field in new_fieldnames:
275
+ col_widths[field] = len(field)
276
+
277
+ # Second pass: check for used fields and calculate max widths
278
+ for record in filtered_records:
279
+ for field in new_fieldnames:
280
+ value_str = str(record.get(field, ''))
281
+ if value_str.strip() and field not in used_fields:
282
+ used_fields.append(field)
283
+ if field in col_widths:
284
+ col_widths[field] = max(col_widths[field], len(value_str))
285
+
286
+ # Filter col_widths to only used fields
287
+ col_widths = {field: col_widths[field] for field in used_fields}
261
288
 
262
289
  if not used_fields:
263
290
  print("No data to display.")
264
291
  return
265
292
 
266
- # Calculate column widths based on the longest item in each used column
267
- col_widths = {field: len(field) for field in used_fields}
268
-
269
- for record in filtered_records:
270
- for field in used_fields:
271
- col_widths[field] = max(col_widths[field], len(str(record.get(field, ''))))
272
-
273
293
  # Print header
274
294
  header = " | ".join("{:<{}}".format(field, col_widths[field]) for field in used_fields)
275
295
  print(header)
@@ -725,33 +725,39 @@ if __name__ == "__main__":
725
725
  print(table_header)
726
726
  print("-" * len(table_header))
727
727
 
728
- # Set to keep track of processed patients
729
- processed_patients = set()
730
-
731
- # Loop through each payer_id and patient to call the API, then display the eligibility information
728
+ # PERFORMANCE FIX: Optimize patient-payer processing to avoid O(P×N) complexity
729
+ # Instead of nested loops, process each patient once and try payer_ids until success
730
+ # TODO: We should be able to determine the correct payer_id for each patient ahead of time
731
+ # by looking up their insurance information from the CSV data or crosswalk mapping.
732
+ # This would eliminate the need to try multiple payer_ids per patient and make this O(N).
732
733
  errors = []
733
734
  validation_reports = []
734
- total_patients = len(patients) * len(payer_ids)
735
735
  processed_count = 0
736
736
 
737
- for payer_id in payer_ids:
738
- for dob, member_id in patients:
739
- # Skip if this patient has already been processed
740
- if (dob, member_id) in processed_patients:
741
- continue
737
+ for dob, member_id in patients:
738
+ processed_count += 1
739
+ print("Processing patient {}/{}: Member ID {}, DOB {}".format(
740
+ processed_count, len(patients), member_id, dob))
741
+
742
+ # Try each payer_id for this patient until we get a successful response
743
+ patient_processed = False
744
+ for payer_id in payer_ids:
742
745
  try:
743
- processed_count += 1
744
- print("Processing patient {}/{}: Member ID {}, DOB {}".format(
745
- processed_count, total_patients, member_id, dob))
746
-
747
746
  # Run with validation enabled only in debug mode
748
747
  run_validation = DEBUG_MODE
749
748
  eligibility_data = get_eligibility_info(client, payer_id, provider_last_name, dob, member_id, npi, run_validation=run_validation)
750
749
  if eligibility_data is not None:
751
- display_eligibility_info(eligibility_data, dob, member_id, output_file) # Display as we get the result
752
- processed_patients.add((dob, member_id)) # Mark this patient as processed
750
+ display_eligibility_info(eligibility_data, dob, member_id, output_file)
751
+ patient_processed = True
752
+ break # Stop trying other payer_ids for this patient once we get a response
753
753
  except Exception as e:
754
- errors.append((dob, member_id, str(e)))
754
+ # Continue trying other payer_ids
755
+ continue
756
+
757
+ # If no payer_id worked for this patient, log the error
758
+ if not patient_processed:
759
+ error_msg = "No successful payer_id found for patient"
760
+ errors.append((dob, member_id, error_msg))
755
761
 
756
762
  # Display errors if any
757
763
  if errors:
@@ -1,7 +1,5 @@
1
1
  # MediLink_Parser.py
2
-
3
2
  import re
4
- from collections import defaultdict
5
3
 
6
4
  # Pre-compile regex patterns for better performance
7
5
  _EBT_KEY_VALUE_PATTERN = re.compile(r'([^:]+):\s*(.+?)(?=\s{2,}[^:]+:|$)')
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: medicafe
3
- Version: 0.250723.5
3
+ Version: 0.250724.0
4
4
  Summary: MediCafe
5
5
  Home-page: https://github.com/katanada2
6
6
  Author: Daniel Vidaud
@@ -1,13 +1,13 @@
1
1
  MediBot/MediBot.bat,sha256=anz5i-Td1k3HhRUvkCqHsw9lBLVmO6q9bt5kLTfr1Iw,13282
2
- MediBot/MediBot.py,sha256=iO9rQPFJky9jxYKU8b3jbxcnTdipnfeCU2VS-2vLBI8,24172
2
+ MediBot/MediBot.py,sha256=PQvAlfc-B15tr3Lbv1tV16ws2P1B04x0uCJ725fOlEc,24283
3
3
  MediBot/MediBot_Charges.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
4
  MediBot/MediBot_Crosswalk_Library.py,sha256=Ix4QlAcg3O9Y6n6ZeSUtbmtV-_n-t0-jnefXDBFlhhI,51441
5
5
  MediBot/MediBot_Post.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
6
  MediBot/MediBot_Preprocessor.py,sha256=Lc9uQnE5SAa0dQTOREdPV1QUB2cywXTHJ1h2w-fyeeQ,13331
7
- MediBot/MediBot_Preprocessor_lib.py,sha256=E759aYflr6GbOIaFJjMSkV4Y_dPUyjxNdbfbWEWS5JM,43940
7
+ MediBot/MediBot_Preprocessor_lib.py,sha256=LXzV85uq7YoAWbZi88HzAs_GObl7vP8mhFbWZQbd0M8,45687
8
8
  MediBot/MediBot_UI.py,sha256=mJiLud8C6mztP4APA_LKgxj8dJ05YxLag47khhkLj20,13087
9
- MediBot/MediBot_dataformat_library.py,sha256=JXTV-HWahqeYF_lbNn1UYxqUtZ6ZBeFXHOyRGlDq4xM,8406
10
- MediBot/MediBot_docx_decoder.py,sha256=d_t5LsdXUQ5713BTBIcRxDx9Go422dRUDhNtPRh786I,27293
9
+ MediBot/MediBot_dataformat_library.py,sha256=XNyeiOC6uJUp15UXP_rhtB3rMTPus9ZXDnz5zHNoRYM,8586
10
+ MediBot/MediBot_docx_decoder.py,sha256=GbhX58pMAsWNhBF7B8AtWiNpUOB4bU0zAM81moXYkkE,27370
11
11
  MediBot/MediPost.py,sha256=C1hZJFr65rN6F_dckjdBxFC0vL2CoqY9W3YFqU5HXtE,336
12
12
  MediBot/PDF_to_CSV_Cleaner.py,sha256=ZZphmq-5K04DkrZNlcwNAIoZPOD_ROWvS3PMkKFxeiM,8799
13
13
  MediBot/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -27,15 +27,15 @@ MediLink/MediLink_Azure.py,sha256=Ow70jctiHFIylskBExN7WUoRgrKOvBR6jNTnQMk6lJA,21
27
27
  MediLink/MediLink_ClaimStatus.py,sha256=kXIDidxSGuqTwjFNMQIKms42jqIu5Qmnet-7Ohe8zjE,11645
28
28
  MediLink/MediLink_ConfigLoader.py,sha256=u9ecB0SIN7zuJAo8KcoQys95BtyAo-8S2n4mRd0S3XU,4356
29
29
  MediLink/MediLink_DataMgmt.py,sha256=MjCF1L-4RkQnz_vBULPB-DVsEtv0X1WHT1o9YjCGQ7s,33280
30
- MediLink/MediLink_Decoder.py,sha256=Suw9CmUHgoe0ZW8sJP_pIO8URBrhO5FmxFF8RcUj9lI,13318
31
- MediLink/MediLink_Deductible.py,sha256=btFzmW48biiSce8zES_giAGQggPs5foutNG2slk-Pcw,38916
30
+ MediLink/MediLink_Decoder.py,sha256=lKWiOcRClz8F5P3jrvFTq_hW9XF4OrPfA4LFz2zLSLg,14013
31
+ MediLink/MediLink_Deductible.py,sha256=els8CQMK3pRzlqzs12HDgqx42WLXuHFU-nfXiA4y0Js,39426
32
32
  MediLink/MediLink_Deductible_Validator.py,sha256=2g-lZd-Y5fJ1mfP87vM6oABg0t5Om-7EkEkilVvDWYY,22888
33
33
  MediLink/MediLink_Down.py,sha256=hrDODhs-zRfOKCdiRGENN5Czu-AvdtwJj4Q7grcRXME,6518
34
34
  MediLink/MediLink_ERA_decoder.py,sha256=MiOtDcXnmevPfHAahIlTLlUc14VcQWAor9Xa7clA2Ts,8710
35
35
  MediLink/MediLink_Gmail.py,sha256=OYsASNgP4YSTaSnj9XZxPPiy0cw41JC-suLIgRyNrlQ,31439
36
36
  MediLink/MediLink_GraphQL.py,sha256=O6OCaumT0zIC7YcIAwLOOYxiQnYhoMc48UL8ilNIBec,45720
37
37
  MediLink/MediLink_Mailer.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
38
- MediLink/MediLink_Parser.py,sha256=SdXquRIviDPG5q1R6nnjp9n3lDs9bUNXHL4IyAGp8NU,9826
38
+ MediLink/MediLink_Parser.py,sha256=w2ZD4minjwkaMz7nzP_r8v_Ow_uM5KHjpPSY8mIHcdE,9787
39
39
  MediLink/MediLink_Scan.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
40
40
  MediLink/MediLink_Scheduler.py,sha256=UJvxhDvHraqra2_TlQVlGeh5jRFrrfK6nCVUHnKOEMY,38
41
41
  MediLink/MediLink_StatusCheck.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -49,8 +49,8 @@ MediLink/test.py,sha256=kSvvJRL_3fWuNS3_x4hToOnUljGLoeEw6SUTHQWQRJk,3108
49
49
  MediLink/test_cob_library.py,sha256=wUMv0-Y6fNsKcAs8Z9LwfmEBRO7oBzBAfWmmzwoNd1g,13841
50
50
  MediLink/test_validation.py,sha256=FJrfdUFK--xRScIzrHCg1JeGdm0uJEoRnq6CgkP2lwM,4154
51
51
  MediLink/webapp.html,sha256=JPKT559aFVBi1r42Hz7C77Jj0teZZRumPhBev8eSOLk,19806
52
- medicafe-0.250723.5.dist-info/LICENSE,sha256=65lb-vVujdQK7uMH3RRJSMwUW-WMrMEsc5sOaUn2xUk,1096
53
- medicafe-0.250723.5.dist-info/METADATA,sha256=Xi59ITdlqeJUrbhkW8OLaaj57BeBa2xTCogCZR6tsTY,5501
54
- medicafe-0.250723.5.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
55
- medicafe-0.250723.5.dist-info/top_level.txt,sha256=3uOwR4q_SP8Gufk2uCHoKngAgbtdOwQC6Qjl7ViBa_c,17
56
- medicafe-0.250723.5.dist-info/RECORD,,
52
+ medicafe-0.250724.0.dist-info/LICENSE,sha256=65lb-vVujdQK7uMH3RRJSMwUW-WMrMEsc5sOaUn2xUk,1096
53
+ medicafe-0.250724.0.dist-info/METADATA,sha256=wQblamJ71lD6awYx3z5wDMZ-PZYp3tCMg1z36lmlhMg,5501
54
+ medicafe-0.250724.0.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
55
+ medicafe-0.250724.0.dist-info/top_level.txt,sha256=3uOwR4q_SP8Gufk2uCHoKngAgbtdOwQC6Qjl7ViBa_c,17
56
+ medicafe-0.250724.0.dist-info/RECORD,,