medicafe 0.250812.6__py3-none-any.whl → 0.250813.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- MediBot/update_medicafe.py +73 -61
- MediCafe/api_core.py +39 -12
- MediCafe/submission_index.py +44 -0
- MediLink/MediLink_DataMgmt.py +74 -43
- MediLink/MediLink_Decoder.py +38 -9
- MediLink/MediLink_Down.py +358 -21
- MediLink/MediLink_Parser.py +80 -1
- MediLink/MediLink_main.py +101 -1
- {medicafe-0.250812.6.dist-info → medicafe-0.250813.1.dist-info}/METADATA +1 -1
- {medicafe-0.250812.6.dist-info → medicafe-0.250813.1.dist-info}/RECORD +14 -14
- {medicafe-0.250812.6.dist-info → medicafe-0.250813.1.dist-info}/LICENSE +0 -0
- {medicafe-0.250812.6.dist-info → medicafe-0.250813.1.dist-info}/WHEEL +0 -0
- {medicafe-0.250812.6.dist-info → medicafe-0.250813.1.dist-info}/entry_points.txt +0 -0
- {medicafe-0.250812.6.dist-info → medicafe-0.250813.1.dist-info}/top_level.txt +0 -0
MediLink/MediLink_Down.py
CHANGED
@@ -45,6 +45,15 @@ except ImportError:
     def tqdm(iterable, **kwargs):
         return iterable
 
+try:
+    from MediCafe.submission_index import append_submission_record as _append_submission_record, ensure_submission_index as _ensure_submission_index, append_ack_event as _append_ack_event
+except ImportError:
+    # Fallback if submission_index not available
+    _append_submission_record = None
+    _ensure_submission_index = None
+    _append_ack_event = None
+
+
 def handle_files(local_storage_path, downloaded_files):
     """
     Moves downloaded files to the appropriate directory and translates them to CSV format.
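The guarded import above keeps the new receipts-index hooks optional: MediCafe/submission_index.py is added in this release (+44 -0 in the file list), so older or partial installs simply bind the three names to None. A minimal sketch of the consuming side of that pattern; record_ack is a hypothetical wrapper, not part of the package:

try:
    from MediCafe.submission_index import append_ack_event as _append_ack_event
except ImportError:
    _append_ack_event = None  # feature unavailable; callers must check first

def record_ack(receipts_root, status_text):
    # Feature-test the bound name, as prompt_csv_export does further down,
    # and skip persistence quietly when the module is absent.
    if _append_ack_event is None:
        return False
    _append_ack_event(receipts_root, '', status_text, 'EBT', 'responses', {}, 'download_ack', None)
    return True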
@@ -55,15 +64,16 @@ def handle_files(local_storage_path, downloaded_files):
     local_response_directory = os.path.join(local_storage_path, "responses")
     os.makedirs(local_response_directory, exist_ok=True)
 
-    # Supported file extensions
-    file_extensions = ['.era', '.277', '.277ibr', '.277ebr', '.dpt', '.ebt', '.ibt', '.txt']
+    # Supported file extensions (enable ERA/277/999; keep EBT)
+    file_extensions = ['.era', '.277', '.277ibr', '.277ebr', '.999', '.dpt', '.ebt', '.ibt', '.txt']
 
     files_moved = []
 
     for file in downloaded_files:
-
+        lower = file.lower()
+        if any(lower.endswith(ext) for ext in file_extensions): # Case-insensitive match
             source_path = os.path.join(local_storage_path, file)
-            destination_path = os.path.join(local_response_directory, file)
+            destination_path = os.path.join(local_response_directory, os.path.basename(file))
 
             try:
                 shutil.move(source_path, destination_path)
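The new lower()/any(...endswith...) pair makes the extension filter case-insensitive, and os.path.basename() guards the destination path against directory fragments in the file name. A standalone rehearsal of the match as written, with invented file names:

file_extensions = ['.era', '.277', '.277ibr', '.277ebr', '.999', '.dpt', '.ebt', '.ibt', '.txt']
for file in ['claims.277', 'REMIT.ERA', 'batch.999', 'archive.zip']:
    lower = file.lower()
    print(file, '->', any(lower.endswith(ext) for ext in file_extensions))
# claims.277 -> True, REMIT.ERA -> True, batch.999 -> True, archive.zip -> False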
@@ -71,6 +81,8 @@ def handle_files(local_storage_path, downloaded_files):
                 files_moved.append(destination_path)
             except Exception as e:
                 log("Error moving file '{}' to '{}': {}".format(file, destination_path, e), level="ERROR")
+        else:
+            log("Skipping unsupported file '{}'.".format(file), level="DEBUG")
 
     if not files_moved:
         log("No files were moved. Ensure that files with supported extensions exist in the download directory.", level="WARNING")
@@ -93,14 +105,15 @@ def translate_files(files, output_directory):
     translated_files = []
     consolidated_records = []
 
-    #
+    # Enable processing for ERA, 277 family, 999, and EBT
     file_type_selector = {
-        '.era':
-        '.277':
-        '.277ibr':
-        '.277ebr':
+        '.era': True,
+        '.277': True,
+        '.277ibr': True,
+        '.277ebr': True,
+        '.999': True,
         '.dpt': False,
-        '.ebt': True,
+        '.ebt': True,
         '.ibt': False,
         '.txt': False
     }
@@ -108,12 +121,24 @@ def translate_files(files, output_directory):
     file_counts = {ext: 0 for ext in file_type_selector.keys()}
 
     for file in files:
-        ext = os.path.splitext(file)[1]
+        ext = os.path.splitext(file)[1].lower()
         if file_type_selector.get(ext, False): # Check if the file type is selected
             file_counts[ext] += 1
 
             try:
-
+                src_path = os.path.join(output_directory, os.path.basename(file))
+                records = process_decoded_file(src_path, output_directory, return_records=True)
+                # Annotate records with source metadata for downstream persistence
+                try:
+                    mtime = os.path.getmtime(src_path)
+                except Exception:
+                    mtime = None
+                for r in records:
+                    try:
+                        setattr(r, 'source_file', src_path)
+                        setattr(r, 'source_mtime', mtime)
+                    except Exception:
+                        pass
                 consolidated_records.extend(records)
                 csv_file_path = os.path.join(output_directory, os.path.basename(file) + '_decoded.csv')
                 translated_files.append(csv_file_path)
@@ -122,11 +147,34 @@ def translate_files(files, output_directory):
                 log("Unsupported file type: {}".format(file), level="WARNING")
             except Exception as e:
                 log("Error processing file {}: {}".format(file, e), level="ERROR")
+        else:
+            log("Skipping unselected file type for '{}'.".format(file), level="DEBUG")
 
     log("Detected and processed file counts by type:")
     for ext, count in file_counts.items():
         log("{}: {} files detected".format(ext, count), level="INFO")
 
+    # Simple, elegant summary for console UI
+    try:
+        if consolidated_records:
+            total = len(consolidated_records)
+            num_rejected = 0
+            num_accepted = 0
+            for r in consolidated_records:
+                status = getattr(r, 'status', '') if hasattr(r, 'status') else r.get('Status', '')
+                if status:
+                    if ('Reject' in status) or (':' in status and status.upper().startswith('R')):
+                        num_rejected += 1
+                    elif ('Accept' in status) or (':' in status and status.upper().startswith('A')):
+                        num_accepted += 1
+            print("\nAcknowledgements Summary:")
+            print(" Total records: {}".format(total))
+            print(" Accepted: {}".format(num_accepted))
+            print(" Rejected: {}".format(num_rejected))
+            print("")
+    except Exception:
+        pass
+
     return consolidated_records, translated_files
 
 def prompt_csv_export(records, output_directory):
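The summary block counts a record as rejected when its status text contains 'Reject' or looks like a coded 277-style status (contains ':' and starts with 'R'), and symmetrically for accepted; anything else falls into neither bucket. A self-contained rehearsal of that heuristic on made-up status strings:

def classify(status):
    # Mirrors the branches in the summary loop above.
    if ('Reject' in status) or (':' in status and status.upper().startswith('R')):
        return 'rejected'
    if ('Accept' in status) or (':' in status and status.upper().startswith('A')):
        return 'accepted'
    return 'uncounted'

for s in ['Accepted', 'R3:Missing subscriber ID', 'A2:Claim accepted', 'Pending']:
    print(s, '->', classify(s))
# Accepted -> accepted, R3:... -> rejected, A2:... -> accepted, Pending -> uncounted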
@@ -134,6 +182,50 @@ def prompt_csv_export(records, output_directory):
     Prompts the user to export consolidated records to a CSV file.
     """
     if records:
+        # Persist lightweight ack events into receipts index (optional, best-effort)
+        try:
+            config, _ = load_configuration()
+            medi = extract_medilink_config(config)
+            receipts_root = medi.get('local_claims_path', None)
+            if receipts_root and _ensure_submission_index and _append_ack_event:
+                _ensure_submission_index(receipts_root)
+                for rec in records:
+                    try:
+                        # rec may be UnifiedRecord; convert
+                        if hasattr(rec, 'to_dict'):
+                            d = rec.to_dict()
+                        else:
+                            d = rec
+                        claim_no = d.get('Claim #', '')
+                        status_text = d.get('Status', '')
+                        # infer ack_type by presence of fields
+                        ack_type = ''
+                        if d.get('Paid', '') != '' or d.get('Allowed', '') != '':
+                            ack_type = 'ERA'
+                        elif status_text and ':' in status_text:
+                            ack_type = '277'
+                        else:
+                            ack_type = 'EBT' # default for text notifications
+                        # Use file metadata when available
+                        file_name = os.path.basename(getattr(rec, 'source_file', '')) if hasattr(rec, 'source_file') else 'responses'
+                        ts = getattr(rec, 'source_mtime', None)
+                        control_ids = {}
+                        if claim_no:
+                            _append_ack_event(
+                                receipts_root,
+                                '', # claim_key unknown here
+                                status_text,
+                                ack_type,
+                                file_name,
+                                control_ids,
+                                'download_ack',
+                                int(ts) if isinstance(ts, (int, float)) else None
+                            )
+                    except Exception:
+                        continue
+        except Exception:
+            pass
+
     user_input = input("Do you want to export the consolidated records to a CSV file? (y/n): ")
     if user_input.lower() == 'y':
         output_file_path = os.path.join(output_directory, "Consolidated_Records.csv")
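Two details worth flagging in this block. The ack type is inferred purely from which fields a record carries: payment amounts mean ERA, a coded status means 277, anything else defaults to EBT. And _append_ack_event is invoked positionally, so the argument roles indicated by the inline comments are inferred from this call site; the authoritative signature lives in MediCafe/submission_index.py. The inference rule, extracted into a runnable sketch:

def infer_ack_type(d):
    # Same precedence as the loop above: ERA, then 277, then EBT.
    if d.get('Paid', '') != '' or d.get('Allowed', '') != '':
        return 'ERA'
    status_text = d.get('Status', '')
    if status_text and ':' in status_text:
        return '277'
    return 'EBT'

print(infer_ack_type({'Paid': '120.00'}))                # ERA
print(infer_ack_type({'Status': 'A2:Claim accepted'}))   # 277
print(infer_ack_type({'Status': 'Forwarded to payer'}))  # EBT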
@@ -187,15 +279,23 @@ def main(desired_endpoint=None):
     return None, None
 
 
-def check_for_new_remittances(config=None):
+def check_for_new_remittances(config=None, is_boot_scan=False):
     """
     Function to check for new remittance files across all configured endpoints.
     Loads the configuration, validates it, and processes each endpoint to download and handle files.
     Accumulates results from all endpoints and processes them together at the end.
+
+    Args:
+        config: Configuration object
+        is_boot_scan: If True, suppresses "No records" message for boot-time scans
+
+    Returns:
+        bool: True if new records were found, False otherwise
     """
     # Start the process and log the initiation
     log("Starting check_for_new_remittances function")
-
+    if not is_boot_scan:
+        print("\nChecking for new files across all endpoints...")
     log("Checking for new files across all endpoints...")
 
     # Step 1: Load and validate the configuration
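check_for_new_remittances now returns a boolean and accepts is_boot_scan, so a startup hook can poll quietly while the interactive menu path keeps its console output. A usage sketch, assuming the module is importable as MediLink.MediLink_Down (the surrounding startup code is hypothetical):

from MediLink.MediLink_Down import check_for_new_remittances

found_new = check_for_new_remittances(is_boot_scan=True)  # quiet boot-time scan
if found_new:
    print("New remittance records were downloaded at startup.")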
@@ -205,44 +305,95 @@ def check_for_new_remittances(config=None):
     medi = extract_medilink_config(config)
     if not medi or 'endpoints' not in medi:
         log("Error: Config is missing necessary sections. Aborting...", level="ERROR")
-        return
+        return False
 
     endpoints = medi.get('endpoints')
     if not isinstance(endpoints, dict):
         log("Error: 'endpoints' is not a dictionary. Aborting...", level="ERROR")
-        return
+        return False
+
+    # DIAGNOSTIC: Log endpoint configuration details
+    log("Found {} configured endpoints: {}".format(len(endpoints), list(endpoints.keys())), level="INFO")
+    for endpoint_key, endpoint_info in endpoints.items():
+        log("Endpoint '{}': session_name={}, remote_directory_down={}, has_filemask={}".format(
+            endpoint_key,
+            endpoint_info.get('session_name', 'NOT_SET'),
+            endpoint_info.get('remote_directory_down', 'NOT_SET'),
+            'filemask' in endpoint_info
+        ), level="DEBUG")
 
     # Lists to accumulate all consolidated records and translated files across all endpoints
     all_consolidated_records = []
     all_translated_files = []
+    endpoint_results = {} # Track results per endpoint for diagnostics
 
     # Step 2: Process each endpoint and accumulate results
     for endpoint_key, endpoint_info in tqdm(endpoints.items(), desc="Processing endpoints"):
+        log("=== Processing endpoint: {} ===".format(endpoint_key), level="INFO")
+
         # Validate endpoint structure
         if not endpoint_info or not isinstance(endpoint_info, dict):
             log("Error: Invalid endpoint structure for {}. Skipping...".format(endpoint_key), level="ERROR")
+            endpoint_results[endpoint_key] = {"status": "error", "reason": "invalid_structure"}
             continue
 
         if 'remote_directory_down' in endpoint_info:
             # Process the endpoint and handle the files
-            log("Processing endpoint: {}".format(
+            log("Processing endpoint: {} with remote_directory_down: {}".format(
+                endpoint_key, endpoint_info.get('remote_directory_down')), level="INFO")
+
             consolidated_records, translated_files = process_endpoint(endpoint_key, endpoint_info, config)
 
+            # Track results for diagnostics
+            endpoint_results[endpoint_key] = {
+                "status": "processed",
+                "records_found": len(consolidated_records) if consolidated_records else 0,
+                "files_translated": len(translated_files) if translated_files else 0
+            }
+
             # Accumulate the results for later processing
             if consolidated_records:
                 all_consolidated_records.extend(consolidated_records)
+                log("Added {} records from endpoint {}".format(len(consolidated_records), endpoint_key), level="INFO")
             if translated_files:
                 all_translated_files.extend(translated_files)
+                log("Added {} translated files from endpoint {}".format(len(translated_files), endpoint_key), level="INFO")
         else:
             log("Skipping endpoint '{}'. 'remote_directory_down' not configured.".format(endpoint_info.get('name', 'Unknown')), level="WARNING")
+            endpoint_results[endpoint_key] = {"status": "skipped", "reason": "no_remote_directory_down"}
+
+    # DIAGNOSTIC: Log summary of endpoint processing
+    log("=== Endpoint Processing Summary ===", level="INFO")
+    for endpoint_key, result in endpoint_results.items():
+        if result["status"] == "processed":
+            log("Endpoint '{}': {} records found, {} files translated".format(
+                endpoint_key, result["records_found"], result["files_translated"]), level="INFO")
+        else:
+            log("Endpoint '{}': {} ({})".format(
+                endpoint_key, result["status"], result.get("reason", "unknown")), level="WARNING")
 
     # Step 3: After processing all endpoints, handle the accumulated results
     if all_consolidated_records:
+        log("Total records found across all endpoints: {}".format(len(all_consolidated_records)), level="INFO")
         display_consolidated_records(all_consolidated_records) # Ensure this is called only once
         prompt_csv_export(all_consolidated_records, medi.get('local_storage_path', '.'))
+        return True
     else:
         log("No records to display after processing all endpoints.", level="WARNING")
-
+        # Enhanced diagnostic message when no records found
+        if not is_boot_scan:
+            print("No records to display after processing all endpoints.")
+            print("\nDiagnostic Information:")
+            print("- Total endpoints configured: {}".format(len(endpoints)))
+            print("- Endpoints with remote_directory_down: {}".format(
+                sum(1 for ep in endpoints.values() if 'remote_directory_down' in ep)))
+            print("- Endpoints processed: {}".format(
+                sum(1 for result in endpoint_results.values() if result["status"] == "processed")))
+            print("- Endpoints skipped: {}".format(
+                sum(1 for result in endpoint_results.values() if result["status"] == "skipped")))
+            print("- Endpoints with errors: {}".format(
+                sum(1 for result in endpoint_results.values() if result["status"] == "error")))
+        return False
 
 
 def process_endpoint(endpoint_key, endpoint_info, config):
@@ -255,19 +406,205 @@ def process_endpoint(endpoint_key, endpoint_info, config):
         medi = extract_medilink_config(config)
         local_storage_path = medi.get('local_storage_path', '.')
         log("[Process Endpoint] Local storage path set to {}".format(local_storage_path))
+
+        # DIAGNOSTIC: Check WinSCP availability and configuration
+        try:
+            from MediLink_DataMgmt import get_winscp_path
+            winscp_path = get_winscp_path(config)
+            if os.path.exists(winscp_path):
+                log("[Process Endpoint] WinSCP found at: {}".format(winscp_path), level="INFO")
+            else:
+                log("[Process Endpoint] WinSCP not found at: {}".format(winscp_path), level="ERROR")
+                return [], []
+        except Exception as e:
+            log("[Process Endpoint] Error checking WinSCP path: {}".format(e), level="ERROR")
+            return [], []
+
+        # DIAGNOSTIC: Log endpoint configuration details
+        log("[Process Endpoint] Endpoint config - session_name: {}, remote_directory_down: {}, filemask: {}".format(
+            endpoint_info.get('session_name', 'NOT_SET'),
+            endpoint_info.get('remote_directory_down', 'NOT_SET'),
+            endpoint_info.get('filemask', 'NOT_SET')
+        ), level="DEBUG")
+
+        # DIAGNOSTIC: Check if we're in test mode
+        if config.get("MediLink_Config", {}).get("TestMode", False):
+            log("[Process Endpoint] Test mode is enabled - simulating download", level="WARNING")
+
         downloaded_files = operate_winscp("download", None, endpoint_info, local_storage_path, config)
 
         if downloaded_files:
             log("[Process Endpoint] WinSCP Downloaded the following files: \n{}".format(downloaded_files))
-
+            consolidated_records, translated_files = handle_files(local_storage_path, downloaded_files)
+            log("[Process Endpoint] File processing complete - {} records, {} translated files".format(
+                len(consolidated_records) if consolidated_records else 0,
+                len(translated_files) if translated_files else 0
+            ), level="INFO")
+            return consolidated_records, translated_files
         else:
-            log("[Process Endpoint]No files were downloaded for endpoint: {}.".format(endpoint_key), level="WARNING")
+            log("[Process Endpoint] No files were downloaded for endpoint: {}.".format(endpoint_key), level="WARNING")
+
+            # DIAGNOSTIC: Check if WinSCP log exists and analyze it
+            try:
+                log_filename = "winscp_download.log"
+                log_path = os.path.join(local_storage_path, log_filename)
+                if os.path.exists(log_path):
+                    log("[Process Endpoint] WinSCP log exists at: {}".format(log_path), level="INFO")
+                    # Read last few lines of log for diagnostics
+                    try:
+                        with open(log_path, 'r') as f:
+                            lines = f.readlines()
+                            if lines:
+                                last_lines = lines[-5:] # Last 5 lines
+                                log("[Process Endpoint] Last 5 lines of WinSCP log:", level="DEBUG")
+                                for line in last_lines:
+                                    log("[Process Endpoint] Log: {}".format(line.strip()), level="DEBUG")
+                    except Exception as e:
+                        log("[Process Endpoint] Error reading WinSCP log: {}".format(e), level="ERROR")
+                else:
+                    log("[Process Endpoint] WinSCP log not found at: {}".format(log_path), level="WARNING")
+            except Exception as e:
+                log("[Process Endpoint] Error checking WinSCP log: {}".format(e), level="ERROR")
+
             return [], []
 
     except Exception as e:
         # Handle any exceptions that occur during the processing
         log("Error processing endpoint {}: {}".format(endpoint_key, e), level="ERROR")
+        import traceback
+        log("Full traceback: {}".format(traceback.format_exc()), level="DEBUG")
         return [], []
 
+def test_endpoint_connectivity(config=None, endpoint_key=None):
+    """
+    Test basic connectivity to a specific endpoint or all endpoints.
+    This is a diagnostic function to help identify connection issues.
+
+    Args:
+        config: Configuration object
+        endpoint_key: Specific endpoint to test, or None for all endpoints
+
+    Returns:
+        dict: Results of connectivity tests
+    """
+    if config is None:
+        config, _ = load_configuration()
+
+    medi = extract_medilink_config(config)
+    if not medi or 'endpoints' not in medi:
+        log("Error: Config is missing necessary sections.", level="ERROR")
+        return {}
+
+    endpoints = medi.get('endpoints')
+    results = {}
+
+    # Determine which endpoints to test
+    if endpoint_key:
+        if endpoint_key in endpoints:
+            test_endpoints = {endpoint_key: endpoints[endpoint_key]}
+        else:
+            log("Error: Endpoint '{}' not found in configuration.".format(endpoint_key), level="ERROR")
+            return {}
+    else:
+        test_endpoints = endpoints
+
+    log("Testing connectivity for {} endpoint(s)...".format(len(test_endpoints)), level="INFO")
+
+    for ep_key, ep_info in test_endpoints.items():
+        log("Testing endpoint: {}".format(ep_key), level="INFO")
+        result = {"status": "unknown", "details": []}
+
+        # Check basic configuration
+        if not ep_info.get('session_name'):
+            result["status"] = "error"
+            result["details"].append("Missing session_name")
+        elif not ep_info.get('remote_directory_down'):
+            result["status"] = "error"
+            result["details"].append("Missing remote_directory_down")
+        else:
+            result["details"].append("Configuration appears valid")
+
+        # Check WinSCP availability
+        try:
+            from MediLink_DataMgmt import get_winscp_path
+            winscp_path = get_winscp_path(config)
+            if os.path.exists(winscp_path):
+                result["details"].append("WinSCP found at: {}".format(winscp_path))
+            else:
+                result["status"] = "error"
+                result["details"].append("WinSCP not found at: {}".format(winscp_path))
+        except Exception as e:
+            result["status"] = "error"
+            result["details"].append("Error checking WinSCP: {}".format(e))
+
+        # Check test mode
+        if config.get("MediLink_Config", {}).get("TestMode", False):
+            result["details"].append("Test mode is enabled - no real connection will be made")
+            result["status"] = "test_mode"
+
+        results[ep_key] = result
+
+    return results
+
+
 if __name__ == "__main__":
-
+    import sys
+
+    print("=" * 60)
+    print("MediLink_Down Standalone Testing Tool")
+    print("=" * 60)
+    print()
+
+    # Check if endpoint was provided as command line argument
+    if len(sys.argv) > 1:
+        desired_endpoint = sys.argv[1]
+        print("Testing specific endpoint: {}".format(desired_endpoint))
+        print()
+        main(desired_endpoint)
+    else:
+        # No specific endpoint provided - run connectivity diagnostics
+        print("No specific endpoint provided.")
+        print("Running connectivity diagnostics for all endpoints...")
+        print()
+
+        try:
+            config, _ = load_configuration()
+            connectivity_results = test_endpoint_connectivity(config)
+
+            if connectivity_results:
+                print("Connectivity Test Results:")
+                print("-" * 40)
+
+                for endpoint, result in connectivity_results.items():
+                    status = result["status"]
+                    details = result["details"]
+
+                    if status == "error":
+                        print("[ERROR] {}: {}".format(endpoint, status))
+                    elif status == "test_mode":
+                        print("[TEST] {}: {} (Test Mode)".format(endpoint, status))
+                    else:
+                        print("[OK] {}: {}".format(endpoint, status))
+
+                    for detail in details:
+                        print(" - {}".format(detail))
+                    print()
+
+                # Show available endpoints for testing
+                medi = extract_medilink_config(config)
+                endpoints = medi.get('endpoints', {})
+                if endpoints:
+                    print("Available endpoints for testing:")
+                    print("-" * 30)
+                    for endpoint in endpoints.keys():
+                        print(" - {}".format(endpoint))
+                    print()
+                    print("To test a specific endpoint, run:")
+                    print(" python MediLink_Down.py <endpoint_name>")
+                else:
+                    print("ERROR: No connectivity test results returned.")
+
+        except Exception as e:
+            print("ERROR: Failed to run diagnostics: {}".format(e))
+            import traceback
+            traceback.print_exc()
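Beyond the __main__ driver, test_endpoint_connectivity can be called programmatically; with config=None it loads the configuration itself, and with endpoint_key=None it checks every endpoint. A short sketch under the same importability assumption as above:

from MediLink.MediLink_Down import test_endpoint_connectivity

results = test_endpoint_connectivity()  # all endpoints, config loaded internally
for endpoint, result in results.items():
    print("{}: {}".format(endpoint, result["status"]))
    for detail in result["details"]:
        print(" - {}".format(detail))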
MediLink/MediLink_Parser.py
CHANGED
@@ -241,4 +241,83 @@ def parse_ibt_content(content, debug=False):
         for data in extracted_data:
             print(data)
 
-    return extracted_data
+    return extracted_data
+
+def parse_999_content(content, debug=False):
+    """
+    Minimal 999 Implementation Acknowledgment parser.
+    Extracts overall transaction set acknowledgment (AK9) and per-set (AK5) statuses when available.
+    Returns a list with a single summary dict plus optional per-set entries.
+    """
+    records = []
+    segments = content.split('~')
+    overall_status = None
+    functional_id = None
+    control_numbers = [] # AK2 ST02 values
+    per_set_statuses = [] # List of {'set_control': str, 'status': str}
+
+    for seg in segments:
+        parts = seg.split('*')
+        if not parts or not parts[0]:
+            continue
+        tag = parts[0]
+        if tag == 'AK1' and len(parts) > 1:
+            functional_id = parts[1]
+        elif tag == 'AK2' and len(parts) > 2:
+            # Transaction Set Acknowledgment - capture ST02 control number
+            control_numbers.append(parts[2])
+        elif tag == 'AK5' and len(parts) > 1:
+            # Transaction Set Response Trailer - status code in AK5-01 (A, E, R)
+            status_code = parts[1]
+            per_set_statuses.append({'status': status_code})
+        elif tag == 'AK9' and len(parts) > 1:
+            # Functional Group Response Trailer - overall status in AK9-01
+            overall_status = parts[1]
+
+    # Map X12 codes to friendly text
+    status_map = {'A': 'Accepted', 'E': 'Accepted with Errors', 'R': 'Rejected'}
+    overall_text = status_map.get(overall_status, overall_status or '')
+
+    summary = {
+        'Ack Type': '999',
+        'Functional ID': functional_id or '',
+        'Status': overall_text,
+        'Sets Acknowledged': len(control_numbers) if control_numbers else 0,
+    }
+    records.append(summary)
+
+    # Optionally include per-set detail rows
+    for idx, st in enumerate(per_set_statuses):
+        detail = {
+            'Ack Type': '999',
+            'Functional ID': functional_id or '',
+            'Set #': str(idx + 1),
+            'Status': status_map.get(st.get('status', ''), st.get('status', '')),
+        }
+        # Claim # not available in 999; leave out
+        records.append(detail)
+
+    if debug:
+        print('Parsed 999 Content:')
+        for r in records:
+            print(r)
+    return records
+
+def determine_file_type(file_path):
+    file_extensions = {
+        '.era': 'ERA',
+        '.277': '277',
+        '.277ibr': '277IBR',
+        '.277ebr': '277EBR',
+        '.dpt': 'DPT',
+        '.ebt': 'EBT',
+        '.ibt': 'IBT',
+        '.999': '999'
+    }
+
+    for ext, file_type in file_extensions.items():
+        if file_path.endswith(ext):
+            return file_type
+
+    log("Unsupported file type for file: {}".format(file_path))
+    return None