qmenta-client 2.0__py3-none-any.whl → 2.1.dev1509__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qmenta/client/Account.py +43 -11
- qmenta/client/File.py +44 -12
- qmenta/client/Project.py +971 -399
- qmenta/client/Subject.py +10 -3
- qmenta/client/utils.py +6 -2
- {qmenta_client-2.0.dist-info → qmenta_client-2.1.dev1509.dist-info}/METADATA +2 -1
- qmenta_client-2.1.dev1509.dist-info/RECORD +10 -0
- qmenta_client-2.0.dist-info/RECORD +0 -10
- {qmenta_client-2.0.dist-info → qmenta_client-2.1.dev1509.dist-info}/WHEEL +0 -0
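Most of the churn in the `qmenta/client/Project.py` diff below is a reflow to a shorter line length, but a few changes are real API surface: a new `Project.delete_file()` method, a `mock_response` flag threaded through the upload path, an `items=(min, max)` range argument on the listing methods, and mutable `{}` defaults replaced with `None`. A minimal sketch of how the changed calls read in 2.1.dev1509 — the `Account` login signature, the credentials, and the container ID below are assumptions for illustration, not values taken from this diff:

```python
from qmenta.client import Account, Project

# Hypothetical credentials and identifiers, for illustration only.
account = Account("user@example.com", "password")
project = Project(account, "my-project")  # accepts a project name or id

# New in 2.1: delete one file (str) or several (list of str) from a container.
project.delete_file(container_id=1234, filenames=["scan_a.zip", "scan_b.zip"])

# Listing methods now default their search dict to None and take an
# items=(min, max) range; both bounds must be integers.
analyses = project.list_analysis(
    search_condition={"state": "completed"},
    items=(0, 100),
)
```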
qmenta/client/Project.py
CHANGED
|
@@ -9,7 +9,6 @@ import sys
|
|
|
9
9
|
import time
|
|
10
10
|
from collections import defaultdict
|
|
11
11
|
from enum import Enum
|
|
12
|
-
|
|
13
12
|
from qmenta.core import errors
|
|
14
13
|
from qmenta.core import platform
|
|
15
14
|
|
|
@@ -21,6 +20,17 @@ if sys.version_info[0] == 3:
|
|
|
21
20
|
|
|
22
21
|
logger_name = "qmenta.client"
|
|
23
22
|
OPERATOR_LIST = ["eq", "ne", "gt", "gte", "lt", "lte"]
|
|
23
|
+
ANALYSIS_NAME_EXCLUDED_CHARACTERS = [
|
|
24
|
+
"\\",
|
|
25
|
+
"[",
|
|
26
|
+
"]",
|
|
27
|
+
"(",
|
|
28
|
+
")",
|
|
29
|
+
"{",
|
|
30
|
+
"}",
|
|
31
|
+
"+",
|
|
32
|
+
"*",
|
|
33
|
+
]
|
|
24
34
|
|
|
25
35
|
|
|
26
36
|
def convert_qc_value_to_qcstatus(value):
|
|
@@ -46,7 +56,9 @@ def convert_qc_value_to_qcstatus(value):
|
|
|
46
56
|
elif value == "":
|
|
47
57
|
return QCStatus.UNDERTERMINED
|
|
48
58
|
else:
|
|
49
|
-
logger.error(
|
|
59
|
+
logger.error(
|
|
60
|
+
f"The input value '{value}' cannot be converted to class QCStatus."
|
|
61
|
+
)
|
|
50
62
|
return False
|
|
51
63
|
|
|
52
64
|
|
|
@@ -84,11 +96,24 @@ class Project:
|
|
|
84
96
|
# project id (int)
|
|
85
97
|
if isinstance(project_id, str):
|
|
86
98
|
project_name = project_id
|
|
87
|
-
project_id = next(
|
|
99
|
+
project_id = next(
|
|
100
|
+
iter(
|
|
101
|
+
filter(
|
|
102
|
+
lambda proj: proj["name"] == project_id,
|
|
103
|
+
account.projects,
|
|
104
|
+
)
|
|
105
|
+
)
|
|
106
|
+
)["id"]
|
|
88
107
|
else:
|
|
89
108
|
if isinstance(project_id, float):
|
|
90
109
|
project_id = int(project_id)
|
|
91
|
-
project_name = next(
|
|
110
|
+
project_name = next(
|
|
111
|
+
iter(
|
|
112
|
+
filter(
|
|
113
|
+
lambda proj: proj["id"] == project_id, account.projects
|
|
114
|
+
)
|
|
115
|
+
)
|
|
116
|
+
)["name"]
|
|
92
117
|
|
|
93
118
|
self._account = account
|
|
94
119
|
self._project_id = project_id
|
|
@@ -121,7 +146,9 @@ class Project:
|
|
|
121
146
|
try:
|
|
122
147
|
platform.parse_response(
|
|
123
148
|
platform.post(
|
|
124
|
-
self._account.auth,
|
|
149
|
+
self._account.auth,
|
|
150
|
+
"projectset_manager/activate_project",
|
|
151
|
+
data={"project_id": int(project_id)},
|
|
125
152
|
)
|
|
126
153
|
)
|
|
127
154
|
except errors.PlatformError:
|
|
@@ -156,6 +183,7 @@ class Project:
|
|
|
156
183
|
result=False,
|
|
157
184
|
add_to_container_id=0,
|
|
158
185
|
split_data=False,
|
|
186
|
+
mock_response=False,
|
|
159
187
|
):
|
|
160
188
|
"""
|
|
161
189
|
Upload a chunk of a file to the platform.
|
|
@@ -189,6 +217,8 @@ class Project:
|
|
|
189
217
|
"Content-Length": str(length),
|
|
190
218
|
"Content-Disposition": disposition,
|
|
191
219
|
}
|
|
220
|
+
if mock_response:
|
|
221
|
+
request_headers["mock_case"] = mock_response
|
|
192
222
|
|
|
193
223
|
if last_chunk:
|
|
194
224
|
request_headers["X-Mint-Name"] = name
|
|
@@ -215,9 +245,12 @@ class Project:
|
|
|
215
245
|
|
|
216
246
|
response_time = 900.0 if last_chunk else 120.0
|
|
217
247
|
response = platform.post(
|
|
218
|
-
auth=self._account.auth,
|
|
248
|
+
auth=self._account.auth,
|
|
249
|
+
endpoint="upload",
|
|
250
|
+
data=data,
|
|
251
|
+
headers=request_headers,
|
|
252
|
+
timeout=response_time,
|
|
219
253
|
)
|
|
220
|
-
|
|
221
254
|
return response
|
|
222
255
|
|
|
223
256
|
def upload_file(
|
|
@@ -231,8 +264,10 @@ class Project:
|
|
|
231
264
|
name="",
|
|
232
265
|
input_data_type="qmenta_medical_image_data:3.10",
|
|
233
266
|
add_to_container_id=0,
|
|
234
|
-
chunk_size=2
|
|
267
|
+
chunk_size=2**9,
|
|
268
|
+
max_retries=10,
|
|
235
269
|
split_data=False,
|
|
270
|
+
mock_response=False,
|
|
236
271
|
):
|
|
237
272
|
"""
|
|
238
273
|
Upload a ZIP file to the platform.
|
|
@@ -262,24 +297,27 @@ class Project:
|
|
|
262
297
|
a power of 2: 2**x. Default value of x is 9 (chunk_size = 512 kB)
|
|
263
298
|
split_data : bool
|
|
264
299
|
If True, the platform will try to split the uploaded file into
|
|
265
|
-
different sessions. It will be ignored when the ssid
|
|
300
|
+
different sessions. It will be ignored when the ssid or a
|
|
301
|
+
add_to_container_id are given.
|
|
266
302
|
|
|
267
303
|
Returns
|
|
268
304
|
-------
|
|
269
305
|
bool
|
|
270
306
|
True if correctly uploaded, False otherwise.
|
|
271
307
|
"""
|
|
272
|
-
|
|
273
308
|
filename = os.path.split(file_path)[1]
|
|
274
309
|
input_data_type = "offline_analysis:1.0" if result else input_data_type
|
|
275
310
|
|
|
276
311
|
chunk_size *= 1024
|
|
277
|
-
max_retries = 10
|
|
278
312
|
|
|
279
313
|
name = name or os.path.split(file_path)[1]
|
|
280
314
|
|
|
281
315
|
total_bytes = os.path.getsize(file_path)
|
|
282
316
|
|
|
317
|
+
split_data = self.__assert_split_data(
|
|
318
|
+
split_data, ssid, add_to_container_id
|
|
319
|
+
)
|
|
320
|
+
|
|
283
321
|
# making chunks of the file and sending one by one
|
|
284
322
|
logger = logging.getLogger(logger_name)
|
|
285
323
|
with open(file_path, "rb") as file_object:
|
|
@@ -295,10 +333,7 @@ class Project:
|
|
|
295
333
|
uploaded_bytes = 0
|
|
296
334
|
response = None
|
|
297
335
|
last_chunk = False
|
|
298
|
-
|
|
299
|
-
if ssid and split_data:
|
|
300
|
-
logger.warning("split-data argument will be ignored because" + " ssid has been specified")
|
|
301
|
-
split_data = False
|
|
336
|
+
error_message = None
|
|
302
337
|
|
|
303
338
|
while True:
|
|
304
339
|
data = file_object.read(chunk_size)
|
|
@@ -314,7 +349,14 @@ class Project:
|
|
|
314
349
|
end_position = total_bytes - 1
|
|
315
350
|
bytes_to_send = total_bytes - uploaded_bytes
|
|
316
351
|
|
|
317
|
-
bytes_range =
|
|
352
|
+
bytes_range = (
|
|
353
|
+
"bytes "
|
|
354
|
+
+ str(start_position)
|
|
355
|
+
+ "-"
|
|
356
|
+
+ str(end_position)
|
|
357
|
+
+ "/"
|
|
358
|
+
+ str(total_bytes)
|
|
359
|
+
)
|
|
318
360
|
|
|
319
361
|
dispstr = f"attachment; filename={filename}"
|
|
320
362
|
response = self._upload_chunk(
|
|
@@ -334,14 +376,13 @@ class Project:
|
|
|
334
376
|
result,
|
|
335
377
|
add_to_container_id,
|
|
336
378
|
split_data,
|
|
379
|
+
mock_response=mock_response,
|
|
337
380
|
)
|
|
338
|
-
|
|
339
381
|
if response is None:
|
|
340
382
|
retries_count += 1
|
|
341
383
|
time.sleep(retries_count * 5)
|
|
342
384
|
if retries_count > max_retries:
|
|
343
385
|
error_message = "HTTP Connection Problem"
|
|
344
|
-
logger.error(error_message)
|
|
345
386
|
break
|
|
346
387
|
elif int(response.status_code) == 201:
|
|
347
388
|
chunk_num += 1
|
|
@@ -354,31 +395,63 @@ class Project:
|
|
|
354
395
|
retries_count += 1
|
|
355
396
|
time.sleep(retries_count * 5)
|
|
356
397
|
if retries_count > self.max_retries:
|
|
357
|
-
error_message =
|
|
358
|
-
|
|
398
|
+
error_message = (
|
|
399
|
+
"Error Code: 416; Requested Range "
|
|
400
|
+
"Not Satisfiable (NGINX)"
|
|
401
|
+
)
|
|
359
402
|
break
|
|
360
403
|
else:
|
|
361
404
|
retries_count += 1
|
|
362
405
|
time.sleep(retries_count * 5)
|
|
363
406
|
if retries_count > max_retries:
|
|
364
|
-
error_message =
|
|
365
|
-
|
|
407
|
+
error_message = (
|
|
408
|
+
"Number of retries has been reached. "
|
|
409
|
+
"Upload process stops here !"
|
|
410
|
+
)
|
|
366
411
|
break
|
|
367
412
|
|
|
368
413
|
uploaded += chunk_size
|
|
369
414
|
self.__show_progress(uploaded, file_size)
|
|
370
|
-
|
|
415
|
+
if error_message is not None:
|
|
416
|
+
raise Exception(error_message)
|
|
371
417
|
try:
|
|
372
418
|
platform.parse_response(response)
|
|
373
419
|
except errors.PlatformError as error:
|
|
374
420
|
logger.error(error)
|
|
375
421
|
return False
|
|
376
422
|
|
|
377
|
-
message =
|
|
378
|
-
|
|
423
|
+
message = (
|
|
424
|
+
"Your data was successfully uploaded. "
|
|
425
|
+
"The uploaded file will be soon processed !"
|
|
426
|
+
)
|
|
379
427
|
logger.info(message)
|
|
380
428
|
return True
|
|
381
429
|
|
|
430
|
+
def delete_file(self, container_id, filenames):
|
|
431
|
+
"""
|
|
432
|
+
Delete a file or files from a container.
|
|
433
|
+
Can be an input or an output container
|
|
434
|
+
|
|
435
|
+
Parameters
|
|
436
|
+
----------
|
|
437
|
+
container_id : int
|
|
438
|
+
filenames : str or list of str
|
|
439
|
+
|
|
440
|
+
"""
|
|
441
|
+
if not isinstance(filenames, str):
|
|
442
|
+
if isinstance(filenames, list):
|
|
443
|
+
if not all([isinstance(f, str) for f in filenames]):
|
|
444
|
+
raise TypeError("Elements of `filenames` must be str")
|
|
445
|
+
filenames = ";".join(filenames)
|
|
446
|
+
else:
|
|
447
|
+
raise TypeError("`filenames` must be str or list of str")
|
|
448
|
+
|
|
449
|
+
platform.post(
|
|
450
|
+
self._account.auth,
|
|
451
|
+
"file_manager/delete_files",
|
|
452
|
+
data={"container_id": container_id, "files": filenames},
|
|
453
|
+
)
|
|
454
|
+
|
|
382
455
|
def upload_mri(self, file_path, subject_name):
|
|
383
456
|
"""
|
|
384
457
|
Upload new MRI data to the subject.
|
|
@@ -415,7 +488,11 @@ class Project:
|
|
|
415
488
|
"""
|
|
416
489
|
|
|
417
490
|
if self.__check_upload_file(file_path):
|
|
418
|
-
return self.upload_file(
|
|
491
|
+
return self.upload_file(
|
|
492
|
+
file_path,
|
|
493
|
+
subject_name,
|
|
494
|
+
input_data_type="parkinson_gametection",
|
|
495
|
+
)
|
|
419
496
|
return False
|
|
420
497
|
|
|
421
498
|
def upload_result(self, file_path, subject_name):
|
|
@@ -438,7 +515,9 @@ class Project:
|
|
|
438
515
|
return self.upload_file(file_path, subject_name, result=True)
|
|
439
516
|
return False
|
|
440
517
|
|
|
441
|
-
def download_file(
|
|
518
|
+
def download_file(
|
|
519
|
+
self, container_id, file_name, local_filename=False, overwrite=False
|
|
520
|
+
):
|
|
442
521
|
"""
|
|
443
522
|
Download a single file from a specific container.
|
|
444
523
|
|
|
@@ -455,36 +534,50 @@ class Project:
|
|
|
455
534
|
"""
|
|
456
535
|
logger = logging.getLogger(logger_name)
|
|
457
536
|
if not isinstance(file_name, str):
|
|
458
|
-
raise ValueError(
|
|
459
|
-
|
|
460
|
-
|
|
537
|
+
raise ValueError(
|
|
538
|
+
"The name of the file to download (file_name) should be of "
|
|
539
|
+
"type string."
|
|
540
|
+
)
|
|
541
|
+
if not isinstance(local_filename, str):
|
|
542
|
+
raise ValueError(
|
|
543
|
+
"The name of the output file (local_filename) should be of "
|
|
544
|
+
"type string."
|
|
545
|
+
)
|
|
461
546
|
|
|
462
547
|
if file_name not in self.list_container_files(container_id):
|
|
463
|
-
msg =
|
|
464
|
-
|
|
465
|
-
|
|
548
|
+
msg = (
|
|
549
|
+
f'File "{file_name}" does not exist in container '
|
|
550
|
+
f"{container_id}"
|
|
551
|
+
)
|
|
552
|
+
raise Exception(msg)
|
|
466
553
|
|
|
467
554
|
local_filename = local_filename or file_name
|
|
468
555
|
|
|
469
556
|
if os.path.exists(local_filename) and not overwrite:
|
|
470
557
|
msg = f"File {local_filename} already exists"
|
|
471
|
-
|
|
472
|
-
return False
|
|
558
|
+
raise Exception(msg)
|
|
473
559
|
|
|
474
560
|
params = {"container_id": container_id, "files": file_name}
|
|
475
|
-
|
|
476
561
|
with platform.post(
|
|
477
|
-
self._account.auth,
|
|
562
|
+
self._account.auth,
|
|
563
|
+
"file_manager/download_file",
|
|
564
|
+
data=params,
|
|
565
|
+
stream=True,
|
|
478
566
|
) as response, open(local_filename, "wb") as f:
|
|
479
567
|
|
|
480
|
-
for chunk in response.iter_content(chunk_size=2
|
|
568
|
+
for chunk in response.iter_content(chunk_size=2**9 * 1024):
|
|
481
569
|
f.write(chunk)
|
|
482
570
|
f.flush()
|
|
483
571
|
|
|
484
|
-
logger.info(
|
|
572
|
+
logger.info(
|
|
573
|
+
f"File {file_name} from container {container_id} saved "
|
|
574
|
+
f"to {local_filename}"
|
|
575
|
+
)
|
|
485
576
|
return True
|
|
486
577
|
|
|
487
|
-
def download_files(
|
|
578
|
+
def download_files(
|
|
579
|
+
self, container_id, filenames, zip_name="files.zip", overwrite=False
|
|
580
|
+
):
|
|
488
581
|
"""
|
|
489
582
|
Download a set of files from a given container.
|
|
490
583
|
|
|
@@ -502,32 +595,51 @@ class Project:
|
|
|
502
595
|
logger = logging.getLogger(logger_name)
|
|
503
596
|
|
|
504
597
|
if not all([isinstance(file_name, str) for file_name in filenames]):
|
|
505
|
-
raise ValueError(
|
|
598
|
+
raise ValueError(
|
|
599
|
+
"The name of the files to download (filenames) should be "
|
|
600
|
+
"of type string."
|
|
601
|
+
)
|
|
506
602
|
if not isinstance(zip_name, str):
|
|
507
|
-
raise ValueError(
|
|
603
|
+
raise ValueError(
|
|
604
|
+
"The name of the output ZIP file (zip_name) should be "
|
|
605
|
+
"of type string."
|
|
606
|
+
)
|
|
508
607
|
|
|
509
|
-
files_not_in_container = list(
|
|
608
|
+
files_not_in_container = list(
|
|
609
|
+
filter(
|
|
610
|
+
lambda f: f not in self.list_container_files(container_id),
|
|
611
|
+
filenames,
|
|
612
|
+
)
|
|
613
|
+
)
|
|
510
614
|
|
|
511
615
|
if files_not_in_container:
|
|
512
|
-
msg =
|
|
513
|
-
|
|
514
|
-
|
|
616
|
+
msg = (
|
|
617
|
+
f"The following files are missing in container "
|
|
618
|
+
f"{container_id}: {', '.join(files_not_in_container)}"
|
|
619
|
+
)
|
|
620
|
+
raise Exception(msg)
|
|
515
621
|
|
|
516
622
|
if os.path.exists(zip_name) and not overwrite:
|
|
517
623
|
msg = f'File "{zip_name}" already exists'
|
|
518
|
-
|
|
519
|
-
return False
|
|
624
|
+
raise Exception(msg)
|
|
520
625
|
|
|
521
626
|
params = {"container_id": container_id, "files": ";".join(filenames)}
|
|
522
627
|
with platform.post(
|
|
523
|
-
self._account.auth,
|
|
628
|
+
self._account.auth,
|
|
629
|
+
"file_manager/download_file",
|
|
630
|
+
data=params,
|
|
631
|
+
stream=True,
|
|
524
632
|
) as response, open(zip_name, "wb") as f:
|
|
525
633
|
|
|
526
|
-
for chunk in response.iter_content(chunk_size=2
|
|
634
|
+
for chunk in response.iter_content(chunk_size=2**9 * 1024):
|
|
527
635
|
f.write(chunk)
|
|
528
636
|
f.flush()
|
|
529
637
|
|
|
530
|
-
logger.info(
|
|
638
|
+
logger.info(
|
|
639
|
+
"Files from container {} saved to {}".format(
|
|
640
|
+
container_id, zip_name
|
|
641
|
+
)
|
|
642
|
+
)
|
|
531
643
|
return True
|
|
532
644
|
|
|
533
645
|
def copy_container_to_project(self, container_id, project_id):
|
|
@@ -551,9 +663,14 @@ class Project:
|
|
|
551
663
|
p_id = int(project_id)
|
|
552
664
|
elif type(project_id) is str:
|
|
553
665
|
projects = self._account.projects
|
|
554
|
-
projects_match = [
|
|
666
|
+
projects_match = [
|
|
667
|
+
proj for proj in projects if proj["name"] == project_id
|
|
668
|
+
]
|
|
555
669
|
if not projects_match:
|
|
556
|
-
raise Exception(
|
|
670
|
+
raise Exception(
|
|
671
|
+
f"Project {project_id}"
|
|
672
|
+
+ " does not exist or is not available for this user."
|
|
673
|
+
)
|
|
557
674
|
p_id = int(projects_match[0]["id"])
|
|
558
675
|
else:
|
|
559
676
|
raise TypeError("project_id")
|
|
@@ -564,10 +681,16 @@ class Project:
|
|
|
564
681
|
|
|
565
682
|
try:
|
|
566
683
|
platform.parse_response(
|
|
567
|
-
platform.post(
|
|
684
|
+
platform.post(
|
|
685
|
+
self._account.auth,
|
|
686
|
+
"file_manager/copy_container_to_another_project",
|
|
687
|
+
data=data,
|
|
688
|
+
)
|
|
568
689
|
)
|
|
569
690
|
except errors.PlatformError as e:
|
|
570
|
-
logging.getLogger(logger_name).error(
|
|
691
|
+
logging.getLogger(logger_name).error(
|
|
692
|
+
"Couldn not copy container: {}".format(e)
|
|
693
|
+
)
|
|
571
694
|
return False
|
|
572
695
|
|
|
573
696
|
return True
|
|
@@ -625,7 +748,11 @@ class Project:
|
|
|
625
748
|
"""
|
|
626
749
|
logger = logging.getLogger(logger_name)
|
|
627
750
|
try:
|
|
628
|
-
data = platform.parse_response(
|
|
751
|
+
data = platform.parse_response(
|
|
752
|
+
platform.post(
|
|
753
|
+
self._account.auth, "patient_manager/module_config"
|
|
754
|
+
)
|
|
755
|
+
)
|
|
629
756
|
except errors.PlatformError:
|
|
630
757
|
logger.error("Could not retrieve metadata parameters.")
|
|
631
758
|
return None
|
|
@@ -671,7 +798,10 @@ class Project:
|
|
|
671
798
|
response = self.list_input_containers(search_criteria=search_criteria)
|
|
672
799
|
|
|
673
800
|
for subject in response:
|
|
674
|
-
if
|
|
801
|
+
if (
|
|
802
|
+
subject["patient_secret_name"] == subject_name
|
|
803
|
+
and subject["ssid"] == ssid
|
|
804
|
+
):
|
|
675
805
|
return subject["container_id"]
|
|
676
806
|
return False
|
|
677
807
|
|
|
@@ -695,20 +825,25 @@ class Project:
|
|
|
695
825
|
"""
|
|
696
826
|
|
|
697
827
|
for user in self.get_subjects_metadata():
|
|
698
|
-
if user["patient_secret_name"] == str(subject_name) and user[
|
|
828
|
+
if user["patient_secret_name"] == str(subject_name) and user[
|
|
829
|
+
"ssid"
|
|
830
|
+
] == str(ssid):
|
|
699
831
|
return int(user["_id"])
|
|
700
832
|
return False
|
|
701
833
|
|
|
702
|
-
def get_subjects_metadata(self, search_criteria=
|
|
834
|
+
def get_subjects_metadata(self, search_criteria=None, items=(0, 9999)):
|
|
703
835
|
"""
|
|
704
836
|
List all Subject ID/Session ID from the selected project that meet the
|
|
705
|
-
|
|
837
|
+
defined search criteria at a session level.
|
|
706
838
|
|
|
707
839
|
Parameters
|
|
708
840
|
----------
|
|
709
841
|
search_criteria: dict
|
|
710
842
|
Each element is a string and is built using the formatting
|
|
711
843
|
"type;value", or "type;operation|value"
|
|
844
|
+
items : List[int]
|
|
845
|
+
list containing two elements [min, max] that correspond to the
|
|
846
|
+
mininum and maximum range of analysis listed
|
|
712
847
|
|
|
713
848
|
Complete search_criteria Dictionary Explanation:
|
|
714
849
|
|
|
@@ -722,8 +857,8 @@ class Project:
|
|
|
722
857
|
"pars_PROJECTMETADATA": "METADATATYPE;METADATAVALUE",
|
|
723
858
|
}
|
|
724
859
|
|
|
725
|
-
|
|
726
|
-
|
|
860
|
+
where "pars_patient_secret_name": Applies the search to the
|
|
861
|
+
'Subject ID'.
|
|
727
862
|
SUBJECTID is a comma separated list of strings.
|
|
728
863
|
"pars_ssid": Applies the search to the 'Session ID'.
|
|
729
864
|
SSID is an integer.
|
|
@@ -809,12 +944,26 @@ class Project:
|
|
|
809
944
|
|
|
810
945
|
"""
|
|
811
946
|
|
|
812
|
-
|
|
813
|
-
|
|
947
|
+
if search_criteria is None:
|
|
948
|
+
search_criteria = {}
|
|
949
|
+
if len(items) != 2:
|
|
950
|
+
raise ValueError(
|
|
951
|
+
f"The number of elements in items '{len(items)}' "
|
|
952
|
+
f"should be equal to two."
|
|
953
|
+
)
|
|
814
954
|
|
|
815
|
-
|
|
816
|
-
|
|
817
|
-
|
|
955
|
+
if not all([isinstance(item, int) for item in items]):
|
|
956
|
+
raise ValueError(
|
|
957
|
+
f"All values in items " f"'{items}' must be integers"
|
|
958
|
+
)
|
|
959
|
+
|
|
960
|
+
if search_criteria != {} and not all(
|
|
961
|
+
[item.startswith("pars_") for item in search_criteria.keys()]
|
|
962
|
+
):
|
|
963
|
+
raise ValueError(
|
|
964
|
+
f"All keys of the search_criteria dictionary "
|
|
965
|
+
f"'{search_criteria.keys()}' must start with 'pars_'."
|
|
966
|
+
)
|
|
818
967
|
|
|
819
968
|
for key, value in search_criteria.items():
|
|
820
969
|
if value.split(";")[0] in ["integer", "decimal"]:
|
|
@@ -833,7 +982,9 @@ class Project:
|
|
|
833
982
|
)
|
|
834
983
|
return content
|
|
835
984
|
|
|
836
|
-
def change_subject_metadata(
|
|
985
|
+
def change_subject_metadata(
|
|
986
|
+
self, patient_id, subject_name, ssid, tags, age_at_scan, metadata
|
|
987
|
+
):
|
|
837
988
|
"""
|
|
838
989
|
Change the Subject ID, Session ID, Tags, Age at Scan and Metadata of
|
|
839
990
|
the session with Patient ID
|
|
@@ -868,36 +1019,57 @@ class Project:
|
|
|
868
1019
|
try:
|
|
869
1020
|
patient_id = str(int(patient_id))
|
|
870
1021
|
except ValueError:
|
|
871
|
-
raise ValueError(
|
|
1022
|
+
raise ValueError(
|
|
1023
|
+
f"'patient_id': '{patient_id}' not valid. Must be convertible "
|
|
1024
|
+
f"to int."
|
|
1025
|
+
)
|
|
872
1026
|
|
|
873
|
-
|
|
1027
|
+
if not isinstance(tags, list) or not all(
|
|
874
1028
|
isinstance(item, str) for item in tags
|
|
875
|
-
)
|
|
1029
|
+
):
|
|
1030
|
+
raise ValueError(f"tags: '{tags}' should be a list of strings.")
|
|
876
1031
|
tags = [tag.lower() for tag in tags]
|
|
877
1032
|
|
|
878
|
-
|
|
879
|
-
|
|
1033
|
+
if not isinstance(subject_name, str) or (
|
|
1034
|
+
subject_name is None or subject_name == ""
|
|
1035
|
+
):
|
|
1036
|
+
raise ValueError("subject_name must be a non empty string.")
|
|
1037
|
+
|
|
1038
|
+
if not isinstance(ssid, str) or (ssid is None or ssid == ""):
|
|
1039
|
+
raise ValueError("ssid must be a non empty string.")
|
|
880
1040
|
|
|
881
1041
|
try:
|
|
882
1042
|
age_at_scan = str(int(age_at_scan)) if age_at_scan else None
|
|
883
1043
|
except ValueError:
|
|
884
|
-
raise ValueError(
|
|
1044
|
+
raise ValueError(
|
|
1045
|
+
f"age_at_scan: '{age_at_scan}' not valid. Must be an integer."
|
|
1046
|
+
)
|
|
885
1047
|
|
|
886
|
-
|
|
1048
|
+
if not isinstance(metadata, dict):
|
|
1049
|
+
raise ValueError(f"metadata: '{metadata}' should be a dictionary.")
|
|
887
1050
|
|
|
888
|
-
|
|
889
|
-
|
|
890
|
-
|
|
1051
|
+
has_md_prefix = ["md_" == key[:3] for key in metadata.keys()]
|
|
1052
|
+
if not (all(has_md_prefix) or not any(has_md_prefix)):
|
|
1053
|
+
raise ValueError(
|
|
1054
|
+
f"metadata: '{metadata}' must be a dictionary whose keys are "
|
|
1055
|
+
f"either all starting with 'md_' or none."
|
|
1056
|
+
)
|
|
891
1057
|
|
|
892
1058
|
metadata_keys = self.metadata_parameters.keys()
|
|
893
|
-
|
|
894
|
-
|
|
895
|
-
|
|
896
|
-
|
|
897
|
-
|
|
898
|
-
|
|
899
|
-
|
|
900
|
-
)
|
|
1059
|
+
if not all(
|
|
1060
|
+
(
|
|
1061
|
+
key[3:] in metadata_keys
|
|
1062
|
+
if "md_" == key[:3]
|
|
1063
|
+
else key in metadata_keys
|
|
1064
|
+
)
|
|
1065
|
+
for key in metadata.keys()
|
|
1066
|
+
):
|
|
1067
|
+
raise ValueError(
|
|
1068
|
+
f"Some metadata keys provided ({', '.join(metadata.keys())}) "
|
|
1069
|
+
"are not available in the project. They can be added via the "
|
|
1070
|
+
"Metadata Manager via the QMENTA Platform graphical user "
|
|
1071
|
+
"interface (GUI)."
|
|
1072
|
+
)
|
|
901
1073
|
|
|
902
1074
|
post_data = {
|
|
903
1075
|
"patient_id": patient_id,
|
|
@@ -907,11 +1079,17 @@ class Project:
|
|
|
907
1079
|
"age_at_scan": age_at_scan,
|
|
908
1080
|
}
|
|
909
1081
|
for key, value in metadata.items():
|
|
910
|
-
|
|
911
|
-
post_data[f"last_vals.{
|
|
1082
|
+
id_ = key[3:] if "md_" == key[:3] else key
|
|
1083
|
+
post_data[f"last_vals.{id_}"] = value
|
|
912
1084
|
|
|
913
1085
|
try:
|
|
914
|
-
platform.parse_response(
|
|
1086
|
+
platform.parse_response(
|
|
1087
|
+
platform.post(
|
|
1088
|
+
self._account.auth,
|
|
1089
|
+
"patient_manager/upsert_patient",
|
|
1090
|
+
data=post_data,
|
|
1091
|
+
)
|
|
1092
|
+
)
|
|
915
1093
|
except errors.PlatformError:
|
|
916
1094
|
logger.error(f"Patient ID '{patient_id}' could not be modified.")
|
|
917
1095
|
return False
|
|
@@ -919,7 +1097,9 @@ class Project:
|
|
|
919
1097
|
logger.info(f"Patient ID '{patient_id}' successfully modified.")
|
|
920
1098
|
return True
|
|
921
1099
|
|
|
922
|
-
def get_subjects_files_metadata(
|
|
1100
|
+
def get_subjects_files_metadata(
|
|
1101
|
+
self, search_criteria=None, items=(0, 9999)
|
|
1102
|
+
):
|
|
923
1103
|
"""
|
|
924
1104
|
List all Subject ID/Session ID from the selected project that meet the
|
|
925
1105
|
defined search criteria at a file level.
|
|
@@ -935,6 +1115,9 @@ class Project:
|
|
|
935
1115
|
search_criteria: dict
|
|
936
1116
|
Each element is a string and is built using the formatting
|
|
937
1117
|
"type;value", or "type;operation|value"
|
|
1118
|
+
items : List[int]
|
|
1119
|
+
list containing two elements [min, max] that correspond to the
|
|
1120
|
+
mininum and maximum range of analysis listed
|
|
938
1121
|
|
|
939
1122
|
Complete search_criteria Dictionary Explanation:
|
|
940
1123
|
|
|
@@ -1037,10 +1220,14 @@ class Project:
|
|
|
1037
1220
|
|
|
1038
1221
|
"""
|
|
1039
1222
|
|
|
1040
|
-
|
|
1223
|
+
if search_criteria is None:
|
|
1224
|
+
search_criteria = {}
|
|
1225
|
+
content = self.get_subjects_metadata(search_criteria, items=items)
|
|
1041
1226
|
|
|
1042
1227
|
# Wrap search criteria.
|
|
1043
|
-
modality, tags,
|
|
1228
|
+
modality, tags, dicom_metadata = self.__wrap_search_criteria(
|
|
1229
|
+
search_criteria
|
|
1230
|
+
)
|
|
1044
1231
|
|
|
1045
1232
|
# Iterate over the files of each subject selected to include/exclude
|
|
1046
1233
|
# them from the results.
|
|
@@ -1055,17 +1242,23 @@ class Project:
|
|
|
1055
1242
|
)
|
|
1056
1243
|
|
|
1057
1244
|
for file in files["meta"]:
|
|
1058
|
-
if modality and modality != (file.get("metadata") or {}).get(
|
|
1245
|
+
if modality and modality != (file.get("metadata") or {}).get(
|
|
1246
|
+
"modality"
|
|
1247
|
+
):
|
|
1059
1248
|
continue
|
|
1060
1249
|
if tags and not all([tag in file.get("tags") for tag in tags]):
|
|
1061
1250
|
continue
|
|
1062
|
-
if
|
|
1251
|
+
if dicom_metadata:
|
|
1063
1252
|
result_values = list()
|
|
1064
|
-
for key, dict_value in
|
|
1065
|
-
f_value = (
|
|
1253
|
+
for key, dict_value in dicom_metadata.items():
|
|
1254
|
+
f_value = (
|
|
1255
|
+
(file.get("metadata") or {}).get("info") or {}
|
|
1256
|
+
).get(key)
|
|
1066
1257
|
d_operator = dict_value["operation"]
|
|
1067
1258
|
d_value = dict_value["value"]
|
|
1068
|
-
result_values.append(
|
|
1259
|
+
result_values.append(
|
|
1260
|
+
self.__operation(d_value, d_operator, f_value)
|
|
1261
|
+
)
|
|
1069
1262
|
|
|
1070
1263
|
if not all(result_values):
|
|
1071
1264
|
continue
|
|
@@ -1119,7 +1312,12 @@ class Project:
|
|
|
1119
1312
|
platform.post(
|
|
1120
1313
|
self._account.auth,
|
|
1121
1314
|
"file_manager/edit_file",
|
|
1122
|
-
data={
|
|
1315
|
+
data={
|
|
1316
|
+
"container_id": container_id,
|
|
1317
|
+
"filename": filename,
|
|
1318
|
+
"tags": tags_str,
|
|
1319
|
+
"modality": modality,
|
|
1320
|
+
},
|
|
1123
1321
|
)
|
|
1124
1322
|
)
|
|
1125
1323
|
|
|
@@ -1132,7 +1330,7 @@ class Project:
|
|
|
1132
1330
|
----------
|
|
1133
1331
|
subject_name : str
|
|
1134
1332
|
Subject ID of the subject
|
|
1135
|
-
session_id :
|
|
1333
|
+
session_id : str
|
|
1136
1334
|
The Session ID of the session that will be deleted
|
|
1137
1335
|
|
|
1138
1336
|
Returns
|
|
@@ -1144,16 +1342,29 @@ class Project:
|
|
|
1144
1342
|
all_sessions = self.get_subjects_metadata()
|
|
1145
1343
|
|
|
1146
1344
|
session_to_del = [
|
|
1147
|
-
s
|
|
1345
|
+
s
|
|
1346
|
+
for s in all_sessions
|
|
1347
|
+
if s["patient_secret_name"] == subject_name
|
|
1348
|
+
and s["ssid"] == session_id
|
|
1148
1349
|
]
|
|
1149
1350
|
|
|
1150
1351
|
if not session_to_del:
|
|
1151
|
-
logger.error(
|
|
1352
|
+
logger.error(
|
|
1353
|
+
f"Session {subject_name}/{session_id} could not be found in "
|
|
1354
|
+
f"this project."
|
|
1355
|
+
)
|
|
1152
1356
|
return False
|
|
1153
1357
|
elif len(session_to_del) > 1:
|
|
1154
|
-
raise RuntimeError(
|
|
1358
|
+
raise RuntimeError(
|
|
1359
|
+
"Multiple sessions with same Subject ID and Session ID. "
|
|
1360
|
+
"Contact support."
|
|
1361
|
+
)
|
|
1155
1362
|
else:
|
|
1156
|
-
logger.info(
|
|
1363
|
+
logger.info(
|
|
1364
|
+
"{}/{} found (id {})".format(
|
|
1365
|
+
subject_name, session_id, session_to_del[0]["_id"]
|
|
1366
|
+
)
|
|
1367
|
+
)
|
|
1157
1368
|
|
|
1158
1369
|
session = session_to_del[0]
|
|
1159
1370
|
|
|
@@ -1162,14 +1373,23 @@ class Project:
|
|
|
1162
1373
|
platform.post(
|
|
1163
1374
|
self._account.auth,
|
|
1164
1375
|
"patient_manager/delete_patient",
|
|
1165
|
-
data={
|
|
1376
|
+
data={
|
|
1377
|
+
"patient_id": str(int(session["_id"])),
|
|
1378
|
+
"delete_files": 1,
|
|
1379
|
+
},
|
|
1166
1380
|
)
|
|
1167
1381
|
)
|
|
1168
1382
|
except errors.PlatformError:
|
|
1169
|
-
logger.error(
|
|
1383
|
+
logger.error(
|
|
1384
|
+
f"Session \"{subject_name}/{session['ssid']}\" could "
|
|
1385
|
+
f"not be deleted."
|
|
1386
|
+
)
|
|
1170
1387
|
return False
|
|
1171
1388
|
|
|
1172
|
-
logger.info(
|
|
1389
|
+
logger.info(
|
|
1390
|
+
f"Session \"{subject_name}/{session['ssid']}\" successfully "
|
|
1391
|
+
f"deleted."
|
|
1392
|
+
)
|
|
1173
1393
|
return True
|
|
1174
1394
|
|
|
1175
1395
|
def delete_session_by_patientid(self, patient_id):
|
|
@@ -1194,7 +1414,10 @@ class Project:
|
|
|
1194
1414
|
platform.post(
|
|
1195
1415
|
self._account.auth,
|
|
1196
1416
|
"patient_manager/delete_patient",
|
|
1197
|
-
data={
|
|
1417
|
+
data={
|
|
1418
|
+
"patient_id": str(int(patient_id)),
|
|
1419
|
+
"delete_files": 1,
|
|
1420
|
+
},
|
|
1198
1421
|
)
|
|
1199
1422
|
)
|
|
1200
1423
|
except errors.PlatformError:
|
|
@@ -1224,10 +1447,16 @@ class Project:
|
|
|
1224
1447
|
# Always fetch the session IDs from the platform before deleting them
|
|
1225
1448
|
all_sessions = self.get_subjects_metadata()
|
|
1226
1449
|
|
|
1227
|
-
sessions_to_del = [
|
|
1450
|
+
sessions_to_del = [
|
|
1451
|
+
s for s in all_sessions if s["patient_secret_name"] == subject_name
|
|
1452
|
+
]
|
|
1228
1453
|
|
|
1229
1454
|
if not sessions_to_del:
|
|
1230
|
-
logger.error(
|
|
1455
|
+
logger.error(
|
|
1456
|
+
"Subject {} cannot be found in this project.".format(
|
|
1457
|
+
subject_name
|
|
1458
|
+
)
|
|
1459
|
+
)
|
|
1231
1460
|
return False
|
|
1232
1461
|
|
|
1233
1462
|
for ssid in [s["ssid"] for s in sessions_to_del]:
|
|
@@ -1237,7 +1466,7 @@ class Project:
|
|
|
1237
1466
|
|
|
1238
1467
|
""" Container Related Methods """
|
|
1239
1468
|
|
|
1240
|
-
def list_input_containers(self, search_criteria=
|
|
1469
|
+
def list_input_containers(self, search_criteria=None, items=(0, 9999)):
|
|
1241
1470
|
"""
|
|
1242
1471
|
Retrieve the list of input containers available to the user under a
|
|
1243
1472
|
certain search criteria.
|
|
@@ -1271,8 +1500,17 @@ class Project:
|
|
|
1271
1500
|
{"container_name", "container_id", "patient_secret_name", "ssid"}
|
|
1272
1501
|
"""
|
|
1273
1502
|
|
|
1274
|
-
|
|
1275
|
-
|
|
1503
|
+
if search_criteria is None:
|
|
1504
|
+
search_criteria = {}
|
|
1505
|
+
if len(items) != 2:
|
|
1506
|
+
raise ValueError(
|
|
1507
|
+
f"The number of elements in items '{len(items)}' "
|
|
1508
|
+
f"should be equal to two."
|
|
1509
|
+
)
|
|
1510
|
+
if not all(isinstance(item, int) for item in items):
|
|
1511
|
+
raise ValueError(
|
|
1512
|
+
f"All items elements '{items}' should be integers."
|
|
1513
|
+
)
|
|
1276
1514
|
|
|
1277
1515
|
response = platform.parse_response(
|
|
1278
1516
|
platform.post(
|
|
@@ -1285,7 +1523,7 @@ class Project:
|
|
|
1285
1523
|
containers = [
|
|
1286
1524
|
{
|
|
1287
1525
|
"patient_secret_name": container_item["patient_secret_name"],
|
|
1288
|
-
"container_name": container_item["name"],
|
|
1526
|
+
"container_name": container_item["name"], # ???
|
|
1289
1527
|
"container_id": container_item["_id"],
|
|
1290
1528
|
"ssid": container_item["ssid"],
|
|
1291
1529
|
}
|
|
@@ -1293,7 +1531,7 @@ class Project:
|
|
|
1293
1531
|
]
|
|
1294
1532
|
return containers
|
|
1295
1533
|
|
|
1296
|
-
def list_result_containers(self, search_condition=
|
|
1534
|
+
def list_result_containers(self, search_condition=None, items=(0, 9999)):
|
|
1297
1535
|
"""
|
|
1298
1536
|
List the result containers available to the user.
|
|
1299
1537
|
Examples
|
|
@@ -1321,7 +1559,8 @@ class Project:
|
|
|
1321
1559
|
- qa_status: str or None pass/fail/nd QC status
|
|
1322
1560
|
- secret_name: str or None Subject ID
|
|
1323
1561
|
- tags: str or None
|
|
1324
|
-
- with_child_analysis: 1 or None if 1, child analysis of workflows
|
|
1562
|
+
- with_child_analysis: 1 or None if 1, child analysis of workflows
|
|
1563
|
+
will appear
|
|
1325
1564
|
- id: str or None ID
|
|
1326
1565
|
- state: running, completed, pending, exception or None
|
|
1327
1566
|
- username: str or None
|
|
@@ -1338,8 +1577,16 @@ class Project:
|
|
|
1338
1577
|
if "id": None, that analysis did not had an output container,
|
|
1339
1578
|
probably it is a workflow
|
|
1340
1579
|
"""
|
|
1580
|
+
if search_condition is None:
|
|
1581
|
+
search_condition = {}
|
|
1341
1582
|
analyses = self.list_analysis(search_condition, items)
|
|
1342
|
-
return [
|
|
1583
|
+
return [
|
|
1584
|
+
{
|
|
1585
|
+
"name": analysis["name"],
|
|
1586
|
+
"id": (analysis.get("out_container_id") or None),
|
|
1587
|
+
}
|
|
1588
|
+
for analysis in analyses
|
|
1589
|
+
]
|
|
1343
1590
|
|
|
1344
1591
|
def list_container_files(
|
|
1345
1592
|
self,
|
|
@@ -1360,7 +1607,9 @@ class Project:
|
|
|
1360
1607
|
try:
|
|
1361
1608
|
content = platform.parse_response(
|
|
1362
1609
|
platform.post(
|
|
1363
|
-
self._account.auth,
|
|
1610
|
+
self._account.auth,
|
|
1611
|
+
"file_manager/get_container_files",
|
|
1612
|
+
data={"container_id": container_id},
|
|
1364
1613
|
)
|
|
1365
1614
|
)
|
|
1366
1615
|
except errors.PlatformError as e:
|
|
@@ -1371,7 +1620,9 @@ class Project:
|
|
|
1371
1620
|
return False
|
|
1372
1621
|
return content["files"]
|
|
1373
1622
|
|
|
1374
|
-
def list_container_filter_files(
|
|
1623
|
+
def list_container_filter_files(
|
|
1624
|
+
self, container_id, modality="", metadata_info={}, tags=[]
|
|
1625
|
+
):
|
|
1375
1626
|
"""
|
|
1376
1627
|
List the name of the files available inside a given container.
|
|
1377
1628
|
search condition example:
|
|
@@ -1407,11 +1658,17 @@ class Project:
|
|
|
1407
1658
|
if modality == "":
|
|
1408
1659
|
modality_bool = True
|
|
1409
1660
|
else:
|
|
1410
|
-
modality_bool = modality == metadata_file["metadata"].get(
|
|
1661
|
+
modality_bool = modality == metadata_file["metadata"].get(
|
|
1662
|
+
"modality"
|
|
1663
|
+
)
|
|
1411
1664
|
for key in metadata_info.keys():
|
|
1412
|
-
meta_key = (
|
|
1665
|
+
meta_key = (
|
|
1666
|
+
(metadata_file.get("metadata") or {}).get("info") or {}
|
|
1667
|
+
).get(key)
|
|
1413
1668
|
if meta_key is None:
|
|
1414
|
-
logging.getLogger(logger_name).warning(
|
|
1669
|
+
logging.getLogger(logger_name).warning(
|
|
1670
|
+
f"{key} is not in file_info from file {file}"
|
|
1671
|
+
)
|
|
1415
1672
|
info_bool.append(metadata_info[key] == meta_key)
|
|
1416
1673
|
if all(tags_bool) and all(info_bool) and modality_bool:
|
|
1417
1674
|
selected_files.append(file)
|
|
@@ -1435,7 +1692,9 @@ class Project:
|
|
|
1435
1692
|
try:
|
|
1436
1693
|
data = platform.parse_response(
|
|
1437
1694
|
platform.post(
|
|
1438
|
-
self._account.auth,
|
|
1695
|
+
self._account.auth,
|
|
1696
|
+
"file_manager/get_container_files",
|
|
1697
|
+
data={"container_id": container_id},
|
|
1439
1698
|
)
|
|
1440
1699
|
)
|
|
1441
1700
|
except errors.PlatformError as e:
|
|
@@ -1448,7 +1707,8 @@ class Project:
|
|
|
1448
1707
|
|
|
1449
1708
|
def get_analysis(self, analysis_name_or_id):
|
|
1450
1709
|
"""
|
|
1451
|
-
Returns the analysis corresponding with the analysis id or analysis
|
|
1710
|
+
Returns the analysis corresponding with the analysis id or analysis
|
|
1711
|
+
name
|
|
1452
1712
|
|
|
1453
1713
|
Parameters
|
|
1454
1714
|
----------
|
|
@@ -1468,28 +1728,41 @@ class Project:
|
|
|
1468
1728
|
analysis_name_or_id = int(analysis_name_or_id)
|
|
1469
1729
|
else:
|
|
1470
1730
|
search_tag = "p_n"
|
|
1471
|
-
|
|
1472
|
-
|
|
1731
|
+
excluded_bool = [
|
|
1732
|
+
character in analysis_name_or_id
|
|
1733
|
+
for character in ANALYSIS_NAME_EXCLUDED_CHARACTERS
|
|
1734
|
+
]
|
|
1473
1735
|
if any(excluded_bool):
|
|
1474
|
-
raise Exception(
|
|
1736
|
+
raise Exception(
|
|
1737
|
+
f"p_n does not allow "
|
|
1738
|
+
f"characters {ANALYSIS_NAME_EXCLUDED_CHARACTERS}"
|
|
1739
|
+
)
|
|
1475
1740
|
else:
|
|
1476
|
-
raise Exception(
|
|
1741
|
+
raise Exception(
|
|
1742
|
+
"The analysis identifier must be its name or an integer"
|
|
1743
|
+
)
|
|
1477
1744
|
|
|
1478
1745
|
search_condition = {
|
|
1479
1746
|
search_tag: analysis_name_or_id,
|
|
1480
1747
|
}
|
|
1481
1748
|
response = platform.parse_response(
|
|
1482
|
-
platform.post(
|
|
1749
|
+
platform.post(
|
|
1750
|
+
self._account.auth,
|
|
1751
|
+
"analysis_manager/get_analysis_list",
|
|
1752
|
+
data=search_condition,
|
|
1753
|
+
)
|
|
1483
1754
|
)
|
|
1484
1755
|
|
|
1485
1756
|
if len(response) > 1:
|
|
1486
|
-
raise Exception(
|
|
1757
|
+
raise Exception(
|
|
1758
|
+
f"multiple analyses with name {analysis_name_or_id} found"
|
|
1759
|
+
)
|
|
1487
1760
|
elif len(response) == 1:
|
|
1488
1761
|
return response[0]
|
|
1489
1762
|
else:
|
|
1490
1763
|
return None
|
|
1491
1764
|
|
|
1492
|
-
def list_analysis(self, search_condition=
|
|
1765
|
+
def list_analysis(self, search_condition=None, items=(0, 9999)):
|
|
1493
1766
|
"""
|
|
1494
1767
|
List the analysis available to the user.
|
|
1495
1768
|
|
|
@@ -1518,10 +1791,12 @@ class Project:
|
|
|
1518
1791
|
- qa_status: str or None pass/fail/nd QC status
|
|
1519
1792
|
- secret_name: str or None Subject ID
|
|
1520
1793
|
- tags: str or None
|
|
1521
|
-
- with_child_analysis: 1 or None if 1, child analysis of workflows
|
|
1794
|
+
- with_child_analysis: 1 or None if 1, child analysis of workflows
|
|
1795
|
+
will appear
|
|
1522
1796
|
- id: int or None ID
|
|
1523
1797
|
- state: running, completed, pending, exception or None
|
|
1524
1798
|
- username: str or None
|
|
1799
|
+
- only_data: int or None
|
|
1525
1800
|
|
|
1526
1801
|
items : List[int]
|
|
1527
1802
|
list containing two elements [min, max] that correspond to the
|
|
@@ -1532,8 +1807,17 @@ class Project:
|
|
|
1532
1807
|
dict
|
|
1533
1808
|
List of analysis, each a dictionary
|
|
1534
1809
|
"""
|
|
1535
|
-
|
|
1536
|
-
|
|
1810
|
+
if search_condition is None:
|
|
1811
|
+
search_condition = {}
|
|
1812
|
+
if len(items) != 2:
|
|
1813
|
+
raise ValueError(
|
|
1814
|
+
f"The number of elements in items '{len(items)}' "
|
|
1815
|
+
f"should be equal to two."
|
|
1816
|
+
)
|
|
1817
|
+
if not all(isinstance(item, int) for item in items):
|
|
1818
|
+
raise ValueError(
|
|
1819
|
+
f"All items elements '{items}' should be integers."
|
|
1820
|
+
)
|
|
1537
1821
|
search_keys = {
|
|
1538
1822
|
"p_n": str,
|
|
1539
1823
|
"type": str,
|
|
@@ -1546,19 +1830,37 @@ class Project:
|
|
|
1546
1830
|
"with_child_analysis": int,
|
|
1547
1831
|
"id": int,
|
|
1548
1832
|
"state": str,
|
|
1833
|
+
"only_data": int,
|
|
1549
1834
|
"username": str,
|
|
1550
1835
|
}
|
|
1551
1836
|
for key in search_condition.keys():
|
|
1552
1837
|
if key not in search_keys.keys():
|
|
1553
|
-
raise Exception(
|
|
1554
|
-
|
|
1555
|
-
|
|
1838
|
+
raise Exception(
|
|
1839
|
+
(
|
|
1840
|
+
f"This key '{key}' is not accepted by this search "
|
|
1841
|
+
f"condition"
|
|
1842
|
+
)
|
|
1843
|
+
)
|
|
1844
|
+
if (
|
|
1845
|
+
not isinstance(search_condition[key], search_keys[key])
|
|
1846
|
+
and search_condition[key] is not None
|
|
1847
|
+
):
|
|
1848
|
+
raise Exception(
|
|
1849
|
+
(
|
|
1850
|
+
f"The key {key} in the search condition is not type "
|
|
1851
|
+
f"{search_keys[key]}"
|
|
1852
|
+
)
|
|
1853
|
+
)
|
|
1556
1854
|
if "p_n" == key:
|
|
1557
|
-
|
|
1558
|
-
|
|
1855
|
+
excluded_bool = [
|
|
1856
|
+
character in search_condition["p_n"]
|
|
1857
|
+
for character in ANALYSIS_NAME_EXCLUDED_CHARACTERS
|
|
1858
|
+
]
|
|
1559
1859
|
if any(excluded_bool):
|
|
1560
|
-
raise Exception(
|
|
1561
|
-
|
|
1860
|
+
raise Exception(
|
|
1861
|
+
"p_n does not allow "
|
|
1862
|
+
f"characters {ANALYSIS_NAME_EXCLUDED_CHARACTERS}"
|
|
1863
|
+
)
|
|
1562
1864
|
req_headers = {"X-Range": f"items={items[0]}-{items[1] - 1}"}
|
|
1563
1865
|
return platform.parse_response(
|
|
1564
1866
|
platform.post(
|
|
@@ -1623,7 +1925,9 @@ class Project:
|
|
|
1623
1925
|
logger = logging.getLogger(logger_name)
|
|
1624
1926
|
|
|
1625
1927
|
if in_container_id is None and settings is None:
|
|
1626
|
-
raise ValueError(
|
|
1928
|
+
raise ValueError(
|
|
1929
|
+
"Pass a value for either in_container_id or settings."
|
|
1930
|
+
)
|
|
1627
1931
|
|
|
1628
1932
|
post_data = {"script_name": script_name, "version": version}
|
|
1629
1933
|
|
|
@@ -1656,15 +1960,19 @@ class Project:
|
|
|
1656
1960
|
|
|
1657
1961
|
logger.debug(f"post_data = {post_data}")
|
|
1658
1962
|
return self.__handle_start_analysis(
|
|
1659
|
-
post_data,
|
|
1963
|
+
post_data,
|
|
1964
|
+
ignore_warnings=ignore_warnings,
|
|
1965
|
+
ignore_file_selection=ignore_file_selection,
|
|
1660
1966
|
)
|
|
1661
1967
|
|
|
1662
1968
|
def delete_analysis(self, analysis_id):
|
|
1663
1969
|
"""
|
|
1664
1970
|
Delete an analysis
|
|
1665
1971
|
|
|
1666
|
-
|
|
1667
|
-
|
|
1972
|
+
Parameters
|
|
1973
|
+
----------
|
|
1974
|
+
analysis_id : int
|
|
1975
|
+
ID of the analysis to be deleted
|
|
1668
1976
|
"""
|
|
1669
1977
|
logger = logging.getLogger(logger_name)
|
|
1670
1978
|
|
|
@@ -1692,18 +2000,23 @@ class Project:
|
|
|
1692
2000
|
Tools can not be restarted given that they are considered as single
|
|
1693
2001
|
processing units. You can start execution of another analysis instead.
|
|
1694
2002
|
|
|
1695
|
-
For the workflow to restart, all its failed child must be removed
|
|
1696
|
-
You can only restart your own analysis.
|
|
2003
|
+
For the workflow to restart, all its failed child must be removed
|
|
2004
|
+
first. You can only restart your own analysis.
|
|
1697
2005
|
|
|
1698
|
-
|
|
1699
|
-
|
|
2006
|
+
Parameters
|
|
2007
|
+
----------
|
|
2008
|
+
analysis_id : int
|
|
2009
|
+
ID of the analysis to be restarted
|
|
1700
2010
|
"""
|
|
1701
2011
|
logger = logging.getLogger(logger_name)
|
|
1702
2012
|
|
|
1703
2013
|
analysis = self.list_analysis({"id": analysis_id})[0]
|
|
1704
2014
|
|
|
1705
2015
|
if analysis.get("super_analysis_type") != 1:
|
|
1706
|
-
raise ValueError(
|
|
2016
|
+
raise ValueError(
|
|
2017
|
+
"The analysis indicated is not a workflow and hence, it "
|
|
2018
|
+
"cannot be restarted."
|
|
2019
|
+
)
|
|
1707
2020
|
|
|
1708
2021
|
try:
|
|
1709
2022
|
platform.parse_response(
|
|
@@ -1725,7 +2038,8 @@ class Project:
|
|
|
1725
2038
|
Get the log of an analysis and save it in the provided file.
|
|
1726
2039
|
The logs of analysis can only be obtained for the tools you created.
|
|
1727
2040
|
|
|
1728
|
-
Note workflows do not have a log so the printed message will only be
|
|
2041
|
+
Note workflows do not have a log so the printed message will only be
|
|
2042
|
+
ERROR.
|
|
1729
2043
|
You can only download the anlaysis log of the tools that you own.
|
|
1730
2044
|
|
|
1731
2045
|
Note this method is very time consuming.
|
|
@@ -1748,22 +2062,32 @@ class Project:
|
|
|
1748
2062
|
try:
|
|
1749
2063
|
analysis_id = str(int(analysis_id))
|
|
1750
2064
|
except ValueError:
|
|
1751
|
-
raise ValueError(
|
|
2065
|
+
raise ValueError(
|
|
2066
|
+
f"'analysis_id' has to be an integer not '{analysis_id}'."
|
|
2067
|
+
)
|
|
1752
2068
|
|
|
1753
2069
|
file_name = file_name if file_name else f"logs_{analysis_id}.txt"
|
|
1754
2070
|
try:
|
|
1755
2071
|
res = platform.post(
|
|
1756
2072
|
auth=self._account.auth,
|
|
1757
2073
|
endpoint="analysis_manager/download_execution_file",
|
|
1758
|
-
data={
|
|
2074
|
+
data={
|
|
2075
|
+
"project_id": analysis_id,
|
|
2076
|
+
"file": f"logs_{analysis_id}",
|
|
2077
|
+
},
|
|
1759
2078
|
timeout=1000,
|
|
1760
2079
|
)
|
|
1761
2080
|
except Exception:
|
|
1762
|
-
logger.error(
|
|
2081
|
+
logger.error(
|
|
2082
|
+
f"Could not export the analysis log of '{analysis_id}'"
|
|
2083
|
+
)
|
|
1763
2084
|
return False
|
|
1764
2085
|
|
|
1765
2086
|
if not res.ok:
|
|
1766
|
-
logger.error(
|
|
2087
|
+
logger.error(
|
|
2088
|
+
f"The log file could not be extracted for Analysis ID:"
|
|
2089
|
+
f" {analysis_id}."
|
|
2090
|
+
)
|
|
1767
2091
|
return False
|
|
1768
2092
|
|
|
1769
2093
|
with open(file_name, "w") as f:
|
|
@@ -1772,7 +2096,9 @@ class Project:
|
|
|
1772
2096
|
|
|
1773
2097
|
""" QC Status Related Methods """
|
|
1774
2098
|
|
|
1775
|
-
def set_qc_status_analysis(
|
|
2099
|
+
def set_qc_status_analysis(
|
|
2100
|
+
self, analysis_id, status=QCStatus.UNDERTERMINED, comments=""
|
|
2101
|
+
):
|
|
1776
2102
|
"""
|
|
1777
2103
|
Changes the analysis QC status.
|
|
1778
2104
|
|
|
@@ -1801,7 +2127,10 @@ class Project:
|
|
|
1801
2127
|
try:
|
|
1802
2128
|
analysis_id = str(int(analysis_id))
|
|
1803
2129
|
except ValueError:
|
|
1804
|
-
raise ValueError(
|
|
2130
|
+
raise ValueError(
|
|
2131
|
+
f"analysis_id: '{analysis_id}' not valid. Must be convertible "
|
|
2132
|
+
f"to int."
|
|
2133
|
+
)
|
|
1805
2134
|
|
|
1806
2135
|
try:
|
|
1807
2136
|
platform.parse_response(
|
|
@@ -1817,11 +2146,16 @@ class Project:
|
|
|
1817
2146
|
)
|
|
1818
2147
|
)
|
|
1819
2148
|
except Exception:
|
|
1820
|
-
logger.error(
|
|
2149
|
+
logger.error(
|
|
2150
|
+
f"It was not possible to change the QC status of Analysis ID:"
|
|
2151
|
+
f" {analysis_id}"
|
|
2152
|
+
)
|
|
1821
2153
|
return False
|
|
1822
2154
|
return True
|
|
1823
2155
|
|
|
1824
|
-
def set_qc_status_subject(
|
|
2156
|
+
def set_qc_status_subject(
|
|
2157
|
+
self, patient_id, status=QCStatus.UNDERTERMINED, comments=""
|
|
2158
|
+
):
|
|
1825
2159
|
"""
|
|
1826
2160
|
Changes the QC status of a Patient ID (equivalent to a
|
|
1827
2161
|
Subject ID/Session ID).
|
|
@@ -1850,7 +2184,10 @@ class Project:
|
|
|
1850
2184
|
try:
|
|
1851
2185
|
patient_id = str(int(patient_id))
|
|
1852
2186
|
except ValueError:
|
|
1853
|
-
raise ValueError(
|
|
2187
|
+
raise ValueError(
|
|
2188
|
+
f"'patient_id': '{patient_id}' not valid. Must be convertible"
|
|
2189
|
+
f" to int."
|
|
2190
|
+
)
|
|
1854
2191
|
|
|
1855
2192
|
try:
|
|
1856
2193
|
platform.parse_response(
|
|
@@ -1866,7 +2203,10 @@ class Project:
|
|
|
1866
2203
|
)
|
|
1867
2204
|
)
|
|
1868
2205
|
except Exception:
|
|
1869
|
-
logger.error(
|
|
2206
|
+
logger.error(
|
|
2207
|
+
f"It was not possible to change the QC status of Patient ID:"
|
|
2208
|
+
f" {patient_id}"
|
|
2209
|
+
)
|
|
1870
2210
|
return False
|
|
1871
2211
|
return True
|
|
1872
2212
|
|
|
@@ -1891,17 +2231,28 @@ class Project:
|
|
|
1891
2231
|
try:
|
|
1892
2232
|
search_criteria = {"id": analysis_id}
|
|
1893
2233
|
to_return = self.list_analysis(search_criteria)
|
|
1894
|
-
return
|
|
2234
|
+
return (
|
|
2235
|
+
convert_qc_value_to_qcstatus(to_return[0]["qa_status"]),
|
|
2236
|
+
to_return[0]["qa_comments"],
|
|
2237
|
+
)
|
|
1895
2238
|
except IndexError:
|
|
1896
2239
|
# Handle the case where no matching analysis is found
|
|
1897
|
-
logging.error(
|
|
2240
|
+
logging.error(
|
|
2241
|
+
f"No analysis was found with such Analysis ID: "
|
|
2242
|
+
f"'{analysis_id}'."
|
|
2243
|
+
)
|
|
1898
2244
|
return False, False
|
|
1899
2245
|
except Exception:
|
|
1900
2246
|
# Handle other potential exceptions
|
|
1901
|
-
logging.error(
|
|
2247
|
+
logging.error(
|
|
2248
|
+
f"It was not possible to extract the QC status from Analysis "
|
|
2249
|
+
f"ID: {analysis_id}"
|
|
2250
|
+
)
|
|
1902
2251
|
return False, False
|
|
1903
2252
|
|
|
1904
|
-
def get_qc_status_subject(
|
|
2253
|
+
def get_qc_status_subject(
|
|
2254
|
+
self, patient_id=None, subject_name=None, ssid=None
|
|
2255
|
+
):
|
|
1905
2256
|
"""
|
|
1906
2257
|
Gets the session QC status via the patient ID or the Subject ID
|
|
1907
2258
|
and the Session ID.
|
|
@@ -1929,26 +2280,50 @@ class Project:
|
|
|
1929
2280
|
try:
|
|
1930
2281
|
patient_id = int(patient_id)
|
|
1931
2282
|
except ValueError:
|
|
1932
|
-
raise ValueError(
|
|
2283
|
+
raise ValueError(
|
|
2284
|
+
f"patient_id '{patient_id}' should be an integer."
|
|
2285
|
+
)
|
|
1933
2286
|
sessions = self.get_subjects_metadata(search_criteria={})
|
|
1934
|
-
session = [
|
|
2287
|
+
session = [
|
|
2288
|
+
session
|
|
2289
|
+
for session in sessions
|
|
2290
|
+
if int(session["_id"]) == patient_id
|
|
2291
|
+
]
|
|
1935
2292
|
if len(session) < 1:
|
|
1936
|
-
logging.error(
|
|
2293
|
+
logging.error(
|
|
2294
|
+
f"No session was found with Patient ID: '{patient_id}'."
|
|
2295
|
+
)
|
|
1937
2296
|
return False, False
|
|
1938
|
-
return
|
|
2297
|
+
return (
|
|
2298
|
+
convert_qc_value_to_qcstatus(session[0]["qa_status"]),
|
|
2299
|
+
session[0]["qa_comments"],
|
|
2300
|
+
)
|
|
1939
2301
|
elif subject_name and ssid:
|
|
1940
2302
|
session = self.get_subjects_metadata(
|
|
1941
2303
|
search_criteria={
|
|
1942
2304
|
"pars_patient_secret_name": f"string;{subject_name}",
|
|
1943
|
-
"pars_ssid":
|
|
2305
|
+
"pars_ssid": (
|
|
2306
|
+
f"integer;eq|{ssid}"
|
|
2307
|
+
if str(ssid).isdigit()
|
|
2308
|
+
else f"string;{ssid}"
|
|
2309
|
+
),
|
|
1944
2310
|
}
|
|
1945
2311
|
)
|
|
1946
2312
|
if len(session) < 1:
|
|
1947
|
-
logging.error(
|
|
2313
|
+
logging.error(
|
|
2314
|
+
f"No session was found with Subject ID: '{subject_name}'"
|
|
2315
|
+
f" and Session ID: '{ssid}'."
|
|
2316
|
+
)
|
|
1948
2317
|
return False, False
|
|
1949
|
-
return
|
|
2318
|
+
return (
|
|
2319
|
+
convert_qc_value_to_qcstatus(session[0]["qa_status"]),
|
|
2320
|
+
session[0]["qa_comments"],
|
|
2321
|
+
)
|
|
1950
2322
|
else:
|
|
1951
|
-
raise ValueError(
|
|
2323
|
+
raise ValueError(
|
|
2324
|
+
"Either 'patient_id' or 'subject_name' and 'ssid' must "
|
|
2325
|
+
"not be empty."
|
|
2326
|
+
)
|
|
1952
2327
|
|
|
1953
2328
|
""" Protocol Adherence Related Methods """
|
|
1954
2329
|
|
|
@@ -1976,7 +2351,9 @@ class Project:
|
|
|
1976
2351
|
with open(rules_file_path, "r") as fr:
|
|
1977
2352
|
rules = json.load(fr)
|
|
1978
2353
|
except FileNotFoundError:
|
|
1979
|
-
logger.error(
|
|
2354
|
+
logger.error(
|
|
2355
|
+
f"Protocol adherence rule file '{rules_file_path}' not found."
|
|
2356
|
+
)
|
|
1980
2357
|
return False
|
|
1981
2358
|
|
|
1982
2359
|
# Update the project's QA rules
|
|
@@ -1984,18 +2361,26 @@ class Project:
|
|
|
1984
2361
|
platform.post(
|
|
1985
2362
|
auth=self._account.auth,
|
|
1986
2363
|
endpoint="projectset_manager/set_session_qa_requirements",
|
|
1987
|
-
data={
|
|
2364
|
+
data={
|
|
2365
|
+
"project_id": self._project_id,
|
|
2366
|
+
"rules": json.dumps(rules),
|
|
2367
|
+
"guidance_text": guidance_text,
|
|
2368
|
+
},
|
|
1988
2369
|
)
|
|
1989
2370
|
)
|
|
1990
2371
|
|
|
1991
2372
|
if not res.get("success") == 1:
|
|
1992
|
-
logger.error(
|
|
2373
|
+
logger.error(
|
|
2374
|
+
"There was an error setting up the protocol adherence rules."
|
|
2375
|
+
)
|
|
1993
2376
|
logger.error(platform.parse_response(res))
|
|
1994
2377
|
return False
|
|
1995
2378
|
|
|
1996
2379
|
return True
|
|
1997
2380
|
|
|
1998
|
-
def get_project_pa_rules(
|
|
2381
|
+
def get_project_pa_rules(
|
|
2382
|
+
self, rules_file_path, project_has_no_rules=False
|
|
2383
|
+
):
|
|
1999
2384
|
"""
|
|
2000
2385
|
Retrive the active project's protocol adherence rules
|
|
2001
2386
|
|
|
@@ -2004,6 +2389,8 @@ class Project:
|
|
|
2004
2389
|
rules_file_path : str
|
|
2005
2390
|
The file path to the JSON file to store the protocol adherence
|
|
2006
2391
|
rules.
|
|
2392
|
+
project_has_no_rules: bool
|
|
2393
|
+
for testing purposes
|
|
2007
2394
|
|
|
2008
2395
|
Returns
|
|
2009
2396
|
-------
|
|
@@ -2023,47 +2410,58 @@ class Project:
|
|
|
2023
2410
|
)
|
|
2024
2411
|
)
|
|
2025
2412
|
|
|
2026
|
-
if "rules" not in res:
|
|
2027
|
-
logger.error(
|
|
2413
|
+
if "rules" not in res or project_has_no_rules:
|
|
2414
|
+
logger.error(
|
|
2415
|
+
f"There was an error extracting the protocol adherence rules"
|
|
2416
|
+
f" from {self._project_name}."
|
|
2417
|
+
)
|
|
2028
2418
|
logger.error(platform.parse_response(res))
|
|
2029
2419
|
return False
|
|
2030
2420
|
|
|
2031
2421
|
try:
|
|
2032
2422
|
for rule in res["rules"]:
|
|
2033
|
-
|
|
2034
|
-
|
|
2035
|
-
|
|
2423
|
+
for key in ["_id", "order", "time_modified"]:
|
|
2424
|
+
if rule.get(key, False):
|
|
2425
|
+
del rule[key]
|
|
2036
2426
|
with open(rules_file_path, "w") as fr:
|
|
2037
2427
|
json.dump(res["rules"], fr, indent=4)
|
|
2038
2428
|
except FileNotFoundError:
|
|
2039
|
-
logger.error(
|
|
2429
|
+
logger.error(
|
|
2430
|
+
f"Protocol adherence rules could not be exported to file: "
|
|
2431
|
+
f"'{rules_file_path}'."
|
|
2432
|
+
)
|
|
2040
2433
|
return False
|
|
2041
2434
|
|
|
2042
2435
|
return res["guidance_text"]
|
|
2043
2436
|
|
|
2044
2437
|
def parse_qc_text(self, patient_id=None, subject_name=None, ssid=None):
|
|
2045
2438
|
"""
|
|
2046
|
-
Parse QC (Quality Control) text output into a structured dictionary
|
|
2439
|
+
Parse QC (Quality Control) text output into a structured dictionary
|
|
2440
|
+
format.
|
|
2047
2441
|
|
|
2048
|
-
This function takes raw QC text output (
|
|
2049
|
-
and parses it into a structured format that
|
|
2050
|
-
along with their associated files
|
|
2442
|
+
This function takes raw QC text output (from the Protocol Adherence
|
|
2443
|
+
analysis) and parses it into a structured format that
|
|
2444
|
+
separates passed and failed rules, along with their associated files
|
|
2445
|
+
and conditions.
|
|
2051
2446
|
|
|
2052
2447
|
Args:
|
|
2053
2448
|
patient_id (str, optional):
|
|
2054
2449
|
Patient identifier. Defaults to None.
|
|
2055
2450
|
subject_name (str, optional):
|
|
2056
|
-
Subject/patient name. Defaults to None. Mandatory if no
|
|
2451
|
+
Subject/patient name. Defaults to None. Mandatory if no
|
|
2452
|
+
patient_id is provided.
|
|
2057
2453
|
ssid (str, optional):
|
|
2058
|
-
Session ID. Defaults to None. Mandatory if subject_name is
|
|
2454
|
+
Session ID. Defaults to None. Mandatory if subject_name is
|
|
2455
|
+
provided.
|
|
2059
2456
|
|
|
2060
2457
|
Returns:
|
|
2061
|
-
dict: A structured dictionary containing a list of dictionaries
|
|
2062
|
-
|
|
2063
|
-
|
|
2064
|
-
|
|
2065
|
-
|
|
2066
|
-
|
|
2458
|
+
dict: A structured dictionary containing a list of dictionaries
|
|
2459
|
+
with passed rules and their details and failed rules and their
|
|
2460
|
+
details. Details of passed rules are:
|
|
2461
|
+
per each rule: Files that have passed the rule. Per each file name
|
|
2462
|
+
of the file and number of conditions of the rule. Details of
|
|
2463
|
+
failed rules are: per each rule failed conditions: Number of
|
|
2464
|
+
times it failed. Each condition status.
|
|
2067
2465
|
|
|
2068
2466
|
Example:
|
|
2069
2467
|
>>> parse_qc_text(subject_name="patient_123", ssid=1)
|
|
@@ -2089,7 +2487,7 @@ class Project:
|
|
|
2089
2487
|
"conditions": [
|
|
2090
2488
|
{
|
|
2091
2489
|
"status": "failed",
|
|
2092
|
-
"condition": "SliceThickness between
|
|
2490
|
+
"condition": "SliceThickness between.."
|
|
2093
2491
|
}
|
|
2094
2492
|
]
|
|
2095
2493
|
}
|
|
@@ -2102,20 +2500,19 @@ class Project:
|
|
|
2102
2500
|
}
|
|
2103
2501
|
"""
|
|
2104
2502
|
|
|
2105
|
-
_, text = self.get_qc_status_subject(
|
|
2503
|
+
_, text = self.get_qc_status_subject(
|
|
2504
|
+
patient_id=patient_id, subject_name=subject_name, ssid=ssid
|
|
2505
|
+
)
|
|
2106
2506
|
|
|
2107
|
-
result = {
|
|
2108
|
-
"passed": [],
|
|
2109
|
-
"failed": []
|
|
2110
|
-
}
|
|
2507
|
+
result = {"passed": [], "failed": []}
|
|
2111
2508
|
|
|
2112
2509
|
# Split into failed and passed sections
|
|
2113
|
-
sections = re.split(r
|
|
2510
|
+
sections = re.split(r"={10,}\n\n", text)
|
|
2114
2511
|
if len(sections) == 3:
|
|
2115
|
-
failed_section = sections[1].split(
|
|
2512
|
+
failed_section = sections[1].split("=" * 10)[0].strip()
|
|
2116
2513
|
passed_section = sections[2].strip()
|
|
2117
2514
|
else:
|
|
2118
|
-
section = sections[1].split(
|
|
2515
|
+
section = sections[1].split("=" * 10)[0].strip()
|
|
2119
2516
|
if "PASSED QC MESSAGES" in section:
|
|
2120
2517
|
passed_section = section
|
|
2121
2518
|
failed_section = ""
|
|
@@ -2123,106 +2520,39 @@ class Project:
|
|
|
2123
2520
|
failed_section = section
|
|
2124
2521
|
passed_section = ""
|
|
2125
2522
|
|
|
2126
|
-
|
|
2127
|
-
failed_rules = re.split(r
|
|
2128
|
-
|
|
2129
|
-
rule_name = rule_text.split(' ❌')[0].strip()
|
|
2130
|
-
rule_data = {
|
|
2131
|
-
"rule": rule_name,
|
|
2132
|
-
"files": [],
|
|
2133
|
-
"failed_conditions": {}
|
|
2134
|
-
}
|
|
2135
|
-
|
|
2136
|
-
# Extract all file comparisons for this rule
|
|
2137
|
-
file_comparisons = re.split(r'\t- Comparison with file:', rule_text)
|
|
2138
|
-
for comp in file_comparisons[1:]: # Skip first part
|
|
2139
|
-
file_name = comp.split('\n')[0].strip()
|
|
2140
|
-
conditions_match = re.search(
|
|
2141
|
-
r'Conditions:(.*?)(?=\n\t- Comparison|\n\n|$)',
|
|
2142
|
-
comp,
|
|
2143
|
-
re.DOTALL
|
|
2144
|
-
)
|
|
2145
|
-
if not conditions_match:
|
|
2146
|
-
continue
|
|
2147
|
-
|
|
2148
|
-
conditions_text = conditions_match.group(1).strip()
|
|
2149
|
-
# Parse conditions
|
|
2150
|
-
conditions = []
|
|
2151
|
-
for line in conditions_text.split('\n'):
|
|
2152
|
-
line = line.strip()
|
|
2153
|
-
if line.startswith('·'):
|
|
2154
|
-
status = '✔' if '✔' in line else '🚫'
|
|
2155
|
-
condition = re.sub(r'^· [✔🚫]\s*', '', line)
|
|
2156
|
-
conditions.append({
|
|
2157
|
-
"status": "passed" if status == '✔' else "failed",
|
|
2158
|
-
"condition": condition
|
|
2159
|
-
})
|
|
2160
|
-
|
|
2161
|
-
# Add to failed conditions summary
|
|
2162
|
-
for cond in conditions:
|
|
2163
|
-
if cond['status'] == 'failed':
|
|
2164
|
-
cond_text = cond['condition']
|
|
2165
|
-
if cond_text not in rule_data['failed_conditions']:
|
|
2166
|
-
rule_data['failed_conditions'][cond_text] = 0
|
|
2167
|
-
rule_data['failed_conditions'][cond_text] += 1
|
|
2168
|
-
|
|
2169
|
-
rule_data['files'].append({
|
|
2170
|
-
"file": file_name,
|
|
2171
|
-
"conditions": conditions
|
|
2172
|
-
})
|
|
2173
|
-
|
|
2174
|
-
result['failed'].append(rule_data)
|
|
2523
|
+
# Parse failed rules
|
|
2524
|
+
failed_rules = re.split(r"\n ❌ ", failed_section)
|
|
2525
|
+
result = self.__parse_fail_rules(failed_rules, result)
|
|
2175
2526
|
|
|
2176
2527
|
# Parse passed rules
|
|
2177
|
-
passed_rules = re.split(r'\n ✅ ', passed_section)
|
|
2178
|
-
for rule_text in passed_rules[1:]:  # Skip first empty part
|
|
2179
|
-
rule_name = rule_text.split(' ✅')[0].strip()
|
|
2180
|
-
rule_data = {
|
|
2181
|
-
"rule": rule_name,
|
|
2182
|
-
"sub_rule": None,
|
|
2183
|
-
"files": []
|
|
2184
|
-
}
|
|
2185
|
-
|
|
2186
|
-
# Get sub-rule
|
|
2187
|
-
sub_rule_match = re.search(r'Sub-rule: (.*?)\n', rule_text)
|
|
2188
|
-
if sub_rule_match:
|
|
2189
|
-
rule_data['sub_rule'] = sub_rule_match.group(1).strip()
|
|
2190
|
-
|
|
2191
|
-
# Get files passed
|
|
2192
|
-
files_passed = re.search(r'List of files passed:(.*?)(?=\n\n|\Z)', rule_text, re.DOTALL)
|
|
2193
|
-
if files_passed:
|
|
2194
|
-
for line in files_passed.group(1).split('\n'):
|
|
2195
|
-
line = line.strip()
|
|
2196
|
-
if line.startswith('·'):
|
|
2197
|
-
file_match = re.match(r'· (.*?) \((\d+)/(\d+)\)', line)
|
|
2198
|
-
if file_match:
|
|
2199
|
-
rule_data['files'].append({
|
|
2200
|
-
"file": file_match.group(1).strip(),
|
|
2201
|
-
"passed_conditions": int(file_match.group(2)),
|
|
2202
|
-
})
|
|
2203
|
-
|
|
2204
|
-
result['passed'].append(rule_data)
|
|
2528
|
+
passed_rules = re.split(r"\n ✅ ", passed_section)
|
|
2529
|
+
result = self.__parse_pass_rules(passed_rules, result)
|
|
2205
2530
|
|
|
2206
2531
|
return result
|
|
2207
2532
|
|
|
2208
2533
|
def calculate_qc_statistics(self):
|
|
2209
2534
|
"""
|
|
2210
|
-
Calculate comprehensive statistics from multiple QC results across
|
|
2211
|
-
platform.
|
|
2535
|
+
Calculate comprehensive statistics from multiple QC results across
|
|
2536
|
+
subjects from a project in the QMENTA platform.
|
|
2212
2537
|
|
|
2213
|
-
This function aggregates and analyzes QC results from
|
|
2214
|
-
providing statistical insights about
|
|
2215
|
-
and condition failure patterns.
|
|
2538
|
+
This function aggregates and analyzes QC results from
|
|
2539
|
+
multiple subjects/containers, providing statistical insights about
|
|
2540
|
+
rule pass/fail rates, file statistics, and condition failure patterns.
|
|
2216
2541
|
|
|
2217
2542
|
Returns:
|
|
2218
|
-
dict: A dictionary containing comprehensive QC statistics
|
|
2543
|
+
dict: A dictionary containing comprehensive QC statistics
|
|
2544
|
+
including:
|
|
2219
2545
|
- passed_rules: Total count of passed rules across all subjects
|
|
2220
2546
|
- failed_rules: Total count of failed rules across all subjects
|
|
2221
2547
|
- subjects_passed: Count of subjects with no failed rules
|
|
2222
|
-
- subjects_with_failed: Count of subjects with at least one
|
|
2223
|
-
|
|
2224
|
-
-
|
|
2225
|
-
|
|
2548
|
+
- subjects_with_failed: Count of subjects with at least one
|
|
2549
|
+
failed rule
|
|
2550
|
+
- num_passed_files_distribution: Distribution of how many
|
|
2551
|
+
rules have N passed files
|
|
2552
|
+
- file_stats: File-level statistics (total, passed, failed,
|
|
2553
|
+
pass percentage)
|
|
2554
|
+
- condition_failure_rates: Frequency and percentage of each
|
|
2555
|
+
failed condition
|
|
2226
2556
|
- rule_success_rates: Success rates for each rule type
|
|
2227
2557
|
|
|
2228
2558
|
The statistics help identify:
|
|
@@ -2268,88 +2598,144 @@ class Project:
|
|
|
2268
2598
|
containers = self.list_input_containers()
|
|
2269
2599
|
|
|
2270
2600
|
for c in containers:
|
|
2271
|
-
qc_results_list.append(
|
|
2601
|
+
qc_results_list.append(
|
|
2602
|
+
self.parse_qc_text(
|
|
2603
|
+
subject_name=c["patient_secret_name"], ssid=c["ssid"]
|
|
2604
|
+
)
|
|
2605
|
+
)
|
|
2272
2606
|
|
|
2273
2607
|
# Initialize statistics
|
|
2274
2608
|
stats = {
|
|
2275
|
-
|
|
2276
|
-
|
|
2609
|
+
"passed_rules": 0,
|
|
2610
|
+
"failed_rules": 0,
|
|
2277
2611
|
"subjects_passed": 0,
|
|
2278
2612
|
"subjects_with_failed": 0,
|
|
2279
|
-
|
|
2280
|
-
|
|
2281
|
-
|
|
2282
|
-
|
|
2283
|
-
|
|
2284
|
-
|
|
2613
|
+
"num_passed_files_distribution": defaultdict(
|
|
2614
|
+
int
|
|
2615
|
+
), # How many rules have N passed files
|
|
2616
|
+
"file_stats": {
|
|
2617
|
+
"total": 0,
|
|
2618
|
+
"passed": 0,
|
|
2619
|
+
"failed": 0,
|
|
2620
|
+
"pass_percentage": 0.0,
|
|
2285
2621
|
},
|
|
2286
|
-
|
|
2287
|
-
|
|
2622
|
+
"condition_failure_rates": defaultdict(
|
|
2623
|
+
lambda: {"count": 0, "percentage": 0.0}
|
|
2624
|
+
),
|
|
2625
|
+
"rule_success_rates": defaultdict(
|
|
2626
|
+
lambda: {"passed": 0, "failed": 0, "success_rate": 0.0}
|
|
2627
|
+
),
|
|
2288
2628
|
}
|
|
2289
2629
|
|
|
2290
2630
|
total_failures = 0
|
|
2291
2631
|
|
|
2292
2632
|
# sum subjects with not failed qc message
|
|
2293
|
-
stats["subjects_passed"] = sum(
|
|
2633
|
+
stats["subjects_passed"] = sum(
|
|
2634
|
+
[1 for rules in qc_results_list if not rules["failed"]]
|
|
2635
|
+
)
|
|
2294
2636
|
# sum subjects with some failed qc message
|
|
2295
|
-
stats["subjects_with_failed"] = sum(
|
|
2637
|
+
stats["subjects_with_failed"] = sum(
|
|
2638
|
+
[1 for rules in qc_results_list if rules["failed"]]
|
|
2639
|
+
)
|
|
2296
2640
|
# sum rules that have passed
|
|
2297
|
-
stats["passed_rules"] = sum(
|
|
2641
|
+
stats["passed_rules"] = sum(
|
|
2642
|
+
[
|
|
2643
|
+
len(rules["passed"])
|
|
2644
|
+
for rules in qc_results_list
|
|
2645
|
+
if rules["failed"]
|
|
2646
|
+
]
|
|
2647
|
+
)
|
|
2298
2648
|
# sum rules that have failed
|
|
2299
|
-
stats["failed_rules"] = sum(
|
|
2649
|
+
stats["failed_rules"] = sum(
|
|
2650
|
+
[
|
|
2651
|
+
len(rules["failed"])
|
|
2652
|
+
for rules in qc_results_list
|
|
2653
|
+
if rules["failed"]
|
|
2654
|
+
]
|
|
2655
|
+
)
|
|
2300
2656
|
|
|
2301
2657
|
for qc_results in qc_results_list:
|
|
2302
2658
|
|
|
2303
2659
|
# Count passed files distribution
|
|
2304
|
-
for rule in qc_results['passed']:
|
|
2305
|
-
num_files = len(rule['files'])
|
|
2306
|
-
stats['num_passed_files_distribution'][num_files] += 1
|
|
2307
|
-
stats['file_stats']['passed'] += len(rule['files'])
|
|
2308
|
-
stats['file_stats']['total'] += len(rule['files'])
|
|
2309
|
-
rule_name = rule['rule']
|
|
2310
|
-
stats['rule_success_rates'][rule_name]['passed'] += 1
|
|
2311
|
-
|
|
2312
|
-
for rule in qc_results['failed']:
|
|
2313
|
-
stats['file_stats']['total'] += len(rule['files'])
|
|
2314
|
-
stats['file_stats']['failed'] += len(rule['files'])
|
|
2315
|
-
for condition, count in rule['failed_conditions'].items():
|
|
2660
|
+
for rule in qc_results["passed"]:
|
|
2661
|
+
num_files = len(rule["files"])
|
|
2662
|
+
stats["num_passed_files_distribution"][num_files] += 1
|
|
2663
|
+
stats["file_stats"]["passed"] += len(rule["files"])
|
|
2664
|
+
stats["file_stats"]["total"] += len(rule["files"])
|
|
2665
|
+
rule_name = rule["rule"]
|
|
2666
|
+
stats["rule_success_rates"][rule_name]["passed"] += 1
|
|
2667
|
+
|
|
2668
|
+
for rule in qc_results["failed"]:
|
|
2669
|
+
stats["file_stats"]["total"] += len(rule["files"])
|
|
2670
|
+
stats["file_stats"]["failed"] += len(rule["files"])
|
|
2671
|
+
for condition, count in rule["failed_conditions"].items():
|
|
2316
2672
|
# Extract just the condition text without actual value
|
|
2317
|
-
clean_condition = re.sub(r'\.\s*Actual value:.*$', '', condition)
|
|
2318
|
-
stats['condition_failure_rates'][clean_condition]['count'] += count
|
|
2673
|
+
clean_condition = re.sub(
|
|
2674
|
+
r"\.\s*Actual value:.*$", "", condition
|
|
2675
|
+
)
|
|
2676
|
+
stats["condition_failure_rates"][clean_condition][
|
|
2677
|
+
"count"
|
|
2678
|
+
] += count
|
|
2319
2679
|
total_failures += count
|
|
2320
|
-
rule_name = rule['rule']
|
|
2321
|
-
stats['rule_success_rates'][rule_name]['failed'] += 1
|
|
2322
|
-
|
|
2323
|
-
if stats['file_stats']['total'] > 0:
|
|
2324
|
-
stats['file_stats']['pass_percentage'] = round(
|
|
2325
|
-
(stats['file_stats']['passed'] / stats['file_stats']['total']) * 100, 2
|
|
2680
|
+
rule_name = rule["rule"]
|
|
2681
|
+
stats["rule_success_rates"][rule_name]["failed"] += 1
|
|
2682
|
+
|
|
2683
|
+
if stats["file_stats"]["total"] > 0:
|
|
2684
|
+
stats["file_stats"]["pass_percentage"] = round(
|
|
2685
|
+
(stats["file_stats"]["passed"] / stats["file_stats"]["total"])
|
|
2686
|
+
* 100,
|
|
2687
|
+
2,
|
|
2326
2688
|
)
|
|
2327
2689
|
|
|
2328
2690
|
# Calculate condition failure percentages
|
|
2329
|
-
for condition in stats['condition_failure_rates']:
|
|
2691
|
+
for condition in stats["condition_failure_rates"]:
|
|
2330
2692
|
if total_failures > 0:
|
|
2331
|
-
stats['condition_failure_rates'][condition]['percentage'] = round(
|
|
2332
|
-
(stats['condition_failure_rates'][condition]['count'] / total_failures) * 100, 2
|
|
2693
|
+
stats["condition_failure_rates"][condition]["percentage"] = (
|
|
2694
|
+
round(
|
|
2695
|
+
(
|
|
2696
|
+
stats["condition_failure_rates"][condition][
|
|
2697
|
+
"count"
|
|
2698
|
+
]
|
|
2699
|
+
/ total_failures
|
|
2700
|
+
)
|
|
2701
|
+
* 100,
|
|
2702
|
+
2,
|
|
2703
|
+
)
|
|
2333
2704
|
)
|
|
2334
2705
|
|
|
2335
2706
|
# Calculate rule success rates
|
|
2336
|
-
for rule in stats['rule_success_rates']:
|
|
2337
|
-
total = stats['rule_success_rates'][rule]['passed'] + stats['rule_success_rates'][rule]['failed']
|
|
2707
|
+
for rule in stats["rule_success_rates"]:
|
|
2708
|
+
total = (
|
|
2709
|
+
stats["rule_success_rates"][rule]["passed"]
|
|
2710
|
+
+ stats["rule_success_rates"][rule]["failed"]
|
|
2711
|
+
)
|
|
2338
2712
|
if total > 0:
|
|
2339
|
-
stats['rule_success_rates'][rule]['success_rate'] = round(
|
|
2340
|
-
(stats['rule_success_rates'][rule]['passed'] / total) * 100, 2
|
|
2713
|
+
stats["rule_success_rates"][rule]["success_rate"] = round(
|
|
2714
|
+
(stats["rule_success_rates"][rule]["passed"] / total)
|
|
2715
|
+
* 100,
|
|
2716
|
+
2,
|
|
2341
2717
|
)
|
|
2342
2718
|
|
|
2343
2719
|
# Convert defaultdict to regular dict for cleaner JSON output
|
|
2344
|
-
stats['num_passed_files_distribution'] = dict(stats['num_passed_files_distribution'])
|
|
2345
|
-
stats['condition_failure_rates'] = dict(stats['condition_failure_rates'])
|
|
2346
|
-
stats['rule_success_rates'] = dict(stats['rule_success_rates'])
|
|
2720
|
+
stats["num_passed_files_distribution"] = dict(
|
|
2721
|
+
stats["num_passed_files_distribution"]
|
|
2722
|
+
)
|
|
2723
|
+
stats["condition_failure_rates"] = dict(
|
|
2724
|
+
stats["condition_failure_rates"]
|
|
2725
|
+
)
|
|
2726
|
+
stats["rule_success_rates"] = dict(stats["rule_success_rates"])
|
|
2347
2727
|
|
|
2348
2728
|
return stats
|
|
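A short sketch of reading the statistics assembled above (hypothetical usage; an authenticated Project instance named project is assumed):

    stats = project.calculate_qc_statistics()
    print(f"Subjects passed: {stats['subjects_passed']}, "
          f"subjects with failures: {stats['subjects_with_failed']}")
    print(f"File pass rate: {stats['file_stats']['pass_percentage']}%")
    # rule_success_rates maps each rule name to passed/failed counts and a rate
    for rule, rates in stats["rule_success_rates"].items():
        print(f"{rule}: {rates['success_rate']}% success")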
2349
2729
|
|
|
2350
2730
|
""" Helper Methods """
|
|
2351
2731
|
|
|
2352
|
-
def __handle_start_analysis(self, post_data, ignore_warnings=False, ignore_file_selection=True, n_calls=0):
|
|
2732
|
+
def __handle_start_analysis(
|
|
2733
|
+
self,
|
|
2734
|
+
post_data,
|
|
2735
|
+
ignore_warnings=False,
|
|
2736
|
+
ignore_file_selection=True,
|
|
2737
|
+
n_calls=0,
|
|
2738
|
+
):
|
|
2353
2739
|
"""
|
|
2354
2740
|
Handle the possible responses from the server after start_analysis.
|
|
2355
2741
|
Sometimes we have to send a request again, and then check again the
|
|
@@ -2369,13 +2755,21 @@ class Project:
|
|
|
2369
2755
|
than {n_calls} times: aborting."
|
|
2370
2756
|
)
|
|
2371
2757
|
return None
|
|
2372
|
-
|
|
2758
|
+
response = None
|
|
2373
2759
|
try:
|
|
2374
2760
|
response = platform.parse_response(
|
|
2375
|
-
platform.post(self._account.auth, "analysis_manager/analysis_registration", data=post_data)
|
|
2761
|
+
platform.post(
|
|
2762
|
+
self._account.auth,
|
|
2763
|
+
"analysis_manager/analysis_registration",
|
|
2764
|
+
data=post_data,
|
|
2765
|
+
)
|
|
2376
2766
|
)
|
|
2377
2767
|
logger.info(response["message"])
|
|
2378
|
-
return
|
|
2768
|
+
return (
|
|
2769
|
+
int(response["analysis_id"])
|
|
2770
|
+
if "analysis_id" in response
|
|
2771
|
+
else None
|
|
2772
|
+
)
|
|
2379
2773
|
|
|
2380
2774
|
except platform.ChooseDataError as choose_data:
|
|
2381
2775
|
if ignore_file_selection:
|
|
@@ -2407,19 +2801,25 @@ class Project:
|
|
|
2407
2801
|
self.__handle_manual_choose_data(new_post, choose_data)
|
|
2408
2802
|
else:
|
|
2409
2803
|
if has_warning and not ignore_warnings:
|
|
2410
|
-
logger.error("Cancelling analysis due to warnings, set 'ignore_warnings' to True to override.")
|
|
2804
|
+
logger.error(
|
|
2805
|
+
"Cancelling analysis due to warnings, set "
|
|
2806
|
+
"'ignore_warnings' to True to override."
|
|
2807
|
+
)
|
|
2411
2808
|
new_post["cancel"] = "1"
|
|
2412
2809
|
else:
|
|
2413
2810
|
logger.info("suppressing warnings")
|
|
2414
2811
|
new_post["user_preference"] = "{}"
|
|
2415
2812
|
new_post["_mint_only_warning"] = "1"
|
|
2416
2813
|
|
|
2417
|
-
return self.__handle_start_analysis(new_post, ignore_warnings, ignore_file_selection, n_calls)
|
|
2814
|
+
return self.__handle_start_analysis(
|
|
2815
|
+
new_post, ignore_warnings, ignore_file_selection, n_calls
|
|
2816
|
+
)
|
|
2418
2817
|
except platform.ActionFailedError as e:
|
|
2419
2818
|
logger.error(f"Unable to start the analysis: {e}.")
|
|
2420
2819
|
return None
|
|
2421
2820
|
|
|
2422
|
-
def __handle_manual_choose_data(self, post_data, choose_data):
|
|
2821
|
+
@staticmethod
|
|
2822
|
+
def __handle_manual_choose_data(post_data, choose_data):
|
|
2423
2823
|
"""
|
|
2424
2824
|
Handle the responses of the user when there is a need to select a file
|
|
2425
2825
|
to start the analysis.
|
|
@@ -2432,15 +2832,22 @@ class Project:
|
|
|
2432
2832
|
post_data : dict
|
|
2433
2833
|
Current post_data dictionary. To be modified in-place.
|
|
2434
2834
|
choose_data : platform.ChooseDataError
|
|
2435
|
-
Error raised when trying to start an analysis, but data has to
|
|
2835
|
+
Error raised when trying to start an analysis, but data has to
|
|
2836
|
+
be chosen.
|
|
2436
2837
|
"""
|
|
2437
2838
|
|
|
2438
2839
|
logger = logging.getLogger(logger_name)
|
|
2439
|
-
logger.warning("Multiple inputs available. You have to select the desired file/s to continue.")
|
|
2840
|
+
logger.warning(
|
|
2841
|
+
"Multiple inputs available. You have to select the desired file/s "
|
|
2842
|
+
"to continue."
|
|
2843
|
+
)
|
|
2440
2844
|
# in case we have data to choose
|
|
2441
2845
|
chosen_files = {}
|
|
2442
2846
|
for settings_key in choose_data.data_to_choose:
|
|
2443
|
-
logger.warning(f"Type next the file/s for the input with ID: '{settings_key}'.")
|
|
2847
|
+
logger.warning(
|
|
2848
|
+
f"Type next the file/s for the input with ID: "
|
|
2849
|
+
f"'{settings_key}'."
|
|
2850
|
+
)
|
|
2444
2851
|
chosen_files[settings_key] = {}
|
|
2445
2852
|
filters = choose_data.data_to_choose[settings_key]["filters"]
|
|
2446
2853
|
for filter_key in filters:
|
|
@@ -2455,7 +2862,9 @@ class Project:
|
|
|
2455
2862
|
if filter_data["range"][0] != 0:
|
|
2456
2863
|
number_of_files_to_select = filter_data["range"][0]
|
|
2457
2864
|
elif filter_data["range"][1] != 0:
|
|
2458
|
-
number_of_files_to_select = min(filter_data["range"][1], len(filter_data["files"]))
|
|
2865
|
+
number_of_files_to_select = min(
|
|
2866
|
+
filter_data["range"][1], len(filter_data["files"])
|
|
2867
|
+
)
|
|
2459
2868
|
else:
|
|
2460
2869
|
number_of_files_to_select = len(filter_data["files"])
|
|
2461
2870
|
|
|
@@ -2467,19 +2876,29 @@ class Project:
|
|
|
2467
2876
|
# list_container_filter_files()
|
|
2468
2877
|
|
|
2469
2878
|
if number_of_files_to_select != len(filter_data["files"]):
|
|
2879
|
+
substring = ""
|
|
2880
|
+
if number_of_files_to_select > 1:
|
|
2881
|
+
substring = "s (i.e., file1.zip, file2.zip, file3.zip)"
|
|
2470
2882
|
logger.warning(
|
|
2471
2883
|
f" · File filter name: '{filter_key}'. Type "
|
|
2472
|
-
f"{number_of_files_to_select} file"
|
|
2473
|
-
f"{'s (i.e., file1.zip, file2.zip, file3.zip)' if number_of_files_to_select > 1 else ''}."
|
|
2884
|
+
f"{number_of_files_to_select} file{substring}."
|
|
2474
2885
|
)
|
|
2475
2886
|
save_file_ids, select_file_filter = {}, ""
|
|
2476
2887
|
for file_ in filter_data["files"]:
|
|
2477
|
-
select_file_filter += f" · File name: {file_['name']}\n"
|
|
2888
|
+
select_file_filter += (
|
|
2889
|
+
f" · File name: {file_['name']}\n"
|
|
2890
|
+
)
|
|
2478
2891
|
save_file_ids[file_["name"]] = file_["_id"]
|
|
2479
|
-
names = [el.strip() for el in input(select_file_filter).strip().split(",")]
|
|
2892
|
+
names = [
|
|
2893
|
+
el.strip()
|
|
2894
|
+
for el in input(select_file_filter).strip().split(",")
|
|
2895
|
+
]
|
|
2480
2896
|
|
|
2481
2897
|
if len(names) != number_of_files_to_select:
|
|
2482
|
-
logger.error("The number of files selected does not correspond to the number of needed files.")
|
|
2898
|
+
logger.error(
|
|
2899
|
+
"The number of files selected does not correspond "
|
|
2900
|
+
"to the number of needed files."
|
|
2901
|
+
)
|
|
2483
2902
|
logger.error(
|
|
2484
2903
|
f"Selected: {len(names)} vs. "
|
|
2485
2904
|
f"Number of files to select: "
|
|
@@ -2489,14 +2908,27 @@ class Project:
|
|
|
2489
2908
|
post_data["cancel"] = "1"
|
|
2490
2909
|
|
|
2491
2910
|
elif any([name not in save_file_ids for name in names]):
|
|
2492
|
-
logger.error(f"Some selected file/s '{', '.join(names)}' do not exist. Cancelling analysis...")
|
|
2911
|
+
logger.error(
|
|
2912
|
+
f"Some selected file/s '{', '.join(names)}' "
|
|
2913
|
+
f"do not exist. Cancelling analysis..."
|
|
2914
|
+
)
|
|
2493
2915
|
post_data["cancel"] = "1"
|
|
2494
2916
|
else:
|
|
2495
|
-
chosen_files[settings_key][filter_key] = [save_file_ids[name] for name in names]
|
|
2917
|
+
chosen_files[settings_key][filter_key] = [
|
|
2918
|
+
save_file_ids[name] for name in names
|
|
2919
|
+
]
|
|
2496
2920
|
|
|
2497
2921
|
else:
|
|
2498
|
-
logger.warning("Setting all available files to be input to the analysis.")
|
|
2499
|
-
files_selection = [ff["_id"] for ff in filter_data["files"][:number_of_files_to_select]]
|
|
2922
|
+
logger.warning(
|
|
2923
|
+
"Setting all available files to be input to the "
|
|
2924
|
+
"analysis."
|
|
2925
|
+
)
|
|
2926
|
+
files_selection = [
|
|
2927
|
+
ff["_id"]
|
|
2928
|
+
for ff in filter_data["files"][
|
|
2929
|
+
:number_of_files_to_select
|
|
2930
|
+
]
|
|
2931
|
+
]
|
|
2500
2932
|
chosen_files[settings_key][filter_key] = files_selection
|
|
2501
2933
|
|
|
2502
2934
|
post_data["user_preference"] = json.dumps(chosen_files)
|
|
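For reference, a hypothetical instance of the chosen_files structure serialized above. The input ID, filter name, and file IDs are made up; only the nesting (settings key, then filter key, then a list of file _id values) follows the code:

    import json

    chosen_files = {
        "input_anatomical": {                       # hypothetical settings_key
            "t1_filter": ["64a0c1f2", "64a0c1f3"],  # hypothetical filter_key -> file _ids
        },
    }
    post_data = {}
    post_data["user_preference"] = json.dumps(chosen_files)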
@@ -2555,11 +2987,12 @@ class Project:
|
|
|
2555
2987
|
else:
|
|
2556
2988
|
return True
|
|
2557
2989
|
|
|
2558
|
-
def __operation(self, reference_value, operator, input_value):
|
|
2990
|
+
@staticmethod
|
|
2991
|
+
def __operation(reference_value, operator, input_value):
|
|
2559
2992
|
"""
|
|
2560
2993
|
The method performs an operation by comparing the two input values.
|
|
2561
|
-
The Operation is applied to the Input Value in comparison to the
|
|
2562
|
-
Value.
|
|
2994
|
+
The Operation is applied to the Input Value in comparison to the
|
|
2995
|
+
Reference Value.
|
|
2563
2996
|
|
|
2564
2997
|
Parameters
|
|
2565
2998
|
----------
|
|
@@ -2575,39 +3008,32 @@ class Project:
|
|
|
2575
3008
|
bool
|
|
2576
3009
|
True if the operation is satisfied, False otherwise.
|
|
2577
3010
|
"""
|
|
2578
|
-
if input_value
|
|
3011
|
+
if not input_value: # Handles None, "", and other falsy values
|
|
2579
3012
|
return False
|
|
2580
3013
|
|
|
2581
|
-
if operator == "in":
|
|
2582
|
-
return reference_value in input_value
|
|
2583
|
-
|
|
2584
|
-
elif operator == "in-list":
|
|
2585
|
-
return all(el in input_value for el in reference_value)
|
|
2586
|
-
|
|
2587
|
-
elif operator == "eq":
|
|
2588
|
-
return input_value == reference_value
|
|
2589
|
-
|
|
2590
|
-
elif operator == "gt":
|
|
2591
|
-
return input_value > reference_value
|
|
2592
|
-
|
|
2593
|
-
elif operator == "gte":
|
|
2594
|
-
return input_value >= reference_value
|
|
2595
|
-
|
|
2596
|
-
elif operator == "lt":
|
|
2597
|
-
return input_value < reference_value
|
|
3014
|
+
operator_actions = {
|
|
3015
|
+
"in": lambda: reference_value in input_value,
|
|
3016
|
+
"in-list": lambda: all(
|
|
3017
|
+
el in input_value for el in reference_value
|
|
3018
|
+
),
|
|
3019
|
+
"eq": lambda: input_value == reference_value,
|
|
3020
|
+
"gt": lambda: input_value > reference_value,
|
|
3021
|
+
"gte": lambda: input_value >= reference_value,
|
|
3022
|
+
"lt": lambda: input_value < reference_value,
|
|
3023
|
+
"lte": lambda: input_value <= reference_value,
|
|
3024
|
+
}
|
|
2598
3025
|
|
|
2599
|
-
elif operator == "lte":
|
|
2600
|
-
return input_value <= reference_value
|
|
2601
|
-
else:
|
|
2602
|
-
return False
|
|
3026
|
+
action = operator_actions.get(operator, lambda: False)
|
|
3027
|
+
return action()
|
|
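The lambdas in operator_actions defer evaluation: only the selected comparison runs, so branches that would raise for mismatched types (for example a membership test on a number) are never executed, and unknown operators fall back to False. A minimal standalone sketch of the same pattern:

    def compare(reference_value, operator, input_value):
        # Illustrative mirror of the dispatch table above, not the real method.
        actions = {
            "eq": lambda: input_value == reference_value,
            "gte": lambda: input_value >= reference_value,
            "in": lambda: reference_value in input_value,
        }
        return actions.get(operator, lambda: False)()

    compare(3.0, "gte", 4.5)              # True: 4.5 >= 3.0
    compare("T1", "in", ["T1", "FLAIR"])  # True: "T1" is in the input list
    compare(1, "unknown-op", 2)           # False: unsupported operator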
2603
3028
|
|
|
2604
|
-
|
|
3029
|
+
@staticmethod
|
|
3030
|
+
def __wrap_search_criteria(search_criteria=None):
|
|
2605
3031
|
"""
|
|
2606
3032
|
Wraps the conditions specified within the Search Criteria in order for
|
|
2607
3033
|
other methods to handle it easily. The conditions are grouped into
|
|
2608
|
-
three groups: Modality, Tags and the File Metadata (if DICOM it
|
|
2609
|
-
to the DICOM information), and each of them is output
|
|
2610
|
-
variable.
|
|
3034
|
+
three groups: Modality, Tags and the File Metadata (if DICOM it
|
|
3035
|
+
corresponds to the DICOM information), and each of them is output
|
|
3036
|
+
in a different variable.
|
|
2611
3037
|
|
|
2612
3038
|
Parameters
|
|
2613
3039
|
----------
|
|
@@ -2631,27 +3057,27 @@ class Project:
|
|
|
2631
3057
|
|
|
2632
3058
|
Returns
|
|
2633
3059
|
-------
|
|
2634
|
-
|
|
2635
|
-
|
|
2636
|
-
|
|
2637
|
-
|
|
2638
|
-
|
|
2639
|
-
|
|
2640
|
-
|
|
2641
|
-
|
|
2642
|
-
file_metadata : Dict
|
|
2643
|
-
Dictionary containing the file metadata of the search criteria
|
|
3060
|
+
tuple
|
|
3061
|
+
A tuple containing:
|
|
3062
|
+
- str: modality is a string containing the modality of the search
|
|
3063
|
+
criteria extracted from 'pars_modalities';
|
|
3064
|
+
- list: tags is a list of strings containing the tags of the search
|
|
3065
|
+
criteria extracted from 'pars_tags';
|
|
3066
|
+
- dict: containing the file metadata of the search criteria
|
|
2644
3067
|
extracted from 'pars_[dicom]_KEY'
|
|
2645
3068
|
"""
|
|
2646
3069
|
|
|
2647
3070
|
# The keys not included below apply to the whole session.
|
|
3071
|
+
if search_criteria is None:
|
|
3072
|
+
search_criteria = {}
|
|
2648
3073
|
modality, tags, file_metadata = "", list(), dict()
|
|
2649
3074
|
for key, value in search_criteria.items():
|
|
2650
3075
|
if key == "pars_modalities":
|
|
2651
3076
|
modalities = value.split(";")[1].split(",")
|
|
2652
3077
|
if len(modalities) != 1:
|
|
2653
3078
|
raise ValueError(
|
|
2654
|
-
f"A file can only have one modality.
|
|
3079
|
+
f"A file can only have one modality. "
|
|
3080
|
+
f"Provided Modalities: {', '.join(modalities)}."
|
|
2655
3081
|
)
|
|
2656
3082
|
modality = modalities[0]
|
|
2657
3083
|
elif key == "pars_tags":
|
|
@@ -2660,16 +3086,162 @@ class Project:
|
|
|
2660
3086
|
d_tag = key.split("pars_[dicom]_")[1]
|
|
2661
3087
|
d_type = value.split(";")[0]
|
|
2662
3088
|
if d_type == "string":
|
|
2663
|
-
file_metadata[d_tag] = {"operation": "in", "value": value.replace(d_type + ";", "")}
|
|
3089
|
+
file_metadata[d_tag] = {
|
|
3090
|
+
"operation": "in",
|
|
3091
|
+
"value": value.replace(d_type + ";", ""),
|
|
3092
|
+
}
|
|
2664
3093
|
elif d_type == "integer":
|
|
2665
3094
|
d_operator = value.split(";")[1].split("|")[0]
|
|
2666
3095
|
d_value = value.split(";")[1].split("|")[1]
|
|
2667
|
-
file_metadata[d_tag] = {"operation": d_operator, "value": int(d_value)}
|
|
3096
|
+
file_metadata[d_tag] = {
|
|
3097
|
+
"operation": d_operator,
|
|
3098
|
+
"value": int(d_value),
|
|
3099
|
+
}
|
|
2668
3100
|
elif d_type == "decimal":
|
|
2669
3101
|
d_operator = value.split(";")[1].split("|")[0]
|
|
2670
3102
|
d_value = value.split(";")[1].split("|")[1]
|
|
2671
|
-
file_metadata[d_tag] = {"operation": d_operator, "value": float(d_value)}
|
|
3103
|
+
file_metadata[d_tag] = {
|
|
3104
|
+
"operation": d_operator,
|
|
3105
|
+
"value": float(d_value),
|
|
3106
|
+
}
|
|
2672
3107
|
elif d_type == "list":
|
|
2673
3108
|
value.replace(d_type + ";", "")
|
|
2674
|
-
file_metadata[d_tag] = {"operation": "in-list", "value": value.replace(d_type + ";", "").split(";")}
|
|
3109
|
+
file_metadata[d_tag] = {
|
|
3110
|
+
"operation": "in-list",
|
|
3111
|
+
"value": value.replace(d_type + ";", "").split(";"),
|
|
3112
|
+
}
|
|
2675
3113
|
return modality, tags, file_metadata
|
|
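Working backwards from the parsing above, each value encodes as "<type>;<payload>", with integer and decimal payloads carrying an "<operator>|<value>" pair. The DICOM tags and values below are hypothetical; the real keys come from the platform's search form:

    criteria = {
        "pars_[dicom]_Manufacturer": "string;SIEMENS",      # {"operation": "in", "value": "SIEMENS"}
        "pars_[dicom]_SeriesNumber": "integer;gte|2",       # {"operation": "gte", "value": 2}
        "pars_[dicom]_SliceThickness": "decimal;lte|3.0",   # {"operation": "lte", "value": 3.0}
        "pars_[dicom]_ImageType": "list;ORIGINAL;PRIMARY",  # {"operation": "in-list", "value": ["ORIGINAL", "PRIMARY"]}
    }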
3114
|
+
|
|
3115
|
+
@staticmethod
|
|
3116
|
+
def __assert_split_data(split_data, ssid, add_to_container_id):
|
|
3117
|
+
"""
|
|
3118
|
+
Check whether the split_data parameter can be used given the
|
|
3119
|
+
ssid and add_to_container_id parameters during upload.
|
|
3120
|
+
Sets it to False if needed.
|
|
3121
|
+
|
|
3122
|
+
Parameters
|
|
3123
|
+
----------
|
|
3124
|
+
split_data : Bool
|
|
3125
|
+
split_data parameter from method 'upload_file'.
|
|
3126
|
+
ssid : str
|
|
3127
|
+
Session ID.
|
|
3128
|
+
add_to_container_id : int or bool
|
|
3129
|
+
Container ID or False
|
|
3130
|
+
|
|
3131
|
+
Returns
|
|
3132
|
+
-------
|
|
3133
|
+
split_data : Bool
|
|
3134
|
+
|
|
3135
|
+
"""
|
|
3136
|
+
|
|
3137
|
+
logger = logging.getLogger(logger_name)
|
|
3138
|
+
if ssid and split_data:
|
|
3139
|
+
logger.warning(
|
|
3140
|
+
"split-data argument will be ignored because ssid has been "
|
|
3141
|
+
"specified"
|
|
3142
|
+
)
|
|
3143
|
+
split_data = False
|
|
3144
|
+
|
|
3145
|
+
if add_to_container_id and split_data:
|
|
3146
|
+
logger.warning(
|
|
3147
|
+
"split-data argument will be ignored because "
|
|
3148
|
+
"add_to_container_id has been specified"
|
|
3149
|
+
)
|
|
3150
|
+
split_data = False
|
|
3151
|
+
|
|
3152
|
+
return split_data
|
|
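The decision logic above reduces to: split_data survives only when neither ssid nor add_to_container_id is set. A standalone mirror, warnings omitted, for illustration:

    def resolve_split_data(split_data, ssid, add_to_container_id):
        # Mirrors __assert_split_data: an explicit session ID or target
        # container makes splitting the upload meaningless, so turn it off.
        if ssid and split_data:
            split_data = False
        if add_to_container_id and split_data:
            split_data = False
        return split_data

    resolve_split_data(True, "1", 0)    # False: ssid given
    resolve_split_data(True, "", 1234)  # False: container given
    resolve_split_data(True, "", 0)     # True: nothing overrides it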
3153
|
+
|
|
3154
|
+
@staticmethod
|
|
3155
|
+
def __parse_pass_rules(passed_rules, result):
|
|
3156
|
+
"""
|
|
3157
|
+
Parse the passed rules into the result dictionary.
|
|
3158
|
+
"""
|
|
3159
|
+
|
|
3160
|
+
for rule_text in passed_rules[1:]: # Skip first empty part
|
|
3161
|
+
rule_name = rule_text.split(" ✅")[0].strip()
|
|
3162
|
+
rule_data = {"rule": rule_name, "sub_rule": None, "files": []}
|
|
3163
|
+
|
|
3164
|
+
# Get sub-rule
|
|
3165
|
+
sub_rule_match = re.search(r"Sub-rule: (.*?)\n", rule_text)
|
|
3166
|
+
if sub_rule_match:
|
|
3167
|
+
rule_data["sub_rule"] = sub_rule_match.group(1).strip()
|
|
3168
|
+
|
|
3169
|
+
# Get files passed
|
|
3170
|
+
files_passed = re.search(
|
|
3171
|
+
r"List of files passed:(.*?)(?=\n\n|\Z)", rule_text, re.DOTALL
|
|
3172
|
+
)
|
|
3173
|
+
if files_passed:
|
|
3174
|
+
for line in files_passed.group(1).split("\n"):
|
|
3175
|
+
line = line.strip()
|
|
3176
|
+
if line.startswith("·"):
|
|
3177
|
+
file_match = re.match(r"· (.*?) \((\d+)/(\d+)\)", line)
|
|
3178
|
+
if file_match:
|
|
3179
|
+
rule_data["files"].append(
|
|
3180
|
+
{
|
|
3181
|
+
"file": file_match.group(1).strip(),
|
|
3182
|
+
"passed_conditions": int(
|
|
3183
|
+
file_match.group(2)
|
|
3184
|
+
),
|
|
3185
|
+
}
|
|
3186
|
+
)
|
|
3187
|
+
|
|
3188
|
+
result["passed"].append(rule_data)
|
|
3189
|
+
return result
|
|
3190
|
+
|
|
3191
|
+
@staticmethod
|
|
3192
|
+
def __parse_fail_rules(failed_rules, result):
|
|
3193
|
+
"""
|
|
3194
|
+
Parse the failed rules into the result dictionary.
|
|
3195
|
+
"""
|
|
3196
|
+
|
|
3197
|
+
for rule_text in failed_rules[1:]: # Skip first empty part
|
|
3198
|
+
rule_name = rule_text.split(" ❌")[0].strip()
|
|
3199
|
+
rule_data = {
|
|
3200
|
+
"rule": rule_name,
|
|
3201
|
+
"files": [],
|
|
3202
|
+
"failed_conditions": {},
|
|
3203
|
+
}
|
|
3204
|
+
|
|
3205
|
+
# Extract all file comparisons for this rule
|
|
3206
|
+
file_comparisons = re.split(r"- Comparison with file:", rule_text)
|
|
3207
|
+
for comp in file_comparisons[1:]: # Skip first part
|
|
3208
|
+
file_name = comp.split("\n")[0].strip()
|
|
3209
|
+
conditions_match = re.search(
|
|
3210
|
+
r"Conditions:(.*?)(?=\n\t- Comparison|\n\n|$)",
|
|
3211
|
+
comp,
|
|
3212
|
+
re.DOTALL,
|
|
3213
|
+
)
|
|
3214
|
+
if not conditions_match:
|
|
3215
|
+
continue
|
|
3216
|
+
|
|
3217
|
+
conditions_text = conditions_match.group(1).strip()
|
|
3218
|
+
# Parse conditions
|
|
3219
|
+
conditions = []
|
|
3220
|
+
for line in conditions_text.split("\n"):
|
|
3221
|
+
line = line.strip()
|
|
3222
|
+
if line.startswith("·"):
|
|
3223
|
+
status = "✔" if "✔" in line else "🚫"
|
|
3224
|
+
condition = re.sub(r"^· [✔🚫]\s*", "", line)
|
|
3225
|
+
conditions.append(
|
|
3226
|
+
{
|
|
3227
|
+
"status": (
|
|
3228
|
+
"passed" if status == "✔" else "failed"
|
|
3229
|
+
),
|
|
3230
|
+
"condition": condition,
|
|
3231
|
+
}
|
|
3232
|
+
)
|
|
3233
|
+
|
|
3234
|
+
# Add to failed conditions summary
|
|
3235
|
+
for cond in conditions:
|
|
3236
|
+
if cond["status"] == "failed":
|
|
3237
|
+
cond_text = cond["condition"]
|
|
3238
|
+
if cond_text not in rule_data["failed_conditions"]:
|
|
3239
|
+
rule_data["failed_conditions"][cond_text] = 0
|
|
3240
|
+
rule_data["failed_conditions"][cond_text] += 1
|
|
3241
|
+
|
|
3242
|
+
rule_data["files"].append(
|
|
3243
|
+
{"file": file_name, "conditions": conditions}
|
|
3244
|
+
)
|
|
3245
|
+
|
|
3246
|
+
result["failed"].append(rule_data)
|
|
3247
|
+
return result
|
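Read together, the regular expressions in __parse_pass_rules and __parse_fail_rules imply raw QC text roughly along these lines (an illustrative reconstruction, not a verbatim platform sample):

    ❌ Rule A
        - Comparison with file: sub-01_T1w.dcm
          Conditions:
            · 🚫 SliceThickness between 1 and 3. Actual value: 5
            · ✔ Modality equals T1

    ✅ Rule B
    Sub-rule: session-level
    List of files passed:
        · sub-01_T1w.dcm (4/4)

Each '· 🚫' or '· ✔' line becomes a condition entry with status "failed" or "passed", the "(passed/total)" counts feed passed_conditions, and failed condition texts are tallied per rule in failed_conditions.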