qmenta-client 2.1.dev1508__py3-none-any.whl → 2.1.dev1510__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
qmenta/client/Project.py CHANGED
@@ -9,7 +9,6 @@ import sys
9
9
  import time
10
10
  from collections import defaultdict
11
11
  from enum import Enum
12
-
13
12
  from qmenta.core import errors
14
13
  from qmenta.core import platform
15
14
 
@@ -21,6 +20,17 @@ if sys.version_info[0] == 3:
21
20
 
22
21
  logger_name = "qmenta.client"
23
22
  OPERATOR_LIST = ["eq", "ne", "gt", "gte", "lt", "lte"]
23
+ ANALYSIS_NAME_EXCLUDED_CHARACTERS = [
24
+ "\\",
25
+ "[",
26
+ "]",
27
+ "(",
28
+ ")",
29
+ "{",
30
+ "}",
31
+ "+",
32
+ "*",
33
+ ]
24
34
 
25
35
 
26
36
  def convert_qc_value_to_qcstatus(value):
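
The new module-level ANALYSIS_NAME_EXCLUDED_CHARACTERS constant replaces the inline excluded-character lists previously duplicated in get_analysis and list_analysis. A minimal pre-validation sketch, assuming only this module; the analysis names are hypothetical:

    from qmenta.client.Project import ANALYSIS_NAME_EXCLUDED_CHARACTERS

    def is_searchable_analysis_name(name):
        # Mirrors the check done in get_analysis/list_analysis: any of
        # these regex-special characters makes a "p_n" search invalid.
        return not any(c in name for c in ANALYSIS_NAME_EXCLUDED_CHARACTERS)

    assert is_searchable_analysis_name("My Analysis 01")
    assert not is_searchable_analysis_name("My Analysis (v2)")
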
@@ -46,7 +56,9 @@ def convert_qc_value_to_qcstatus(value):
46
56
  elif value == "":
47
57
  return QCStatus.UNDERTERMINED
48
58
  else:
49
- logger.error(f"The input value '{value}' cannot be converted to class QCStatus.")
59
+ logger.error(
60
+ f"The input value '{value}' cannot be converted to class QCStatus."
61
+ )
50
62
  return False
51
63
 
52
64
 
@@ -84,11 +96,24 @@ class Project:
84
96
  # project id (int)
85
97
  if isinstance(project_id, str):
86
98
  project_name = project_id
87
- project_id = next(iter(filter(lambda proj: proj["name"] == project_id, account.projects)))["id"]
99
+ project_id = next(
100
+ iter(
101
+ filter(
102
+ lambda proj: proj["name"] == project_id,
103
+ account.projects,
104
+ )
105
+ )
106
+ )["id"]
88
107
  else:
89
108
  if isinstance(project_id, float):
90
109
  project_id = int(project_id)
91
- project_name = next(iter(filter(lambda proj: proj["id"] == project_id, account.projects)))["name"]
110
+ project_name = next(
111
+ iter(
112
+ filter(
113
+ lambda proj: proj["id"] == project_id, account.projects
114
+ )
115
+ )
116
+ )["name"]
92
117
 
93
118
  self._account = account
94
119
  self._project_id = project_id
@@ -121,7 +146,9 @@ class Project:
121
146
  try:
122
147
  platform.parse_response(
123
148
  platform.post(
124
- self._account.auth, "projectset_manager/activate_project", data={"project_id": int(project_id)}
149
+ self._account.auth,
150
+ "projectset_manager/activate_project",
151
+ data={"project_id": int(project_id)},
125
152
  )
126
153
  )
127
154
  except errors.PlatformError:
@@ -156,6 +183,7 @@ class Project:
156
183
  result=False,
157
184
  add_to_container_id=0,
158
185
  split_data=False,
186
+ mock_response=False,
159
187
  ):
160
188
  """
161
189
  Upload a chunk of a file to the platform.
@@ -189,6 +217,8 @@ class Project:
189
217
  "Content-Length": str(length),
190
218
  "Content-Disposition": disposition,
191
219
  }
220
+ if mock_response:
221
+ request_headers["mock_case"] = mock_response
192
222
 
193
223
  if last_chunk:
194
224
  request_headers["X-Mint-Name"] = name
@@ -215,9 +245,12 @@ class Project:
215
245
 
216
246
  response_time = 900.0 if last_chunk else 120.0
217
247
  response = platform.post(
218
- auth=self._account.auth, endpoint="upload", data=data, headers=request_headers, timeout=response_time
248
+ auth=self._account.auth,
249
+ endpoint="upload",
250
+ data=data,
251
+ headers=request_headers,
252
+ timeout=response_time,
219
253
  )
220
-
221
254
  return response
222
255
 
223
256
  def upload_file(
@@ -232,7 +265,9 @@ class Project:
232
265
  input_data_type="qmenta_medical_image_data:3.10",
233
266
  add_to_container_id=0,
234
267
  chunk_size=2**9,
268
+ max_retries=10,
235
269
  split_data=False,
270
+ mock_response=False,
236
271
  ):
237
272
  """
238
273
  Upload a ZIP file to the platform.
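
upload_file now exposes max_retries (previously hard-coded to 10 in the body) and a mock_response flag that _upload_chunk forwards as a "mock_case" request header; an exhausted retry loop now raises Exception(error_message) instead of only logging it. A call sketch, assuming `project` is a Project bound to an authenticated account; the path, subject and session values are hypothetical, and the ssid keyword follows the body's usage:

    try:
        ok = project.upload_file(
            "/data/session.zip",   # ZIP file to upload
            "SUBJ01",              # Subject ID
            ssid="1",              # Session ID
            max_retries=5,         # now configurable per call
        )
    except Exception as exc:
        # e.g. "HTTP Connection Problem" after max_retries attempts
        print(f"upload aborted: {exc}")
    else:
        if not ok:
            print("upload failed, see the qmenta.client log")
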
@@ -274,13 +309,14 @@ class Project:
274
309
  input_data_type = "offline_analysis:1.0" if result else input_data_type
275
310
 
276
311
  chunk_size *= 1024
277
- max_retries = 10
278
312
 
279
313
  name = name or os.path.split(file_path)[1]
280
314
 
281
315
  total_bytes = os.path.getsize(file_path)
282
316
 
283
- split_data = self.__assert_split_data(split_data, ssid, add_to_container_id)
317
+ split_data = self.__assert_split_data(
318
+ split_data, ssid, add_to_container_id
319
+ )
284
320
 
285
321
  # making chunks of the file and sending one by one
286
322
  logger = logging.getLogger(logger_name)
@@ -297,6 +333,7 @@ class Project:
297
333
  uploaded_bytes = 0
298
334
  response = None
299
335
  last_chunk = False
336
+ error_message = None
300
337
 
301
338
  while True:
302
339
  data = file_object.read(chunk_size)
@@ -312,7 +349,14 @@ class Project:
312
349
  end_position = total_bytes - 1
313
350
  bytes_to_send = total_bytes - uploaded_bytes
314
351
 
315
- bytes_range = "bytes " + str(start_position) + "-" + str(end_position) + "/" + str(total_bytes)
352
+ bytes_range = (
353
+ "bytes "
354
+ + str(start_position)
355
+ + "-"
356
+ + str(end_position)
357
+ + "/"
358
+ + str(total_bytes)
359
+ )
316
360
 
317
361
  dispstr = f"attachment; filename={filename}"
318
362
  response = self._upload_chunk(
@@ -332,14 +376,13 @@ class Project:
332
376
  result,
333
377
  add_to_container_id,
334
378
  split_data,
379
+ mock_response=mock_response,
335
380
  )
336
-
337
381
  if response is None:
338
382
  retries_count += 1
339
383
  time.sleep(retries_count * 5)
340
384
  if retries_count > max_retries:
341
385
  error_message = "HTTP Connection Problem"
342
- logger.error(error_message)
343
386
  break
344
387
  elif int(response.status_code) == 201:
345
388
  chunk_num += 1
@@ -352,27 +395,35 @@ class Project:
352
395
  retries_count += 1
353
396
  time.sleep(retries_count * 5)
354
397
  if retries_count > self.max_retries:
355
- error_message = "Error Code: 416; Requested Range Not Satisfiable (NGINX)"
356
- logger.error(error_message)
398
+ error_message = (
399
+ "Error Code: 416; Requested Range "
400
+ "Not Satisfiable (NGINX)"
401
+ )
357
402
  break
358
403
  else:
359
404
  retries_count += 1
360
405
  time.sleep(retries_count * 5)
361
406
  if retries_count > max_retries:
362
- error_message = "Number of retries has been reached. Upload process stops here !"
363
- logger.error(error_message)
407
+ error_message = (
408
+ "Number of retries has been reached. "
409
+ "Upload process stops here !"
410
+ )
364
411
  break
365
412
 
366
413
  uploaded += chunk_size
367
414
  self.__show_progress(uploaded, file_size)
368
-
415
+ if error_message is not None:
416
+ raise Exception(error_message)
369
417
  try:
370
418
  platform.parse_response(response)
371
419
  except errors.PlatformError as error:
372
420
  logger.error(error)
373
421
  return False
374
422
 
375
- message = "Your data was successfully uploaded. The uploaded file will be soon processed !"
423
+ message = (
424
+ "Your data was successfully uploaded. "
425
+ "The uploaded file will be soon processed !"
426
+ )
376
427
  logger.info(message)
377
428
  return True
378
429
 
@@ -396,7 +447,9 @@ class Project:
396
447
  raise TypeError("`filenames` must be str or list of str")
397
448
 
398
449
  platform.post(
399
- self._account.auth, "file_manager/delete_files", data={"container_id": container_id, "files": filenames}
450
+ self._account.auth,
451
+ "file_manager/delete_files",
452
+ data={"container_id": container_id, "files": filenames},
400
453
  )
401
454
 
402
455
  def upload_mri(self, file_path, subject_name):
@@ -435,7 +488,11 @@ class Project:
435
488
  """
436
489
 
437
490
  if self.__check_upload_file(file_path):
438
- return self.upload_file(file_path, subject_name, input_data_type="parkinson_gametection")
491
+ return self.upload_file(
492
+ file_path,
493
+ subject_name,
494
+ input_data_type="parkinson_gametection",
495
+ )
439
496
  return False
440
497
 
441
498
  def upload_result(self, file_path, subject_name):
@@ -458,7 +515,9 @@ class Project:
458
515
  return self.upload_file(file_path, subject_name, result=True)
459
516
  return False
460
517
 
461
- def download_file(self, container_id, file_name, local_filename=False, overwrite=False):
518
+ def download_file(
519
+ self, container_id, file_name, local_filename=False, overwrite=False
520
+ ):
462
521
  """
463
522
  Download a single file from a specific container.
464
523
 
@@ -475,36 +534,50 @@ class Project:
475
534
  """
476
535
  logger = logging.getLogger(logger_name)
477
536
  if not isinstance(file_name, str):
478
- raise ValueError("The name of the file to download (file_name) should be of type string.")
479
- if not isinstance(file_name, str):
480
- raise ValueError("The name of the output file (local_filename) should be of type string.")
537
+ raise ValueError(
538
+ "The name of the file to download (file_name) should be of "
539
+ "type string."
540
+ )
541
+ if not isinstance(local_filename, str):
542
+ raise ValueError(
543
+ "The name of the output file (local_filename) should be of "
544
+ "type string."
545
+ )
481
546
 
482
547
  if file_name not in self.list_container_files(container_id):
483
- msg = f'File "{file_name}" does not exist in container {container_id}'
484
- logger.error(msg)
485
- return False
548
+ msg = (
549
+ f'File "{file_name}" does not exist in container '
550
+ f"{container_id}"
551
+ )
552
+ raise Exception(msg)
486
553
 
487
554
  local_filename = local_filename or file_name
488
555
 
489
556
  if os.path.exists(local_filename) and not overwrite:
490
557
  msg = f"File {local_filename} already exists"
491
- logger.error(msg)
492
- return False
558
+ raise Exception(msg)
493
559
 
494
560
  params = {"container_id": container_id, "files": file_name}
495
-
496
561
  with platform.post(
497
- self._account.auth, "file_manager/download_file", data=params, stream=True
562
+ self._account.auth,
563
+ "file_manager/download_file",
564
+ data=params,
565
+ stream=True,
498
566
  ) as response, open(local_filename, "wb") as f:
499
567
 
500
568
  for chunk in response.iter_content(chunk_size=2**9 * 1024):
501
569
  f.write(chunk)
502
570
  f.flush()
503
571
 
504
- logger.info(f"File {file_name} from container {container_id} saved to {local_filename}")
572
+ logger.info(
573
+ f"File {file_name} from container {container_id} saved "
574
+ f"to {local_filename}"
575
+ )
505
576
  return True
506
577
 
507
- def download_files(self, container_id, filenames, zip_name="files.zip", overwrite=False):
578
+ def download_files(
579
+ self, container_id, filenames, zip_name="files.zip", overwrite=False
580
+ ):
508
581
  """
509
582
  Download a set of files from a given container.
510
583
 
@@ -522,32 +595,51 @@ class Project:
522
595
  logger = logging.getLogger(logger_name)
523
596
 
524
597
  if not all([isinstance(file_name, str) for file_name in filenames]):
525
- raise ValueError("The name of the files to download (filenames) should be of type string.")
598
+ raise ValueError(
599
+ "The name of the files to download (filenames) should be "
600
+ "of type string."
601
+ )
526
602
  if not isinstance(zip_name, str):
527
- raise ValueError("The name of the output ZIP file (zip_name) should be of type string.")
603
+ raise ValueError(
604
+ "The name of the output ZIP file (zip_name) should be "
605
+ "of type string."
606
+ )
528
607
 
529
- files_not_in_container = list(filter(lambda f: f not in self.list_container_files(container_id), filenames))
608
+ files_not_in_container = list(
609
+ filter(
610
+ lambda f: f not in self.list_container_files(container_id),
611
+ filenames,
612
+ )
613
+ )
530
614
 
531
615
  if files_not_in_container:
532
- msg = f"The following files are missing in container {container_id}: {', '.join(files_not_in_container)}"
533
- logger.error(msg)
534
- return False
616
+ msg = (
617
+ f"The following files are missing in container "
618
+ f"{container_id}: {', '.join(files_not_in_container)}"
619
+ )
620
+ raise Exception(msg)
535
621
 
536
622
  if os.path.exists(zip_name) and not overwrite:
537
623
  msg = f'File "{zip_name}" already exists'
538
- logger.error(msg)
539
- return False
624
+ raise Exception(msg)
540
625
 
541
626
  params = {"container_id": container_id, "files": ";".join(filenames)}
542
627
  with platform.post(
543
- self._account.auth, "file_manager/download_file", data=params, stream=True
628
+ self._account.auth,
629
+ "file_manager/download_file",
630
+ data=params,
631
+ stream=True,
544
632
  ) as response, open(zip_name, "wb") as f:
545
633
 
546
634
  for chunk in response.iter_content(chunk_size=2**9 * 1024):
547
635
  f.write(chunk)
548
636
  f.flush()
549
637
 
550
- logger.info("Files from container {} saved to {}".format(container_id, zip_name))
638
+ logger.info(
639
+ "Files from container {} saved to {}".format(
640
+ container_id, zip_name
641
+ )
642
+ )
551
643
  return True
552
644
 
553
645
  def copy_container_to_project(self, container_id, project_id):
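
Both download helpers now raise Exception for a missing file or an already-existing local target instead of logging and returning False; note also that the corrected type check on local_filename runs before the `local_filename or file_name` fallback, so local_filename effectively must be passed as a string. A defensive sketch with hypothetical container ID and file names:

    try:
        project.download_file(
            12345, "report.pdf",
            local_filename="out/report.pdf", overwrite=False,
        )
        project.download_files(
            12345, ["t1.nii.gz", "dwi.nii.gz"], zip_name="session.zip"
        )
    except Exception as exc:
        # e.g. 'File "report.pdf" does not exist in container 12345'
        print(f"download aborted: {exc}")
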
@@ -571,9 +663,14 @@ class Project:
571
663
  p_id = int(project_id)
572
664
  elif type(project_id) is str:
573
665
  projects = self._account.projects
574
- projects_match = [proj for proj in projects if proj["name"] == project_id]
666
+ projects_match = [
667
+ proj for proj in projects if proj["name"] == project_id
668
+ ]
575
669
  if not projects_match:
576
- raise Exception(f"Project {project_id}" + " does not exist or is not available for this user.")
670
+ raise Exception(
671
+ f"Project {project_id}"
672
+ + " does not exist or is not available for this user."
673
+ )
577
674
  p_id = int(projects_match[0]["id"])
578
675
  else:
579
676
  raise TypeError("project_id")
@@ -584,10 +681,16 @@ class Project:
584
681
 
585
682
  try:
586
683
  platform.parse_response(
587
- platform.post(self._account.auth, "file_manager/copy_container_to_another_project", data=data)
684
+ platform.post(
685
+ self._account.auth,
686
+ "file_manager/copy_container_to_another_project",
687
+ data=data,
688
+ )
588
689
  )
589
690
  except errors.PlatformError as e:
590
- logging.getLogger(logger_name).error("Couldn not copy container: {}".format(e))
691
+ logging.getLogger(logger_name).error(
692
+ "Couldn not copy container: {}".format(e)
693
+ )
591
694
  return False
592
695
 
593
696
  return True
@@ -645,7 +748,11 @@ class Project:
645
748
  """
646
749
  logger = logging.getLogger(logger_name)
647
750
  try:
648
- data = platform.parse_response(platform.post(self._account.auth, "patient_manager/module_config"))
751
+ data = platform.parse_response(
752
+ platform.post(
753
+ self._account.auth, "patient_manager/module_config"
754
+ )
755
+ )
649
756
  except errors.PlatformError:
650
757
  logger.error("Could not retrieve metadata parameters.")
651
758
  return None
@@ -691,7 +798,10 @@ class Project:
691
798
  response = self.list_input_containers(search_criteria=search_criteria)
692
799
 
693
800
  for subject in response:
694
- if subject["patient_secret_name"] == subject_name and subject["ssid"] == ssid:
801
+ if (
802
+ subject["patient_secret_name"] == subject_name
803
+ and subject["ssid"] == ssid
804
+ ):
695
805
  return subject["container_id"]
696
806
  return False
697
807
 
@@ -715,20 +825,25 @@ class Project:
715
825
  """
716
826
 
717
827
  for user in self.get_subjects_metadata():
718
- if user["patient_secret_name"] == str(subject_name) and user["ssid"] == str(ssid):
828
+ if user["patient_secret_name"] == str(subject_name) and user[
829
+ "ssid"
830
+ ] == str(ssid):
719
831
  return int(user["_id"])
720
832
  return False
721
833
 
722
- def get_subjects_metadata(self, search_criteria={}, items=(0, 9999)):
834
+ def get_subjects_metadata(self, search_criteria=None, items=(0, 9999)):
723
835
  """
724
836
  List all Subject ID/Session ID from the selected project that meet the
725
- defined search criteria at a session level.
837
+ defined search criteria at a session level.
726
838
 
727
839
  Parameters
728
840
  ----------
729
841
  search_criteria: dict
730
842
  Each element is a string and is built using the formatting
731
843
  "type;value", or "type;operation|value"
844
+ items : List[int]
845
+ list containing two elements [min, max] that correspond to the
846
+ minimum and maximum range of sessions listed
732
847
 
733
848
  Complete search_criteria Dictionary Explanation:
734
849
 
@@ -742,8 +857,8 @@ class Project:
742
857
  "pars_PROJECTMETADATA": "METADATATYPE;METADATAVALUE",
743
858
  }
744
859
 
745
- Where:
746
- "pars_patient_secret_name": Applies the search to the 'Subject ID'.
860
+ where "pars_patient_secret_name": Applies the search to the
861
+ 'Subject ID'.
747
862
  SUBJECTID is a comma separated list of strings.
748
863
  "pars_ssid": Applies the search to the 'Session ID'.
749
864
  SSID is an integer.
@@ -829,12 +944,26 @@ class Project:
829
944
 
830
945
  """
831
946
 
832
- assert len(items) == 2, f"The number of elements in items '{len(items)}' should be equal to two."
833
- assert all([isinstance(item, int) for item in items]), f"All items elements '{items}' should be integers."
947
+ if search_criteria is None:
948
+ search_criteria = {}
949
+ if len(items) != 2:
950
+ raise ValueError(
951
+ f"The number of elements in items '{len(items)}' "
952
+ f"should be equal to two."
953
+ )
834
954
 
835
- assert all(
836
- [key[:5] == "pars_" for key in search_criteria.keys()]
837
- ), f"All keys of the search_criteria dictionary '{search_criteria.keys()}' must start with 'pars_'."
955
+ if not all([isinstance(item, int) for item in items]):
956
+ raise ValueError(
957
+ f"All values in items " f"'{items}' must be integers"
958
+ )
959
+
960
+ if search_criteria != {} and not all(
961
+ [item.startswith("pars_") for item in search_criteria.keys()]
962
+ ):
963
+ raise ValueError(
964
+ f"All keys of the search_criteria dictionary "
965
+ f"'{search_criteria.keys()}' must start with 'pars_'."
966
+ )
838
967
 
839
968
  for key, value in search_criteria.items():
840
969
  if value.split(";")[0] in ["integer", "decimal"]:
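
A sketch of the search-criteria format the validation above enforces: every key starts with "pars_", and each value is a "type;value" or "type;operation|value" string whose operation comes from OPERATOR_LIST. The Subject IDs are hypothetical, and the pars_age_at_scan metadata key is assumed to exist in the project:

    criteria = {
        "pars_patient_secret_name": "string;SUBJ01,SUBJ02",  # comma-separated Subject IDs
        "pars_ssid": "integer;eq|1",           # operation from OPERATOR_LIST
        "pars_age_at_scan": "integer;gte|40",
    }
    sessions = project.get_subjects_metadata(search_criteria=criteria, items=(0, 100))
    for s in sessions:
        print(s["patient_secret_name"], s["ssid"])
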
@@ -853,7 +982,9 @@ class Project:
853
982
  )
854
983
  return content
855
984
 
856
- def change_subject_metadata(self, patient_id, subject_name, ssid, tags, age_at_scan, metadata):
985
+ def change_subject_metadata(
986
+ self, patient_id, subject_name, ssid, tags, age_at_scan, metadata
987
+ ):
857
988
  """
858
989
  Change the Subject ID, Session ID, Tags, Age at Scan and Metadata of
859
990
  the session with Patient ID
@@ -888,36 +1019,57 @@ class Project:
888
1019
  try:
889
1020
  patient_id = str(int(patient_id))
890
1021
  except ValueError:
891
- raise ValueError(f"'patient_id': '{patient_id}' not valid. Must be convertible to int.")
1022
+ raise ValueError(
1023
+ f"'patient_id': '{patient_id}' not valid. Must be convertible "
1024
+ f"to int."
1025
+ )
892
1026
 
893
- assert isinstance(tags, list) and all(
1027
+ if not isinstance(tags, list) or not all(
894
1028
  isinstance(item, str) for item in tags
895
- ), f"tags: '{tags}' should be a list of strings."
1029
+ ):
1030
+ raise ValueError(f"tags: '{tags}' should be a list of strings.")
896
1031
  tags = [tag.lower() for tag in tags]
897
1032
 
898
- assert subject_name is not None and subject_name != "", "subject_name must be a non empty string."
899
- assert ssid is not None and ssid != "", "ssid must be a non empty string."
1033
+ if not isinstance(subject_name, str) or (
1034
+ subject_name is None or subject_name == ""
1035
+ ):
1036
+ raise ValueError("subject_name must be a non empty string.")
1037
+
1038
+ if not isinstance(ssid, str) or (ssid is None or ssid == ""):
1039
+ raise ValueError("ssid must be a non empty string.")
900
1040
 
901
1041
  try:
902
1042
  age_at_scan = str(int(age_at_scan)) if age_at_scan else None
903
1043
  except ValueError:
904
- raise ValueError(f"age_at_scan: '{age_at_scan}' not valid. Must be an integer.")
1044
+ raise ValueError(
1045
+ f"age_at_scan: '{age_at_scan}' not valid. Must be an integer."
1046
+ )
905
1047
 
906
- assert isinstance(metadata, dict), f"metadata: '{metadata}' should be a dictionary."
1048
+ if not isinstance(metadata, dict):
1049
+ raise ValueError(f"metadata: '{metadata}' should be a dictionary.")
907
1050
 
908
- assert all("md_" == key[:3] for key in metadata.keys()) or all(
909
- "md_" != key[:3] for key in metadata.keys()
910
- ), f"metadata: '{metadata}' must be a dictionary whose keys are either all starting with 'md_' or none."
1051
+ has_md_prefix = ["md_" == key[:3] for key in metadata.keys()]
1052
+ if not (all(has_md_prefix) or not any(has_md_prefix)):
1053
+ raise ValueError(
1054
+ f"metadata: '{metadata}' must be a dictionary whose keys are "
1055
+ f"either all starting with 'md_' or none."
1056
+ )
911
1057
 
912
1058
  metadata_keys = self.metadata_parameters.keys()
913
- assert all(
914
- [key[3:] in metadata_keys if "md_" == key[:3] else key in metadata_keys for key in metadata.keys()]
915
- ), (
916
- f"Some metadata keys provided ({', '.join(metadata.keys())}) "
917
- f"are not available in the project. They can be added via the "
918
- f"Metadata Manager via the QMENTA Platform graphical user "
919
- f"interface (GUI)."
920
- )
1059
+ if not all(
1060
+ (
1061
+ key[3:] in metadata_keys
1062
+ if "md_" == key[:3]
1063
+ else key in metadata_keys
1064
+ )
1065
+ for key in metadata.keys()
1066
+ ):
1067
+ raise ValueError(
1068
+ f"Some metadata keys provided ({', '.join(metadata.keys())}) "
1069
+ "are not available in the project. They can be added via the "
1070
+ "Metadata Manager via the QMENTA Platform graphical user "
1071
+ "interface (GUI)."
1072
+ )
921
1073
 
922
1074
  post_data = {
923
1075
  "patient_id": patient_id,
@@ -927,11 +1079,17 @@ class Project:
927
1079
  "age_at_scan": age_at_scan,
928
1080
  }
929
1081
  for key, value in metadata.items():
930
- id = key[3:] if "md_" == key[:3] else key
931
- post_data[f"last_vals.{id}"] = value
1082
+ id_ = key[3:] if "md_" == key[:3] else key
1083
+ post_data[f"last_vals.{id_}"] = value
932
1084
 
933
1085
  try:
934
- platform.parse_response(platform.post(self._account.auth, "patient_manager/upsert_patient", data=post_data))
1086
+ platform.parse_response(
1087
+ platform.post(
1088
+ self._account.auth,
1089
+ "patient_manager/upsert_patient",
1090
+ data=post_data,
1091
+ )
1092
+ )
935
1093
  except errors.PlatformError:
936
1094
  logger.error(f"Patient ID '{patient_id}' could not be modified.")
937
1095
  return False
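
With the asserts converted to explicit ValueError raises, the contract of change_subject_metadata is: patient_id convertible to int, subject_name and ssid non-empty strings, tags a list of strings (lower-cased internally), and metadata keys either all "md_"-prefixed or none, each already registered in the project's Metadata Manager. A call sketch with hypothetical values:

    project.change_subject_metadata(
        patient_id=123456,
        subject_name="SUBJ01",
        ssid="1",
        tags=["baseline", "qc-checked"],
        age_at_scan=42,
        metadata={"md_site": "hospital_a"},  # or {"site": ...}, never mixed
    )
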
@@ -939,7 +1097,9 @@ class Project:
939
1097
  logger.info(f"Patient ID '{patient_id}' successfully modified.")
940
1098
  return True
941
1099
 
942
- def get_subjects_files_metadata(self, search_criteria={}, items=(0, 9999)):
1100
+ def get_subjects_files_metadata(
1101
+ self, search_criteria=None, items=(0, 9999)
1102
+ ):
943
1103
  """
944
1104
  List all Subject ID/Session ID from the selected project that meet the
945
1105
  defined search criteria at a file level.
@@ -955,6 +1115,9 @@ class Project:
955
1115
  search_criteria: dict
956
1116
  Each element is a string and is built using the formatting
957
1117
  "type;value", or "type;operation|value"
1118
+ items : List[int]
1119
+ list containing two elements [min, max] that correspond to the
1120
+ minimum and maximum range of sessions listed
958
1121
 
959
1122
  Complete search_criteria Dictionary Explanation:
960
1123
 
@@ -1057,10 +1220,14 @@ class Project:
1057
1220
 
1058
1221
  """
1059
1222
 
1060
- content = self.get_subjects_metadata(search_criteria, items=(0, 9999))
1223
+ if search_criteria is None:
1224
+ search_criteria = {}
1225
+ content = self.get_subjects_metadata(search_criteria, items=items)
1061
1226
 
1062
1227
  # Wrap search criteria.
1063
- modality, tags, dicoms = self.__wrap_search_criteria(search_criteria)
1228
+ modality, tags, dicom_metadata = self.__wrap_search_criteria(
1229
+ search_criteria
1230
+ )
1064
1231
 
1065
1232
  # Iterate over the files of each subject selected to include/exclude
1066
1233
  # them from the results.
@@ -1075,17 +1242,23 @@ class Project:
1075
1242
  )
1076
1243
 
1077
1244
  for file in files["meta"]:
1078
- if modality and modality != (file.get("metadata") or {}).get("modality"):
1245
+ if modality and modality != (file.get("metadata") or {}).get(
1246
+ "modality"
1247
+ ):
1079
1248
  continue
1080
1249
  if tags and not all([tag in file.get("tags") for tag in tags]):
1081
1250
  continue
1082
- if dicoms:
1251
+ if dicom_metadata:
1083
1252
  result_values = list()
1084
- for key, dict_value in dicoms.items():
1085
- f_value = ((file.get("metadata") or {}).get("info") or {}).get(key)
1253
+ for key, dict_value in dicom_metadata.items():
1254
+ f_value = (
1255
+ (file.get("metadata") or {}).get("info") or {}
1256
+ ).get(key)
1086
1257
  d_operator = dict_value["operation"]
1087
1258
  d_value = dict_value["value"]
1088
- result_values.append(self.__operation(d_value, d_operator, f_value))
1259
+ result_values.append(
1260
+ self.__operation(d_value, d_operator, f_value)
1261
+ )
1089
1262
 
1090
1263
  if not all(result_values):
1091
1264
  continue
@@ -1139,7 +1312,12 @@ class Project:
1139
1312
  platform.post(
1140
1313
  self._account.auth,
1141
1314
  "file_manager/edit_file",
1142
- data={"container_id": container_id, "filename": filename, "tags": tags_str, "modality": modality},
1315
+ data={
1316
+ "container_id": container_id,
1317
+ "filename": filename,
1318
+ "tags": tags_str,
1319
+ "modality": modality,
1320
+ },
1143
1321
  )
1144
1322
  )
1145
1323
 
@@ -1152,7 +1330,7 @@ class Project:
1152
1330
  ----------
1153
1331
  subject_name : str
1154
1332
  Subject ID of the subject
1155
- session_id : int
1333
+ session_id : str
1156
1334
  The Session ID of the session that will be deleted
1157
1335
 
1158
1336
  Returns
@@ -1164,16 +1342,29 @@ class Project:
1164
1342
  all_sessions = self.get_subjects_metadata()
1165
1343
 
1166
1344
  session_to_del = [
1167
- s for s in all_sessions if s["patient_secret_name"] == subject_name and s["ssid"] == session_id
1345
+ s
1346
+ for s in all_sessions
1347
+ if s["patient_secret_name"] == subject_name
1348
+ and s["ssid"] == session_id
1168
1349
  ]
1169
1350
 
1170
1351
  if not session_to_del:
1171
- logger.error(f"Session {subject_name}/{session_id} could not be found in this project.")
1352
+ logger.error(
1353
+ f"Session {subject_name}/{session_id} could not be found in "
1354
+ f"this project."
1355
+ )
1172
1356
  return False
1173
1357
  elif len(session_to_del) > 1:
1174
- raise RuntimeError("Multiple sessions with same Subject ID and Session ID. Contact support.")
1358
+ raise RuntimeError(
1359
+ "Multiple sessions with same Subject ID and Session ID. "
1360
+ "Contact support."
1361
+ )
1175
1362
  else:
1176
- logger.info("{}/{} found (id {})".format(subject_name, session_id, session_to_del[0]["_id"]))
1363
+ logger.info(
1364
+ "{}/{} found (id {})".format(
1365
+ subject_name, session_id, session_to_del[0]["_id"]
1366
+ )
1367
+ )
1177
1368
 
1178
1369
  session = session_to_del[0]
1179
1370
 
@@ -1182,14 +1373,23 @@ class Project:
1182
1373
  platform.post(
1183
1374
  self._account.auth,
1184
1375
  "patient_manager/delete_patient",
1185
- data={"patient_id": str(int(session["_id"])), "delete_files": 1},
1376
+ data={
1377
+ "patient_id": str(int(session["_id"])),
1378
+ "delete_files": 1,
1379
+ },
1186
1380
  )
1187
1381
  )
1188
1382
  except errors.PlatformError:
1189
- logger.error(f"Session \"{subject_name}/{session['ssid']}\" could not be deleted.")
1383
+ logger.error(
1384
+ f"Session \"{subject_name}/{session['ssid']}\" could "
1385
+ f"not be deleted."
1386
+ )
1190
1387
  return False
1191
1388
 
1192
- logger.info(f"Session \"{subject_name}/{session['ssid']}\" successfully deleted.")
1389
+ logger.info(
1390
+ f"Session \"{subject_name}/{session['ssid']}\" successfully "
1391
+ f"deleted."
1392
+ )
1193
1393
  return True
1194
1394
 
1195
1395
  def delete_session_by_patientid(self, patient_id):
@@ -1214,7 +1414,10 @@ class Project:
1214
1414
  platform.post(
1215
1415
  self._account.auth,
1216
1416
  "patient_manager/delete_patient",
1217
- data={"patient_id": str(int(patient_id)), "delete_files": 1},
1417
+ data={
1418
+ "patient_id": str(int(patient_id)),
1419
+ "delete_files": 1,
1420
+ },
1218
1421
  )
1219
1422
  )
1220
1423
  except errors.PlatformError:
@@ -1244,10 +1447,16 @@ class Project:
1244
1447
  # Always fetch the session IDs from the platform before deleting them
1245
1448
  all_sessions = self.get_subjects_metadata()
1246
1449
 
1247
- sessions_to_del = [s for s in all_sessions if s["patient_secret_name"] == subject_name]
1450
+ sessions_to_del = [
1451
+ s for s in all_sessions if s["patient_secret_name"] == subject_name
1452
+ ]
1248
1453
 
1249
1454
  if not sessions_to_del:
1250
- logger.error("Subject {} cannot be found in this project.".format(subject_name))
1455
+ logger.error(
1456
+ "Subject {} cannot be found in this project.".format(
1457
+ subject_name
1458
+ )
1459
+ )
1251
1460
  return False
1252
1461
 
1253
1462
  for ssid in [s["ssid"] for s in sessions_to_del]:
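
A removal sketch using delete_session_by_patientid, which posts patient_manager/delete_patient with delete_files=1 so the container files are removed as well; the Patient ID is hypothetical and the True/False return convention of the surrounding methods is assumed:

    if not project.delete_session_by_patientid(123456):
        print("session could not be deleted")
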
@@ -1257,7 +1466,7 @@ class Project:
1257
1466
 
1258
1467
  """ Container Related Methods """
1259
1468
 
1260
- def list_input_containers(self, search_criteria={}, items=(0, 9999)):
1469
+ def list_input_containers(self, search_criteria=None, items=(0, 9999)):
1261
1470
  """
1262
1471
  Retrieve the list of input containers available to the user under a
1263
1472
  certain search criteria.
@@ -1291,8 +1500,17 @@ class Project:
1291
1500
  {"container_name", "container_id", "patient_secret_name", "ssid"}
1292
1501
  """
1293
1502
 
1294
- assert len(items) == 2, f"The number of elements in items '{len(items)}' should be equal to two."
1295
- assert all([isinstance(item, int) for item in items]), f"All items elements '{items}' should be integers."
1503
+ if search_criteria is None:
1504
+ search_criteria = {}
1505
+ if len(items) != 2:
1506
+ raise ValueError(
1507
+ f"The number of elements in items '{len(items)}' "
1508
+ f"should be equal to two."
1509
+ )
1510
+ if not all(isinstance(item, int) for item in items):
1511
+ raise ValueError(
1512
+ f"All items elements '{items}' should be integers."
1513
+ )
1296
1514
 
1297
1515
  response = platform.parse_response(
1298
1516
  platform.post(
@@ -1305,7 +1523,7 @@ class Project:
1305
1523
  containers = [
1306
1524
  {
1307
1525
  "patient_secret_name": container_item["patient_secret_name"],
1308
- "container_name": container_item["name"],
1526
+ "container_name": container_item["name"], # ???
1309
1527
  "container_id": container_item["_id"],
1310
1528
  "ssid": container_item["ssid"],
1311
1529
  }
@@ -1313,7 +1531,7 @@ class Project:
1313
1531
  ]
1314
1532
  return containers
1315
1533
 
1316
- def list_result_containers(self, search_condition={}, items=(0, 9999)):
1534
+ def list_result_containers(self, search_condition=None, items=(0, 9999)):
1317
1535
  """
1318
1536
  List the result containers available to the user.
1319
1537
  Examples
@@ -1341,7 +1559,8 @@ class Project:
1341
1559
  - qa_status: str or None pass/fail/nd QC status
1342
1560
  - secret_name: str or None Subject ID
1343
1561
  - tags: str or None
1344
- - with_child_analysis: 1 or None if 1, child analysis of workflows will appear
1562
+ - with_child_analysis: 1 or None if 1, child analysis of workflows
1563
+ will appear
1345
1564
  - id: str or None ID
1346
1565
  - state: running, completed, pending, exception or None
1347
1566
  - username: str or None
@@ -1358,8 +1577,16 @@ class Project:
1358
1577
  if "id": None, that analysis did not had an output container,
1359
1578
  probably it is a workflow
1360
1579
  """
1580
+ if search_condition is None:
1581
+ search_condition = {}
1361
1582
  analyses = self.list_analysis(search_condition, items)
1362
- return [{"name": analysis["name"], "id": (analysis.get("out_container_id") or None)} for analysis in analyses]
1583
+ return [
1584
+ {
1585
+ "name": analysis["name"],
1586
+ "id": (analysis.get("out_container_id") or None),
1587
+ }
1588
+ for analysis in analyses
1589
+ ]
1363
1590
 
1364
1591
  def list_container_files(
1365
1592
  self,
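
list_result_containers is a thin wrapper over list_analysis: entries whose analysis has no output container (typically workflows) carry id None. A filtering sketch; the search-condition values are hypothetical:

    results = project.list_result_containers({"state": "completed"})
    for entry in results:
        if entry["id"] is None:
            continue  # workflow without an output container
        files = project.list_container_files(entry["id"])
        print(entry["name"], len(files), "files")
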
@@ -1380,7 +1607,9 @@ class Project:
1380
1607
  try:
1381
1608
  content = platform.parse_response(
1382
1609
  platform.post(
1383
- self._account.auth, "file_manager/get_container_files", data={"container_id": container_id}
1610
+ self._account.auth,
1611
+ "file_manager/get_container_files",
1612
+ data={"container_id": container_id},
1384
1613
  )
1385
1614
  )
1386
1615
  except errors.PlatformError as e:
@@ -1391,7 +1620,9 @@ class Project:
1391
1620
  return False
1392
1621
  return content["files"]
1393
1622
 
1394
- def list_container_filter_files(self, container_id, modality="", metadata_info={}, tags=[]):
1623
+ def list_container_filter_files(
1624
+ self, container_id, modality="", metadata_info={}, tags=[]
1625
+ ):
1395
1626
  """
1396
1627
  List the name of the files available inside a given container.
1397
1628
  search condition example:
@@ -1427,11 +1658,17 @@ class Project:
1427
1658
  if modality == "":
1428
1659
  modality_bool = True
1429
1660
  else:
1430
- modality_bool = modality == metadata_file["metadata"].get("modality")
1661
+ modality_bool = modality == metadata_file["metadata"].get(
1662
+ "modality"
1663
+ )
1431
1664
  for key in metadata_info.keys():
1432
- meta_key = ((metadata_file.get("metadata") or {}).get("info") or {}).get(key)
1665
+ meta_key = (
1666
+ (metadata_file.get("metadata") or {}).get("info") or {}
1667
+ ).get(key)
1433
1668
  if meta_key is None:
1434
- logging.getLogger(logger_name).warning(f"{key} is not in file_info from file {file}")
1669
+ logging.getLogger(logger_name).warning(
1670
+ f"{key} is not in file_info from file {file}"
1671
+ )
1435
1672
  info_bool.append(metadata_info[key] == meta_key)
1436
1673
  if all(tags_bool) and all(info_bool) and modality_bool:
1437
1674
  selected_files.append(file)
@@ -1455,7 +1692,9 @@ class Project:
1455
1692
  try:
1456
1693
  data = platform.parse_response(
1457
1694
  platform.post(
1458
- self._account.auth, "file_manager/get_container_files", data={"container_id": container_id}
1695
+ self._account.auth,
1696
+ "file_manager/get_container_files",
1697
+ data={"container_id": container_id},
1459
1698
  )
1460
1699
  )
1461
1700
  except errors.PlatformError as e:
@@ -1468,7 +1707,8 @@ class Project:
1468
1707
 
1469
1708
  def get_analysis(self, analysis_name_or_id):
1470
1709
  """
1471
- Returns the analysis corresponding with the analysis id or analysis name
1710
+ Returns the analysis corresponding to the analysis id or analysis
1711
+ name
1472
1712
 
1473
1713
  Parameters
1474
1714
  ----------
@@ -1488,28 +1728,41 @@ class Project:
1488
1728
  analysis_name_or_id = int(analysis_name_or_id)
1489
1729
  else:
1490
1730
  search_tag = "p_n"
1491
- excluded_characters = ["\\", "[", "]", "(", ")", "{", "}", "+", "*"]
1492
- excluded_bool = [character in analysis_name_or_id for character in excluded_characters]
1731
+ excluded_bool = [
1732
+ character in analysis_name_or_id
1733
+ for character in ANALYSIS_NAME_EXCLUDED_CHARACTERS
1734
+ ]
1493
1735
  if any(excluded_bool):
1494
- raise Exception(f"p_n does not allow characters {excluded_characters}")
1736
+ raise Exception(
1737
+ f"p_n does not allow "
1738
+ f"characters {ANALYSIS_NAME_EXCLUDED_CHARACTERS}"
1739
+ )
1495
1740
  else:
1496
- raise Exception("The analysis identifier must be its name or an integer")
1741
+ raise Exception(
1742
+ "The analysis identifier must be its name or an integer"
1743
+ )
1497
1744
 
1498
1745
  search_condition = {
1499
1746
  search_tag: analysis_name_or_id,
1500
1747
  }
1501
1748
  response = platform.parse_response(
1502
- platform.post(self._account.auth, "analysis_manager/get_analysis_list", data=search_condition)
1749
+ platform.post(
1750
+ self._account.auth,
1751
+ "analysis_manager/get_analysis_list",
1752
+ data=search_condition,
1753
+ )
1503
1754
  )
1504
1755
 
1505
1756
  if len(response) > 1:
1506
- raise Exception(f"multiple analyses with name {analysis_name_or_id} found")
1757
+ raise Exception(
1758
+ f"multiple analyses with name {analysis_name_or_id} found"
1759
+ )
1507
1760
  elif len(response) == 1:
1508
1761
  return response[0]
1509
1762
  else:
1510
1763
  return None
1511
1764
 
1512
- def list_analysis(self, search_condition={}, items=(0, 9999)):
1765
+ def list_analysis(self, search_condition=None, items=(0, 9999)):
1513
1766
  """
1514
1767
  List the analysis available to the user.
1515
1768
 
@@ -1538,10 +1791,12 @@ class Project:
1538
1791
  - qa_status: str or None pass/fail/nd QC status
1539
1792
  - secret_name: str or None Subject ID
1540
1793
  - tags: str or None
1541
- - with_child_analysis: 1 or None if 1, child analysis of workflows will appear
1794
+ - with_child_analysis: 1 or None if 1, child analysis of workflows
1795
+ will appear
1542
1796
  - id: int or None ID
1543
1797
  - state: running, completed, pending, exception or None
1544
1798
  - username: str or None
1799
+ - only_data: int or None
1545
1800
 
1546
1801
  items : List[int]
1547
1802
  list containing two elements [min, max] that correspond to the
@@ -1552,8 +1807,17 @@ class Project:
1552
1807
  dict
1553
1808
  List of analysis, each a dictionary
1554
1809
  """
1555
- assert len(items) == 2, f"The number of elements in items '{len(items)}' should be equal to two."
1556
- assert all([isinstance(item, int) for item in items]), f"All items elements '{items}' should be integers."
1810
+ if search_condition is None:
1811
+ search_condition = {}
1812
+ if len(items) != 2:
1813
+ raise ValueError(
1814
+ f"The number of elements in items '{len(items)}' "
1815
+ f"should be equal to two."
1816
+ )
1817
+ if not all(isinstance(item, int) for item in items):
1818
+ raise ValueError(
1819
+ f"All items elements '{items}' should be integers."
1820
+ )
1557
1821
  search_keys = {
1558
1822
  "p_n": str,
1559
1823
  "type": str,
@@ -1566,19 +1830,37 @@ class Project:
1566
1830
  "with_child_analysis": int,
1567
1831
  "id": int,
1568
1832
  "state": str,
1833
+ "only_data": int,
1569
1834
  "username": str,
1570
1835
  }
1571
1836
  for key in search_condition.keys():
1572
1837
  if key not in search_keys.keys():
1573
- raise Exception((f"This key '{key}' is not accepted by this search condition"))
1574
- if not isinstance(search_condition[key], search_keys[key]) and search_condition[key] is not None:
1575
- raise Exception((f"The key {key} in the search condition is not type {search_keys[key]}"))
1838
+ raise Exception(
1839
+ (
1840
+ f"This key '{key}' is not accepted by this search "
1841
+ f"condition"
1842
+ )
1843
+ )
1844
+ if (
1845
+ not isinstance(search_condition[key], search_keys[key])
1846
+ and search_condition[key] is not None
1847
+ ):
1848
+ raise Exception(
1849
+ (
1850
+ f"The key {key} in the search condition is not type "
1851
+ f"{search_keys[key]}"
1852
+ )
1853
+ )
1576
1854
  if "p_n" == key:
1577
- excluded_characters = ["\\", "[", "]", "(", ")", "{", "}", "+", "*"]
1578
- excluded_bool = [character in search_condition["p_n"] for character in excluded_characters]
1855
+ excluded_bool = [
1856
+ character in search_condition["p_n"]
1857
+ for character in ANALYSIS_NAME_EXCLUDED_CHARACTERS
1858
+ ]
1579
1859
  if any(excluded_bool):
1580
- raise Exception(f"p_n does not allow characters {excluded_characters}")
1581
- req_headers = {"X-Range": f"items={items[0]}-{items[1] - 1}"}
1860
+ raise Exception(
1861
+ "p_n does not allow "
1862
+ f"characters {ANALYSIS_NAME_EXCLUDED_CHARACTERS}"
1863
+ )
1582
1864
  req_headers = {"X-Range": f"items={items[0]}-{items[1] - 1}"}
1583
1865
  return platform.parse_response(
1584
1866
  platform.post(
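
A query sketch using the newly documented "only_data" key together with the X-Range paging that req_headers implements; the values are hypothetical:

    analyses = project.list_analysis(
        search_condition={"state": "completed", "only_data": 1},
        items=(0, 50),  # sent as header "X-Range: items=0-49"
    )
    for a in analyses:
        print(a["name"], a.get("out_container_id"))
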
@@ -1643,7 +1925,9 @@ class Project:
1643
1925
  logger = logging.getLogger(logger_name)
1644
1926
 
1645
1927
  if in_container_id is None and settings is None:
1646
- raise ValueError("Pass a value for either in_container_id or settings.")
1928
+ raise ValueError(
1929
+ "Pass a value for either in_container_id or settings."
1930
+ )
1647
1931
 
1648
1932
  post_data = {"script_name": script_name, "version": version}
1649
1933
 
@@ -1676,15 +1960,19 @@ class Project:
1676
1960
 
1677
1961
  logger.debug(f"post_data = {post_data}")
1678
1962
  return self.__handle_start_analysis(
1679
- post_data, ignore_warnings=ignore_warnings, ignore_file_selection=ignore_file_selection
1963
+ post_data,
1964
+ ignore_warnings=ignore_warnings,
1965
+ ignore_file_selection=ignore_file_selection,
1680
1966
  )
1681
1967
 
1682
1968
  def delete_analysis(self, analysis_id):
1683
1969
  """
1684
1970
  Delete an analysis
1685
1971
 
1686
- :param analysis_id: id of the analysis to be deleted
1687
- :type analysis_id: Int
1972
+ Parameters
1973
+ ----------
1974
+ analysis_id : int
1975
+ ID of the analysis to be deleted
1688
1976
  """
1689
1977
  logger = logging.getLogger(logger_name)
1690
1978
 
@@ -1712,18 +2000,23 @@ class Project:
1712
2000
  Tools can not be restarted given that they are considered as single
1713
2001
  processing units. You can start execution of another analysis instead.
1714
2002
 
1715
- For the workflow to restart, all its failed child must be removed first.
1716
- You can only restart your own analysis.
2003
+ For the workflow to restart, all its failed children must be removed
2004
+ first. You can only restart your own analysis.
1717
2005
 
1718
- :param analysis_id: id of the analysis to be restarted
1719
- :type analysis_id: Int
2006
+ Parameters
2007
+ ----------
2008
+ analysis_id : int
2009
+ ID of the analysis to be restarted
1720
2010
  """
1721
2011
  logger = logging.getLogger(logger_name)
1722
2012
 
1723
2013
  analysis = self.list_analysis({"id": analysis_id})[0]
1724
2014
 
1725
2015
  if analysis.get("super_analysis_type") != 1:
1726
- raise ValueError("The analysis indicated is not a workflow and hence, it cannot be restarted.")
2016
+ raise ValueError(
2017
+ "The analysis indicated is not a workflow and hence, it "
2018
+ "cannot be restarted."
2019
+ )
1727
2020
 
1728
2021
  try:
1729
2022
  platform.parse_response(
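
Only workflows (super_analysis_type == 1) can be restarted; for tools the method raises ValueError. A guarded sketch using the documented delete_analysis; the analysis ID is hypothetical and the restart method is assumed to be named restart_analysis, since its def line falls outside the hunk:

    analysis = project.list_analysis({"id": 987})[0]
    if analysis.get("super_analysis_type") == 1:
        project.restart_analysis(987)  # name assumed, see note above
    else:
        project.delete_analysis(987)   # tools cannot be restarted
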
@@ -1745,7 +2038,8 @@ class Project:
1745
2038
  Get the log of an analysis and save it in the provided file.
1746
2039
  The logs of analysis can only be obtained for the tools you created.
1747
2040
 
1748
- Note workflows do not have a log so the printed message will only be ERROR.
2041
+ Note workflows do not have a log so the printed message will only be
2042
+ ERROR.
1749
2043
  You can only download the analysis log of the tools that you own.
1750
2044
 
1751
2045
  Note this method is very time consuming.
@@ -1768,22 +2062,32 @@ class Project:
1768
2062
  try:
1769
2063
  analysis_id = str(int(analysis_id))
1770
2064
  except ValueError:
1771
- raise ValueError(f"'analysis_id' has to be an integer not '{analysis_id}'.")
2065
+ raise ValueError(
2066
+ f"'analysis_id' has to be an integer not '{analysis_id}'."
2067
+ )
1772
2068
 
1773
2069
  file_name = file_name if file_name else f"logs_{analysis_id}.txt"
1774
2070
  try:
1775
2071
  res = platform.post(
1776
2072
  auth=self._account.auth,
1777
2073
  endpoint="analysis_manager/download_execution_file",
1778
- data={"project_id": analysis_id, "file": f"logs_{analysis_id}"},
2074
+ data={
2075
+ "project_id": analysis_id,
2076
+ "file": f"logs_{analysis_id}",
2077
+ },
1779
2078
  timeout=1000,
1780
2079
  )
1781
2080
  except Exception:
1782
- logger.error(f"Could not export the analysis log of '{analysis_id}'")
2081
+ logger.error(
2082
+ f"Could not export the analysis log of '{analysis_id}'"
2083
+ )
1783
2084
  return False
1784
2085
 
1785
2086
  if not res.ok:
1786
- logger.error(f"The log file could not be extracted for Analysis ID: {analysis_id}.")
2087
+ logger.error(
2088
+ f"The log file could not be extracted for Analysis ID:"
2089
+ f" {analysis_id}."
2090
+ )
1787
2091
  return False
1788
2092
 
1789
2093
  with open(file_name, "w") as f:
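
The log download posts analysis_manager/download_execution_file with file "logs_<analysis_id>" and saves the response to file_name (defaulting to logs_<analysis_id>.txt). A call sketch; the ID is hypothetical and the method name get_analysis_log is assumed, since its def line falls outside the hunk:

    # Only tools you own have logs; workflows yield only an ERROR message.
    project.get_analysis_log(987, file_name="analysis_987.log")
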
@@ -1792,7 +2096,9 @@ class Project:
1792
2096
 
1793
2097
  """ QC Status Related Methods """
1794
2098
 
1795
- def set_qc_status_analysis(self, analysis_id, status=QCStatus.UNDERTERMINED, comments=""):
2099
+ def set_qc_status_analysis(
2100
+ self, analysis_id, status=QCStatus.UNDERTERMINED, comments=""
2101
+ ):
1796
2102
  """
1797
2103
  Changes the analysis QC status.
1798
2104
 
@@ -1821,7 +2127,10 @@ class Project:
1821
2127
  try:
1822
2128
  analysis_id = str(int(analysis_id))
1823
2129
  except ValueError:
1824
- raise ValueError(f"analysis_id: '{analysis_id}' not valid. Must be convertible to int.")
2130
+ raise ValueError(
2131
+ f"analysis_id: '{analysis_id}' not valid. Must be convertible "
2132
+ f"to int."
2133
+ )
1825
2134
 
1826
2135
  try:
1827
2136
  platform.parse_response(
@@ -1837,11 +2146,16 @@ class Project:
1837
2146
  )
1838
2147
  )
1839
2148
  except Exception:
1840
- logger.error(f"It was not possible to change the QC status of Analysis ID: {analysis_id}")
2149
+ logger.error(
2150
+ f"It was not possible to change the QC status of Analysis ID:"
2151
+ f" {analysis_id}"
2152
+ )
1841
2153
  return False
1842
2154
  return True
1843
2155
 
1844
- def set_qc_status_subject(self, patient_id, status=QCStatus.UNDERTERMINED, comments=""):
2156
+ def set_qc_status_subject(
2157
+ self, patient_id, status=QCStatus.UNDERTERMINED, comments=""
2158
+ ):
1845
2159
  """
1846
2160
  Changes the QC status of a Patient ID (equivalent to a
1847
2161
  Subject ID/Session ID).
@@ -1870,7 +2184,10 @@ class Project:
1870
2184
  try:
1871
2185
  patient_id = str(int(patient_id))
1872
2186
  except ValueError:
1873
- raise ValueError(f"'patient_id': '{patient_id}' not valid. Must be convertible to int.")
2187
+ raise ValueError(
2188
+ f"'patient_id': '{patient_id}' not valid. Must be convertible"
2189
+ f" to int."
2190
+ )
1874
2191
 
1875
2192
  try:
1876
2193
  platform.parse_response(
@@ -1886,7 +2203,10 @@ class Project:
1886
2203
  )
1887
2204
  )
1888
2205
  except Exception:
1889
- logger.error(f"It was not possible to change the QC status of Patient ID: {patient_id}")
2206
+ logger.error(
2207
+ f"It was not possible to change the QC status of Patient ID:"
2208
+ f" {patient_id}"
2209
+ )
1890
2210
  return False
1891
2211
  return True
1892
2212
 
@@ -1911,17 +2231,28 @@ class Project:
1911
2231
  try:
1912
2232
  search_criteria = {"id": analysis_id}
1913
2233
  to_return = self.list_analysis(search_criteria)
1914
- return convert_qc_value_to_qcstatus(to_return[0]["qa_status"]), to_return[0]["qa_comments"]
2234
+ return (
2235
+ convert_qc_value_to_qcstatus(to_return[0]["qa_status"]),
2236
+ to_return[0]["qa_comments"],
2237
+ )
1915
2238
  except IndexError:
1916
2239
  # Handle the case where no matching analysis is found
1917
- logging.error(f"No analysis was found with such Analysis ID: '{analysis_id}'.")
2240
+ logging.error(
2241
+ f"No analysis was found with such Analysis ID: "
2242
+ f"'{analysis_id}'."
2243
+ )
1918
2244
  return False, False
1919
2245
  except Exception:
1920
2246
  # Handle other potential exceptions
1921
- logging.error(f"It was not possible to extract the QC status from Analysis ID: {analysis_id}")
2247
+ logging.error(
2248
+ f"It was not possible to extract the QC status from Analysis "
2249
+ f"ID: {analysis_id}"
2250
+ )
1922
2251
  return False, False
1923
2252
 
1924
- def get_qc_status_subject(self, patient_id=None, subject_name=None, ssid=None):
2253
+ def get_qc_status_subject(
2254
+ self, patient_id=None, subject_name=None, ssid=None
2255
+ ):
1925
2256
  """
1926
2257
  Gets the session QC status via the patient ID or the Subject ID
1927
2258
  and the Session ID.
@@ -1949,26 +2280,50 @@ class Project:
1949
2280
  try:
1950
2281
  patient_id = int(patient_id)
1951
2282
  except ValueError:
1952
- raise ValueError(f"patient_id '{patient_id}' should be an integer.")
2283
+ raise ValueError(
2284
+ f"patient_id '{patient_id}' should be an integer."
2285
+ )
1953
2286
  sessions = self.get_subjects_metadata(search_criteria={})
1954
- session = [session for session in sessions if int(session["_id"]) == patient_id]
2287
+ session = [
2288
+ session
2289
+ for session in sessions
2290
+ if int(session["_id"]) == patient_id
2291
+ ]
1955
2292
  if len(session) < 1:
1956
- logging.error(f"No session was found with Patient ID: '{patient_id}'.")
2293
+ logging.error(
2294
+ f"No session was found with Patient ID: '{patient_id}'."
2295
+ )
1957
2296
  return False, False
1958
- return convert_qc_value_to_qcstatus(session[0]["qa_status"]), session[0]["qa_comments"]
2297
+ return (
2298
+ convert_qc_value_to_qcstatus(session[0]["qa_status"]),
2299
+ session[0]["qa_comments"],
2300
+ )
1959
2301
  elif subject_name and ssid:
1960
2302
  session = self.get_subjects_metadata(
1961
2303
  search_criteria={
1962
2304
  "pars_patient_secret_name": f"string;{subject_name}",
1963
- "pars_ssid": f"integer;eq|{ssid}" if str(ssid).isdigit() else f"string;{ssid}",
2305
+ "pars_ssid": (
2306
+ f"integer;eq|{ssid}"
2307
+ if str(ssid).isdigit()
2308
+ else f"string;{ssid}"
2309
+ ),
1964
2310
  }
1965
2311
  )
1966
2312
  if len(session) < 1:
1967
- logging.error(f"No session was found with Subject ID: '{subject_name}' and Session ID: '{ssid}'.")
2313
+ logging.error(
2314
+ f"No session was found with Subject ID: '{subject_name}'"
2315
+ f" and Session ID: '{ssid}'."
2316
+ )
1968
2317
  return False, False
1969
- return convert_qc_value_to_qcstatus(session[0]["qa_status"]), session[0]["qa_comments"]
2318
+ return (
2319
+ convert_qc_value_to_qcstatus(session[0]["qa_status"]),
2320
+ session[0]["qa_comments"],
2321
+ )
1970
2322
  else:
1971
- raise ValueError("Either 'patient_id' or 'subject_name' and 'ssid' must not be empty.")
2323
+ raise ValueError(
2324
+ "Either 'patient_id' or 'subject_name' and 'ssid' must "
2325
+ "not be empty."
2326
+ )
1972
2327
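
A round-trip sketch for the QC-status helpers, assuming QCStatus is importable from this module (it is already referenced here as a default argument); IDs, names and comments are hypothetical:

    from qmenta.client.Project import QCStatus

    # Session-level status: set by Patient ID, read back by
    # Subject ID + Session ID (or by patient_id).
    project.set_qc_status_subject(
        123456, status=QCStatus.UNDERTERMINED, comments="pending review"
    )
    status, comments = project.get_qc_status_subject(
        subject_name="SUBJ01", ssid="1"
    )
    if status is False:
        print("session not found")  # the getters return (False, False)
    else:
        print(status, comments)
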
 
1973
2328
  """ Protocol Adherence Related Methods """
1974
2329
 
@@ -1996,7 +2351,9 @@ class Project:
1996
2351
  with open(rules_file_path, "r") as fr:
1997
2352
  rules = json.load(fr)
1998
2353
  except FileNotFoundError:
1999
- logger.error(f"Pprotocol adherence rule file '{rules_file_path}' not found.")
2354
+ logger.error(
2355
+ f"Protocol adherence rule file '{rules_file_path}' not found."
2356
+ )
2000
2357
  return False
2001
2358
 
2002
2359
  # Update the project's QA rules
@@ -2004,18 +2361,26 @@ class Project:
2004
2361
  platform.post(
2005
2362
  auth=self._account.auth,
2006
2363
  endpoint="projectset_manager/set_session_qa_requirements",
2007
- data={"project_id": self._project_id, "rules": json.dumps(rules), "guidance_text": guidance_text},
2364
+ data={
2365
+ "project_id": self._project_id,
2366
+ "rules": json.dumps(rules),
2367
+ "guidance_text": guidance_text,
2368
+ },
2008
2369
  )
2009
2370
  )
2010
2371
 
2011
2372
  if not res.get("success") == 1:
2012
- logger.error("There was an error setting up the protocol adherence rules.")
2373
+ logger.error(
2374
+ "There was an error setting up the protocol adherence rules."
2375
+ )
2013
2376
  logger.error(platform.parse_response(res))
2014
2377
  return False
2015
2378
 
2016
2379
  return True
2017
2380
 
2018
- def get_project_pa_rules(self, rules_file_path):
2381
+ def get_project_pa_rules(
2382
+ self, rules_file_path, project_has_no_rules=False
2383
+ ):
2019
2384
  """
2020
2385
  Retrieve the active project's protocol adherence rules
2021
2386
 
@@ -2024,6 +2389,8 @@ class Project:
2024
2389
  rules_file_path : str
2025
2390
  The file path to the JSON file to store the protocol adherence
2026
2391
  rules.
2392
+ project_has_no_rules: bool
2393
+ for testing purposes
2027
2394
 
2028
2395
  Returns
2029
2396
  -------
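
get_project_pa_rules returns the project's guidance text and writes the rules (minus the "_id", "order" and "time_modified" bookkeeping keys) to the given JSON file, or returns False on failure. An export sketch; the file path is hypothetical:

    import json

    guidance = project.get_project_pa_rules("pa_rules.json")
    if guidance is False:
        print("protocol adherence rules could not be exported")
    else:
        with open("pa_rules.json") as fr:
            rules = json.load(fr)
        print(len(rules), "rules; guidance:", guidance)
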
@@ -2043,47 +2410,58 @@ class Project:
2043
2410
  )
2044
2411
  )
2045
2412
 
2046
- if "rules" not in res:
2047
- logger.error(f"There was an error extracting the protocol adherence rules from {self._project_name}.")
2413
+ if "rules" not in res or project_has_no_rules:
2414
+ logger.error(
2415
+ f"There was an error extracting the protocol adherence rules"
2416
+ f" from {self._project_name}."
2417
+ )
2048
2418
  logger.error(platform.parse_response(res))
2049
2419
  return False
2050
2420
 
2051
2421
  try:
2052
2422
  for rule in res["rules"]:
2053
- del rule["_id"]
2054
- del rule["order"]
2055
- del rule["time_modified"]
2423
+ for key in ["_id", "order", "time_modified"]:
2424
+ if rule.get(key, False):
2425
+ del rule[key]
2056
2426
  with open(rules_file_path, "w") as fr:
2057
2427
  json.dump(res["rules"], fr, indent=4)
2058
2428
  except FileNotFoundError:
2059
- logger.error(f"Protocol adherence rules could not be exported to file: '{rules_file_path}'.")
2429
+ logger.error(
2430
+ f"Protocol adherence rules could not be exported to file: "
2431
+ f"'{rules_file_path}'."
2432
+ )
2060
2433
  return False
2061
2434
 
2062
2435
  return res["guidance_text"]
2063
2436
 
2064
2437
  def parse_qc_text(self, patient_id=None, subject_name=None, ssid=None):
2065
2438
  """
2066
- Parse QC (Quality Control) text output into a structured dictionary format.
2439
+ Parse QC (Quality Control) text output into a structured dictionary
2440
+ format.
2067
2441
 
2068
- This function takes raw QC text output (from the Protocol Adherence analysis)
2069
- and parses it into a structured format that separates passed and failed rules,
2070
- along with their associated files and conditions.
2442
+ This function takes raw QC text output (from the Protocol Adherence
2443
+ analysis) and parses it into a structured format that
2444
+ separates passed and failed rules, along with their associated files
2445
+ and conditions.
2071
2446
 
2072
2447
  Args:
2073
2448
  patient_id (str, optional):
2074
2449
  Patient identifier. Defaults to None.
2075
2450
  subject_name (str, optional):
2076
- Subject/patient name. Defaults to None. Mandatory if no patient_id is provided.
2451
+ Subject/patient name. Defaults to None. Mandatory if no
2452
+ patient_id is provided.
2077
2453
  ssid (str, optional):
2078
- Session ID. Defaults to None. Mandatory if subject_name is provided.
2454
+ Session ID. Defaults to None. Mandatory if subject_name is
2455
+ provided.
2079
2456
 
2080
2457
  Returns:
2081
- dict: A structured dictionary containing a list of dictionaries with passed rules and their details
2082
- and failed rules and their details. Details of passed rules are:
2083
- per each rule: Files that have passed the rule. Per each file name of the file and number of conditions
2084
- of the rule.
2085
- Details of failed rules are:
2086
- - Per each rule failed conditions: Number of times it failed. Each condition status.
2458
+ dict: A structured dictionary containing a list of dictionaries
2459
+ with passed rules and their details and failed rules and their
2460
+ details. Details of passed rules are:
2461
+ per each rule: Files that have passed the rule. Per each file name
2462
+ of the file and number of conditions of the rule. Details of
2463
+ failed rules are: per each rule failed conditions: Number of
2464
+ times it failed. Each condition status.
2087
2465
 
2088
2466
  Example:
2089
2467
  >>> parse_qc_text(subject_name="patient_123", ssid=1)
@@ -2109,7 +2487,7 @@ class Project:
2109
2487
  "conditions": [
2110
2488
  {
2111
2489
  "status": "failed",
2112
- "condition": "SliceThickness between..."
2490
+ "condition": "SliceThickness between.."
2113
2491
  }
2114
2492
  ]
2115
2493
  }
@@ -2122,7 +2500,9 @@ class Project:
2122
2500
  }
2123
2501
  """
2124
2502
 
2125
- _, text = self.get_qc_status_subject(patient_id=patient_id, subject_name=subject_name, ssid=ssid)
2503
+ _, text = self.get_qc_status_subject(
2504
+ patient_id=patient_id, subject_name=subject_name, ssid=ssid
2505
+ )
2126
2506
 
2127
2507
  result = {"passed": [], "failed": []}
2128
2508
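
A consumption sketch for the structure parse_qc_text builds into `result`, matching the docstring's {"passed": [...], "failed": [...]} layout; subject and session are hypothetical, and the per-rule keys follow the docstring example:

    qc = project.parse_qc_text(subject_name="SUBJ01", ssid="1")
    print(len(qc["passed"]), "rules passed,", len(qc["failed"]), "failed")
    for rule in qc["failed"]:
        for cond in rule.get("conditions", []):
            if cond.get("status") == "failed":
                print("failed condition:", cond.get("condition"))
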
 
@@ -2152,22 +2532,27 @@ class Project:
2152
2532
 
2153
2533
  def calculate_qc_statistics(self):
2154
2534
  """
2155
- Calculate comprehensive statistics from multiple QC results across subjects from a project in the QMENTA
2156
- platform.
2535
+ Calculate comprehensive statistics from multiple QC results across
2536
+ subjects from a project in the QMENTA platform.
2157
2537
 
2158
- This function aggregates and analyzes QC results from multiple subjects/containers,
2159
- providing statistical insights about rule pass/fail rates, file statistics,
2160
- and condition failure patterns.
2538
+ This function aggregates and analyzes QC results from
2539
+ multiple subjects/containers, providing statistical insights about
2540
+ rule pass/fail rates, file statistics, and condition failure patterns.
2161
2541
 
2162
2542
  Returns:
2163
- dict: A dictionary containing comprehensive QC statistics including:
2543
+ dict: A dictionary containing comprehensive QC statistics
2544
+ including:
2164
2545
  - passed_rules: Total count of passed rules across all subjects
2165
2546
  - failed_rules: Total count of failed rules across all subjects
2166
2547
  - subjects_passed: Count of subjects with no failed rules
2167
- - subjects_with_failed: Count of subjects with at least one failed rule
2168
- - num_passed_files_distribution: Distribution of how many rules have N passed files
2169
- - file_stats: File-level statistics (total, passed, failed, pass percentage)
2170
- - condition_failure_rates: Frequency and percentage of each failed condition
2548
+ - subjects_with_failed: Count of subjects with at least one
2549
+ failed rule
2550
+ - num_passed_files_distribution: Distribution of how many
2551
+ rules have N passed files
2552
+ - file_stats: File-level statistics (total, passed, failed,
2553
+ pass percentage)
2554
+ - condition_failure_rates: Frequency and percentage of each
2555
+ failed condition
2171
2556
  - rule_success_rates: Success rates for each rule type
2172
2557
 
2173
2558
  The statistics help identify:
@@ -2213,7 +2598,11 @@ class Project:
2213
2598
  containers = self.list_input_containers()
2214
2599
 
2215
2600
  for c in containers:
2216
- qc_results_list.append(self.parse_qc_text(subject_name=c["patient_secret_name"], ssid=c["ssid"]))
2601
+ qc_results_list.append(
2602
+ self.parse_qc_text(
2603
+ subject_name=c["patient_secret_name"], ssid=c["ssid"]
2604
+ )
2605
+ )
2217
2606
 
2218
2607
  # Initialize statistics
2219
2608
  stats = {
@@ -2221,23 +2610,49 @@ class Project:
2221
2610
  "failed_rules": 0,
2222
2611
  "subjects_passed": 0,
2223
2612
  "subjects_with_failed": 0,
2224
- "num_passed_files_distribution": defaultdict(int),
2225
- # How many rules have N passed files
2226
- "file_stats": {"total": 0, "passed": 0, "failed": 0, "pass_percentage": 0.0},
2227
- "condition_failure_rates": defaultdict(lambda: {"count": 0, "percentage": 0.0}),
2228
- "rule_success_rates": defaultdict(lambda: {"passed": 0, "failed": 0, "success_rate": 0.0}),
2613
+ "num_passed_files_distribution": defaultdict(
2614
+ int
2615
+ ), # How many rules have N passed files
2616
+ "file_stats": {
2617
+ "total": 0,
2618
+ "passed": 0,
2619
+ "failed": 0,
2620
+ "pass_percentage": 0.0,
2621
+ },
2622
+ "condition_failure_rates": defaultdict(
2623
+ lambda: {"count": 0, "percentage": 0.0}
2624
+ ),
2625
+ "rule_success_rates": defaultdict(
2626
+ lambda: {"passed": 0, "failed": 0, "success_rate": 0.0}
2627
+ ),
2229
2628
  }
2230
2629
 
2231
2630
  total_failures = 0
2232
2631
 
2233
2632
  # count subjects with no failed QC message
2234
- stats["subjects_passed"] = sum([1 for rules in qc_results_list if not rules["failed"]])
2633
+ stats["subjects_passed"] = sum(
2634
+ [1 for rules in qc_results_list if not rules["failed"]]
2635
+ )
2235
2636
  # count subjects with at least one failed QC message
2236
- stats["subjects_with_failed"] = sum([1 for rules in qc_results_list if rules["failed"]])
2637
+ stats["subjects_with_failed"] = sum(
2638
+ [1 for rules in qc_results_list if rules["failed"]]
2639
+ )
2237
2640
  # sum rules that have passed
2238
- stats["passed_rules"] = sum([len(rules["passed"]) for rules in qc_results_list if rules["failed"]])
2641
+ stats["passed_rules"] = sum(
2642
+ [
2643
+ len(rules["passed"])
2644
+ for rules in qc_results_list
2645
+ if rules["failed"]
2646
+ ]
2647
+ )
2239
2648
  # sum rules that have failed
2240
- stats["failed_rules"] = sum([len(rules["failed"]) for rules in qc_results_list if rules["failed"]])
2649
+ stats["failed_rules"] = sum(
2650
+ [
2651
+ len(rules["failed"])
2652
+ for rules in qc_results_list
2653
+ if rules["failed"]
2654
+ ]
2655
+ )
2241
2656
 
2242
2657
  for qc_results in qc_results_list:
2243
2658
 
@@ -2255,42 +2670,72 @@ class Project:
2255
2670
  stats["file_stats"]["failed"] += len(rule["files"])
2256
2671
  for condition, count in rule["failed_conditions"].items():
2257
2672
  # Extract just the condition text without actual value
2258
- clean_condition = re.sub(r"\.\s*Actual value:.*$", "", condition)
2259
- stats["condition_failure_rates"][clean_condition]["count"] += count
2673
+ clean_condition = re.sub(
2674
+ r"\.\s*Actual value:.*$", "", condition
2675
+ )
2676
+ stats["condition_failure_rates"][clean_condition][
2677
+ "count"
2678
+ ] += count
2260
2679
  total_failures += count
2261
2680
  rule_name = rule["rule"]
2262
2681
  stats["rule_success_rates"][rule_name]["failed"] += 1
2263
2682
 
2264
2683
  if stats["file_stats"]["total"] > 0:
2265
2684
  stats["file_stats"]["pass_percentage"] = round(
2266
- (stats["file_stats"]["passed"] / stats["file_stats"]["total"]) * 100, 2
2685
+ (stats["file_stats"]["passed"] / stats["file_stats"]["total"])
2686
+ * 100,
2687
+ 2,
2267
2688
  )
2268
2689
 
2269
2690
  # Calculate condition failure percentages
2270
2691
  for condition in stats["condition_failure_rates"]:
2271
2692
  if total_failures > 0:
2272
- stats["condition_failure_rates"][condition]["percentage"] = round(
2273
- (stats["condition_failure_rates"][condition]["count"] / total_failures) * 100, 2
2693
+ stats["condition_failure_rates"][condition]["percentage"] = (
2694
+ round(
2695
+ (
2696
+ stats["condition_failure_rates"][condition][
2697
+ "count"
2698
+ ]
2699
+ / total_failures
2700
+ )
2701
+ * 100,
2702
+ 2,
2703
+ )
2274
2704
  )
2275
2705
 
2276
2706
  # Calculate rule success rates
2277
2707
  for rule in stats["rule_success_rates"]:
2278
- total = stats["rule_success_rates"][rule]["passed"] + stats["rule_success_rates"][rule]["failed"]
2708
+ total = (
2709
+ stats["rule_success_rates"][rule]["passed"]
2710
+ + stats["rule_success_rates"][rule]["failed"]
2711
+ )
2279
2712
  if total > 0:
2280
2713
  stats["rule_success_rates"][rule]["success_rate"] = round(
2281
- (stats["rule_success_rates"][rule]["passed"] / total) * 100, 2
2714
+ (stats["rule_success_rates"][rule]["passed"] / total)
2715
+ * 100,
2716
+ 2,
2282
2717
  )
2283
2718
 
2284
2719
  # Convert defaultdict to regular dict for cleaner JSON output
2285
- stats["num_passed_files_distribution"] = dict(stats["num_passed_files_distribution"])
2286
- stats["condition_failure_rates"] = dict(stats["condition_failure_rates"])
2720
+ stats["num_passed_files_distribution"] = dict(
2721
+ stats["num_passed_files_distribution"]
2722
+ )
2723
+ stats["condition_failure_rates"] = dict(
2724
+ stats["condition_failure_rates"]
2725
+ )
2287
2726
  stats["rule_success_rates"] = dict(stats["rule_success_rates"])
2288
2727
 
2289
2728
  return stats
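
A companion sketch for consuming the statistics dict returned above. It touches only keys documented in the Returns section of the docstring, and reuses the hypothetical `project` object from the earlier sketch:

    stats = project.calculate_qc_statistics()
    print(f"subjects passed: {stats['subjects_passed']}, "
          f"subjects with failures: {stats['subjects_with_failed']}")
    print(f"file pass rate: {stats['file_stats']['pass_percentage']}%")
    for rule, rates in stats["rule_success_rates"].items():
        runs = rates["passed"] + rates["failed"]
        print(f"{rule}: {rates['success_rate']}% over {runs} runs")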
2290
2729
 
2291
2730
  """ Helper Methods """
2292
2731
 
2293
- def __handle_start_analysis(self, post_data, ignore_warnings=False, ignore_file_selection=True, n_calls=0):
2732
+ def __handle_start_analysis(
2733
+ self,
2734
+ post_data,
2735
+ ignore_warnings=False,
2736
+ ignore_file_selection=True,
2737
+ n_calls=0,
2738
+ ):
2294
2739
  """
2295
2740
  Handle the possible responses from the server after start_analysis.
2296
2741
  Sometimes we have to send a request again, and then check again the
@@ -2310,13 +2755,21 @@ class Project:
2310
2755
  than {n_calls} times: aborting."
2311
2756
  )
2312
2757
  return None
2313
-
2758
+ response = None
2314
2759
  try:
2315
2760
  response = platform.parse_response(
2316
- platform.post(self._account.auth, "analysis_manager/analysis_registration", data=post_data)
2761
+ platform.post(
2762
+ self._account.auth,
2763
+ "analysis_manager/analysis_registration",
2764
+ data=post_data,
2765
+ )
2317
2766
  )
2318
2767
  logger.info(response["message"])
2319
- return int(response["analysis_id"]) if "analysis_id" in response else None
2768
+ return (
2769
+ int(response["analysis_id"])
2770
+ if "analysis_id" in response
2771
+ else None
2772
+ )
2320
2773
 
2321
2774
  except platform.ChooseDataError as choose_data:
2322
2775
  if ignore_file_selection:
@@ -2336,31 +2789,39 @@ class Project:
2336
2789
  # logging any warning that we have
2337
2790
  if choose_data.warning:
2338
2791
  has_warning = True
2339
- logger.warning(response["warning"])
2792
+ logger.warning(choose_data.warning)
2340
2793
 
2341
2794
  new_post = {
2342
2795
  "analysis_id": choose_data.analysis_id,
2343
2796
  "script_name": post_data["script_name"],
2344
2797
  "version": post_data["version"],
2345
2798
  }
2799
+ if "tags" in post_data.keys():
2800
+ new_post["tags"] = post_data["tags"]
2346
2801
 
2347
2802
  if choose_data.data_to_choose:
2348
2803
  self.__handle_manual_choose_data(new_post, choose_data)
2349
2804
  else:
2350
2805
  if has_warning and not ignore_warnings:
2351
- logger.error("Cancelling analysis due to warnings, set 'ignore_warnings' to True to override.")
2806
+ logger.error(
2807
+ "Cancelling analysis due to warnings, set "
2808
+ "'ignore_warnings' to True to override."
2809
+ )
2352
2810
  new_post["cancel"] = "1"
2353
2811
  else:
2354
2812
  logger.info("suppressing warnings")
2355
2813
  new_post["user_preference"] = "{}"
2356
2814
  new_post["_mint_only_warning"] = "1"
2357
2815
 
2358
- return self.__handle_start_analysis(new_post, ignore_warnings, ignore_file_selection, n_calls)
2816
+ return self.__handle_start_analysis(
2817
+ new_post, ignore_warnings, ignore_file_selection, n_calls
2818
+ )
2359
2819
  except platform.ActionFailedError as e:
2360
2820
  logger.error(f"Unable to start the analysis: {e}.")
2361
2821
  return None
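
The guard at the top of this method bounds the recursion: each resubmission either succeeds, refines post_data and recurses, or aborts once the call budget is exceeded. A self-contained toy of that shape; every name here is illustrative rather than the platform API, and the point at which the real code increments n_calls is elided from this diff:

    class NeedsMoreInput(Exception):
        """Stand-in for platform.ChooseDataError."""

    def submit(post_data):
        # Pretend the server rejects until a preference is supplied.
        if "user_preference" not in post_data:
            raise NeedsMoreInput()
        return 42  # pretend analysis_id

    def handle(post_data, n_calls=0, max_calls=5):  # max_calls is hypothetical
        if n_calls > max_calls:
            return None  # abort instead of recursing forever
        try:
            return submit(post_data)
        except NeedsMoreInput:
            post_data["user_preference"] = "{}"  # refine the request
            return handle(post_data, n_calls + 1, max_calls)

    print(handle({"script_name": "tool", "version": "1.0"}))  # -> 42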
2362
2822
 
2363
- def __handle_manual_choose_data(self, post_data, choose_data):
2823
+ @staticmethod
2824
+ def __handle_manual_choose_data(post_data, choose_data):
2364
2825
  """
2365
2826
  Handle the responses of the user when there is need to select a file
2366
2827
  to start the analysis.
@@ -2373,15 +2834,22 @@ class Project:
2373
2834
  post_data : dict
2374
2835
  Current post_data dictionary. To be modified in-place.
2375
2836
  choose_data : platform.ChooseDataError
2376
- Error raised when trying to start an analysis, but data has to be chosen.
2837
+ Error raised when trying to start an analysis, but data has to
2838
+ be chosen.
2377
2839
  """
2378
2840
 
2379
2841
  logger = logging.getLogger(logger_name)
2380
- logger.warning("Multiple inputs available. You have to select the desired file/s to continue.")
2842
+ logger.warning(
2843
+ "Multiple inputs available. You have to select the desired file/s "
2844
+ "to continue."
2845
+ )
2381
2846
  # in case we have data to choose
2382
2847
  chosen_files = {}
2383
2848
  for settings_key in choose_data.data_to_choose:
2384
- logger.warning(f"Type next the file/s for the input with ID: '{settings_key}'.")
2849
+ logger.warning(
2850
+ f"Type next the file/s for the input with ID: "
2851
+ f"'{settings_key}'."
2852
+ )
2385
2853
  chosen_files[settings_key] = {}
2386
2854
  filters = choose_data.data_to_choose[settings_key]["filters"]
2387
2855
  for filter_key in filters:
@@ -2396,7 +2864,9 @@ class Project:
2396
2864
  if filter_data["range"][0] != 0:
2397
2865
  number_of_files_to_select = filter_data["range"][0]
2398
2866
  elif filter_data["range"][1] != 0:
2399
- number_of_files_to_select = min(filter_data["range"][1], len(filter_data["files"]))
2867
+ number_of_files_to_select = min(
2868
+ filter_data["range"][1], len(filter_data["files"])
2869
+ )
2400
2870
  else:
2401
2871
  number_of_files_to_select = len(filter_data["files"])
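Read together, the branch above means filter_data["range"] is a (minimum, maximum) pair in which 0 marks an unbounded side. A condensed, runnable restatement:

    # Condensed restatement of the branch above; range_pair mirrors
    # filter_data["range"], available mirrors len(filter_data["files"]).
    def files_to_select(range_pair, available):
        minimum, maximum = range_pair
        if minimum != 0:
            return minimum                  # a hard minimum wins
        if maximum != 0:
            return min(maximum, available)  # capped by what exists
        return available                    # unbounded: take everything

    assert files_to_select((2, 0), 5) == 2
    assert files_to_select((0, 3), 5) == 3
    assert files_to_select((0, 0), 5) == 5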
2402
2872
 
@@ -2408,19 +2878,29 @@ class Project:
2408
2878
  # list_container_filter_files()
2409
2879
 
2410
2880
  if number_of_files_to_select != len(filter_data["files"]):
2881
+ substring = ""
2882
+ if number_of_files_to_select > 1:
2883
+ substring = "s (i.e., file1.zip, file2.zip, file3.zip)"
2411
2884
  logger.warning(
2412
2885
  f" · File filter name: '{filter_key}'. Type "
2413
- f"{number_of_files_to_select} file"
2414
- f"{'s (i.e., file1.zip, file2.zip, file3.zip)' if number_of_files_to_select > 1 else ''}."
2886
+ f"{number_of_files_to_select} file{substring}."
2415
2887
  )
2416
2888
  save_file_ids, select_file_filter = {}, ""
2417
2889
  for file_ in filter_data["files"]:
2418
- select_file_filter += f" · File name: {file_['name']}\n"
2890
+ select_file_filter += (
2891
+ f" · File name: {file_['name']}\n"
2892
+ )
2419
2893
  save_file_ids[file_["name"]] = file_["_id"]
2420
- names = [el.strip() for el in input(select_file_filter).strip().split(",")]
2894
+ names = [
2895
+ el.strip()
2896
+ for el in input(select_file_filter).strip().split(",")
2897
+ ]
2421
2898
 
2422
2899
  if len(names) != number_of_files_to_select:
2423
- logger.error("The number of files selected does not correspond to the number of needed files.")
2900
+ logger.error(
2901
+ "The number of files selected does not correspond "
2902
+ "to the number of needed files."
2903
+ )
2424
2904
  logger.error(
2425
2905
  f"Selected: {len(names)} vs. "
2426
2906
  f"Number of files to select: "
@@ -2430,14 +2910,27 @@ class Project:
2430
2910
  post_data["cancel"] = "1"
2431
2911
 
2432
2912
  elif any([name not in save_file_ids for name in names]):
2433
- logger.error(f"Some selected file/s '{', '.join(names)}' do not exist. Cancelling analysis...")
2913
+ logger.error(
2914
+ f"Some selected file/s '{', '.join(names)}' "
2915
+ f"do not exist. Cancelling analysis..."
2916
+ )
2434
2917
  post_data["cancel"] = "1"
2435
2918
  else:
2436
- chosen_files[settings_key][filter_key] = [save_file_ids[name] for name in names]
2919
+ chosen_files[settings_key][filter_key] = [
2920
+ save_file_ids[name] for name in names
2921
+ ]
2437
2922
 
2438
2923
  else:
2439
- logger.warning("Setting all available files to be input to the analysis.")
2440
- files_selection = [ff["_id"] for ff in filter_data["files"][:number_of_files_to_select]]
2924
+ logger.warning(
2925
+ "Setting all available files to be input to the "
2926
+ "analysis."
2927
+ )
2928
+ files_selection = [
2929
+ ff["_id"]
2930
+ for ff in filter_data["files"][
2931
+ :number_of_files_to_select
2932
+ ]
2933
+ ]
2441
2934
  chosen_files[settings_key][filter_key] = files_selection
2442
2935
 
2443
2936
  post_data["user_preference"] = json.dumps(chosen_files)
@@ -2496,11 +2989,12 @@ class Project:
2496
2989
  else:
2497
2990
  return True
2498
2991
 
2499
- def __operation(self, reference_value, operator, input_value):
2992
+ @staticmethod
2993
+ def __operation(reference_value, operator, input_value):
2500
2994
  """
2501
2995
  The method performs an operation by comparing the two input values.
2502
- The Operation is applied to the Input Value in comparison to the Reference
2503
- Value.
2996
+ The Operation is applied to the Input Value in comparison to the
2997
+ Reference Value.
2504
2998
 
2505
2999
  Parameters
2506
3000
  ----------
@@ -2516,39 +3010,32 @@ class Project:
2516
3010
  bool
2517
3011
  True if the operation is satisfied, False otherwise.
2518
3012
  """
2519
- if input_value is None or input_value == "":
3013
+ if not input_value: # Handles None, "", and other falsy values
2520
3014
  return False
2521
3015
 
2522
- if operator == "in":
2523
- return reference_value in input_value
2524
-
2525
- elif operator == "in-list":
2526
- return all([el in input_value for el in reference_value])
2527
-
2528
- elif operator == "eq":
2529
- return input_value == reference_value
2530
-
2531
- elif operator == "gt":
2532
- return input_value > reference_value
2533
-
2534
- elif operator == "gte":
2535
- return input_value >= reference_value
3016
+ operator_actions = {
3017
+ "in": lambda: reference_value in input_value,
3018
+ "in-list": lambda: all(
3019
+ el in input_value for el in reference_value
3020
+ ),
3021
+ "eq": lambda: input_value == reference_value,
3022
+ "gt": lambda: input_value > reference_value,
3023
+ "gte": lambda: input_value >= reference_value,
3024
+ "lt": lambda: input_value < reference_value,
3025
+ "lte": lambda: input_value <= reference_value,
3026
+ }
2536
3027
 
2537
- elif operator == "lt":
2538
- return input_value < reference_value
3028
+ action = operator_actions.get(operator, lambda: False)
3029
+ return action()
2539
3030
 
2540
- elif operator == "lte":
2541
- return input_value <= reference_value
2542
- else:
2543
- return False
2544
-
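
The rewrite above swaps the old if/elif chain for a dispatch table of zero-argument lambdas closing over the two values. One behavioural nuance: the new `not input_value` guard rejects every falsy input (0, an empty list), not only None and "" as before. A standalone sketch of the same pattern, with binary lambdas for clarity:

    # Standalone sketch of the dispatch-table pattern; same operator
    # semantics as the method above.
    OPS = {
        "eq": lambda ref, val: val == ref,
        "gt": lambda ref, val: val > ref,
        "gte": lambda ref, val: val >= ref,
        "lt": lambda ref, val: val < ref,
        "lte": lambda ref, val: val <= ref,
        "in": lambda ref, val: ref in val,
        "in-list": lambda ref, val: all(el in val for el in ref),
    }

    def operation(reference_value, operator, input_value):
        if not input_value:  # also rejects 0, [], {} -- not just None/""
            return False
        return OPS.get(operator, lambda *_: False)(reference_value, input_value)

    assert operation(3, "gte", 5) is True
    assert operation("T1", "in", "T1_weighted") is True
    assert operation(0, "eq", 0) is False  # falsy input short-circuits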
2545
- def __wrap_search_criteria(self, search_criteria={}):
3031
+ @staticmethod
3032
+ def __wrap_search_criteria(search_criteria=None):
2546
3033
  """
2547
3034
  Wraps the conditions specified within the Search Criteria in order for
2548
3035
  other methods to handle it easily. The conditions are grouped only into
2549
- three groups: Modality, Tags and the File Metadata (if DICOM it corresponds
2550
- to the DICOM information), and each of them is output in a different
2551
- variable.
3036
+ three groups: Modality, Tags and the File Metadata (if DICOM it
3037
+ corresponds to the DICOM information), and each of them is output
3038
+ in a different variable.
2552
3039
 
2553
3040
  Parameters
2554
3041
  ----------
@@ -2572,27 +3059,27 @@ class Project:
2572
3059
 
2573
3060
  Returns
2574
3061
  -------
2575
- modality : str
2576
- String containing the modality of the search criteria extracted from
2577
- 'pars_modalities'
2578
-
2579
- tags : list of str
2580
- List of strings containing the tags of the search criteria extracted
2581
- 'from pars_tags'
2582
-
2583
- file_metadata : Dict
2584
- Dictionary containing the file metadata of the search criteria
3062
+ tuple
3063
+ A tuple containing:
3064
+ - str: the modality of the search criteria, extracted
3065
+ from 'pars_modalities'
3066
+ - list: the tags of the search criteria, extracted
3067
+ from 'pars_tags'
3068
+ - dict: the file metadata of the search criteria
2585
3069
  extracted from 'pars_[dicom]_KEY'
2586
3070
  """
2587
3071
 
2588
3072
  # The keys not included below apply to the whole session.
3073
+ if search_criteria is None:
3074
+ search_criteria = {}
2589
3075
  modality, tags, file_metadata = "", list(), dict()
2590
3076
  for key, value in search_criteria.items():
2591
3077
  if key == "pars_modalities":
2592
3078
  modalities = value.split(";")[1].split(",")
2593
3079
  if len(modalities) != 1:
2594
3080
  raise ValueError(
2595
- f"A file can only have one modality. Provided Modalities: {', '.join(modalities)}."
3081
+ f"A file can only have one modality. "
3082
+ f"Provided Modalities: {', '.join(modalities)}."
2596
3083
  )
2597
3084
  modality = modalities[0]
2598
3085
  elif key == "pars_tags":
@@ -2601,21 +3088,34 @@ class Project:
2601
3088
  d_tag = key.split("pars_[dicom]_")[1]
2602
3089
  d_type = value.split(";")[0]
2603
3090
  if d_type == "string":
2604
- file_metadata[d_tag] = {"operation": "in", "value": value.replace(d_type + ";", "")}
3091
+ file_metadata[d_tag] = {
3092
+ "operation": "in",
3093
+ "value": value.replace(d_type + ";", ""),
3094
+ }
2605
3095
  elif d_type == "integer":
2606
3096
  d_operator = value.split(";")[1].split("|")[0]
2607
3097
  d_value = value.split(";")[1].split("|")[1]
2608
- file_metadata[d_tag] = {"operation": d_operator, "value": int(d_value)}
3098
+ file_metadata[d_tag] = {
3099
+ "operation": d_operator,
3100
+ "value": int(d_value),
3101
+ }
2609
3102
  elif d_type == "decimal":
2610
3103
  d_operator = value.split(";")[1].split("|")[0]
2611
3104
  d_value = value.split(";")[1].split("|")[1]
2612
- file_metadata[d_tag] = {"operation": d_operator, "value": float(d_value)}
3105
+ file_metadata[d_tag] = {
3106
+ "operation": d_operator,
3107
+ "value": float(d_value),
3108
+ }
2613
3109
  elif d_type == "list":
2614
3110
  value.replace(d_type + ";", "")
2615
- file_metadata[d_tag] = {"operation": "in-list", "value": value.replace(d_type + ";", "").split(";")}
3111
+ file_metadata[d_tag] = {
3112
+ "operation": "in-list",
3113
+ "value": value.replace(d_type + ";", "").split(";"),
3114
+ }
2616
3115
  return modality, tags, file_metadata
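
To illustrate the branches above: a hypothetical criteria dict and the tuple the wrapper would produce for it. Only the part after ';' in pars_modalities is read by the code above, so the 'string;' prefix used here is an assumption:

    # Hypothetical input; key/value formats follow the parsing above.
    criteria = {
        "pars_modalities": "string;T1",
        "pars_[dicom]_SeriesDescription": "string;MPRAGE",
        "pars_[dicom]_SliceThickness": "decimal;lte|3.0",
    }
    # Expected output per the branches above:
    #   modality      -> "T1"
    #   file_metadata -> {
    #       "SeriesDescription": {"operation": "in", "value": "MPRAGE"},
    #       "SliceThickness":    {"operation": "lte", "value": 3.0},
    #   }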
2617
3116
 
2618
- def __assert_split_data(self, split_data, ssid, add_to_container_id):
3117
+ @staticmethod
3118
+ def __assert_split_data(split_data, ssid, add_to_container_id):
2619
3119
  """
2620
3120
  Assert if the split_data parameter is possible to use in regards
2621
3121
  to the ssid and add_to_container_id parameters during upload.
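
The checks that follow reduce to a simple precedence rule: ssid and add_to_container_id each override split_data. A condensed, runnable restatement (the real code below also logs a warning in the override cases):

    # Condensed restatement of the checks below.
    def effective_split(split_data, ssid, add_to_container_id):
        if split_data and (ssid or add_to_container_id):
            return False  # either parameter overrides split_data
        return split_data

    assert effective_split(True, "1", 0) is False
    assert effective_split(True, None, 1234) is False
    assert effective_split(True, None, 0) is True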
@@ -2638,29 +3138,81 @@ class Project:
2638
3138
 
2639
3139
  logger = logging.getLogger(logger_name)
2640
3140
  if ssid and split_data:
2641
- logger.warning("split-data argument will be ignored because ssid has been specified")
3141
+ logger.warning(
3142
+ "split-data argument will be ignored because ssid has been "
3143
+ "specified"
3144
+ )
2642
3145
  split_data = False
2643
3146
 
2644
3147
  if add_to_container_id and split_data:
2645
- logger.warning("split-data argument will be ignored because add_to_container_id has been specified")
3148
+ logger.warning(
3149
+ "split-data argument will be ignored because "
3150
+ "add_to_container_id has been specified"
3151
+ )
2646
3152
  split_data = False
2647
3153
 
2648
3154
  return split_data
2649
3155
 
2650
- def __parse_fail_rules(self, failed_rules, result):
3156
+ @staticmethod
3157
+ def __parse_pass_rules(passed_rules, result):
3158
+ """
3159
+ Parse pass rules.
3160
+ """
3161
+
3162
+ for rule_text in passed_rules[1:]: # Skip first empty part
3163
+ rule_name = rule_text.split(" ✅")[0].strip()
3164
+ rule_data = {"rule": rule_name, "sub_rule": None, "files": []}
3165
+
3166
+ # Get sub-rule
3167
+ sub_rule_match = re.search(r"Sub-rule: (.*?)\n", rule_text)
3168
+ if sub_rule_match:
3169
+ rule_data["sub_rule"] = sub_rule_match.group(1).strip()
3170
+
3171
+ # Get files passed
3172
+ files_passed = re.search(
3173
+ r"List of files passed:(.*?)(?=\n\n|\Z)", rule_text, re.DOTALL
3174
+ )
3175
+ if files_passed:
3176
+ for line in files_passed.group(1).split("\n"):
3177
+ line = line.strip()
3178
+ if line.startswith("·"):
3179
+ file_match = re.match(r"· (.*?) \((\d+)/(\d+)\)", line)
3180
+ if file_match:
3181
+ rule_data["files"].append(
3182
+ {
3183
+ "file": file_match.group(1).strip(),
3184
+ "passed_conditions": int(
3185
+ file_match.group(2)
3186
+ ),
3187
+ }
3188
+ )
3189
+
3190
+ result["passed"].append(rule_data)
3191
+ return result
3192
+
3193
+ @staticmethod
3194
+ def __parse_fail_rules(failed_rules, result):
2651
3195
  """
2652
3196
  Parse fail rules.
2653
3197
  """
2654
3198
 
2655
3199
  for rule_text in failed_rules[1:]: # Skip first empty part
2656
3200
  rule_name = rule_text.split(" ❌")[0].strip()
2657
- rule_data = {"rule": rule_name, "files": [], "failed_conditions": {}}
3201
+ rule_data = {
3202
+ "rule": rule_name,
3203
+ "files": [],
3204
+ "failed_conditions": {},
3205
+ }
2658
3206
 
2659
3207
  # Extract all file comparisons for this rule
2660
- file_comparisons = re.split(r"\t- Comparison with file:", rule_text)
3208
+ file_comparisons = re.split(r"- Comparison with file:", rule_text)
2661
3209
  for comp in file_comparisons[1:]: # Skip first part
2662
3210
  file_name = comp.split("\n")[0].strip()
2663
- conditions_match = re.search(r"Conditions:(.*?)(?=\n\t- Comparison|\n\n|$)", comp, re.DOTALL)
3211
+ conditions_match = re.search(
3212
+ r"Conditions:(.*?)(?=\n\t- Comparison|\n\n|$)",
3213
+ comp,
3214
+ re.DOTALL,
3215
+ )
2664
3216
  if not conditions_match:
2665
3217
  continue
2666
3218
 
@@ -2672,7 +3224,14 @@ class Project:
2672
3224
  if line.startswith("·"):
2673
3225
  status = "✔" if "✔" in line else "🚫"
2674
3226
  condition = re.sub(r"^· [✔🚫]\s*", "", line)
2675
- conditions.append({"status": "passed" if status == "✔" else "failed", "condition": condition})
3227
+ conditions.append(
3228
+ {
3229
+ "status": (
3230
+ "passed" if status == "✔" else "failed"
3231
+ ),
3232
+ "condition": condition,
3233
+ }
3234
+ )
2676
3235
 
2677
3236
  # Add to failed conditions summary
2678
3237
  for cond in conditions:
@@ -2682,39 +3241,9 @@ class Project:
2682
3241
  rule_data["failed_conditions"][cond_text] = 0
2683
3242
  rule_data["failed_conditions"][cond_text] += 1
2684
3243
 
2685
- rule_data["files"].append({"file": file_name, "conditions": conditions})
3244
+ rule_data["files"].append(
3245
+ {"file": file_name, "conditions": conditions}
3246
+ )
2686
3247
 
2687
3248
  result["failed"].append(rule_data)
2688
3249
  return result
2689
-
2690
- def __parse_pass_rules(self, passed_rules, result):
2691
- """
2692
- Parse pass rules.
2693
- """
2694
-
2695
- for rule_text in passed_rules[1:]: # Skip first empty part
2696
- rule_name = rule_text.split(" ✅")[0].strip()
2697
- rule_data = {"rule": rule_name, "sub_rule": None, "files": []}
2698
-
2699
- # Get sub-rule
2700
- sub_rule_match = re.search(r"Sub-rule: (.*?)\n", rule_text)
2701
- if sub_rule_match:
2702
- rule_data["sub_rule"] = sub_rule_match.group(1).strip()
2703
-
2704
- # Get files passed
2705
- files_passed = re.search(r"List of files passed:(.*?)(?=\n\n|\Z)", rule_text, re.DOTALL)
2706
- if files_passed:
2707
- for line in files_passed.group(1).split("\n"):
2708
- line = line.strip()
2709
- if line.startswith("·"):
2710
- file_match = re.match(r"· (.*?) \((\d+)/(\d+)\)", line)
2711
- if file_match:
2712
- rule_data["files"].append(
2713
- {
2714
- "file": file_match.group(1).strip(),
2715
- "passed_conditions": int(file_match.group(2)),
2716
- }
2717
- )
2718
-
2719
- result["passed"].append(rule_data)
2720
- return result
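
As a closing note: the pass-rule parser removed here was relocated above as a @staticmethod, unchanged in logic. It matches report lines of the form `· <name> (<passed>/<total>)`. A quick sketch against a hypothetical report line; group 3, the total, is captured but unused by the method:

    import re

    line = "· T1_MPRAGE.nii.gz (3/3)"  # hypothetical report line
    m = re.match(r"· (.*?) \((\d+)/(\d+)\)", line)
    assert m is not None
    assert m.group(1) == "T1_MPRAGE.nii.gz"
    assert int(m.group(2)) == 3  # passed conditions; group(3) is the total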