pinexq-client 0.9.2.20251028.52__py3-none-any.whl → 0.10.4rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,9 @@
  import json as json_
  import warnings
+ import queue
+ import datetime
+ from datetime import datetime, timedelta
+
  from typing import Any, Self, List
 
  import httpx
@@ -7,6 +11,7 @@ from httpx import URL
  from pydantic import BaseModel, ConfigDict
 
  from pinexq_client.core import Link, MediaTypes, ClientException, ApiException
+ from pinexq_client.core.api_event_manager import ApiEventManagerSingleton
  from pinexq_client.core.polling import wait_until, PollingException
  from pinexq_client.job_management.enterjma import enter_jma
  from pinexq_client.job_management.hcos import WorkDataLink, ProcessingStepLink, InputDataSlotHco, OutputDataSlotHco
@@ -92,7 +97,7 @@ class Job:
  .select_processing(processing_step='job_processing')
  .configure_parameters(**job_parameters)
  .start()
- .wait_for_state(JobStates.completed)
+ .wait_for_completion()
  .delete()
  )
  """
@@ -127,7 +132,7 @@ class Job:
  The newly created job as `Job` object
  """
  job_link = self._jobs_root.create_job_action.execute(
- CreateJobParameters(name=name)
+ CreateJobParameters(Name=name)
  )
  self._get_by_link(job_link)
  return self
@@ -182,7 +187,7 @@ class Job:
  self._raise_if_no_hco()
  parent_job_url = self.job_hco.self_link.get_url()
  sub_job_link = self._jobs_root.create_subjob_action.execute(
- CreateSubJobParameters(name=name, parent_job_url=str(parent_job_url))
+ CreateSubJobParameters(Name=name, ParentJobUrl=str(parent_job_url))
  )
  sub_job = Job(self._client)
  sub_job._get_by_link(sub_job_link)
@@ -256,10 +261,10 @@ class Job:
  raise TypeError('Instance passed to "function_name" is not of type "str"')
  # ToDo: provide more parameters to query a processing step
  query_param = ProcessingStepQueryParameters(
- filter=ProcessingStepFilterParameter(
- function_name=function_name,
- function_name_match_type=FunctionNameMatchTypes.match_exact,
- version=function_version
+ Filter=ProcessingStepFilterParameter(
+ FunctionName=function_name,
+ FunctionNameMatchType=FunctionNameMatchTypes.match_exact,
+ Version=function_version
  )
  )
  query_result = self._processing_step_root.query_action.execute(query_param)
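
This hunk is one instance of a change that runs through the whole release: action-parameter models are now constructed with PascalCase keyword arguments (Filter, FunctionName, ...) instead of snake_case. A minimal sketch of what such a pydantic model could look like; the field declarations below are an assumption for illustration, not the package's actual definitions:

    from pydantic import BaseModel

    class ProcessingStepFilterParameter(BaseModel):
        # Hypothetical shape: fields declared in PascalCase to match the
        # API's wire format, so call sites pass PascalCase kwargs directly.
        FunctionName: str | None = None
        FunctionNameMatchType: str | None = None  # an enum in the real package
        Version: str | None = None

    p = ProcessingStepFilterParameter(FunctionName="resample", Version="1.0")
    print(p.model_dump(exclude_none=True))
    # {'FunctionName': 'resample', 'Version': '1.0'}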
@@ -271,7 +276,7 @@ class Job:
  processing_url = query_result.processing_steps[0].self_link.get_url()
 
  self.job_hco.select_processing_action.execute(
- SelectProcessingParameters(processing_step_url=str(processing_url))
+ SelectProcessingParameters(ProcessingStepUrl=str(processing_url))
  )
 
  self.refresh()
@@ -322,13 +327,64 @@
  result = self.job_hco.result
  return json_.loads(result) if result else None
 
- def wait_for_state(self, state: JobStates, timeout_ms: int = 5000, polling_interval_ms: int = 1000) -> Self:
+ def wait_for_state_sse(self, state: JobStates, timeout_s: float | None = None, fallback_polling_interval_s: float = 300) -> Self:
+ self._raise_if_no_hco()
+
+ # early exit
+ if self.job_hco.state == state:
+ return self
+ if self.job_hco.state == JobStates.error:
+ error_reason = self.job_hco.error_description
+ raise PollingException(f"Job failed. Error: {error_reason}")
+
+ if timeout_s is None:
+ function_timeout_on = datetime.max
+ else:
+ # hard deadline for this wait
+ function_timeout_on = datetime.now() + timedelta(seconds=timeout_s)
+
+ job_changed_signal = queue.Queue()
+ manager = ApiEventManagerSingleton()
+ job_url_str = str(self.job_hco.self_link.get_url())
+ manager.subscribe_waiter(self._client, job_url_str, job_changed_signal)
+
+ try:
+ self.get_state()
+ job_done = self.job_hco.state == state
+ while not job_done:
+ time_till_function_timeout = function_timeout_on - datetime.now()
+ if time_till_function_timeout.total_seconds() <= 0.0:
+ raise PollingException(f"Timeout waiting for Job state. Current state: {self.job_hco.state}")
+
+ next_wait_timeout_s = min(time_till_function_timeout.total_seconds(), fallback_polling_interval_s)
+
+ try:
+ job_changed_signal.get(timeout=next_wait_timeout_s)
+ except queue.Empty:
+ # no event arrived within the fallback interval; poll anyway
+ pass
+
+ # drain remaining messages so a burst of events triggers only one state poll
+ while not job_changed_signal.empty():
+ job_changed_signal.get_nowait()
+
+ self.get_state()
+ job_done = self.job_hco.state == state
+ if self.job_hco.state == JobStates.error:
+ error_reason = self.job_hco.error_description
+ raise PollingException(f"Job failed. Error: {error_reason}")
+ finally:
+ manager.unsubscribe_waiter(self._client, job_url_str, job_changed_signal)
+
+ return self
+
+ def wait_for_state(self, state: JobStates, timeout_s: float | None = None, polling_interval_s: float = 1) -> Self:
  """Wait for this job to reach a state. If the job enters the error state an exception is raised
 
  Args:
  state: The state to wait for. After the job enters this state this function returns.
- timeout_ms: Time span in milliseconds to wait for reaching the state before raising an exception.
- polling_interval_ms: will determine how fast the API is polled for updates.
+ timeout_s: Time span in seconds to wait for reaching the state before raising an exception.
+ polling_interval_s: Will determine how fast the API is polled for updates.
  Note that low values will produce unnecessary load.
 
  Returns:
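
Stripped of the HCO plumbing, the new `wait_for_state_sse` boils down to a deadline-bounded wait on an event queue with a polling fallback. A standalone sketch of that control flow; `poll_state` and the queue wiring stand in for `get_state()` and `ApiEventManagerSingleton`, which are not reproduced here:

    import queue
    from datetime import datetime, timedelta
    from typing import Callable

    def wait_with_events(poll_state: Callable[[], str], target: str,
                         signal: "queue.Queue[object]",
                         timeout_s: float | None = None,
                         fallback_s: float = 300) -> None:
        # Overall deadline; None means wait indefinitely.
        deadline = datetime.max if timeout_s is None else datetime.now() + timedelta(seconds=timeout_s)
        while poll_state() != target:
            remaining = (deadline - datetime.now()).total_seconds()
            if remaining <= 0.0:
                raise TimeoutError("deadline exceeded while waiting for target state")
            try:
                # Block until an event arrives, the fallback interval elapses,
                # or the overall deadline is closer than the fallback.
                signal.get(timeout=min(remaining, fallback_s))
            except queue.Empty:
                pass  # no event; fall through and poll anyway
            while not signal.empty():
                signal.get_nowait()  # collapse event bursts into one poll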
@@ -340,10 +396,10 @@ class Job:
  try:
  wait_until(
  condition=lambda: self.get_state() == state,
- timeout_ms=timeout_ms,
+ timeout_ms=int(timeout_s * 1000) if timeout_s is not None else None,
  timeout_message="Waiting for job completion",
  error_condition=lambda: self.job_hco.state == JobStates.error,
- polling_interval_ms=polling_interval_ms
+ polling_interval_ms=int(polling_interval_s * 1000)
  )
  except TimeoutError as timeout:
  raise TimeoutError(
@@ -358,18 +414,17 @@ class Job:
 
  return self
 
- def wait_for_completion(self, timeout_ms: int = 60000, polling_interval_ms: int = 500) -> Self:
+ def wait_for_completion(self, timeout_s: float | None = None) -> Self:
  """Wait for this job to reach the state 'completed'.
 
  Args:
- timeout_ms: Timeout to wait for the job to reach the next state.
- polling_interval_ms: will determine how fast the API is polled for updates.
+ timeout_s: Timeout to wait for the job to reach the next state.
  Note that low values will produce unnecessary load.
 
  Returns:
  This `Job` object
  """
- return self.wait_for_state(JobStates.completed, timeout_ms, polling_interval_ms)
+ return self.wait_for_state_sse(JobStates.completed, timeout_s)
 
  def assign_input_dataslot(
  self,
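
Both wait methods switch from millisecond integers to second floats (internally the value is still converted with int(timeout_s * 1000) for wait_until), and wait_for_completion now routes through the event-driven path. Callers migrating from 0.9.x would change, for example:

    job.wait_for_completion(timeout_ms=60000, polling_interval_ms=500)  # 0.9.x
    job.wait_for_completion(timeout_s=60)   # 0.10.x; timeout_s=None waits indefinitely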
@@ -404,7 +459,7 @@ class Job:
  dataslot = self.job_hco.input_dataslots[index]
  dataslot.select_workdata_action.execute(
  parameters=SelectWorkDataForDataSlotParameters(
- work_data_url=str(work_data.get_url())
+ WorkDataUrl=str(work_data.get_url())
  )
  )
  self.refresh()
@@ -445,7 +500,7 @@ class Job:
  dataslot = self.job_hco.input_dataslots[index]
  dataslot.select_workdata_collection_action.execute(
  parameters=SelectWorkDataCollectionForDataSlotParameters(
- work_data_urls=[str(workdata_link.get_url()) for workdata_link in work_datas]
+ WorkDataUrls=[str(workdata_link.get_url()) for workdata_link in work_datas]
  )
  )
  self.refresh()
@@ -483,15 +538,15 @@ class Job:
  ) -> JobQueryResultHco:
  self._raise_if_no_hco()
  filter_param = JobFilterParameter(
- is_sub_job=True,
- parent_job_url=str(self.job_hco.self_link.get_url()),
- state=state,
- name=name,
+ IsSubJob=True,
+ ParentJobUrl=str(self.job_hco.self_link.get_url()),
+ State=state,
+ Name=name,
  show_deleted=show_deleted,
- processing_step_url=processing_step_url,
+ ProcessingStepUrl=processing_step_url,
  )
 
- query_param = JobQueryParameters(sort_by=sort_by, filter=filter_param)
+ query_param = JobQueryParameters(SortBy=sort_by, Filter=filter_param)
  job_query_result = self._jobs_root.job_query_action.execute(query_param)
  return job_query_result
 
@@ -511,32 +566,32 @@ class Job:
  query_result = self._get_sub_jobs(state=state)
  return query_result.total_entities
 
- def wait_for_sub_jobs_complete(self, timeout_ms: int = 60000, polling_interval_ms: int = 1000) -> Self:
+ def wait_for_sub_jobs_complete(self, timeout_s: float = 60, polling_interval_s: float = 1) -> Self:
  """Wait for all sub-jobs to reach the state 'completed'.
 
  This function will block execution until the state is reached or raise an exception
  if the operation timed out or a sub-job returned an error. Only started jobs will be watched.
 
  Args:
- timeout_ms: Timeout to wait for the sub-jobs to reach the next state.
+ timeout_s: Timeout to wait for the sub-jobs to reach the next state.
 
  Returns:
  This `Job` object
- :param timeout_ms: Wil determine how long to wait for success
- :param polling_interval_ms: will determine how fast the API is polled for updates.
+ :param timeout_s: Will determine how long to wait for success
+ :param polling_interval_s: Will determine how fast the API is polled for updates.
  Note that low values will produce unnecessary load.
  """
  wait_until(
  condition=lambda: self.sub_jobs_in_state(JobStates.pending) == 0,
- timeout_ms=timeout_ms,
- timeout_message=f"Timeout while waiting for sub-jobs to complete! [timeout: {timeout_ms}ms]",
- polling_interval_ms=polling_interval_ms
+ timeout_ms=int(timeout_s * 1000),
+ timeout_message=f"Timeout while waiting for sub-jobs to complete! [timeout: {timeout_s}s]",
+ polling_interval_ms=int(polling_interval_s * 1000)
  )
  wait_until(
  condition=lambda: self.sub_jobs_in_state(JobStates.processing) == 0,
- timeout_ms=timeout_ms,
- timeout_message=f"Timeout while waiting for sub-jobs to complete! [timeout: {timeout_ms}ms]",
- polling_interval_ms=polling_interval_ms
+ timeout_ms=int(timeout_s * 1000),
+ timeout_message=f"Timeout while waiting for sub-jobs to complete! [timeout: {timeout_s}s]",
+ polling_interval_ms=int(polling_interval_s * 1000)
  )
 
  error_count = self.sub_jobs_in_state(JobStates.error)
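
The sub-job wait keeps the pure polling path, now in seconds, and drains in two passes: first until no sub-job is pending, then until none is processing. Unlike wait_for_state, its timeout stays finite (default 60 s) rather than optional. A migrated call:

    parent_job.wait_for_sub_jobs_complete(timeout_ms=120000, polling_interval_ms=2000)  # 0.9.x
    parent_job.wait_for_sub_jobs_complete(timeout_s=120, polling_interval_s=2)          # 0.10.x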
@@ -574,29 +629,47 @@ class Job:
  delete_subjobs_with_data: bool = True
  ) -> Self:
  """Delete this job after deleting output workdata and subjobs (recursive call) depending on the flag.
- Afterward, also deletes input workdata depending on the flag. This is a best effort operation,
- if an operation can not be executed a warning will be printed but the process continues.
+ Afterward, also deletes input workdata depending on the flag. This is a best-effort operation;
+ if an operation cannot be executed a warning will be printed but the process continues.
 
- Args:
- delete_output_workdata: boolean flag to specify if output WorkData should be attempted for deletion. Default: True
- delete_input_workdata: boolean flag to specify if input WorkData should be attempted for deletion. Default: False
- delete_subjobs_with_data: boolean flag tp specify if Sub jobs should be attempted for deletion. Default: True
+ Args:
+ delete_output_workdata: boolean flag to specify if output WorkData should be attempted for deletion. Default: True
+ delete_input_workdata: boolean flag to specify if input WorkData should be attempted for deletion. Default: False
+ delete_subjobs_with_data: boolean flag to specify if sub-jobs should be attempted for deletion. Default: True
 
- Returns:
- This `Job` object
+ Returns:
+ This `Job` object
  """
+ return self._delete_with_associated_internal(
+ delete_output_workdata=delete_output_workdata,
+ delete_input_workdata=delete_input_workdata,
+ delete_subjobs_with_data=delete_subjobs_with_data,
+ recursion_depth=0)
+
+ def _delete_with_associated_internal(
+ self,
+ *,
+ delete_output_workdata: bool = True,
+ delete_input_workdata: bool = False,
+ delete_subjobs_with_data: bool = True,
+ recursion_depth: int = 0
+ ) -> Self:
  self._raise_if_no_hco()
 
  # delete subjobs
- if delete_subjobs_with_data is True:
+ if delete_subjobs_with_data:
+ if recursion_depth > 20:
+ raise Exception("Recursion limit of subjob deletion exceeded.")
+
  for subjob in self._get_sub_jobs().iter_flat():
  try:
  # recursion
  subjob_wrapper = Job.from_hco(subjob)
- subjob_wrapper.delete_with_associated(
+ subjob_wrapper._delete_with_associated_internal(
  delete_output_workdata=delete_output_workdata,
  delete_input_workdata=delete_input_workdata,
- delete_subjobs_with_data=delete_subjobs_with_data)
+ delete_subjobs_with_data=delete_subjobs_with_data,
+ recursion_depth=recursion_depth + 1)
  if subjob.self_link.exists():
  warnings.warn(f"Could not delete subjob: {subjob.self_link.get_url()}")
  except (ClientException, ApiException) as e:
@@ -605,7 +678,7 @@ class Job:
  self.refresh()
 
  # delete output workdatas
- if delete_output_workdata is True:
+ if delete_output_workdata:
  for slot in self.job_hco.output_dataslots:
  for wd in slot.assigned_workdatas:
  try:
@@ -633,7 +706,7 @@ class Job:
  warnings.warn(f"Could not delete job: {self.self_link().get_url()}\n{e}")
 
  # finally delete input workdatas
- if delete_input_workdata is True:
+ if delete_input_workdata:
  for slot in self.job_hco.input_dataslots:
  for wd in slot.selected_workdatas:
  try:
@@ -692,7 +765,7 @@ class Job:
  """
  self._raise_if_no_hco()
  self.job_hco.edit_tags_action.execute(
- SetJobTagsParameters(tags=tags)
+ SetJobTagsParameters(Tags=tags)
  )
  self.refresh()
  return self
@@ -730,14 +803,23 @@ class Job:
  processing_step_instance: ProcessingStep | None = None,
  start: bool = True,
  parameters: str | None = None,
- allow_output_data_slots: bool | None = None,
+ allow_output_data_deletion: bool | None = None,
  input_data_slots: List[InputDataSlotParameterFlexible] | None = None,
  ) -> Self:
  """
  Creates a new job and configures it rapidly with RapidJobSetupParameters.
 
  Args:
- parameters: The parameters to configure the job with.
+ name: Name of the job to be created
+ parent_job_url: URL of the parent job as JobLink. Only one of parent_job_url or parent_job_instance may be provided.
+ parent_job_instance: Parent job as Job instance. Only one of parent_job_url or parent_job_instance may be provided.
+ tags: Tags to assign to the job
+ processing_step_url: URL of the processing step as ProcessingStepLink. Only one of processing_step_url or processing_step_instance may be provided.
+ processing_step_instance: Processing step as ProcessingStep instance. Only one of processing_step_url or processing_step_instance may be provided.
+ start: Flag indicating whether to start the job after creation
+ parameters: Input parameters to the job
+ allow_output_data_deletion: Flag indicating whether to allow output data deletion
+ input_data_slots: List of InputDataSlotParameterFlexible to assign work data to input data slots
 
  Returns:
  The newly created job as `Job` object
@@ -785,7 +867,7 @@ class Job:
  Tags=tags,
  Start=start,
  Parameters=parameters,
- AllowOutputDataDeletion=allow_output_data_slots,
+ AllowOutputDataDeletion=allow_output_data_deletion,
  InputDataSlots=input_data_slots
  )
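
The old keyword `allow_output_data_slots` never matched the field it fed (`AllowOutputDataDeletion`); the rename fixes that. A migrated call might look like the sketch below; the method name `rapid_setup` and the `step` instance are assumptions, the keyword names come from the signature above:

    job = Job(client).rapid_setup(            # method name assumed
        name="my_job",
        processing_step_instance=step,        # a ProcessingStep instance
        parameters='{"param1": "value"}',
        allow_output_data_deletion=True,      # was: allow_output_data_slots
        start=True,
    )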