pinexq-client 0.9.2.20250908.48__py3-none-any.whl → 0.10.3rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,49 +1,60 @@
1
- import time
1
+ import queue
2
+ from datetime import datetime, timedelta
3
+ from enum import StrEnum
2
4
  from typing import Union, Self
3
5
 
4
- import httpx
5
-
6
+ from pinexq_client.core.api_event_manager import ApiEventManagerSingleton
7
+ from pinexq_client.core.polling import PollingException
6
8
  from pinexq_client.job_management.tool import Job
7
9
  from pinexq_client.job_management.hcos import JobQueryResultHco
8
10
  from pinexq_client.job_management.model import JobStates
9
11
 
10
12
 
13
+
11
14
  class JobGroup:
12
15
  """
13
- A wrapper class for a group of jobs for easier execution and waiting
16
+ A wrapper class for a group of jobs for easier execution and waiting.
17
+ Internally jobs are held in a set, so the order of execution is not guaranteed
14
18
 
15
19
  Attributes:
16
- _client:
17
- The http client
18
20
  _jobs:
19
- List of jobs in the group
21
+ Set of jobs in the group
20
22
  """
21
23
 
22
- _client: httpx.Client
24
+ _jobs: set[Job]
25
+
26
+ class WaitJobErrorBehaviour(StrEnum):
27
+ IGNORE = "ignore"
28
+ CONTINUE = "continue"
29
+ COMPLETE = "complete"
23
30
 
24
- def __init__(self, client: httpx.Client):
25
- self._jobs: list[Job] = []
26
- self._client = client
31
+ def __init__(self):
32
+ self._jobs: set[Job] = set()
27
33
 
28
34
  @classmethod
29
- def from_query_result(cls, client: httpx.Client, job_query_result: JobQueryResultHco) -> Self:
35
+ def from_query_result(cls, job_query_result: JobQueryResultHco) -> Self:
30
36
  """
31
37
  Initializes a `JobGroup` object from a JobQueryResultHco object
32
38
  Args:
33
- client: The http client
34
39
  job_query_result: The JobQueryResultHco object whose jobs are to be added to the JobGroup
35
40
 
36
41
  Returns:
37
42
  The newly created `JobGroup` instance
38
43
  """
39
- instance = cls(client)
44
+ instance = cls()
40
45
  for job in job_query_result.iter_flat():
41
46
  instance.add_jobs(Job.from_hco(job))
42
47
  return instance
43
48
 
49
+ def is_empty(self) -> bool:
50
+ """
51
+ True if the group is empty
52
+ """
53
+ return len(self._jobs) <= 0
54
+
44
55
  def add_jobs(self, jobs: Union[Job, list[Job]]) -> Self:
45
56
  """
46
- Add a job or multiple jobs to the group
57
+ Add a job or multiple jobs to the group. Duplicates will not be added.
47
58
 
48
59
  Args:
49
60
  jobs: A job or a list of job objects to be added to the JobGroup
@@ -53,14 +64,14 @@ class JobGroup:
53
64
  """
54
65
 
55
66
  if isinstance(jobs, list):
56
- self._jobs.extend(jobs)
67
+ self._jobs.update(jobs)
57
68
  else:
58
- self._jobs.append(jobs)
69
+ self._jobs.add(jobs)
59
70
  return self
60
71
 
61
72
  def start_all(self) -> Self:
62
73
  """
63
- Start all jobs
74
+ Start all jobs, throws if a job cannot be started.
64
75
 
65
76
  Returns:
66
77
  This `JobGroup` object
@@ -69,65 +80,171 @@ class JobGroup:
69
80
  job.start()
70
81
  return self
71
82
 
72
- def wait_all(self, *, job_timeout_ms: int = 5000, total_timeout_ms: int | None = None) -> Self:
83
+ def wait_all(self, job_timeout_s: float | None = None, total_timeout_s: float | None = None) -> Self:
73
84
  """
74
85
  Wait for all jobs to complete or error state.
75
86
  If the overall timeout elapses and some jobs are not complete, then exception.
76
87
 
77
88
  Args:
78
- job_timeout_ms:
79
- Individual job timeout in milliseconds. Default is 5000 ms.
80
- total_timeout_ms:
81
- Timeout for the whole operation in milliseconds. Default is no timeout.
89
+ job_timeout_s:
90
+ Individual job timeout in seconds.
91
+ total_timeout_s:
92
+ Timeout for the whole operation in seconds. Default is no timeout.
82
93
  Returns:
83
94
  This `JobGroup` object
84
95
  """
85
- start_time = time.time()
96
+ start_time = datetime.now()
86
97
  for job in self._jobs:
87
- if total_timeout_ms is not None:
88
- elapsed_time_ms = (time.time() - start_time) * 1000
89
- if total_timeout_ms - elapsed_time_ms <= 0:
98
+ if total_timeout_s is not None:
99
+ elapsed_time = datetime.now() - start_time
100
+ if total_timeout_s - elapsed_time.total_seconds() <= 0:
90
101
  raise Exception("Total timeout exceeded while waiting for jobs.")
91
102
 
92
103
  try:
93
- job.wait_for_state(JobStates.completed, timeout_ms=job_timeout_ms)
104
+ job.wait_for_completion(timeout_s=job_timeout_s)
94
105
  except Exception:
95
106
  pass
96
107
  return self
97
108
 
98
- def all_jobs_completed_ok(self) -> bool:
109
+ def wait_any(self, timeout_s: float | None = None, fallback_polling_interval_s: float = 300)-> Self:
110
+ """
111
+ Waits for any job to complete or enter an error state.
112
+
113
+ Args:
114
+ timeout_s:
115
+ Timeout for the function. Throws on timeout.
116
+ fallback_polling_interval_s:
117
+ Interval for hard polls not using SSE notification.
118
+ """
119
+
120
+ # early exits
121
+ if self.is_empty():
122
+ return self
123
+
124
+ # see if any job is already done, without polling
125
+ if self._any_job_ended(self._jobs, refresh_job=False):
126
+ return self
127
+
128
+ if timeout_s is None:
129
+ function_timeout_on = datetime.max
130
+ else:
131
+ # end this wait hard
132
+ function_timeout_on = datetime.now() + timedelta(seconds=timeout_s)
133
+
134
+ # this will be used for all jobs so we get a notification if one changes
135
+ any_job_changed_signal = queue.Queue()
136
+ manager = ApiEventManagerSingleton()
137
+
99
138
  for job in self._jobs:
100
- state = job.get_state()
139
+ manager.subscribe_waiter(job._client, str(job.job_hco.self_link.get_url()), any_job_changed_signal)
140
+
141
+ try:
142
+ job_done = self._any_job_ended(self._jobs, refresh_job=True)
143
+ poll_all = False
144
+ job_links_to_poll: set[Job] = set()
145
+ while not job_done:
146
+ time_till_function_timeout = function_timeout_on - datetime.now()
147
+ if time_till_function_timeout.total_seconds() <= 0.0:
148
+ raise PollingException(f"Timeout waiting for JobGroup complete.")
149
+
150
+ next_wait_timeout_s = min(float(time_till_function_timeout.seconds), fallback_polling_interval_s)
151
+ try:
152
+ job_links_to_poll = set()
153
+ job_link_of_notified = any_job_changed_signal.get(timeout=next_wait_timeout_s)
154
+ job_links_to_poll.add(self._get_job_by_link(job_link_of_notified))
155
+ except queue.Empty:
156
+ # timeout, check all. One might have completed silently
157
+ poll_all = True
158
+ pass
159
+
160
+ # read all messages since we only want to poll new state once if there are multiple messages
161
+ while not any_job_changed_signal.empty():
162
+ job_link_of_notified = any_job_changed_signal.get(timeout=next_wait_timeout_s)
163
+ job_links_to_poll.add(self._get_job_by_link(job_link_of_notified))
164
+
165
+ if poll_all:
166
+ job_done = self._any_job_ended(self._jobs, refresh_job=True)
167
+ else:
168
+ job_done = self._any_job_ended(job_links_to_poll, refresh_job=True)
169
+
170
+ finally:
171
+ for job in self._jobs:
172
+ manager.unsubscribe_waiter(job._client, str(job.job_hco.self_link.get_url()), any_job_changed_signal)
173
+
174
+ # refresh all that were not yet completed so all jobs in group are up to date
175
+ self._refresh_uncompleted_jobs(self._jobs)
176
+ return self
177
+
178
+ def all_jobs_completed_ok(self, refresh_jobs = False) -> bool:
179
+ for job in self._jobs:
180
+ state = self._get_job_state(job, refresh_jobs)
101
181
  if state is not JobStates.completed:
102
182
  return False
103
183
  return True
104
184
 
105
- def incomplete_jobs(self) -> list[Job]:
185
+ def get_jobs(self) -> list[Job]:
106
186
  """
107
- Returns the incomplete jobs
187
+ Returns the list of jobs in the group
108
188
 
109
189
  Returns:
110
- Count of incomplete jobs
190
+ List of jobs in the group
111
191
  """
112
- incomplete_jobs = []
113
- for job in self._jobs:
114
- state = job.get_state()
115
- if state in (JobStates.processing, JobStates.pending):
116
- incomplete_jobs.append(job)
117
- return incomplete_jobs
192
+ return list(self._jobs)
118
193
 
119
- def jobs_with_error(self) -> list[Job]:
194
+ def get_completed_jobs(self, refresh_jobs = False) -> list[Job]:
120
195
  """
121
- Returns the list of jobs that produced errors
196
+ Returns the list of jobs that are completed
122
197
 
123
198
  Returns:
124
- List of jobs that produced errors
199
+ List of jobs
125
200
  """
126
- return [job for job in self._jobs if job.get_state() == JobStates.error]
201
+ return [job for job in self._jobs if self._get_job_state(job, refresh_jobs) == JobStates.completed]
202
+
203
+ def get_incomplete_jobs(self, refresh_jobs = False) -> list[Job]:
204
+ """
205
+ Returns the incomplete jobs in state JobStates.processing or JobStates.pending
206
+
207
+ Returns:
208
+ List of jobs
209
+ """
210
+ return [job for job in self._jobs if self._get_job_state(job, refresh_jobs) in (JobStates.processing, JobStates.pending)]
211
+
212
+ def get_error_jobs(self, refresh_jobs = False) -> list[Job]:
213
+ """
214
+ Returns the list of jobs that are in error state
215
+
216
+ Returns:
217
+ List of jobs
218
+ """
219
+ return [job for job in self._jobs if self._get_job_state(job, refresh_jobs) == JobStates.error]
220
+
221
+ def has_error_jobs(self, refresh_jobs = False) -> bool:
222
+ """ Check if there are jobs in error state."""
223
+ return any(self._get_job_state(job, refresh_jobs) == JobStates.error for job in self._jobs)
224
+
225
+ def has_incomplete_jobs(self, refresh_jobs = False) -> bool:
226
+ """ Check if there are jobs in state JobStates.processing or JobStates.pending."""
227
+ return any(self._get_job_state(job, refresh_jobs) in (JobStates.processing, JobStates.pending) for job in self._jobs)
228
+
229
+ def has_completed_jobs(self, refresh_jobs = False) -> bool:
230
+ """ Check if there are jobs in completed state."""
231
+ return any(self._get_job_state(job, refresh_jobs) == JobStates.completed for job in self._jobs)
232
+
233
+ def remove_error_jobs(self, refresh_jobs=False):
234
+ """Remove all jobs in error state."""
235
+ self.remove(self.get_error_jobs(refresh_jobs))
236
+
237
+ def remove_incomplete_jobs(self, refresh_jobs=False):
238
+ """Remove all jobs in state JobStates.processing or JobStates.pending."""
239
+ self.remove(self.get_incomplete_jobs(refresh_jobs))
240
+
241
+ def remove_completed_jobs(self, refresh_jobs=False):
242
+ """Remove all completed jobs."""
243
+ self.remove(self.get_completed_jobs(refresh_jobs))
127
244
 
128
245
  def remove(self, jobs: Job | list[Job]) -> Self:
129
246
  """
130
- Removes given job(s) from the group
247
+ Removes given job(s) from the group.
131
248
 
132
249
  Args:
133
250
  jobs:
@@ -136,17 +253,11 @@ class JobGroup:
136
253
  This `JobGroup` object
137
254
  """
138
255
 
139
- def remove_by_url(job_url: str):
140
- for existing_job in self._jobs:
141
- if existing_job.self_link().get_url() == job_url:
142
- self._jobs.remove(existing_job)
143
- break
144
-
145
256
  if isinstance(jobs, list):
146
257
  for job in jobs:
147
- remove_by_url(str(job.self_link().get_url()))
258
+ self._jobs.remove(job)
148
259
  else:
149
- remove_by_url(str(jobs.self_link().get_url()))
260
+ self._jobs.remove(jobs)
150
261
 
151
262
  return self
152
263
 
@@ -157,14 +268,50 @@ class JobGroup:
157
268
  Returns:
158
269
  This `JobGroup` object
159
270
  """
160
- self._jobs = []
271
+ self._jobs = set()
161
272
  return self
162
273
 
163
- def get_jobs(self) -> list[Job]:
164
- """
165
- Returns the list of jobs in the group
274
+ @staticmethod
275
+ def _any_job_ended(jobs: set[Job], refresh_job: bool) -> bool:
276
+ # early exit by checking without polling in any case
277
+ for job in jobs:
278
+ state = JobGroup._get_job_state(job, refresh_job=False)
279
+ if state == JobStates.completed:
280
+ return True
281
+ if state == JobStates.error:
282
+ return True
283
+
284
+ if refresh_job:
285
+ for job in jobs:
286
+ state = JobGroup._get_job_state(job, refresh_job=refresh_job)
287
+ if state == JobStates.completed:
288
+ return True
289
+ if state == JobStates.error:
290
+ return True
291
+ return False
292
+
293
+ @staticmethod
294
+ def _refresh_uncompleted_jobs(jobs: set[Job]):
295
+ for job in jobs:
296
+ if job.job_hco.state != JobStates.completed and job.job_hco.state != JobStates.error:
297
+ job.refresh()
298
+
299
+ for job in jobs:
300
+ if job.job_hco.state == JobStates.error:
301
+ raise PollingException(f"Job failed'. Error:{job.job_hco.error_description}")
302
+
303
+ @staticmethod
304
+ def _get_job_state(job: Job, refresh_job: bool) -> JobStates:
305
+ """Only poll jobs if forced, else use internal state"""
306
+ if refresh_job:
307
+ state = job.get_state()
308
+ else:
309
+ # use current internal job state to avoid polling ALL
310
+ state = job.job_hco.state
311
+ return state
166
312
 
167
- Returns:
168
- List of jobs in the group
169
- """
170
- return self._jobs
313
+ def _get_job_by_link(self, job_link: str)-> Job:
314
+ for job in self._jobs:
315
+ if str(job.self_link().get_url()) == job_link:
316
+ return job
317
+ raise Exception(f"Could not lookup job in internal list for link: {job_link}")
@@ -2,7 +2,6 @@ from typing import Any, Self, Optional
2
2
 
3
3
  import httpx
4
4
  from httpx import URL
5
-
6
5
  from pinexq_client.core import Link, MediaTypes
7
6
  from pinexq_client.core.hco.upload_action_hco import UploadParameters
8
7
  from pinexq_client.job_management.enterjma import enter_jma
@@ -15,7 +14,8 @@ from pinexq_client.job_management.model import (
15
14
  CreateProcessingStepParameters,
16
15
  SetProcessingStepTagsParameters, ProcessingStepQueryParameters, ProcessingStepFilterParameter,
17
16
  FunctionNameMatchTypes, EditProcessingStepParameters, CopyPsFromUserToOrgActionParameters,
18
- CopyPsFromOrgToUserActionParameters,
17
+ CopyPsFromOrgToUserActionParameters, DeprecatePsActionParameters, ConfigureDeploymentParameters,
18
+ DeploymentResourcePresets, ScalingConfiguration, DeploymentStates, AssignCodeHashParameters
19
19
  )
20
20
 
21
21
 
@@ -277,6 +277,140 @@ class ProcessingStep:
277
277
  self.processing_step_hco = None
278
278
  return self
279
279
 
280
+ def deprecate(self, reason: str | None = None) -> Self:
281
+ """Deprecate ProcessingStep.
282
+
283
+ Returns:
284
+ This `ProcessingStep` object"""
285
+ self._raise_if_no_hco()
286
+ self.processing_step_hco.deprecate_ps_action.execute(
287
+ DeprecatePsActionParameters(reason=reason)
288
+ )
289
+ self.refresh()
290
+ return self
291
+
292
+ def is_deprecated(self) -> bool:
293
+ """Check if ProcessingStep is deprecated.
294
+
295
+ Returns:
296
+ True if deprecated, False otherwise.
297
+ """
298
+ self._raise_if_no_hco()
299
+ return self.processing_step_hco.is_deprecated
300
+
301
+ def restore(self) -> Self:
302
+ """Restore ProcessingStep.
303
+
304
+ Returns:
305
+ This `ProcessingStep` object"""
306
+ self._raise_if_no_hco()
307
+ self.processing_step_hco.restore_ps_action.execute()
308
+ self.refresh()
309
+ return self
310
+
311
+ def assign_code_hash(self, code_hash: str) -> Self:
312
+ """Assign a code hash to the ProcessingStep.
313
+
314
+ Args:
315
+ code_hash: The code hash to assign.
316
+
317
+ Returns:
318
+ This `ProcessingStep` object
319
+ """
320
+ self._raise_if_no_hco()
321
+ self.processing_step_hco.assign_code_hash_action.execute(
322
+ AssignCodeHashParameters(code_hash=code_hash)
323
+ )
324
+ self.refresh()
325
+ return self
326
+
327
+ def configure_deployment(
328
+ self,
329
+ *,
330
+ resource_preset: DeploymentResourcePresets,
331
+ entrypoint: str,
332
+ scaling: ScalingConfiguration
333
+ ) -> Self:
334
+ """Specify the desired deployment for this ProcessingStep.
335
+
336
+ Returns:
337
+ This `ProcessingStep` object
338
+ """
339
+ self._raise_if_no_hco()
340
+ self.processing_step_hco.configure_deployment_action.execute(
341
+ ConfigureDeploymentParameters(
342
+ resource_preset = resource_preset,
343
+ entrypoint = entrypoint,
344
+ scaling = scaling
345
+ )
346
+ )
347
+ self.refresh()
348
+ return self
349
+
350
+ def configure_external_deployment(self) -> Self:
351
+ """Specify this ProcessingStep to have an external deployment.
352
+
353
+ Returns:
354
+ This `ProcessingStep` object
355
+ """
356
+ self._raise_if_no_hco()
357
+ self.processing_step_hco.configure_external_deployment_action.execute()
358
+ self.refresh()
359
+ return self
360
+
361
+ def remove_deployment(self) -> Self:
362
+ """Remove the deployment for this ProcessingStep.
363
+
364
+ Returns:
365
+ This `ProcessingStep` object
366
+ """
367
+ self._raise_if_no_hco()
368
+ self.processing_step_hco.remove_deployment_action.execute()
369
+ self.refresh()
370
+ return self
371
+
372
+ def suspend_deployment(self) -> Self:
373
+ """Suspend the deployment for this ProcessingStep.
374
+
375
+ Returns:
376
+ This `ProcessingStep` object
377
+ """
378
+ self._raise_if_no_hco()
379
+ self.processing_step_hco.suspend_deployment_action.execute()
380
+ self.refresh()
381
+ return self
382
+
383
+ def resume_deployment(self) -> Self:
384
+ """Resume the deployment for this ProcessingStep.
385
+
386
+ Returns:
387
+ This `ProcessingStep` object
388
+ """
389
+ self._raise_if_no_hco()
390
+ self.processing_step_hco.resume_deployment_action.execute()
391
+ self.refresh()
392
+ return self
393
+
394
+ def clear_code_hash(self) -> Self:
395
+ """Clear the code hash of the ProcessingStep.
396
+
397
+ Returns:
398
+ This `ProcessingStep` object
399
+ """
400
+ self._raise_if_no_hco()
401
+ self.processing_step_hco.clear_code_hash_action.execute()
402
+ self.refresh()
403
+ return self
404
+
405
+ def get_deployment_state(self) -> DeploymentStates:
406
+ """Get the deployment state of the ProcessingStep.
407
+
408
+ Returns:
409
+ The deployment state
410
+ """
411
+ self._raise_if_no_hco()
412
+ return self.processing_step_hco.deployment_state
413
+
280
414
  def upload_configuration(self, json_data: Any) -> Self:
281
415
  """Upload processing configuration.
282
416