qoro-divi 0.3.4__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of qoro-divi might be problematic.

divi/qprog/batch.py CHANGED
@@ -5,11 +5,9 @@
  import atexit
  import traceback
  from abc import ABC, abstractmethod
- from concurrent.futures import ProcessPoolExecutor, as_completed
- from multiprocessing import Event, Manager
- from multiprocessing.synchronize import Event as EventClass
+ from concurrent.futures import Future, ThreadPoolExecutor, as_completed
  from queue import Empty, Queue
- from threading import Lock, Thread
+ from threading import Event, Lock, Thread
  from typing import Any
  from warnings import warn

@@ -21,11 +19,11 @@ from divi.qprog.quantum_program import QuantumProgram
  from divi.reporting import disable_logging, make_progress_bar


- def queue_listener(
+ def _queue_listener(
      queue: Queue,
      progress_bar: Progress,
      pb_task_map: dict[QuantumProgram, TaskID],
-     done_event: EventClass,
+     done_event: Event,
      is_jupyter: bool,
      lock: Lock,
  ):
@@ -60,6 +58,7 @@ def queue_listener(
              update_args["refresh"] = is_jupyter

              progress_bar.update(task_id, **update_args)
+             queue.task_done()


  def _default_task_function(program: QuantumProgram):
@@ -90,7 +89,7 @@ class ProgramBatch(ABC):
          self.backend = backend
          self._executor = None
          self._task_fn = _default_task_function
-         self.programs = {}
+         self._programs = {}

          self._total_circuit_count = 0
          self._total_run_time = 0.0
@@ -103,28 +102,100 @@ class ProgramBatch(ABC):

      @property
      def total_circuit_count(self):
+         """
+         Get the total number of circuits executed across all programs in the batch.
+
+         Returns:
+             int: Cumulative count of circuits submitted by all programs.
+         """
          return self._total_circuit_count

      @property
      def total_run_time(self):
+         """
+         Get the total runtime across all programs in the batch.
+
+         Returns:
+             float: Cumulative execution time in seconds across all programs.
+         """
          return self._total_run_time

+     @property
+     def programs(self) -> dict:
+         """
+         Get a copy of the programs dictionary.
+
+         Returns:
+             dict: Copy of the programs dictionary mapping program IDs to
+                 QuantumProgram instances. Modifications to this dict will not
+                 affect the internal state.
+         """
+         return self._programs.copy()
+
+     @programs.setter
+     def programs(self, value: dict):
+         """Set the programs dictionary."""
+         self._programs = value
+
      @abstractmethod
      def create_programs(self):
-         if len(self.programs) > 0:
+         """Generate and populate the programs dictionary for batch execution.
+
+         This method must be implemented by subclasses to create the quantum programs
+         that will be executed as part of the batch. The method operates via side effects:
+         it populates `self._programs` (or `self.programs`) with a dictionary mapping
+         program identifiers to `QuantumProgram` instances.
+
+         Implementation Notes:
+             - Subclasses should call `super().create_programs()` first to initialize
+               internal state (queue, events, etc.) and validate that no programs
+               already exist.
+             - After calling super(), subclasses should populate `self.programs` or
+               `self._programs` with their program instances.
+             - Program identifiers can be any hashable type (e.g., strings, tuples).
+               Common patterns include strings like "prog1", "prog2" or tuples like
+               ('A', 5) for partitioned problems.
+
+         Side Effects:
+             - Populates `self._programs` with program instances.
+             - Initializes `self._queue` for progress reporting.
+             - Initializes `self._done_event` if `max_iterations` attribute exists.
+
+         Raises:
+             RuntimeError: If programs already exist (should call `reset()` first).
+
+         Example:
+             >>> def create_programs(self):
+             ...     super().create_programs()
+             ...     self.programs = {
+             ...         "prog1": QAOA(...),
+             ...         "prog2": QAOA(...),
+             ...     }
+         """
+         if len(self._programs) > 0:
              raise RuntimeError(
                  "Some programs already exist. "
                  "Clear the program dictionary before creating new ones by using batch.reset()."
              )

-         self._manager = Manager()
-         self._queue = self._manager.Queue()
+         self._queue = Queue()

          if hasattr(self, "max_iterations"):
              self._done_event = Event()

      def reset(self):
-         self.programs.clear()
+         """
+         Reset the batch to its initial state.
+
+         Clears all programs, stops any running executors, terminates listener threads,
+         and stops progress bars. This allows the batch to be reused for a new set of
+         programs.
+
+         Note:
+             Any running programs will be forcefully stopped. Results from incomplete
+             programs will be lost.
+         """
+         self._programs.clear()

          # Stop any active executor
          if self._executor is not None:
@@ -143,12 +214,6 @@ class ProgramBatch(ABC):
                  warn("Listener thread did not terminate within timeout.")
              self._listener_thread = None

-         # Shut down the manager process, which handles the queue cleanup.
-         if hasattr(self, "_manager") and self._manager is not None:
-             self._manager.shutdown()
-             self._manager = None
-             self._queue = None
-
          # Stop the progress bar if it's still active
          if getattr(self, "_progress_bar", None) is not None:
              try:
@@ -168,8 +233,21 @@ class ProgramBatch(ABC):
              )
              self.reset()

-     def add_program_to_executor(self, program):
-         self.futures.append(self._executor.submit(self._task_fn, program))
+     def _add_program_to_executor(self, program: QuantumProgram) -> Future:
+         """
+         Add a quantum program to the thread pool executor for execution.
+
+         Sets up the program with cancellation support and progress tracking, then
+         submits it for execution in a separate thread.
+
+         Args:
+             program (QuantumProgram): The quantum program to execute.
+
+         Returns:
+             Future: A Future object representing the program's execution.
+         """
+         if hasattr(program, "_set_cancellation_event"):
+             program._set_cancellation_event(self._cancellation_event)

          if self._progress_bar is not None:
              with self._pb_lock:
@@ -178,17 +256,39 @@ class ProgramBatch(ABC):
                      job_name=f"Job {program.job_id}",
                      total=self.max_iterations,
                      completed=0,
-                     poll_attempt=0,
                      message="",
-                     final_status="",
                      mode=("simulation" if self._is_local else "network"),
                  )

+         return self._executor.submit(self._task_fn, program)
+
      def run(self, blocking: bool = False):
+         """
+         Execute all programs in the batch.
+
+         Starts all quantum programs in parallel using a thread pool. Can run in
+         blocking or non-blocking mode.
+
+         Args:
+             blocking (bool, optional): If True, waits for all programs to complete
+                 before returning. If False, returns immediately and programs run in
+                 the background. Defaults to False.
+
+         Returns:
+             ProgramBatch: Returns self for method chaining.
+
+         Raises:
+             RuntimeError: If a batch is already running or if no programs have been
+                 created.
+
+         Note:
+             In non-blocking mode, call `join()` later to wait for completion and
+             collect results.
+         """
          if self._executor is not None:
              raise RuntimeError("A batch is already being run.")

-         if len(self.programs) == 0:
+         if len(self._programs) == 0:
              raise RuntimeError("No programs to run.")

          self._progress_bar = (
@@ -197,15 +297,17 @@ class ProgramBatch(ABC):
              else None
          )

-         self._executor = ProcessPoolExecutor()
+         self._executor = ThreadPoolExecutor()
+         self._cancellation_event = Event()
          self.futures = []
+         self._future_to_program = {}
          self._pb_task_map = {}
          self._pb_lock = Lock()

          if self._progress_bar is not None:
              self._progress_bar.start()
              self._listener_thread = Thread(
-                 target=queue_listener,
+                 target=_queue_listener,
                  args=(
                      self._queue,
                      self._progress_bar,
@@ -218,8 +320,10 @@ class ProgramBatch(ABC):
              )
              self._listener_thread.start()

-         for program in self.programs.values():
-             self.add_program_to_executor(program)
+         for program in self._programs.values():
+             future = self._add_program_to_executor(program)
+             self.futures.append(future)
+             self._future_to_program[future] = program

          if not blocking:
              # Arm safety net
@@ -229,60 +333,175 @@ class ProgramBatch(ABC):

          return self

-     def check_all_done(self):
+     def check_all_done(self) -> bool:
+         """
+         Check if all programs in the batch have completed execution.
+
+         Returns:
+             bool: True if all programs are finished (successfully or with errors),
+                 False if any are still running.
+         """
          return all(future.done() for future in self.futures)

+     def _collect_completed_results(self, completed_futures: list):
+         """
+         Collects results from any futures that have completed successfully.
+         Appends (circuit_count, run_time) tuples to the completed_futures list.
+
+         Args:
+             completed_futures: List to append results to
+         """
+         for future in self.futures:
+             if future.done() and not future.cancelled():
+                 try:
+                     completed_futures.append(future.result())
+                 except Exception:
+                     pass # Skip failed futures
+
+     def _handle_cancellation(self):
+         """
+         Handles cancellation gracefully, providing accurate feedback by checking
+         the result of future.cancel().
+         """
+         self._cancellation_event.set()
+
+         successfully_cancelled = []
+         unstoppable_futures = []
+
+         # --- Phase 1: Attempt to cancel all non-finished tasks ---
+         for future, program in self._future_to_program.items():
+             if future.done():
+                 continue
+
+             task_id = self._pb_task_map.get(program.job_id)
+             if self._progress_bar and task_id is not None:
+                 cancel_result = future.cancel()
+                 if cancel_result:
+                     # The task was pending and was successfully cancelled.
+                     successfully_cancelled.append(program)
+                 else:
+                     # The task is already running and cannot be stopped.
+                     unstoppable_futures.append(future)
+                     self._progress_bar.update(
+                         task_id,
+                         message="Finishing... ⏳",
+                         refresh=self._is_jupyter,
+                     )
+
+         # --- Phase 2: Immediately mark the successfully cancelled tasks ---
+         for program in successfully_cancelled:
+             task_id = self._pb_task_map.get(program.job_id)
+             if self._progress_bar and task_id is not None:
+                 self._progress_bar.update(
+                     task_id,
+                     final_status="Cancelled",
+                     message="Cancelled by user",
+                     refresh=self._is_jupyter,
+                 )
+
+         # --- Phase 3: Wait for the unstoppable tasks to finish ---
+         if unstoppable_futures:
+             for future in as_completed(unstoppable_futures):
+                 program = self._future_to_program[future]
+                 task_id = self._pb_task_map.get(program.job_id)
+                 if self._progress_bar and task_id is not None:
+                     self._progress_bar.update(
+                         task_id,
+                         final_status="Aborted",
+                         message="Completed during cancellation",
+                         refresh=self._is_jupyter,
+                     )
+
      def join(self):
+         """
+         Wait for all programs in the batch to complete and collect results.
+
+         Blocks until all programs finish execution, aggregating their circuit counts
+         and run times. Handles keyboard interrupts gracefully by attempting to cancel
+         remaining programs.
+
+         Returns:
+             bool or None: Returns False if interrupted by KeyboardInterrupt, None otherwise.
+
+         Raises:
+             RuntimeError: If any program fails with an exception, after cancelling
+                 remaining programs.
+
+         Note:
+             This method should be called after `run(blocking=False)` to wait for
+             completion. It's automatically called when using `run(blocking=True)`.
+         """
          if self._executor is None:
              return

-         exceptions = []
+         completed_futures = []
          try:
-             # Ensure all futures are completed and handle exceptions.
+             # The as_completed iterator will yield futures as they finish.
+             # If a task fails, future.result() will raise the exception immediately.
              for future in as_completed(self.futures):
-                 try:
-                     future.result() # Raises an exception if the task failed.
-                 except Exception as e:
-                     exceptions.append(e)
+                 completed_futures.append(future.result())
+
+         except KeyboardInterrupt:
+
+             if self._progress_bar is not None:
+                 self._progress_bar.console.print(
+                     "[bold yellow]Shutdown signal received, waiting for programs to finish current iteration...[/bold yellow]"
+                 )
+             self._handle_cancellation()
+
+             # Collect results from any futures that completed before/during cancellation
+             self._collect_completed_results(completed_futures)
+
+             return False
+
+         except Exception as e:
+             # A task has failed. Print the error and cancel the rest.
+             print(f"A task failed with an exception. Cancelling remaining tasks...")
+             traceback.print_exception(type(e), e, e.__traceback__)
+
+             # Collect results from any futures that completed before the failure
+             self._collect_completed_results(completed_futures)
+
+             # Cancel all other futures that have not yet completed.
+             for f in self.futures:
+                 f.cancel()
+
+             # Re-raise a new error to indicate the batch failed.
+             raise RuntimeError("Batch execution failed and was cancelled.") from e

          finally:
-             self._executor.shutdown(wait=True, cancel_futures=False)
+             # Aggregate results from completed futures
+             if completed_futures:
+                 self._total_circuit_count += sum(
+                     result[0] for result in completed_futures
+                 )
+                 self._total_run_time += sum(result[1] for result in completed_futures)
+             self.futures.clear()
+
+             self._executor.shutdown(wait=False)
              self._executor = None

              if self._progress_bar is not None:
+                 self._queue.join()
                  self._done_event.set()
                  self._listener_thread.join()
-
-         if exceptions:
-             for i, exc in enumerate(exceptions, 1):
-                 print(f"Task {i} failed with exception:")
-                 traceback.print_exception(type(exc), exc, exc.__traceback__)
-             raise RuntimeError("One or more tasks failed. Check logs for details.")
-
-         if self._progress_bar is not None:
-             self._progress_bar.stop()
-
-         self._total_circuit_count += sum(future.result()[0] for future in self.futures)
-         self._total_run_time += sum(future.result()[1] for future in self.futures)
-         self.futures.clear()
+                 self._progress_bar.stop()

          # After successful cleanup, try to unregister the hook.
-         # This will only succeed if it was a non-blocking run.
          try:
              atexit.unregister(self._atexit_cleanup_hook)
          except TypeError:
-             # This is expected for blocking runs where the hook was never registered.
              pass

      @abstractmethod
      def aggregate_results(self):
-         if len(self.programs) == 0:
+         if len(self._programs) == 0:
              raise RuntimeError("No programs to aggregate. Run create_programs() first.")

          if self._executor is not None:
              self.join()

-         if any(len(program.losses) == 0 for program in self.programs.values()):
+         if any(len(program.losses_history) == 0 for program in self._programs.values()):
              raise RuntimeError(
                  "Some/All programs have empty losses. Did you call run()?"
              )
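
The batch.py changes above replace the process-based executor with a `ThreadPoolExecutor`, drop the `multiprocessing` Manager in favor of `queue.Queue` and `threading.Event`, expose `programs` as a copying property over the new `_programs` attribute, and rework `join()` so that it aggregates `(circuit_count, run_time)` tuples in a `finally` block and returns `False` on `KeyboardInterrupt`. A minimal driver sketch of the resulting call pattern follows; `MyBatch`, its constructor arguments, and the elided program objects are illustrative assumptions, not part of the API shown in this diff.

# Hypothetical driver for the thread-based ProgramBatch API (a sketch, not an
# example shipped with qoro-divi); program construction is elided with `...`.
from divi.qprog.batch import ProgramBatch


class MyBatch(ProgramBatch):  # illustrative subclass name
    def create_programs(self):
        super().create_programs()  # sets up self._queue / self._done_event, rejects leftovers
        self.programs = {"prog1": ..., "prog2": ...}  # QuantumProgram instances go here

    def aggregate_results(self):
        super().aggregate_results()  # joins a running batch, checks losses_history
        return {pid: prog.losses_history[-1] for pid, prog in self.programs.items()}


batch = MyBatch(backend=...)  # constructor arguments assumed
batch.create_programs()
batch.run(blocking=False)     # each program runs in a ThreadPoolExecutor thread
finished = batch.join()       # returns False if interrupted with Ctrl+C
print(batch.total_circuit_count, batch.total_run_time)

Because `programs` now returns a copy, code that previously mutated `batch.programs` in place must assign a whole dictionary through the setter instead. The final hunk below comes from a newly added module (its file path is not preserved in this diff view) that defines `_CancelledError`.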
@@ -0,0 +1,9 @@
1
+ # SPDX-FileCopyrightText: 2025 Qoro Quantum Ltd <divi@qoroquantum.de>
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+
5
+
6
+ class _CancelledError(Exception):
7
+ """Internal exception to signal a task to stop due to cancellation."""
8
+
9
+ pass
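
`_CancelledError` pairs with the cancellation wiring in batch.py: `_add_program_to_executor` hands each program the shared `threading.Event` through `_set_cancellation_event` whenever that hook exists, and `_handle_cancellation()` sets the event, cancels still-pending futures, and waits for running ones to finish their current iteration, since a `ThreadPoolExecutor` cannot kill a running thread. How a program reacts to the event is not part of this diff; the sketch below is a plausible cooperative pattern under that assumption, not the library's actual implementation.

import threading


class _CancelledError(Exception):
    """Internal exception to signal a task to stop due to cancellation."""


class CooperativeTask:
    """Illustrative stand-in for a program that honors the cancellation hook."""

    def __init__(self):
        self._cancellation_event = None

    def _set_cancellation_event(self, event: threading.Event):
        # ProgramBatch._add_program_to_executor calls this if the attribute exists.
        self._cancellation_event = event

    def run(self, max_iterations: int = 10):
        for i in range(max_iterations):
            if self._cancellation_event is not None and self._cancellation_event.is_set():
                # Stop between iterations rather than mid-submission.
                raise _CancelledError(f"stopped after {i} iterations")
            # ... one optimization iteration would run here ...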