qoro-divi 0.3.1b0__py3-none-any.whl → 0.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

divi/qprog/optimizers.py CHANGED
@@ -2,74 +2,200 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+from abc import ABC, abstractmethod
+from collections.abc import Callable
 from enum import Enum
+from typing import Any
 
 import numpy as np
+from scipy.optimize import OptimizeResult, minimize
 
+from divi.exp.scipy._cobyla import _minimize_cobyla as cobyla_fn
 
-class Optimizer(Enum):
+
+class ScipyMethod(Enum):
     NELDER_MEAD = "Nelder-Mead"
     COBYLA = "COBYLA"
-    MONTE_CARLO = "Monte Carlo"
     L_BFGS_B = "L-BFGS-B"
 
-    def describe(self):
-        return self.name, self.value
 
+class Optimizer(ABC):
     @property
+    @abstractmethod
     def n_param_sets(self):
-        if self in (Optimizer.NELDER_MEAD, Optimizer.L_BFGS_B, Optimizer.COBYLA):
-            return 1
-        elif self == Optimizer.MONTE_CARLO:
-            return 10
+        """
+        Returns the number of parameter sets the optimizer can handle per optimization run.
+        Returns:
+            int: Number of parameter sets.
+        """
+        raise NotImplementedError("This method should be implemented by subclasses.")
+
+    @abstractmethod
+    def optimize(
+        self,
+        cost_fn: Callable[[np.ndarray], float],
+        initial_params: np.ndarray,
+        callback_fn: Callable | None = None,
+        **kwargs,
+    ) -> OptimizeResult:
+        """
+        Optimize the given cost function starting from initial parameters.
+
+        Parameters:
+            cost_fn: The cost function to minimize.
+            initial_params: Initial parameters for the optimization.
+            **kwargs: Additional keyword arguments for the optimizer.
+
+        Returns:
+            Optimized parameters.
+        """
+        raise NotImplementedError("This method should be implemented by subclasses.")
+
+
+class ScipyOptimizer(Optimizer):
+    def __init__(self, method: ScipyMethod):
+        self.method = method
 
     @property
-    def n_samples(self):
-        if self == Optimizer.MONTE_CARLO:
-            return 10
+    def n_param_sets(self):
         return 1
 
-    def compute_new_parameters(self, params, iteration, **kwargs):
-        if self != Optimizer.MONTE_CARLO:
-            raise NotImplementedError
-
-        rng = kwargs.pop("rng", np.random.default_rng())
-
-        losses = kwargs.pop("losses")
-        smallest_energy_keys = sorted(losses, key=lambda k: losses[k])[: self.n_samples]
+    def optimize(
+        self,
+        cost_fn: Callable[[np.ndarray], float],
+        initial_params: np.ndarray,
+        callback_fn: Callable | None = None,
+        **kwargs,
+    ):
+        max_iterations = kwargs.pop("maxiter", None)
+
+        if max_iterations is None or self.method == ScipyMethod.COBYLA:
+            # COBYLA perceive maxiter as maxfev so we need
+            # to use the callback fn for counting instead.
+            maxiter = None
+        else:
+            # Need to add one more iteration for Nelder-Mead's simplex initialization step
+            maxiter = (
+                max_iterations + 1
+                if self.method == ScipyMethod.NELDER_MEAD
+                else max_iterations
+            )
+
+        return minimize(
+            cost_fn,
+            initial_params.squeeze(),
+            method=(
+                cobyla_fn if self.method == ScipyMethod.COBYLA else self.method.value
+            ),
+            jac=(
+                kwargs.pop("jac", None) if self.method == ScipyMethod.L_BFGS_B else None
+            ),
+            callback=callback_fn,
+            options={"maxiter": maxiter},
+        )
+
+
+class MonteCarloOptimizer(Optimizer):
+    def __init__(self, n_param_sets: int = 10, n_best_sets: int = 3):
+        super().__init__()
+
+        if n_best_sets > n_param_sets:
+            raise ValueError("n_best_sets must be less than or equal to n_param_sets.")
+
+        self._n_param_sets = n_param_sets
+        self._n_best_sets = n_best_sets
+
+        # Calculate how many times each of the best sets should be repeated
+        samples_per_best = self.n_param_sets // self.n_best_sets
+        remainder = self.n_param_sets % self.n_best_sets
+        self._repeat_counts = np.full(self.n_best_sets, samples_per_best)
+        self._repeat_counts[:remainder] += 1
 
-        new_params = []
+    @property
+    def n_param_sets(self):
+        return self._n_param_sets
 
-        for key in smallest_energy_keys:
-            new_param_set = [
-                rng.normal(
-                    params[int(key)],
-                    1 / (2 * iteration),
-                    size=params[int(key)].shape,
-                )
-                for _ in range(self.n_param_sets)
-            ]
+    @property
+    def n_best_sets(self):
+        return self._n_best_sets
+
+    def _compute_new_parameters(
+        self,
+        params: np.ndarray,
+        curr_iteration: int,
+        best_indices: np.ndarray,
+        rng: np.random.Generator,
+    ) -> np.ndarray:
+        """
+        Generates a new population of parameters based on the best-performing ones.
+        """
+
+        # 1. Select the best parameter sets from the current population
+        best_params = params[best_indices]
+
+        # 2. Prepare the means for sampling by repeating each best parameter set
+        #    according to its assigned count
+        new_means = np.repeat(best_params, self._repeat_counts, axis=0)
+
+        # 3. Define the standard deviation (scale), which shrinks over iterations
+        scale = 1.0 / (2.0 * (curr_iteration + 1.0))
+
+        # 4. Generate all new parameters in a single vectorized call
+        new_params = rng.normal(loc=new_means, scale=scale)
+
+        # Apply periodic boundary conditions
+        return new_params % (2 * np.pi)
+
+    def optimize(
+        self,
+        cost_fn: Callable[[np.ndarray], float],
+        initial_params: np.ndarray,
+        callback_fn: Callable[[OptimizeResult], float | np.ndarray] | None = None,
+        **kwargs,
+    ) -> OptimizeResult:
+        """
+        Perform Monte Carlo optimization on the cost function.
+
+        Parameters:
+            cost_fn: The cost function to minimize.
+            initial_params: Initial parameters for the optimization.
+            callback_fn: Optional callback function to monitor progress.
+            **kwargs: Additional keyword arguments for the optimizer.
+        Returns:
+            Optimized parameters.
+        """
+        rng = kwargs.pop("rng", np.random.default_rng())
+        max_iterations = kwargs.pop("maxiter", 5)
 
-            for new_param in new_param_set:
-                new_param = np.clip(new_param, 0, 2 * np.pi)
+        population = np.copy(initial_params)
 
-            new_params.extend(new_param_set)
+        final_params = None
+        final_losses = None
 
-        return np.array(new_params)
+        for curr_iter in range(max_iterations):
+            # Evaluate the entire population once
+            losses = cost_fn(population)
 
-    def compute_parameter_shift_mask(self, n_params):
-        if self != Optimizer.L_BFGS_B:
-            raise NotImplementedError
+            # Find the indices of the best-performing parameter sets (only once)
+            best_indices = np.argpartition(losses, self.n_best_sets - 1)[
+                : self.n_best_sets
+            ]
 
-        mask_arr = np.arange(0, 2 * n_params, 2)
-        mask_arr[0] = 1
+            # Store the current best results
+            final_params = population[best_indices]
+            final_losses = losses[best_indices]
 
-        binary_matrix = (
-            (mask_arr[:, np.newaxis] & (1 << np.arange(n_params))) > 0
-        ).astype(np.float64)
+            if callback_fn:
+                callback_fn(OptimizeResult(x=final_params, fun=final_losses))
 
-        binary_matrix = binary_matrix.repeat(2, axis=0)
-        binary_matrix[1::2] *= -1
-        binary_matrix *= 0.5 * np.pi
+            # Generate the next generation of parameters
+            population = self._compute_new_parameters(
+                population, curr_iter, best_indices, rng
+            )
 
-        return binary_matrix
+        # Return the best results from the LAST EVALUATED population
+        return OptimizeResult(
+            x=final_params,
+            fun=final_losses,
+            nit=max_iterations,
+        )
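
For orientation, the refactor above replaces the old Optimizer enum with an Optimizer abstract base class and two concrete strategies. The sketch below is a hypothetical standalone use of the new classes, assuming MonteCarloOptimizer can be imported from divi.qprog.optimizers alongside ScipyMethod and ScipyOptimizer (the second file below only imports the latter two); the toy quadratic cost function and the parameter shapes are illustrative and not part of the package.

import numpy as np

from divi.qprog.optimizers import MonteCarloOptimizer, ScipyMethod, ScipyOptimizer

rng = np.random.default_rng(0)


def toy_cost(params):
    # Illustrative cost only: accepts a single parameter vector (scipy path)
    # or a 2-D population of parameter sets (Monte Carlo path).
    params = np.atleast_2d(params)
    losses = np.sum((params - np.pi) ** 2, axis=1)
    return losses if losses.size > 1 else losses.item()


# Scipy-backed path: a single parameter set, delegated to scipy.optimize.minimize.
scipy_opt = ScipyOptimizer(method=ScipyMethod.NELDER_MEAD)
x0 = rng.uniform(0, 2 * np.pi, size=(scipy_opt.n_param_sets, 4))
scipy_res = scipy_opt.optimize(toy_cost, x0, maxiter=50)

# Monte Carlo path: a population of sets; the best n_best_sets seed the next generation.
mc_opt = MonteCarloOptimizer(n_param_sets=10, n_best_sets=3)
pop0 = rng.uniform(0, 2 * np.pi, size=(mc_opt.n_param_sets, 4))
mc_res = mc_opt.optimize(toy_cost, pop0, maxiter=5, rng=rng)

print(scipy_res.fun, mc_res.fun.min())

The cost_fn built inside the run method of the second file follows the same contract: it returns a scalar for a single parameter vector and an array of losses for a 2-D population.
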
@@ -6,29 +6,59 @@ import logging
 import pickle
 from abc import ABC, abstractmethod
 from functools import partial
+from itertools import groupby
 from queue import Queue
 
 import numpy as np
 from pennylane.measurements import ExpectationMP
-from scipy.optimize import OptimizeResult, minimize
+from scipy.optimize import OptimizeResult
 
 from divi import QoroService
 from divi.circuits import Circuit, MetaCircuit
-from divi.exp.scipy._cobyla import _minimize_cobyla as cobyla_fn
 from divi.interfaces import CircuitRunner
 from divi.qem import _NoMitigation
 from divi.qoro_service import JobStatus
-from divi.qprog.optimizers import Optimizer
+from divi.qprog.optimizers import ScipyMethod, ScipyOptimizer
+from divi.reporter import LoggingProgressReporter, QueueProgressReporter
 
 logger = logging.getLogger(__name__)
 
 
+def _compute_parameter_shift_mask(n_params):
+    """
+    Generate a binary matrix mask for the parameter shift rule.
+    This mask is used to determine the shifts to apply to each parameter
+    when computing gradients via the parameter shift rule in quantum algorithms.
+
+    Args:
+        n_params (int): The number of parameters in the quantum circuit.
+
+    Returns:
+        np.ndarray: A (2 * n_params, n_params) matrix where each row encodes
+            the shift to apply to each parameter for a single evaluation.
+            The values are multiples of 0.5 * pi, with alternating signs.
+    """
+    mask_arr = np.arange(0, 2 * n_params, 2)
+    mask_arr[0] = 1
+
+    binary_matrix = ((mask_arr[:, np.newaxis] & (1 << np.arange(n_params))) > 0).astype(
+        np.float64
+    )
+
+    binary_matrix = binary_matrix.repeat(2, axis=0)
+    binary_matrix[1::2] *= -1
+    binary_matrix *= 0.5 * np.pi
+
+    return binary_matrix
+
+
 class QuantumProgram(ABC):
     def __init__(
         self,
         backend: CircuitRunner,
         seed: int | None = None,
         progress_queue: Queue | None = None,
+        has_final_computation: bool = False,
         **kwargs,
     ):
         """
@@ -49,6 +79,8 @@ class QuantumProgram(ABC):
                 be used for the parameter initialization.
                 Defaults to None.
             progress_queue (Queue): a queue for progress bar updates.
+            has_final_computation (bool): Whether the program includes a final
+                computation step after optimization. This affects progress reporting.
 
         **kwargs: Additional keyword arguments that influence behaviour.
             - grouping_strategy (Literal["default", "wires", "qwc"]): A strategy for grouping operations, used in Pennylane's transforms.
@@ -80,9 +112,15 @@
         self._grad_mode = False
 
         self.backend = backend
-        self.job_id = kwargs.get("job_id", None)
 
+        self.job_id = kwargs.get("job_id", None)
         self._progress_queue = progress_queue
+        if progress_queue and self.job_id:
+            self.reporter = QueueProgressReporter(
+                self.job_id, progress_queue, has_final_computation=has_final_computation
+            )
+        else:
+            self.reporter = LoggingProgressReporter()
 
         # Needed for Pennylane's transforms
         self._grouping_strategy = kwargs.pop("grouping_strategy", None)
@@ -134,27 +172,6 @@
 
         return losses
 
-    def _update_mc_params(self):
-        """
-        Updates the parameters based on previous MC iteration.
-        """
-
-        if self.current_iteration == 0:
-            self._initialize_params()
-
-            self.current_iteration += 1
-
-            return
-
-        self._curr_params = self.optimizer.compute_new_parameters(
-            self._curr_params,
-            self.current_iteration,
-            losses=self.losses[-1],
-            rng=self._rng,
-        )
-
-        self.current_iteration += 1
-
 
     def _prepare_and_send_circuits(self):
         job_circuits = {}
@@ -187,26 +204,25 @@
             if isinstance(response, dict):
                 self._total_run_time += float(response["run_time"])
             elif isinstance(response, list):
-                self._total_run_time += sum(float(r["run_time"]) for r in response)
+                self._total_run_time += sum(
+                    float(r.json()["run_time"]) for r in response
+                )
 
         if isinstance(self.backend, QoroService):
+            update_function = lambda n_polls, status: self.reporter.info(
+                message="",
+                poll_attempt=n_polls,
+                max_retries=self.backend.max_retries,
+                service_job_id=self._curr_service_job_id,
+                job_status=status,
+            )
+
             status = self.backend.poll_job_status(
                 self._curr_service_job_id,
                 loop_until_complete=True,
                 on_complete=add_run_time,
-                **(
-                    {
-                        "pbar_update_fn": lambda n_polls: self._progress_queue.put(
-                            {
-                                "job_id": self.job_id,
-                                "progress": 0,
-                                "poll_attempt": n_polls,
-                            }
-                        )
-                    }
-                    if self._progress_queue is not None
-                    else {}
-                ),
+                verbose=False,  # Disable the default logger in QoroService
+                poll_callback=update_function,  # Use the new, more generic name
            )
 
         if status != JobStatus.COMPLETED:
@@ -247,19 +263,25 @@
         losses = {}
         measurement_groups = self._meta_circuits["cost_circuit"].measurement_groups
 
-        for p in range(self._curr_params.shape[0]):
-            # Extract relevant entries from the execution results dict
-            param_results = {k: v for k, v in results.items() if k.startswith(f"{p}_")}
+        # Define key functions for both levels of grouping
+        get_param_id = lambda item: int(item[0].split("_")[0])
+        get_qem_id = lambda item: int(item[0].split("_")[1].split(":")[1])
 
-            # Compute the marginal results for each observable
-            marginal_results = []
-            for group_idx, curr_measurement_group in enumerate(measurement_groups):
-                group_results = {
-                    k: v
-                    for k, v in param_results.items()
-                    if k.endswith(f"_{group_idx}")
-                }
+        # Group the pre-sorted results by parameter ID.
+        for p, param_group_iterator in groupby(results.items(), key=get_param_id):
+            param_group_iterator = list(param_group_iterator)
 
+            shots_by_qem_idx = zip(
+                *{
+                    gid: [value for _, value in group]
+                    for gid, group in groupby(param_group_iterator, key=get_qem_id)
+                }.values()
+            )
+
+            marginal_results = []
+            for shots_dicts, curr_measurement_group in zip(
+                shots_by_qem_idx, measurement_groups
+            ):
                 curr_marginal_results = []
                 for observable in curr_measurement_group:
                     intermediate_exp_values = [
@@ -267,7 +289,7 @@
                             shots_dict,
                             tuple(reversed(range(len(next(iter(shots_dict.keys())))))),
                         )
-                        for shots_dict in group_results.values()
+                        for shots_dict in shots_dicts
                     ]
 
                     mitigated_exp_value = self._qem_protocol.postprocess_results(
@@ -301,170 +323,83 @@
             data_file (str): The file to store the data in
         """
 
-        if self._progress_queue is not None:
-            self._progress_queue.put(
-                {
-                    "job_id": self.job_id,
-                    "message": "Finished Setup",
-                    "progress": 0,
-                }
+        def cost_fn(params):
+            self.reporter.info(
+                message="💸 Computing Cost 💸", iteration=self.current_iteration
            )
-        else:
-            logger.info("Finished Setup")
-
-        if self.optimizer == Optimizer.MONTE_CARLO:
-            while self.current_iteration < self.max_iterations:
-
-                self._update_mc_params()
-
-                if self._progress_queue is not None:
-                    self._progress_queue.put(
-                        {
-                            "job_id": self.job_id,
-                            "message": f"⛰️ Sampling from Loss Lansdscape ⛰️",
-                            "progress": 0,
-                        }
-                    )
-                else:
-                    logger.info(
-                        f"Running Iteration #{self.current_iteration} circuits\r"
-                    )
 
-                curr_losses = self._run_optimization_circuits(store_data, data_file)
+            self._curr_params = np.atleast_2d(params)
 
-                if self._progress_queue is not None:
-                    self._progress_queue.put(
-                        {
-                            "job_id": self.job_id,
-                            "progress": 1,
-                        }
-                    )
-                else:
-                    logger.info(f"Finished Iteration #{self.current_iteration}\r\n")
+            losses = self._run_optimization_circuits(store_data, data_file)
 
-                self.losses.append(curr_losses)
+            losses = np.fromiter(losses.values(), dtype=np.float64)
 
-            self.final_params[:] = np.atleast_2d(self._curr_params)
+            if params.ndim > 1:
+                return losses
+            else:
+                return losses.item()
 
-        elif self.optimizer in (
-            Optimizer.NELDER_MEAD,
-            Optimizer.L_BFGS_B,
-            Optimizer.COBYLA,
-        ):
+        self._grad_shift_mask = _compute_parameter_shift_mask(
+            self.n_layers * self.n_params
+        )
 
-            def cost_fn(params):
-                task_name = "💸 Computing Cost 💸"
+        def grad_fn(params):
+            self._grad_mode = True
 
-                if self._progress_queue is not None:
-                    self._progress_queue.put(
-                        {
-                            "job_id": self.job_id,
-                            "message": task_name,
-                            "progress": 0,
-                        }
-                    )
-                else:
-                    logger.info(
-                        f"Running Iteration #{self.current_iteration + 1} circuits: {task_name}\r"
-                    )
+            self.reporter.info(
+                message="📈 Computing Gradients 📈", iteration=self.current_iteration
+            )
 
-                self._curr_params = np.atleast_2d(params)
+            self._curr_params = self._grad_shift_mask + params
 
-                losses = self._run_optimization_circuits(store_data, data_file)
+            exp_vals = self._run_optimization_circuits(store_data, data_file)
+            exp_vals_arr = np.fromiter(exp_vals.values(), dtype=np.float64)
 
-                return losses[0]
+            pos_shifts = exp_vals_arr[::2]
+            neg_shifts = exp_vals_arr[1::2]
+            grads = 0.5 * (pos_shifts - neg_shifts)
 
-            def grad_fn(params):
-                self._grad_mode = True
+            self._grad_mode = False
 
-                task_name = "📈 Computing Gradients 📈"
+            return grads
 
-                if self._progress_queue is not None:
-                    self._progress_queue.put(
-                        {
-                            "job_id": self.job_id,
-                            "message": task_name,
-                            "progress": 0,
-                        }
+        def _iteration_counter(intermediate_result: OptimizeResult):
+            self.losses.append(
+                dict(
+                    zip(
+                        range(len(intermediate_result.x)),
+                        np.atleast_1d(intermediate_result.fun),
                    )
-                else:
-                    logger.info(
-                        f"Running Iteration #{self.current_iteration + 1} circuits: {task_name}\r"
-                    )
-
-                shift_mask = self.optimizer.compute_parameter_shift_mask(len(params))
-
-                self._curr_params = shift_mask + params
-
-                exp_vals = self._run_optimization_circuits(store_data, data_file)
-
-                grads = np.zeros_like(params)
-                for i in range(len(params)):
-                    grads[i] = 0.5 * (exp_vals[2 * i] - exp_vals[2 * i + 1])
-
-                self._grad_mode = False
+                )
+            )
 
-                return grads
+            self.final_params[:] = np.atleast_2d(intermediate_result.x)
 
-            def _iteration_counter(intermediate_result: OptimizeResult):
-                self.losses.append({0: intermediate_result.fun})
+            self.current_iteration += 1
 
-                self.final_params[:] = np.atleast_2d(intermediate_result.x)
+            self.reporter.update(iteration=self.current_iteration)
 
-                self.current_iteration += 1
+            if (
+                isinstance(self.optimizer, ScipyOptimizer)
+                and self.optimizer.method == ScipyMethod.COBYLA
+                and intermediate_result.nit + 1 == self.max_iterations
+            ):
+                raise StopIteration
 
-                if self._progress_queue is not None:
-                    self._progress_queue.put(
-                        {
-                            "job_id": self.job_id,
-                            "progress": 1,
-                        }
-                    )
-                else:
-                    logger.info(f"Finished Iteration #{self.current_iteration}\r\n")
-
-                if (
-                    self.optimizer == Optimizer.COBYLA
-                    and intermediate_result.nit + 1 == self.max_iterations
-                ):
-                    raise StopIteration
-
-            if self.max_iterations is None or self.optimizer == Optimizer.COBYLA:
-                # COBYLA perceive maxiter as maxfev so we need
-                # to use the callback fn for counting instead.
-                maxiter = None
-            else:
-                # Need to add one more iteration for Nelder-Mead's simplex initialization step
-                maxiter = (
-                    self.max_iterations + 1
-                    if self.optimizer == Optimizer.NELDER_MEAD
-                    else self.max_iterations
-                )
+        self.reporter.info(message="Finished Setup")
 
-            self._initialize_params()
-            self._minimize_res = minimize(
-                fun=cost_fn,
-                x0=self._curr_params[0],
-                method=(
-                    cobyla_fn
-                    if self.optimizer == Optimizer.COBYLA
-                    else self.optimizer.value
-                ),
-                jac=grad_fn if self.optimizer == Optimizer.L_BFGS_B else None,
-                callback=_iteration_counter,
-                options={"maxiter": maxiter},
-            )
+        self._initialize_params()
+        self._minimize_res = self.optimizer.optimize(
+            cost_fn=cost_fn,
+            initial_params=self._curr_params,
+            callback_fn=_iteration_counter,
+            jac=grad_fn,
+            maxiter=self.max_iterations,
+            rng=self._rng,
+        )
+        self.final_params[:] = np.atleast_2d(self._minimize_res.x)
 
-        if self._progress_queue:
-            self._progress_queue.put(
-                {
-                    "job_id": self.job_id,
-                    "progress": 0,
-                    "final_status": "Success",
-                }
-            )
-        else:
-            logger.info(f"Finished Optimization!")
+        self.reporter.info(message="Finished Optimization!")
 
         return self._total_circuit_count, self._total_run_time
 
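
As a closing note on the gradient path: the module-level _compute_parameter_shift_mask introduced above pairs with the new grad_fn, which adds the mask to the current parameters, evaluates all shifted circuits in one batch, and takes half the difference of the interleaved positive/negative shifts. The snippet below is a small self-contained check of that bookkeeping for two parameters, with a toy analytic expectation value standing in for the circuit evaluation; only the helper itself is taken verbatim from the diff, everything else is illustrative.

import numpy as np


def _compute_parameter_shift_mask(n_params):
    # Copied from the diff above.
    mask_arr = np.arange(0, 2 * n_params, 2)
    mask_arr[0] = 1

    binary_matrix = ((mask_arr[:, np.newaxis] & (1 << np.arange(n_params))) > 0).astype(
        np.float64
    )

    binary_matrix = binary_matrix.repeat(2, axis=0)
    binary_matrix[1::2] *= -1
    binary_matrix *= 0.5 * np.pi

    return binary_matrix


def toy_expectation(theta):
    # Stand-in for a circuit expectation value; illustrative only.
    return np.cos(theta[0]) + np.sin(theta[1])


theta = np.array([0.3, 1.1])
mask = _compute_parameter_shift_mask(2)
# For n_params=2 the mask rows are:
# [[ pi/2, 0], [-pi/2, 0], [0, pi/2], [0, -pi/2]]

shifted = mask + theta  # same broadcasting as grad_fn's self._grad_shift_mask + params
exp_vals = np.array([toy_expectation(row) for row in shifted])
grads = 0.5 * (exp_vals[::2] - exp_vals[1::2])  # positive minus negative shifts

# Matches the analytic gradient [-sin(theta0), cos(theta1)] of the toy function.
assert np.allclose(grads, [-np.sin(theta[0]), np.cos(theta[1])])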