qadence-1.10.3-py3-none-any.whl → qadence-1.11.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
qadence/ml_tools/trainer.py CHANGED
@@ -4,14 +4,11 @@ import copy
 from itertools import islice
 from logging import getLogger
 from typing import Any, Callable, Iterable, cast
-
-import torch
 from nevergrad.optimization.base import Optimizer as NGOptimizer
-from rich.progress import BarColumn, Progress, TaskProgressColumn, TextColumn, TimeRemainingColumn
-from torch import complex128, float32, float64, nn, optim
-from torch import device as torch_device
-from torch import dtype as torch_dtype
+import torch
+from torch import nn, optim
 from torch.utils.data import DataLoader
+from rich.progress import BarColumn, Progress, TaskProgressColumn, TextColumn, TimeRemainingColumn
 
 from qadence.ml_tools.config import TrainConfig
 from qadence.ml_tools.data import DictDataLoader, OptimizeResult, data_to_device
@@ -20,6 +17,7 @@ from qadence.ml_tools.optimize_step import optimize_step, update_ng_parameters
 from qadence.ml_tools.stages import TrainingStage
 
 from .train_utils.base_trainer import BaseTrainer
+from .train_utils.accelerator import Accelerator
 
 logger = getLogger("ml_tools")
 
@@ -38,11 +36,6 @@ class Trainer(BaseTrainer):
     Attributes:
         current_epoch (int): The current epoch number.
        global_step (int): The global step across all epochs.
-        log_device (str): Device for logging, default is "cpu".
-        device (torch_device): Device used for computation.
-        dtype (torch_dtype | None): Data type used for computation.
-        data_dtype (torch_dtype | None): Data type for data.
-            Depends on the model's data type.
 
     Inherited Attributes:
         use_grad (bool): Indicates if gradients are used for optimization. Default is True.
@@ -240,8 +233,6 @@ class Trainer(BaseTrainer):
         val_dataloader: DataLoader | DictDataLoader | None = None,
         test_dataloader: DataLoader | DictDataLoader | None = None,
         optimize_step: Callable = optimize_step,
-        device: torch_device | None = None,
-        dtype: torch_dtype | None = None,
         max_batches: int | None = None,
     ):
         """
@@ -257,8 +248,6 @@ class Trainer(BaseTrainer):
             val_dataloader (DataLoader | DictDataLoader | None): DataLoader for validation data.
             test_dataloader (DataLoader | DictDataLoader | None): DataLoader for test data.
             optimize_step (Callable): Function to execute an optimization step.
-            device (torch_device): Device to use for computation.
-            dtype (torch_dtype): Data type for computation.
             max_batches (int | None): Maximum number of batches to process per epoch.
                 This is only valid in case of finite TensorDataset dataloaders.
                 if max_batches is not None, the maximum number of batches used will
@@ -278,13 +267,21 @@ class Trainer(BaseTrainer):
         )
         self.current_epoch: int = 0
         self.global_step: int = 0
-        self.log_device: str = "cpu" if device is None else device
-        self.device: torch_device | None = device
-        self.dtype: torch_dtype | None = dtype
-        self.data_dtype: torch_dtype | None = None
-        self.stop_training: bool = False
-        if self.dtype:
-            self.data_dtype = float64 if (self.dtype == complex128) else float32
+        self._stop_training: torch.Tensor = torch.tensor(0, dtype=torch.int)
+        self.progress: Progress | None = None
+
+        # Integration with Accelerator:
+        self.accelerator = Accelerator(
+            backend=config.backend,
+            nprocs=config.nprocs,
+            compute_setup=config.compute_setup,
+            dtype=config.dtype,
+            log_setup=config.log_setup,
+        )
+        # Decorate the unbound Trainer.fit method with accelerator.distribute.
+        # We use __get__ to bind the decorated method to the current instance,
+        # ensuring that 'self' is passed only once when self.fit is called.
+        self.fit = self.accelerator.distribute(Trainer.fit).__get__(self, Trainer)  # type: ignore[method-assign]
 
     def fit(
         self,
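The `__get__` rebinding above is subtle: `accelerator.distribute` wraps the unbound `Trainer.fit` function, and `__get__` then binds the wrapped function to the instance so that `self` is supplied exactly once per call. A minimal, self-contained sketch of the same pattern (illustrative names only, not qadence API):

```python
# Decorate an unbound method, then rebind it to an instance via __get__.
from typing import Callable


def exclaim(fn: Callable) -> Callable:
    def wrapper(self) -> str:
        return fn(self) + "!"

    return wrapper


class Greeter:
    def hello(self) -> str:
        return "hello"


g = Greeter()
# Wrap the unbound Greeter.hello, then bind the wrapper to `g`:
g.hello = exclaim(Greeter.hello).__get__(g, Greeter)
assert g.hello() == "hello!"
```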
@@ -323,26 +320,30 @@
         The callback_manager.start_training takes care of loading checkpoint,
         and setting up the writer.
         """
-        self.stop_training = False
-        self.config_manager.initialize_config()
+        self._stop_training = torch.tensor(
+            0, dtype=torch.int, device=self.accelerator.execution.device
+        )
+        # Initialize config in the first process, and broadcast it to all processes.
+        if self.accelerator.rank == 0:
+            self.config_manager.initialize_config()
+        self.config_manager = self.accelerator.broadcast(self.config_manager, src=0)
         self.callback_manager.start_training(trainer=self)
 
-        # Move model to device
-        if isinstance(self.model, nn.DataParallel):
-            self.model = self.model.module.to(device=self.device, dtype=self.dtype)
-        else:
-            self.model = self.model.to(device=self.device, dtype=self.dtype)
-
-        # Progress bar for training visualization
-        self.progress: Progress = Progress(
-            TextColumn("[progress.description]{task.description}"),
-            BarColumn(),
-            TaskProgressColumn(),
-            TimeRemainingColumn(elapsed_when_finished=True),
+        # Integration with Accelerator: prepare the model, optimizer, and dataloaders.
+        (self.model, self.optimizer, self.train_dataloader, self.val_dataloader) = (
+            self.accelerator.prepare(
+                self.model, self.optimizer, self.train_dataloader, self.val_dataloader
+            )
         )
 
-        # Quick Fix for iteration 0
-        self._reset_model_and_opt()
+        # Progress bar for training visualization
+        if self.accelerator.world_size == 1:
+            self.progress = Progress(
+                TextColumn("[progress.description]{task.description}"),
+                BarColumn(),
+                TaskProgressColumn(),
+                TimeRemainingColumn(elapsed_when_finished=True),
+            )
 
         # Run validation at the start if specified in the configuration
         self.perform_val = self.config.val_every > 0
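Initializing the config on rank 0 only and then broadcasting keeps file-system side effects, such as log-directory creation, on a single process. A rough sketch of the same pattern with plain `torch.distributed` (the `Accelerator.broadcast` call above presumably wraps a similar collective; `broadcast_from_rank0` is an illustrative helper, not qadence API):

```python
# Share a picklable object created on rank 0 with every rank.
# Assumes an initialized process group (e.g. launched via torchrun).
from typing import Any

import torch.distributed as dist


def broadcast_from_rank0(obj: Any) -> Any:
    box = [obj if dist.get_rank() == 0 else None]
    dist.broadcast_object_list(box, src=0)  # pickles and ships the object
    return box[0]
```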
@@ -356,7 +357,11 @@
     @BaseTrainer.callback("train")
     def _train(self) -> list[list[tuple[torch.Tensor, dict[str, Any]]]]:
         """
-        Runs the main training loop, iterating over epochs.
+        Runs the main training loop over multiple epochs.
+
+        This method sets up the training process by performing any necessary pre-training
+        actions (via `on_train_start`), configuring progress tracking (if available), and then
+        iteratively calling `_train_epochs` to run through the epochs.
 
         Returns:
             list[list[tuple[torch.Tensor, dict[str, Any]]]]: Training loss
@@ -365,45 +370,97 @@
             Epochs -> Training Batches -> (loss, metrics)
         """
         self.on_train_start()
-        train_losses = []
-        val_losses = []
+        epoch_start, epoch_end = (
+            self.global_step,
+            self.global_step + self.config_manager.config.max_iter + 1,
+        )
 
-        with self.progress:
-            train_task = self.progress.add_task(
-                "Training", total=self.config_manager.config.max_iter
-            )
-            if self.perform_val:
-                val_task = self.progress.add_task(
-                    "Validation",
-                    total=(self.config_manager.config.max_iter + 1) / self.config.val_every,
+        if self.accelerator.world_size == 1 and self.progress:
+            # Progress setup is only available for non-spawned training.
+            with self.progress:
+                train_task = self.progress.add_task(
+                    "Training", total=self.config_manager.config.max_iter
+                )
+                if self.perform_val:
+                    val_task = self.progress.add_task(
+                        "Validation",
+                        total=(self.config_manager.config.max_iter + 1) / self.config.val_every,
+                    )
+                else:
+                    val_task = None
+                train_losses, val_losses = self._train_epochs(
+                    epoch_start, epoch_end, train_task, val_task
                 )
-            for epoch in range(
-                self.global_step, self.global_step + self.config_manager.config.max_iter + 1
-            ):
-                if not self.stop_training:
-                    try:
-                        self.current_epoch = epoch
-                        self.on_train_epoch_start()
-                        train_epoch_loss_metrics = self.run_training(self.train_dataloader)
-                        train_losses.append(train_epoch_loss_metrics)
-                        self.on_train_epoch_end(train_epoch_loss_metrics)
-
-                        # Run validation periodically if specified
-                        if self.perform_val and self.current_epoch % self.config.val_every == 0:
-                            self.on_val_epoch_start()
-                            val_epoch_loss_metrics = self.run_validation(self.val_dataloader)
-                            val_losses.append(val_epoch_loss_metrics)
-                            self.on_val_epoch_end(val_epoch_loss_metrics)
-                            self.progress.update(val_task, advance=1)
-
-                        self.progress.update(train_task, advance=1)
-                    except KeyboardInterrupt:
-                        logger.info("Terminating training gracefully after the current iteration.")
-                        break
+        else:
+            train_losses, val_losses = self._train_epochs(epoch_start, epoch_end)
 
         self.on_train_end(train_losses, val_losses)
         return train_losses
 
+    def _train_epochs(
+        self,
+        epoch_start: int,
+        epoch_end: int,
+        train_task: int | None = None,
+        val_task: int | None = None,
+    ) -> tuple[
+        list[list[tuple[torch.Tensor, dict[str, Any]]]],
+        list[list[tuple[torch.Tensor, dict[str, Any]]]],
+    ]:
+        """
+        Executes the training loop for a series of epochs.
+
+        Args:
+            epoch_start (int): The starting epoch index.
+            epoch_end (int): The ending epoch index (non-inclusive).
+            train_task (int | None, optional): The progress bar task ID for training updates.
+                If provided, the progress bar will be updated after each epoch. Defaults to None.
+            val_task (int | None, optional): The progress bar task ID for validation updates.
+                If provided and validation is enabled, the progress bar will be updated after each validation run.
+                Defaults to None.
+
+        Returns:
+            tuple: A pair of training and validation loss metrics:
+                training loss metrics for all epochs,
+                    list -> list -> tuples
+                    Epochs -> Training Batches -> (loss, metrics)
+                and validation loss metrics for all epochs,
+                    list -> list -> tuples
+                    Epochs -> Validation Batches -> (loss, metrics)
+        """
+        train_losses = []
+        val_losses = []
+
+        # Iterate over the epochs
+        for epoch in range(epoch_start, epoch_end):
+            if not self.stop_training():
+                try:
+                    self.current_epoch = epoch
+                    self.on_train_epoch_start()
+                    train_epoch_loss_metrics = self.run_training(self.train_dataloader)
+                    train_losses.append(train_epoch_loss_metrics)
+                    self.on_train_epoch_end(train_epoch_loss_metrics)
+
+                    # Run validation periodically if specified
+                    if self.perform_val and (epoch % self.config.val_every == 0):
+                        self.on_val_epoch_start()
+                        val_epoch_loss_metrics = self.run_validation(self.val_dataloader)
+                        val_losses.append(val_epoch_loss_metrics)
+                        self.on_val_epoch_end(val_epoch_loss_metrics)
+                        if val_task is not None:
+                            self.progress.update(val_task, advance=1)  # type: ignore[union-attr]
+
+                    if train_task is not None:
+                        self.progress.update(train_task, advance=1)  # type: ignore[union-attr]
+                except KeyboardInterrupt:
+                    self._stop_training.fill_(1)
+            else:
+                if self.accelerator.rank == 0:
+                    logger.info("Terminating training gracefully after the current iteration.")
+                self.accelerator.finalize()
+                break
+        return train_losses, val_losses
+
     @BaseTrainer.callback("train_epoch")
     def run_training(self, dataloader: DataLoader) -> list[tuple[torch.Tensor, dict[str, Any]]]:
         """
@@ -419,12 +476,12 @@
         """
         self.model.train()
         train_epoch_loss_metrics = []
-        # Quick Fix for iteration 0
-        self._reset_model_and_opt()
 
         for batch in self._batch_iter(dataloader, self.num_training_batches):
             self.on_train_batch_start(batch)
             train_batch_loss_metrics = self.run_train_batch(batch)
+            if self.config.all_reduce_metrics:
+                train_batch_loss_metrics = self._aggregate_result(train_batch_loss_metrics)
             train_epoch_loss_metrics.append(train_batch_loss_metrics)
             self.on_train_batch_end(train_batch_loss_metrics)
 
@@ -458,8 +515,8 @@
                 optimizer=self.optimizer,
                 loss_fn=self.loss_fn,
                 xs=batch,
-                device=self.device,
-                dtype=self.data_dtype,
+                device=self.accelerator.execution.device,
+                dtype=self.accelerator.execution.data_dtype,
             )
         else:
             # Perform optimization using Nevergrad
@@ -494,6 +551,8 @@
         for batch in self._batch_iter(dataloader, self.num_validation_batches):
             self.on_val_batch_start(batch)
             val_batch_loss_metrics = self.run_val_batch(batch)
+            if self.config.all_reduce_metrics:
+                val_batch_loss_metrics = self._aggregate_result(val_batch_loss_metrics)
             val_epoch_loss_metrics.append(val_batch_loss_metrics)
             self.on_val_batch_end(val_batch_loss_metrics)
 
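With `all_reduce_metrics` enabled, per-rank losses and metrics are reduced so that every process reports the same numbers. A sketch of such an aggregation with plain `torch.distributed` (the real reduction lives in `Accelerator.all_reduce_dict`; `all_reduce_mean` is an illustrative helper):

```python
# Average a dict of scalar tensors across all ranks.
# Assumes an initialized process group and tensors on the correct device.
import torch
import torch.distributed as dist


def all_reduce_mean(values: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
    world_size = dist.get_world_size()
    reduced = {}
    for name, value in values.items():
        buf = value.detach().clone()
        dist.all_reduce(buf, op=dist.ReduceOp.SUM)  # sum over ranks
        reduced[name] = buf / world_size  # then average
    return reduced
```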
@@ -568,6 +627,9 @@
         """
         Yields batches from the provided dataloader.
 
+        Each batch is also moved to the correct device and dtype
+        via accelerator.prepare_batch.
+
         Args:
             dataloader ([DataLoader]): The dataloader to iterate over.
             num_batches (int): The maximum number of batches to yield.
@@ -581,9 +643,7 @@
             yield None
         else:
             for batch in islice(dataloader, num_batches):
-                # batch is moved to device inside optimize step
-                # batch = data_to_device(batch, device=self.device, dtype=self.data_dtype)
-                yield batch
+                yield self.accelerator.prepare_batch(batch)
 
     def _modify_batch_end_loss_metrics(
         self, loss_metrics: tuple[torch.Tensor, dict[str, Any]]
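`prepare_batch` replaces the per-step `data_to_device` calls: each batch is moved to the execution device and dtype once, as it is yielded. A generic sketch of what such a helper typically does (illustrative only, not qadence's implementation):

```python
# Recursively move tensors in a possibly nested batch to a device/dtype,
# leaving non-tensor leaves untouched. Real code would likely skip dtype
# casts for integer label tensors.
from __future__ import annotations

from typing import Any

import torch


def move_batch(batch: Any, device: torch.device, dtype: torch.dtype | None = None) -> Any:
    if isinstance(batch, torch.Tensor):
        return batch.to(device=device, dtype=dtype)
    if isinstance(batch, (list, tuple)):
        return type(batch)(move_batch(b, device, dtype) for b in batch)
    if isinstance(batch, dict):
        return {k: move_batch(v, device, dtype) for k, v in batch.items()}
    return batch
```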
@@ -609,27 +669,43 @@
             return loss, updated_metrics
         return loss_metrics
 
-    def _reset_model_and_opt(self) -> None:
+    def _aggregate_result(
+        self, result: tuple[torch.Tensor, dict[str, Any]]
+    ) -> tuple[torch.Tensor, dict[str, Any]]:
         """
-        Save model_old and optimizer_old for epoch 0.
+        Aggregates the loss and metrics using the Accelerator's all_reduce_dict method if aggregation is enabled.
 
-        This allows us to create a copy of model
-        and optimizer before running the optimization.
+        Args:
+            result (tuple[torch.Tensor, dict[str, Any]]):
+                The result consisting of loss and metrics. For more details,
+                look at the signature of build_optimize_result.
+
+        Returns:
+            tuple[torch.Tensor, dict[str, Any]]: The aggregated loss and metrics.
+        """
+        loss, metrics = result
+        if self.config.all_reduce_metrics:
+            reduced = self.accelerator.all_reduce_dict({"loss": loss, **metrics})
+            loss = reduced.pop("loss")
+            metrics = reduced
+            return loss, metrics
+        else:
+            return loss, metrics
 
-        We do this because optimize step provides loss, metrics
-        before step of optimization
-        To align them with model/optimizer correctly, we checkpoint
-        the older copy of the model.
+    def stop_training(self) -> bool:
         """
+        Helper function indicating whether training should be stopped.
+
+        We all_reduce the indicator across all processes to ensure all processes stop together.
 
-        # TODO: review optimize_step to provide iteration aligned model and loss.
-        try:
-            # Deep copy model and optimizer to maintain checkpoints
-            self.model_old = copy.deepcopy(self.model)
-            self.optimizer_old = copy.deepcopy(self.optimizer)
-        except Exception:
-            self.model_old = self.model
-            self.optimizer_old = self.optimizer
+        Notes:
+            The self._stop_training tensor holds the stop flag:
+            0 means continue, 1 means stop.
+        """
+        _stop_training = self.accelerator.all_reduce_dict(
+            {"indicator": self._stop_training}, op="max"
+        )
+        return bool(_stop_training["indicator"] > 0)
 
     def build_optimize_result(
         self,
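The stop flag is an integer tensor rather than a Python bool precisely so it can participate in a collective: if any rank catches a `KeyboardInterrupt` and sets its flag to 1, the max-reduction in `stop_training` turns the flag on everywhere and all ranks leave the epoch loop together. A condensed sketch of the pattern with plain `torch.distributed` (illustrative; the Accelerator wraps the collective):

```python
# Coordinate a graceful stop across ranks with an all-reduced integer flag.
# 0 means continue, 1 means stop. Assumes an initialized process group.
import torch
import torch.distributed as dist

stop_flag = torch.tensor(0, dtype=torch.int)


def should_stop() -> bool:
    flag = stop_flag.clone()
    dist.all_reduce(flag, op=dist.ReduceOp.MAX)  # any rank's 1 wins
    return bool(flag.item() > 0)
```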
@@ -710,7 +786,13 @@
 
         # Store the optimization result
         self.opt_result = OptimizeResult(
-            self.current_epoch, self.model_old, self.optimizer_old, loss, metrics
+            self.current_epoch,
+            self.model,
+            self.optimizer,
+            loss,
+            metrics,
+            rank=self.accelerator.rank,
+            device=self.accelerator.execution.device,
         )
 
     def get_ic_grad_bounds(
@@ -808,9 +890,7 @@
 
         batch = next(iter(self._batch_iter(dataloader, num_batches=1)))
 
-        xs = data_to_device(batch, device=self.device, dtype=self.data_dtype)
-
-        ic = InformationContent(self.model, self.loss_fn, xs, epsilons)
+        ic = InformationContent(self.model, self.loss_fn, batch, epsilons)
 
         max_ic_lower_bound, max_ic_upper_bound = ic.get_grad_norm_bounds_max_IC()
         sensitivity_ic_upper_bound = ic.get_grad_norm_bounds_sensitivity_IC(eta)
qadence/model.py CHANGED
@@ -27,6 +27,7 @@ from qadence.mitigations import Mitigations
 from qadence.noise import NoiseHandler
 from qadence.parameters import Parameter
 from qadence.types import DiffMode, Endianness
+from qadence.utils import block_to_mathematical_expression
 
 logger = getLogger(__name__)
 
@@ -568,6 +569,28 @@
             logger.warning(f"Unable to move {self} to {args}, {kwargs} due to {e}.")
         return self
 
+    def observables_to_expression(self) -> dict[str, str] | str:
+        """
+        Convert the model's observables to readable mathematical expressions.
+
+        Each observable is represented by its tag (or "Obs." if untagged) as the
+        key and its mathematical expression as the value. If no observable is
+        set, the string "No observable set." is returned instead.
+
+        Returns:
+            dict[str, str] | str: A dictionary mapping observable tags (or "Obs." if not provided)
+                to the corresponding mathematical expressions, or a message if no observable is set.
+        """
+        if self._observable is None:
+            return "No observable set."
+        else:
+            return {
+                obs.original.tag if obs.original.tag else "Obs.": block_to_mathematical_expression(
+                    obs.original
+                )
+                for obs in self._observable
+            }
+
     @property
     def device(self) -> torch.device:
         """Get device.
qadence/register.py CHANGED
@@ -329,8 +329,12 @@
         return Register(g, spacing=None, device_specs=self.device_specs)
 
     def _to_dict(self) -> dict:
+        try:
+            graph_data = nx.node_link_data(self.graph, edges="links")
+        except TypeError:  # For Python 3.9 support
+            graph_data = nx.node_link_data(self.graph)
         return {
-            "graph": nx.node_link_data(self.graph),
+            "graph": graph_data,
             "device_specs": self.device_specs._to_dict(),
         }
 
qadence/types.py CHANGED
@@ -445,17 +445,6 @@ class InputDiffMode(StrEnum):
     """Central finite differencing."""
 
 
-class ObservableTransform:
-    """Observable transformation type."""
-
-    SCALE = "scale"
-    """Use the given values as scale and shift."""
-    RANGE = "range"
-    """Use the given values as min and max."""
-    NONE = "none"
-    """No transformation."""
-
-
 class ExperimentTrackingTool(StrEnum):
     TENSORBOARD = "tensorboard"
     """Use the tensorboard experiment tracker."""
@@ -463,6 +452,13 @@ class ExperimentTrackingTool(StrEnum):
     """Use the ml-flow experiment tracker."""
 
 
+class ExecutionType(StrEnum):
+    TORCHRUN = "torchrun"
+    """Torchrun-based distributed execution."""
+    DEFAULT = "default"
+    """Default distributed execution."""
+
+
 LoggablePlotFunction = Callable[[Module, int], tuple[str, Figure]]
 
 
qadence/utils.py CHANGED
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import math
+import re
 from collections import Counter
 from functools import partial
 from logging import getLogger
@@ -13,6 +14,9 @@ from torch import Tensor, stack, vmap
 from torch import complex as make_complex
 from torch.linalg import eigvals
 
+from rich.tree import Tree
+
+from qadence.blocks import AbstractBlock
 from qadence.types import Endianness, ResultType, TNumber
 
 if TYPE_CHECKING:
@@ -290,3 +294,44 @@ def one_qubit_projector_matrix(state: str) -> Tensor:
 
 P0 = partial(one_qubit_projector, "0")
 P1 = partial(one_qubit_projector, "1")
+
+
+def block_to_mathematical_expression(block: Tree | AbstractBlock) -> str:
+    """Convert a block to a readable mathematical expression.
+
+    Useful for printing observables as mathematical expressions.
+
+    Args:
+        block (Tree | AbstractBlock): The block, or its rich-tree representation.
+
+    Returns:
+        str: A mathematical expression.
+    """
+    block_tree: Tree = block.__rich_tree__() if isinstance(block, AbstractBlock) else block
+    block_title = block_tree.label if isinstance(block_tree.label, str) else ""
+    if "AddBlock" in block_title:
+        block_title = " + ".join(
+            [block_to_mathematical_expression(block_child) for block_child in block_tree.children]
+        )
+    if "KronBlock" in block_title:
+        block_title = " ⊗ ".join(
+            [block_to_mathematical_expression(block_child) for block_child in block_tree.children]
+        )
+    if "mul" in block_title:
+        block_title = re.findall(r"\d+\.\d+", block_title)[0]
+        coeff = float(block_title)
+        if coeff == 0:
+            block_title = ""
+        elif coeff == 1:
+            block_title = block_to_mathematical_expression(block_tree.children[0])
+        else:
+            block_title += " * " + block_to_mathematical_expression(block_tree.children[0])
+    first_part = block_title[:3]
+    if first_part in [" + ", " ⊗ ", " * "]:
+        block_title = block_title[3:]
+
+    # If the block has more than one child, add parentheses.
+    nb_children = len(block_tree.children)
+    if nb_children > 1:
+        block_title = "(" + block_title + ")"
+    return block_title
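A usage sketch for the new helper; output strings depend on the rich-tree labels, so the result shown is indicative only:

```python
# Hedged example: rendering a composite observable as an expression.
from qadence import Z, add, kron
from qadence.utils import block_to_mathematical_expression

obs = add(0.5 * kron(Z(0), Z(1)), 1.5 * Z(2))
print(block_to_mathematical_expression(obs))
# e.g. "(0.5 * (Z(0) ⊗ Z(1)) + 1.5 * Z(2))"
```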
qadence-1.10.3.dist-info/METADATA → qadence-1.11.1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: qadence
-Version: 1.10.3
+Version: 1.11.1
 Summary: Pasqal interface for circuit-based quantum computing SDKs
 Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>, Smit Chaudhary <smit.chaudhary@pasqal.com>, Ignacio Fernández Graña <ignacio.fernandez-grana@pasqal.com>, Charles Moussa <charles.moussa@pasqal.com>, Giorgio Tosti Balducci <giorgio.tosti-balducci@pasqal.com>, Daniele Cucurachi <daniele.cucurachi@pasqal.com>, Pim Venderbosch <pim.venderbosch@pasqal.com>, Manu Lahariya <manu.lahariya@pasqal.com>
 License: Apache 2.0
@@ -23,7 +23,7 @@ Requires-Dist: nevergrad
 Requires-Dist: numpy
 Requires-Dist: openfermion
 Requires-Dist: pasqal-cloud
-Requires-Dist: pyqtorch==1.7.0
+Requires-Dist: pyqtorch==1.7.2
 Requires-Dist: pyyaml
 Requires-Dist: rich
 Requires-Dist: scipy
@@ -55,9 +55,9 @@ Requires-Dist: mlflow; extra == 'mlflow'
 Provides-Extra: protocols
 Requires-Dist: qadence-protocols; extra == 'protocols'
 Provides-Extra: pulser
-Requires-Dist: pasqal-cloud==0.12.7; extra == 'pulser'
-Requires-Dist: pulser-core==1.2.2; extra == 'pulser'
-Requires-Dist: pulser-simulation==1.2.2; extra == 'pulser'
+Requires-Dist: pasqal-cloud==0.20.2; extra == 'pulser'
+Requires-Dist: pulser-core==1.3.0; extra == 'pulser'
+Requires-Dist: pulser-simulation==1.3.0; extra == 'pulser'
 Provides-Extra: visualization
 Requires-Dist: graphviz; extra == 'visualization'
 Description-Content-Type: text/markdown
@@ -202,12 +202,15 @@ Users also report problems running Hatch on Windows, we suggest using WSL2.
 If you use Qadence for a publication, we kindly ask you to cite our work using the following BibTex entry:
 
 ```latex
-@article{qadence2024pasqal,
-  title = {Qadence: a differentiable interface for digital-analog programs.},
-  author={Dominik Seitz and Niklas Heim and João P. Moutinho and Roland Guichard and Vytautas Abramavicius and Aleksander Wennersteen and Gert-Jan Both and Anton Quelle and Caroline de Groot and Gergana V. Velikova and Vincent E. Elfving and Mario Dagrada},
-  journal={arXiv:2401.09915},
-  url = {https://github.com/pasqal-io/qadence},
-  year = {2024}
+@article{qadence2025,
+  author = {Seitz, Dominik and Heim, Niklas and Moutinho, João and Guichard, Roland and Abramavicius, Vytautas and Wennersteen, Aleksander and Both, Gert-Jan and Quelle, Anton and Groot, Caroline and Velikova, Gergana and Elfving, Vincent and Dagrada, Mario},
+  year = {2025},
+  month = {01},
+  pages = {1-14},
+  title = {Qadence: a differentiable interface for digital and analog programs},
+  volume = {PP},
+  journal = {IEEE Software},
+  doi = {10.1109/MS.2025.3536607}
 }
 ```