qadence 1.10.3__py3-none-any.whl → 1.11.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qadence/blocks/block_to_tensor.py +21 -24
- qadence/constructors/__init__.py +7 -1
- qadence/constructors/hamiltonians.py +96 -9
- qadence/mitigations/analog_zne.py +6 -2
- qadence/ml_tools/__init__.py +2 -2
- qadence/ml_tools/callbacks/callback.py +80 -50
- qadence/ml_tools/callbacks/callbackmanager.py +3 -2
- qadence/ml_tools/callbacks/writer_registry.py +3 -2
- qadence/ml_tools/config.py +66 -5
- qadence/ml_tools/constructors.py +9 -62
- qadence/ml_tools/data.py +4 -0
- qadence/ml_tools/models.py +69 -4
- qadence/ml_tools/optimize_step.py +1 -2
- qadence/ml_tools/train_utils/__init__.py +3 -1
- qadence/ml_tools/train_utils/accelerator.py +480 -0
- qadence/ml_tools/train_utils/config_manager.py +7 -7
- qadence/ml_tools/train_utils/distribution.py +209 -0
- qadence/ml_tools/train_utils/execution.py +421 -0
- qadence/ml_tools/trainer.py +188 -100
- qadence/types.py +7 -11
- qadence/utils.py +45 -0
- {qadence-1.10.3.dist-info → qadence-1.11.0.dist-info}/METADATA +14 -11
- {qadence-1.10.3.dist-info → qadence-1.11.0.dist-info}/RECORD +25 -22
- {qadence-1.10.3.dist-info → qadence-1.11.0.dist-info}/WHEEL +0 -0
- {qadence-1.10.3.dist-info → qadence-1.11.0.dist-info}/licenses/LICENSE +0 -0
qadence/ml_tools/trainer.py
CHANGED
@@ -4,14 +4,11 @@ import copy
from itertools import islice
from logging import getLogger
from typing import Any, Callable, Iterable, cast
-
- import torch
from nevergrad.optimization.base import Optimizer as NGOptimizer
-
- from torch import
- from torch import device as torch_device
- from torch import dtype as torch_dtype
+ import torch
+ from torch import nn, optim
from torch.utils.data import DataLoader
+ from rich.progress import BarColumn, Progress, TaskProgressColumn, TextColumn, TimeRemainingColumn

from qadence.ml_tools.config import TrainConfig
from qadence.ml_tools.data import DictDataLoader, OptimizeResult, data_to_device
@@ -20,6 +17,7 @@ from qadence.ml_tools.optimize_step import optimize_step, update_ng_parameters
from qadence.ml_tools.stages import TrainingStage

from .train_utils.base_trainer import BaseTrainer
+ from .train_utils.accelerator import Accelerator

logger = getLogger("ml_tools")

@@ -38,11 +36,6 @@ class Trainer(BaseTrainer):
Attributes:
current_epoch (int): The current epoch number.
global_step (int): The global step across all epochs.
- log_device (str): Device for logging, default is "cpu".
- device (torch_device): Device used for computation.
- dtype (torch_dtype | None): Data type used for computation.
- data_dtype (torch_dtype | None): Data type for data.
- Depends on the model's data type.

Inherited Attributes:
use_grad (bool): Indicates if gradients are used for optimization. Default is True.
@@ -240,8 +233,6 @@ class Trainer(BaseTrainer):
val_dataloader: DataLoader | DictDataLoader | None = None,
test_dataloader: DataLoader | DictDataLoader | None = None,
optimize_step: Callable = optimize_step,
- device: torch_device | None = None,
- dtype: torch_dtype | None = None,
max_batches: int | None = None,
):
"""
@@ -257,8 +248,6 @@ class Trainer(BaseTrainer):
val_dataloader (DataLoader | DictDataLoader | None): DataLoader for validation data.
test_dataloader (DataLoader | DictDataLoader | None): DataLoader for test data.
optimize_step (Callable): Function to execute an optimization step.
- device (torch_device): Device to use for computation.
- dtype (torch_dtype): Data type for computation.
max_batches (int | None): Maximum number of batches to process per epoch.
This is only valid in case of finite TensorDataset dataloaders.
if max_batches is not None, the maximum number of batches used will
@@ -278,13 +267,21 @@ class Trainer(BaseTrainer):
)
self.current_epoch: int = 0
self.global_step: int = 0
- self.
- self.
-
-
- self.
-
-
+ self._stop_training: torch.Tensor = torch.tensor(0, dtype=torch.int)
+ self.progress: Progress | None = None
+
+ # Integration with Accelerator:
+ self.accelerator = Accelerator(
+ backend=config.backend,
+ nprocs=config.nprocs,
+ compute_setup=config.compute_setup,
+ dtype=config.dtype,
+ log_setup=config.log_setup,
+ )
+ # Decorate the unbound Trainer.fit method with accelerator.distribute.
+ # We use __get__ to bind the decorated method to the current instance,
+ # ensuring that 'self' is passed only once when self.fit is called.
+ self.fit = self.accelerator.distribute(Trainer.fit).__get__(self, Trainer)  # type: ignore[method-assign]

def fit(
self,
@@ -323,26 +320,30 @@ class Trainer(BaseTrainer):
The callback_manager.start_training takes care of loading checkpoint,
and setting up the writer.
"""
- self.
-
+ self._stop_training = torch.tensor(
+ 0, dtype=torch.int, device=self.accelerator.execution.device
+ )
+ # initalize config in the first process, and broadcast it to all processes
+ if self.accelerator.rank == 0:
+ self.config_manager.initialize_config()
+ self.config_manager = self.accelerator.broadcast(self.config_manager, src=0)
self.callback_manager.start_training(trainer=self)

- #
-
- self.
-
-
-
- # Progress bar for training visualization
- self.progress: Progress = Progress(
- TextColumn("[progress.description]{task.description}"),
- BarColumn(),
- TaskProgressColumn(),
- TimeRemainingColumn(elapsed_when_finished=True),
+ # Integration with Accelerator: prepare the model, optimizer, and dataloaders.
+ (self.model, self.optimizer, self.train_dataloader, self.val_dataloader) = (
+ self.accelerator.prepare(
+ self.model, self.optimizer, self.train_dataloader, self.val_dataloader
+ )
)

- #
- self.
+ # Progress bar for training visualization
+ if self.accelerator.world_size == 1:
+ self.progress = Progress(
+ TextColumn("[progress.description]{task.description}"),
+ BarColumn(),
+ TaskProgressColumn(),
+ TimeRemainingColumn(elapsed_when_finished=True),
+ )

# Run validation at the start if specified in the configuration
self.perform_val = self.config.val_every > 0
@@ -356,7 +357,11 @@ class Trainer(BaseTrainer):
@BaseTrainer.callback("train")
def _train(self) -> list[list[tuple[torch.Tensor, dict[str, Any]]]]:
"""
- Runs the main training loop
+ Runs the main training loop over multiple epochs.
+
+ This method sets up the training process by performing any necessary pre-training
+ actions (via `on_train_start`), configuring progress tracking (if available), and then
+ iteratively calling `_train_epoch` to run through the epochs.

Returns:
list[list[tuple[torch.Tensor, dict[str, Any]]]]: Training loss
@@ -365,45 +370,97 @@
Epochs -> Training Batches -> (loss, metrics)
"""
self.on_train_start()
-
-
+ epoch_start, epoch_end = (
+ self.global_step,
+ self.global_step + self.config_manager.config.max_iter + 1,
+ )

-
-
-
-
-
- val_task = self.progress.add_task(
- "Validation",
- total=(self.config_manager.config.max_iter + 1) / self.config.val_every,
+ if self.accelerator.world_size == 1 and self.progress:
+ # Progress setup is only available for non-spawned training.
+ with self.progress:
+ train_task = self.progress.add_task(
+ "Training", total=self.config_manager.config.max_iter
)
-
-
-
-
-
-
-
-
-
-
-
-
- if self.perform_val and self.current_epoch % self.config.val_every == 0:
- self.on_val_epoch_start()
- val_epoch_loss_metrics = self.run_validation(self.val_dataloader)
- val_losses.append(val_epoch_loss_metrics)
- self.on_val_epoch_end(val_epoch_loss_metrics)
- self.progress.update(val_task, advance=1)
-
- self.progress.update(train_task, advance=1)
- except KeyboardInterrupt:
- logger.info("Terminating training gracefully after the current iteration.")
- break
+ if self.perform_val:
+ val_task = self.progress.add_task(
+ "Validation",
+ total=(self.config_manager.config.max_iter + 1) / self.config.val_every,
+ )
+ else:
+ val_task = None
+ train_losses, val_losses = self._train_epochs(
+ epoch_start, epoch_end, train_task, val_task
+ )
+ else:
+ train_losses, val_losses = self._train_epochs(epoch_start, epoch_end)

self.on_train_end(train_losses, val_losses)
return train_losses

+ def _train_epochs(
+ self,
+ epoch_start: int,
+ epoch_end: int,
+ train_task: int | None = None,
+ val_task: int | None = None,
+ ) -> tuple[
+ list[list[tuple[torch.Tensor, dict[str, Any]]]],
+ list[list[tuple[torch.Tensor, dict[str, Any]]]],
+ ]:
+ """
+ Executes the training loop for a series of epochs.
+
+ Args:
+ epoch_start (int): The starting epoch index.
+ epoch_end (int): The ending epoch index (non-inclusive).
+ train_task (int | None, optional): The progress bar task ID for training updates.
+ If provided, the progress bar will be updated after each epoch. Defaults to None.
+ val_task (int | None, optional): The progress bar task ID for validation updates.
+ If provided and validation is enabled, the progress bar will be updated after each validation run.
+ Defaults to None.
+
+ Returns:
+ list[list[tuple[torch.Tensor, dict[str, Any]]]]: A tuple of
+ Training loss metrics for all epochs.
+ list -> list -> tuples
+ Epochs -> Training Batches -> (loss, metrics)
+ And Validation loss metrics for all epochs
+ list -> list -> tuples
+ Epochs -> Training Batches -> (loss, metrics)
+ """
+ train_losses = []
+ val_losses = []
+
+ # Iterate over the epochs
+ for epoch in range(epoch_start, epoch_end):
+ if not self.stop_training():
+ try:
+ self.current_epoch = epoch
+ self.on_train_epoch_start()
+ train_epoch_loss_metrics = self.run_training(self.train_dataloader)
+ train_losses.append(train_epoch_loss_metrics)
+ self.on_train_epoch_end(train_epoch_loss_metrics)
+
+ # Run validation periodically if specified
+ if self.perform_val and (epoch % self.config.val_every == 0):
+ self.on_val_epoch_start()
+ val_epoch_loss_metrics = self.run_validation(self.val_dataloader)
+ val_losses.append(val_epoch_loss_metrics)
+ self.on_val_epoch_end(val_epoch_loss_metrics)
+ if val_task is not None:
+ self.progress.update(val_task, advance=1)  # type: ignore[union-attr]
+
+ if train_task is not None:
+ self.progress.update(train_task, advance=1)  # type: ignore[union-attr]
+ except KeyboardInterrupt:
+ self._stop_training.fill_(1)
+ else:
+ if self.accelerator.rank == 0:
+ logger.info("Terminating training gracefully after the current iteration.")
+ self.accelerator.finalize()
+ break
+ return train_losses, val_losses
+
@BaseTrainer.callback("train_epoch")
def run_training(self, dataloader: DataLoader) -> list[tuple[torch.Tensor, dict[str, Any]]]:
"""
@@ -419,12 +476,12 @@ class Trainer(BaseTrainer):
"""
self.model.train()
train_epoch_loss_metrics = []
- # Quick Fix for iteration 0
- self._reset_model_and_opt()

for batch in self._batch_iter(dataloader, self.num_training_batches):
self.on_train_batch_start(batch)
train_batch_loss_metrics = self.run_train_batch(batch)
+ if self.config.all_reduce_metrics:
+ train_batch_loss_metrics = self._aggregate_result(train_batch_loss_metrics)
train_epoch_loss_metrics.append(train_batch_loss_metrics)
self.on_train_batch_end(train_batch_loss_metrics)

@@ -458,8 +515,8 @@ class Trainer(BaseTrainer):
optimizer=self.optimizer,
loss_fn=self.loss_fn,
xs=batch,
- device=self.device,
- dtype=self.data_dtype,
+ device=self.accelerator.execution.device,
+ dtype=self.accelerator.execution.data_dtype,
)
else:
# Perform optimization using Nevergrad
@@ -473,7 +530,15 @@ class Trainer(BaseTrainer):
self.ng_params = ng_params
loss_metrics = loss, metrics

-
+ # --------------------- FIX: Post-Optimization Loss --------------------- #
+ # Because the loss/metrics are returned before the optimization. To sync
+ # model state and current loss/metrics we calculate them again after optimization.
+ # This is not strictly necessary.
+ # TODO: Should be removed if loss can be logged at an unoptimized model state
+ with torch.no_grad():
+ post_update_loss_metrics = self.loss_fn(self.model, batch)
+
+ return self._modify_batch_end_loss_metrics(post_update_loss_metrics)

@BaseTrainer.callback("val_epoch")
def run_validation(self, dataloader: DataLoader) -> list[tuple[torch.Tensor, dict[str, Any]]]:
@@ -494,6 +559,8 @@ class Trainer(BaseTrainer):
for batch in self._batch_iter(dataloader, self.num_validation_batches):
self.on_val_batch_start(batch)
val_batch_loss_metrics = self.run_val_batch(batch)
+ if self.config.all_reduce_metrics:
+ val_batch_loss_metrics = self._aggregate_result(val_batch_loss_metrics)
val_epoch_loss_metrics.append(val_batch_loss_metrics)
self.on_val_batch_end(val_batch_loss_metrics)

@@ -568,6 +635,9 @@ class Trainer(BaseTrainer):
"""
Yields batches from the provided dataloader.

+ The batch of data is also moved
+ to the correct device and dtype using accelerator.prepare.
+
Args:
dataloader ([DataLoader]): The dataloader to iterate over.
num_batches (int): The maximum number of batches to yield.
@@ -581,9 +651,7 @@ class Trainer(BaseTrainer):
yield None
else:
for batch in islice(dataloader, num_batches):
-
- # batch = data_to_device(batch, device=self.device, dtype=self.data_dtype)
- yield batch
+ yield self.accelerator.prepare_batch(batch)

def _modify_batch_end_loss_metrics(
self, loss_metrics: tuple[torch.Tensor, dict[str, Any]]
@@ -609,27 +677,43 @@ class Trainer(BaseTrainer):
return loss, updated_metrics
return loss_metrics

- def
+ def _aggregate_result(
+ self, result: tuple[torch.Tensor, dict[str, Any]]
+ ) -> tuple[torch.Tensor, dict[str, Any]]:
"""
-
+ Aggregates the loss and metrics using the Accelerator's all_reduce_dict method if aggregation is enabled.

-
-
+ Args:
+ result: (tuple[torch.Tensor, dict[str, Any]])
+ The result consisting of loss and metrics.For more details,
+ look at the signature of build_optimize_result.

-
-
- To align them with model/optimizer correctly, we checkpoint
- the older copy of the model.
+ Returns:
+ tuple[torch.Tensor, dict[str, Any]]: The aggregated loss and metrics.
"""
+ loss, metrics = result
+ if self.config.all_reduce_metrics:
+ reduced = self.accelerator.all_reduce_dict({"loss": loss, **metrics})
+ loss = reduced.pop("loss")
+ metrics = reduced
+ return loss, metrics
+ else:
+ return loss, metrics

-
-
-
-
-
-
-
- self.
+ def stop_training(self) -> bool:
+ """
+ Helper function to indicate if the training should be stopped.
+
+ We all_reduce the indicator across all processes to ensure all processes are stopped.
+
+ Notes:
+ self._stop_training indicator indicates if the training should be stopped.
+ 0 is continue. 1 is stop.
+ """
+ _stop_training = self.accelerator.all_reduce_dict(
+ {"indicator": self._stop_training}, op="max"
+ )
+ return bool(_stop_training["indicator"] > 0)

def build_optimize_result(
self,
@@ -710,7 +794,13 @@ class Trainer(BaseTrainer):

# Store the optimization result
self.opt_result = OptimizeResult(
- self.current_epoch,
+ self.current_epoch,
+ self.model,
+ self.optimizer,
+ loss,
+ metrics,
+ rank=self.accelerator.rank,
+ device=self.accelerator.execution.device,
)

def get_ic_grad_bounds(
@@ -808,9 +898,7 @@ class Trainer(BaseTrainer):

batch = next(iter(self._batch_iter(dataloader, num_batches=1)))

-
-
- ic = InformationContent(self.model, self.loss_fn, xs, epsilons)
+ ic = InformationContent(self.model, self.loss_fn, batch, epsilons)

max_ic_lower_bound, max_ic_upper_bound = ic.get_grad_norm_bounds_max_IC()
sensitivity_ic_upper_bound = ic.get_grad_norm_bounds_sensitivity_IC(eta)
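The comment in the new `__init__` about decorating the unbound `Trainer.fit` with `accelerator.distribute` and re-binding it via `__get__` relies on Python's descriptor protocol. Below is a minimal, self-contained sketch of that binding pattern; the `log_calls` decorator and `Worker` class are hypothetical stand-ins used only to illustrate the mechanism and are not part of qadence.

```python
from typing import Any, Callable


def log_calls(fn: Callable) -> Callable:
    """Hypothetical stand-in for accelerator.distribute: wraps an unbound method."""

    def wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
        print(f"calling {fn.__name__}")
        return fn(self, *args, **kwargs)

    return wrapper


class Worker:
    def __init__(self) -> None:
        # Decorate the unbound Worker.run, then bind the wrapper to this
        # instance with __get__ so 'self' is passed only once per call,
        # mirroring: self.fit = accelerator.distribute(Trainer.fit).__get__(self, Trainer)
        self.run = log_calls(Worker.run).__get__(self, Worker)

    def run(self, x: int) -> int:
        return x + 1


w = Worker()
assert w.run(1) == 2  # wrapper receives 'self' implicitly via the bound method
```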
qadence/types.py
CHANGED
@@ -445,17 +445,6 @@ class InputDiffMode(StrEnum):
"""Central finite differencing."""


- class ObservableTransform:
- """Observable transformation type."""
-
- SCALE = "scale"
- """Use the given values as scale and shift."""
- RANGE = "range"
- """Use the given values as min and max."""
- NONE = "none"
- """No transformation."""
-
-
class ExperimentTrackingTool(StrEnum):
TENSORBOARD = "tensorboard"
"""Use the tensorboard experiment tracker."""
@@ -463,6 +452,13 @@ class ExperimentTrackingTool(StrEnum):
"""Use the ml-flow experiment tracker."""


+ class ExecutionType(StrEnum):
+ TORCHRUN = "torchrun"
+ """Torchrun based distribution execution."""
+ DEFAULT = "default"
+ """Default distribution execution."""
+
+
LoggablePlotFunction = Callable[[Module, int], tuple[str, Figure]]

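The new `ExecutionType` members are declared on the same `StrEnum` base as the other enums in `qadence.types`. Assuming that base behaves like a str-backed `Enum` (as the name suggests), members compare equal to their plain string values, which is convenient when the execution mode comes from a CLI flag or a config file. A minimal standalone sketch, using `str` plus `Enum` as a stand-in for qadence's own `StrEnum`:

```python
from enum import Enum


class StrEnum(str, Enum):
    """Stand-in for qadence's StrEnum: members are also plain strings."""

    def __str__(self) -> str:
        return self.value


class ExecutionType(StrEnum):
    TORCHRUN = "torchrun"
    """Torchrun based distribution execution."""
    DEFAULT = "default"
    """Default distribution execution."""


# String values and enum members are interchangeable.
assert ExecutionType.TORCHRUN == "torchrun"
assert ExecutionType("default") is ExecutionType.DEFAULT
```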
qadence/utils.py
CHANGED
@@ -1,6 +1,7 @@
from __future__ import annotations

import math
+ import re
from collections import Counter
from functools import partial
from logging import getLogger
@@ -13,6 +14,9 @@ from torch import Tensor, stack, vmap
from torch import complex as make_complex
from torch.linalg import eigvals

+ from rich.tree import Tree
+
+ from qadence.blocks import AbstractBlock
from qadence.types import Endianness, ResultType, TNumber

if TYPE_CHECKING:
@@ -290,3 +294,44 @@ def one_qubit_projector_matrix(state: str) -> Tensor:

P0 = partial(one_qubit_projector, "0")
P1 = partial(one_qubit_projector, "1")
+
+
+ def block_to_mathematical_expression(block: Tree | AbstractBlock) -> str:
+ """Convert a block to a readable mathematical expression.
+
+ Useful for printing Observables as a mathematical expression.
+
+ Args:
+ block (AbstractBlock): Tree instance.
+
+ Returns:
+ str: A mathematical expression.
+ """
+ block_tree: Tree = block.__rich_tree__() if isinstance(block, AbstractBlock) else block
+ block_title = block_tree.label if isinstance(block_tree.label, str) else ""
+ if "AddBlock" in block_title:
+ block_title = " + ".join(
+ [block_to_mathematical_expression(block_child) for block_child in block_tree.children]
+ )
+ if "KronBlock" in block_title:
+ block_title = " ⊗ ".join(
+ [block_to_mathematical_expression(block_child) for block_child in block_tree.children]
+ )
+ if "mul" in block_title:
+ block_title = re.findall("\d+\.\d+", block_title)[0]
+ coeff = float(block_title)
+ if coeff == 0:
+ block_title = ""
+ elif coeff == 1:
+ block_title = block_to_mathematical_expression(block_tree.children[0])
+ else:
+ block_title += " * " + block_to_mathematical_expression(block_tree.children[0])
+ first_part = block_title[:3]
+ if first_part in [" + ", " ⊗ ", " * "]:
+ block_title = block_title[3:]
+
+ # if too many trees, add parentheses.
+ nb_children = len(block_tree.children)
+ if nb_children > 1:
+ block_title = "(" + block_title + ")"
+ return block_title
{qadence-1.10.3.dist-info → qadence-1.11.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
Metadata-Version: 2.4
Name: qadence
- Version: 1.10.3
+ Version: 1.11.0
Summary: Pasqal interface for circuit-based quantum computing SDKs
Author-email: Aleksander Wennersteen <aleksander.wennersteen@pasqal.com>, Gert-Jan Both <gert-jan.both@pasqal.com>, Niklas Heim <niklas.heim@pasqal.com>, Mario Dagrada <mario.dagrada@pasqal.com>, Vincent Elfving <vincent.elfving@pasqal.com>, Dominik Seitz <dominik.seitz@pasqal.com>, Roland Guichard <roland.guichard@pasqal.com>, "Joao P. Moutinho" <joao.moutinho@pasqal.com>, Vytautas Abramavicius <vytautas.abramavicius@pasqal.com>, Gergana Velikova <gergana.velikova@pasqal.com>, Eduardo Maschio <eduardo.maschio@pasqal.com>, Smit Chaudhary <smit.chaudhary@pasqal.com>, Ignacio Fernández Graña <ignacio.fernandez-grana@pasqal.com>, Charles Moussa <charles.moussa@pasqal.com>, Giorgio Tosti Balducci <giorgio.tosti-balducci@pasqal.com>, Daniele Cucurachi <daniele.cucurachi@pasqal.com>, Pim Venderbosch <pim.venderbosch@pasqal.com>, Manu Lahariya <manu.lahariya@pasqal.com>
License: Apache 2.0
@@ -23,7 +23,7 @@ Requires-Dist: nevergrad
Requires-Dist: numpy
Requires-Dist: openfermion
Requires-Dist: pasqal-cloud
- Requires-Dist: pyqtorch==1.7.
+ Requires-Dist: pyqtorch==1.7.1
Requires-Dist: pyyaml
Requires-Dist: rich
Requires-Dist: scipy
@@ -55,9 +55,9 @@ Requires-Dist: mlflow; extra == 'mlflow'
Provides-Extra: protocols
Requires-Dist: qadence-protocols; extra == 'protocols'
Provides-Extra: pulser
- Requires-Dist: pasqal-cloud==0.
- Requires-Dist: pulser-core==1.
- Requires-Dist: pulser-simulation==1.
+ Requires-Dist: pasqal-cloud==0.13.0; extra == 'pulser'
+ Requires-Dist: pulser-core==1.3.0; extra == 'pulser'
+ Requires-Dist: pulser-simulation==1.3.0; extra == 'pulser'
Provides-Extra: visualization
Requires-Dist: graphviz; extra == 'visualization'
Description-Content-Type: text/markdown
@@ -202,12 +202,15 @@ Users also report problems running Hatch on Windows, we suggest using WSL2.
If you use Qadence for a publication, we kindly ask you to cite our work using the following BibTex entry:

```latex
- @article{
-
-
-
-
-
+ @article{qadence2025,
+ author = {Seitz, Dominik and Heim, Niklas and Moutinho, João and Guichard, Roland and Abramavicius, Vytautas and Wennersteen, Aleksander and Both, Gert-Jan and Quelle, Anton and Groot, Caroline and Velikova, Gergana and Elfving, Vincent and Dagrada, Mario},
+ year = {2025},
+ month = {01},
+ pages = {1-14},
+ title = {Qadence: a differentiable interface for digital and analog programs},
+ volume = {PP},
+ journal = {IEEE Software},
+ doi = {10.1109/MS.2025.3536607}
}
```
