tensorcircuit-nightly 1.4.0.dev20251016__py3-none-any.whl → 1.4.0.dev20251128__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of tensorcircuit-nightly might be problematic.
- tensorcircuit/__init__.py +1 -1
- tensorcircuit/experimental.py +448 -66
- tensorcircuit/interfaces/tensortrans.py +6 -2
- tensorcircuit/interfaces/torch.py +14 -4
- {tensorcircuit_nightly-1.4.0.dev20251016.dist-info → tensorcircuit_nightly-1.4.0.dev20251128.dist-info}/METADATA +3 -2
- {tensorcircuit_nightly-1.4.0.dev20251016.dist-info → tensorcircuit_nightly-1.4.0.dev20251128.dist-info}/RECORD +9 -9
- {tensorcircuit_nightly-1.4.0.dev20251016.dist-info → tensorcircuit_nightly-1.4.0.dev20251128.dist-info}/WHEEL +0 -0
- {tensorcircuit_nightly-1.4.0.dev20251016.dist-info → tensorcircuit_nightly-1.4.0.dev20251128.dist-info}/licenses/LICENSE +0 -0
- {tensorcircuit_nightly-1.4.0.dev20251016.dist-info → tensorcircuit_nightly-1.4.0.dev20251128.dist-info}/top_level.txt +0 -0
tensorcircuit/__init__.py
CHANGED
tensorcircuit/experimental.py
CHANGED
@@ -7,6 +7,10 @@ Experimental features
 from functools import partial
 import logging
 from typing import Any, Callable, Dict, Optional, Tuple, List, Sequence, Union
+import pickle
+import uuid
+import time
+import os
 
 import numpy as np
 
@@ -489,6 +493,229 @@ jax_func_load = jax_jitted_function_load
 PADDING_VALUE = -1
 jaxlib: Any
 ctg: Any
+Mesh: Any
+NamedSharding: Any
+P: Any
+
+
+def broadcast_py_object(obj: Any, shared_dir: Optional[str] = None) -> Any:
+    """
+    Broadcast a picklable Python object from process 0 to all other processes,
+    with fallback mechanism from gRPC to file system based approach.
+
+    This function first attempts to use gRPC-based broadcast. If that fails due to
+    pickling issues, it falls back to a file system based approach that is more robust.
+
+    :param obj: The Python object to broadcast. It must be picklable.
+        This object should exist on process 0 and can be None on others.
+    :type obj: Any
+    :param shared_dir: Directory path for shared file system broadcast fallback.
+        If None, uses current directory. Only used in fallback mode.
+    :type shared_dir: Optional[str], optional
+    :return: The broadcasted object, now present on all processes.
+    :rtype: Any
+    """
+    import jax
+    from jax.experimental import multihost_utils
+
+    try:
+        result = broadcast_py_object_jax(obj)
+        return result
+
+    except pickle.UnpicklingError as e:
+        # This block is executed if any process fails during the gRPC attempt.
+
+        multihost_utils.sync_global_devices("grpc_broadcast_failed_fallback_sync")
+
+        if jax.process_index() == 0:
+            border = "=" * 80
+            logger.warning(
+                "\n%s\nJAX gRPC broadcast failed with error: %s\n"
+                "--> Falling back to robust Shared File System broadcast method.\n%s",
+                border,
+                e,
+                border,
+            )
+
+        return broadcast_py_object_fs(obj, shared_dir)
+
+
+def broadcast_py_object_jax(obj: Any) -> Any:
+    """
+    Broadcast a picklable Python object from process 0 to all other processes
+    within the jax distribution system.
+
+    This function uses a two-step broadcast: first the size, then the data.
+    This is necessary because `broadcast_one_to_all` requires the same
+    shaped array on all hosts.
+
+    :param obj: The Python object to broadcast. It must be picklable.
+        This object should exist on process 0 and can be None on others.
+
+    :return: The broadcasted object, now present on all processes.
+    """
+    import jax as jaxlib
+    import pickle
+    from jax.experimental import multihost_utils
+
+    # Serialize to bytes on process 0, empty bytes on others
+    if jaxlib.process_index() == 0:
+        if obj is None:
+            raise ValueError("Object to broadcast from process 0 cannot be None.")
+        data = pickle.dumps(obj)
+        logger.info(
+            f"--- Size of object to be broadcast: {len(data) / 1024**2:.3f} MB ---"
+        )
+
+    else:
+        data = b""
+
+    # Step 1: Broadcast the length of the serialized data.
+    # We send a single-element int32 array.
+    length = np.array([len(data)], dtype=np.int32)
+    length = multihost_utils.broadcast_one_to_all(length)
+
+    length = int(length[0])  # type: ignore
+
+    # Step 2: Broadcast the actual data.
+    # Convert byte string to a uint8 array for broadcasting.
+    send_arr_uint8 = np.frombuffer(data, dtype=np.uint8)
+    padded_length = (length + 3) // 4 * 4
+    if send_arr_uint8.size < padded_length:
+        send_arr_uint8 = np.pad(  # type: ignore
+            send_arr_uint8, (0, padded_length - send_arr_uint8.size), mode="constant"
+        )
+    send_arr_int32 = send_arr_uint8.astype(np.int32)
+    # send_arr_int32 = jaxlib.numpy.array(send_arr_int32, dtype=np.int32)
+    send_arr_int32 = jaxlib.device_put(send_arr_int32)
+
+    jaxlib.experimental.multihost_utils.sync_global_devices("bulk_before")
+
+    received_arr = multihost_utils.broadcast_one_to_all(send_arr_int32)
+
+    received_arr = np.array(received_arr)
+    received_arr_uint8 = received_arr.astype(np.uint8)
+
+    # Step 3: Reconstruct the object from the received bytes.
+    # Convert the NumPy array back to bytes, truncate any padding, and unpickle.
+    received_data = received_arr_uint8[:length].tobytes()
+    # if jaxlib.process_index() == 0:
+    #     logger.info(f"Broadcasted object {obj}")
+    return pickle.loads(received_data)
+
+
+def broadcast_py_object_fs(
+    obj: Any, shared_dir: Optional[str] = None, timeout_seconds: int = 300
+) -> Any:
+    """
+    Broadcast a picklable Python object from process 0 to all other processes
+    using a shared file system approach.
+
+    This is a fallback method when gRPC-based broadcast fails. It uses UUID-based
+    file communication to share objects between processes through a shared file system.
+
+    :param obj: The Python object to broadcast. Must be picklable.
+        Should exist on process 0, can be None on others.
+    :type obj: Any
+    :param shared_dir: Directory path for shared file system communication.
+        If None, uses current directory.
+    :type shared_dir: Optional[str], optional
+    :param timeout_seconds: Maximum time to wait for file operations before timing out.
+        Defaults to 300 seconds.
+    :type timeout_seconds: int, optional
+    :return: The broadcasted object, now present on all processes.
+    :rtype: Any
+    """
+    # to avoid very subtle bugs for broadcast tree_data on A800 clusters
+    import jax
+    from jax.experimental import multihost_utils
+
+    if shared_dir is None:
+        shared_dir = "."
+    if jax.process_index() == 0:
+        os.makedirs(shared_dir, exist_ok=True)
+
+    id_comm_path = os.path.join(shared_dir, f".broadcast_temp_12318")
+    transfer_id = ""
+
+    if jax.process_index() == 0:
+        transfer_id = str(uuid.uuid4())
+        # print(f"[Process 0] Generated unique transfer ID: {transfer_id}", flush=True)
+        with open(id_comm_path, "w") as f:
+            f.write(transfer_id)
+
+    multihost_utils.sync_global_devices("fs_broadcast_id_written")
+
+    if jax.process_index() != 0:
+        start_time = time.time()
+        while not os.path.exists(id_comm_path):
+            time.sleep(0.1)
+            if time.time() - start_time > timeout_seconds:
+                raise TimeoutError(
+                    f"Process {jax.process_index()} timed out waiting for ID file: {id_comm_path}"
+                )
+        with open(id_comm_path, "r") as f:
+            transfer_id = f.read()
+
+    multihost_utils.sync_global_devices("fs_broadcast_id_read")
+    if jax.process_index() == 0:
+        try:
+            os.remove(id_comm_path)
+        except OSError:
+            pass  # ignore the error if the file has already been cleaned up by another process
+
+    # Define the data file and done-flag file paths used for this transfer
+    data_path = os.path.join(shared_dir, f"{transfer_id}.data")
+    done_path = os.path.join(shared_dir, f"{transfer_id}.done")
+
+    result_obj = None
+
+    if jax.process_index() == 0:
+        if obj is None:
+            raise ValueError("None cannot be broadcasted.")
+
+        # print(f"[Process 0] Pickling object...", flush=True)
+        pickled_data = pickle.dumps(obj)
+        logger.info(
+            f"[Process 0] Writing {len(pickled_data) / 1024**2:.3f} MB to {data_path}"
+        )
+        with open(data_path, "wb") as f:
+            f.write(pickled_data)
+
+        with open(done_path, "w") as f:
+            pass
+        logger.info(f"[Process 0] Write complete.")
+        result_obj = obj
+    else:
+        # print(f"[Process {jax.process_index()}] Waiting for done file: {done_path}", flush=True)
+        start_time = time.time()
+        while not os.path.exists(done_path):
+            time.sleep(0.1)
+            if time.time() - start_time > timeout_seconds:
+                raise TimeoutError(
+                    f"Process {jax.process_index()} timed out waiting for done file: {done_path}"
+                )
+
+        # print(f"[Process {jax.process_index()}] Done file found. Reading data from {data_path}", flush=True)
+        with open(data_path, "rb") as f:
+            pickled_data = f.read()
+
+        result_obj = pickle.loads(pickled_data)
+        logger.info(f"[Process {jax.process_index()}] Object successfully loaded.")
+
+    multihost_utils.sync_global_devices("fs_broadcast_read_complete")
+
+    if jax.process_index() == 0:
+        try:
+            os.remove(data_path)
+            os.remove(done_path)
+            # print(f"[Process 0] Cleaned up temporary files for transfer {transfer_id}.", flush=True)
+        except OSError as e:
+            logger.info(
+                f"[Process 0]: Failed to clean up temporary files: {e}",
+            )
+
+    return result_obj
 
 
 class DistributedContractor:
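Note (editor): the three `broadcast_*` helpers above are new in this release. A minimal, hedged usage sketch, not taken from the diff, follows; it assumes a multi-host JAX job in which `jax.distributed.initialize()` can pick up its coordinator settings from the environment, and the payload and `shared_dir` path are purely illustrative.

import jax
from tensorcircuit.experimental import broadcast_py_object

jax.distributed.initialize()  # multi-host setup; coordinator address assumed to come from env vars

# Only process 0 needs the real object; the other processes pass None.
payload = {"msg": "hello from process 0"} if jax.process_index() == 0 else None

# Tries the gRPC-based broadcast first, then falls back to the shared-file-system
# route on unpickling failure; shared_dir must be visible to every process.
synced = broadcast_py_object(payload, shared_dir="/tmp/tc_broadcast")
print(jax.process_index(), synced)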
@@ -513,8 +740,10 @@ class DistributedContractor:
     :type params: Tensor
     :param cotengra_options: Configuration options passed to the cotengra optimizer. Defaults to None
    :type cotengra_options: Optional[Dict[str, Any]], optional
-    :param devices: List of devices to use. If None, uses all available
+    :param devices: List of devices to use. If None, uses all available devices
     :type devices: Optional[List[Any]], optional
+    :param mesh: Mesh object to use for distributed computation. If None, uses all available devices
+    :type mesh: Optional[Any], optional
     """
 
     def __init__(
@@ -522,28 +751,39 @@ class DistributedContractor:
         nodes_fn: Callable[[Tensor], List[Gate]],
         params: Tensor,
         cotengra_options: Optional[Dict[str, Any]] = None,
-        devices: Optional[List[Any]] = None,
+        devices: Optional[List[Any]] = None,  # backward compatibility
+        mesh: Optional[Any] = None,
+        tree_data: Optional[Dict[str, Any]] = None,
     ) -> None:
         global jaxlib
         global ctg
+        global Mesh
+        global NamedSharding
+        global P
 
         logger.info("Initializing DistributedContractor...")
         import cotengra as ctg
+        from cotengra import ContractionTree
         import jax as jaxlib
+        from jax.sharding import Mesh, NamedSharding, PartitionSpec as P
 
         self.nodes_fn = nodes_fn
-        if
-        self.
-
-
+        if mesh is not None:
+            self.mesh = mesh
+        elif devices is not None:
+            self.mesh = Mesh(devices, axis_names=("devices",))
         else:
-            self.
-
+            self.mesh = Mesh(jaxlib.devices(), axis_names=("devices",))
+        self.num_devices = len(self.mesh.devices)
 
         if self.num_devices <= 1:
             logger.info("DistributedContractor is running on a single device.")
 
         self._params_template = params
+        self.params_sharding = jaxlib.tree_util.tree_map(
+            lambda x: NamedSharding(self.mesh, P(*((None,) * x.ndim))),
+            self._params_template,
+        )
         self._backend = "jax"
         self._compiled_v_fns: Dict[
             Tuple[Callable[[Tensor], Tensor], str],
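Note (editor): with the constructor changes above, a `jax.sharding.Mesh` can now be handed to `DistributedContractor` directly (the `devices=` argument is kept for backward compatibility). A hedged sketch under stated assumptions: `make_nodes` is a toy user function, the circuit and parameter shapes are arbitrary, and the default cotengra path search may take a moment.

import jax
import jax.numpy as jnp
import numpy as np
import tensorcircuit as tc
from jax.sharding import Mesh
from tensorcircuit.experimental import DistributedContractor

tc.set_backend("jax")

def make_nodes(params):
    # toy 4-qubit circuit; returns the tensor-network nodes of <Z0> before contraction
    c = tc.Circuit(4)
    for i in range(4):
        c.rx(i, theta=params[i])
    for i in range(3):
        c.cnot(i, i + 1)
    return c.expectation_before([tc.gates.z(), [0]], reuse=False)

params = jnp.ones(4)
mesh = Mesh(np.array(jax.devices()), axis_names=("devices",))  # 1D mesh, same axis name as in the diff

dc = DistributedContractor(make_nodes, params, mesh=mesh)
value, grad = dc.value_and_grad(params)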
@@ -555,41 +795,68 @@ class DistributedContractor:
         ] = {}
 
         logger.info("Running cotengra pathfinder... (This may take a while)")
-
-
-
-
-
-
-
-
-        }
-        if cotengra_options:
-            default_cotengra_options = cotengra_options
+        if tree_data is None:
+            if params is None:
+                raise ValueError("Please provide specific circuit parameters array.")
+            if jaxlib.process_index() == 0:
+                logger.info("Process 0: Running cotengra pathfinder...")
+                tree_data = self._get_tree_data(
+                    self.nodes_fn, self._params_template, cotengra_options  # type: ignore
+                )
 
-
-
-
+            # Step 2: Use the robust helper function to broadcast the tree object.
+            # Process 0 sends its computed `tree_object`.
+            # Other processes send `None`, but receive the object from process 0.
 
-
-
-
-
-
+            if jaxlib.process_count() > 1:
+                # self.tree = broadcast_py_object(tree_object)
+                jaxlib.experimental.multihost_utils.sync_global_devices("tree_before")
+                logger.info(
+                    f"Process {jaxlib.process_index()}: Synchronizing contraction path..."
+                )
+                tree_data = broadcast_py_object(tree_data)
+                jaxlib.experimental.multihost_utils.sync_global_devices("tree_after")
+        else:
+            logger.info("Using pre-computed contraction path.")
+        if tree_data is None:
+            raise ValueError("Contraction path data is missing.")
+
+        self.tree = ContractionTree.from_path(
+            inputs=tree_data["inputs"],
+            output=tree_data["output"],
+            size_dict=tree_data["size_dict"],
+            path=tree_data["path"],
         )
-
-
-
-
+
+        # Restore slicing information
+        for ind, _ in tree_data["sliced_inds"].items():
+            self.tree.remove_ind_(ind)
+
+        logger.info(
+            f"Process {jaxlib.process_index()}: Contraction path successfully synchronized."
+        )
+        actual_num_slices = self.tree.nslices
+
+        self._report_tree_info()
 
         slices_per_device = int(np.ceil(actual_num_slices / self.num_devices))
         padded_size = slices_per_device * self.num_devices
         slice_indices = np.arange(actual_num_slices)
         padded_slice_indices = np.full(padded_size, PADDING_VALUE, dtype=np.int32)
         padded_slice_indices[:actual_num_slices] = slice_indices
-
-
+
+        # Reshape for distribution and define the sharding rule
+        batched_indices = padded_slice_indices.reshape(
+            self.num_devices, slices_per_device
         )
+        # Sharding rule: split the first axis (the one for devices) across the 'devices' mesh axis
+        self.sharding = NamedSharding(self.mesh, P("devices", None))
+        # Place the tensor on devices according to the rule
+        self.batched_slice_indices = jaxlib.device_put(batched_indices, self.sharding)
+
+        # self.batched_slice_indices = backend.convert_to_tensor(
+        #     padded_slice_indices.reshape(self.num_devices, slices_per_device)
+        # )
         print(
             f"Distributing across {self.num_devices} devices. Each device will sequentially process "
             f"up to {slices_per_device} slices."
@@ -600,6 +867,90 @@ class DistributedContractor:
 
         logger.info("Initialization complete.")
 
+    def _report_tree_info(self) -> None:
+        print("\n--- Contraction Path Info ---")
+        actual_num_slices = self.tree.nslices
+        stats = self.tree.contract_stats()
+        print(f"Path found with {actual_num_slices} slices.")
+        print(
+            f"Arithmetic Intensity (higher is better): {self.tree.arithmetic_intensity():.2f}"
+        )
+        print("flops (TFlops):", stats["flops"] / 2**40 / self.num_devices)
+        print("write (GB):", stats["write"] / 2**27 / actual_num_slices)
+        print("size (GB):", stats["size"] / 2**27)
+        print("-----------------------------\n")
+
+    @staticmethod
+    def _get_tree_data(
+        nodes_fn: Callable[[Tensor], List[Gate]],
+        params: Tensor,
+        cotengra_options: Optional[Dict[str, Any]] = None,
+    ) -> Dict[str, Any]:
+        global ctg
+
+        import cotengra as ctg
+
+        local_cotengra_options = (cotengra_options or {}).copy()
+
+        nodes = nodes_fn(params)
+        tn_info, _ = get_tn_info(nodes)
+        default_cotengra_options = {
+            "slicing_reconf_opts": {"target_size": 2**28},
+            "max_repeats": 128,
+            "minimize": "write",
+            "parallel": "auto",
+            "progbar": True,
+        }
+        default_cotengra_options.update(local_cotengra_options)
+
+        opt = ctg.ReusableHyperOptimizer(**default_cotengra_options)
+        tree_object = opt.search(*tn_info)
+        tree_data = {
+            "inputs": tree_object.inputs,
+            "output": tree_object.output,
+            "size_dict": tree_object.size_dict,
+            "path": tree_object.get_path(),
+            "sliced_inds": tree_object.sliced_inds,
+        }
+        return tree_data
+
+    @staticmethod
+    def find_path(
+        nodes_fn: Callable[[Tensor], Tensor],
+        params: Tensor,
+        cotengra_options: Optional[Dict[str, Any]] = None,
+        filepath: Optional[str] = None,
+    ) -> None:
+        tree_data = DistributedContractor._get_tree_data(
+            nodes_fn, params, cotengra_options
+        )
+        if filepath is not None:
+            with open(filepath, "wb") as f:
+                pickle.dump(tree_data, f)
+            logger.info(f"Contraction path data successfully saved to '{filepath}'.")
+
+    @classmethod
+    def from_path(
+        cls,
+        filepath: str,
+        nodes_fn: Callable[[Tensor], List[Gate]],
+        devices: Optional[List[Any]] = None,  # backward compatibility
+        mesh: Optional[Any] = None,
+        params: Any = None,
+    ) -> "DistributedContractor":
+        with open(filepath, "rb") as f:
+            tree_data = pickle.load(f)
+
+        # Each process loads the file independently. No broadcast is needed.
+        # We pass the loaded `tree_data` directly to __init__ to trigger the second workflow.
+        return cls(
+            nodes_fn=nodes_fn,
+            params=params,
+            mesh=mesh,
+            devices=devices,
+            tree_data=tree_data,
+        )
+
     def _get_single_slice_contraction_fn(
         self, op: Optional[Callable[[Tensor], Tensor]] = None
     ) -> Callable[[Any, Tensor, int], Tensor]:
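Note (editor): the new `find_path` / `from_path` pair supports a save-and-reload workflow for the contraction path. A hedged sketch, reusing the toy `make_nodes` and `params` from the earlier sketch; the file name is arbitrary.

from tensorcircuit.experimental import DistributedContractor

# One-off (e.g. on a CPU-only node): search for a contraction path and pickle the tree data.
DistributedContractor.find_path(make_nodes, params, filepath="tree_data.pkl")

# Later, on every process: load the saved path; no cotengra search or broadcast is needed.
dc = DistributedContractor.from_path("tree_data.pkl", nodes_fn=make_nodes, params=params)
value, grad = dc.value_and_grad(params)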
@@ -716,6 +1067,7 @@ class DistributedContractor:
         fn_getter: Callable[..., Any],
         op: Optional[Callable[[Tensor], Tensor]],
         output_dtype: Optional[str],
+        is_grad_fn: bool,
     ) -> Callable[[Any, Tensor, Tensor], Tensor]:
         """
         Gets a compiled pmap-ed function from cache or compiles and caches it.
@@ -728,15 +1080,60 @@ class DistributedContractor:
         cache_key = (op, output_dtype)
         if cache_key not in cache:
             device_fn = fn_getter(op=op, output_dtype=output_dtype)
-
-
-
-
-
-
-
-
-
+
+            def global_aggregated_fn(
+                tree: Any, params: Any, batched_slice_indices: Tensor
+            ) -> Any:
+                # Use jax.vmap to apply the per-device function across the sharded data.
+                # vmap maps `device_fn` over the first axis (0) of `batched_slice_indices`.
+                # `tree` and `params` are broadcasted (in_axes=None) to each call.
+                vmapped_device_fn = jaxlib.vmap(
+                    device_fn, in_axes=(None, None, 0), out_axes=0
+                )
+                device_results = vmapped_device_fn(tree, params, batched_slice_indices)
+
+                # Now, `device_results` is a sharded PyTree (one result per device).
+                # We aggregate them using jnp.sum, which JAX automatically compiles
+                # into a cross-device AllReduce operation.
+
+                if is_grad_fn:
+                    # `device_results` is a (value, grad) tuple of sharded arrays
+                    device_values, device_grads = device_results
+
+                    # Replace psum with jnp.sum
+                    global_value = jaxlib.numpy.sum(device_values, axis=0)
+                    global_grad = jaxlib.tree_util.tree_map(
+                        lambda g: jaxlib.numpy.sum(g, axis=0), device_grads
+                    )
+                    return global_value, global_grad
+                else:
+                    # `device_results` is just the sharded values
+                    return jaxlib.numpy.sum(device_results, axis=0)
+
+            # Compile the global function with jax.jit and specify shardings.
+            # `params` are replicated (available everywhere).
+
+            in_shardings = (self.params_sharding, self.sharding)
+
+            if is_grad_fn:
+                # Returns (value, grad), so out_sharding must be a 2-tuple.
+                # `value` is a replicated scalar -> P()
+                sharding_for_value = NamedSharding(self.mesh, P())
+                # `grad` is a replicated PyTree with the same structure as params.
+                sharding_for_grad = self.params_sharding
+                out_shardings = (sharding_for_value, sharding_for_grad)
+            else:
+                # Returns a single scalar value -> P()
+                out_shardings = NamedSharding(self.mesh, P())
+
+            compiled_fn = jaxlib.jit(
+                global_aggregated_fn,
+                # `tree` is a static argument, its value is compiled into the function.
+                static_argnums=(0,),
+                # Specify how inputs are sharded.
+                in_shardings=in_shardings,
+                # Specify how the output should be sharded.
+                out_shardings=out_shardings,
             )
             cache[cache_key] = compiled_fn  # type: ignore
         return cache[cache_key]  # type: ignore
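Note (editor): the hunk above replaces the earlier pmap/psum style with `vmap` inside a single `jax.jit` whose `in_shardings`/`out_shardings` spread the padded slice indices over a 1D device mesh, so the final sum lowers to a cross-device reduction. A standalone, hedged illustration of the same idiom with toy per-device work (not the contractor's real slice function):

import jax
import jax.numpy as jnp
import numpy as np
from jax.sharding import Mesh, NamedSharding, PartitionSpec as P

devices = np.array(jax.devices())
mesh = Mesh(devices, axis_names=("devices",))
index_sharding = NamedSharding(mesh, P("devices", None))  # rows split across devices
replicated = NamedSharding(mesh, P())                     # params / scalar outputs everywhere

def per_device(params, idx_row):
    # stand-in for sequentially contracting the slices assigned to one device;
    # padded entries (index -1, mirroring PADDING_VALUE) contribute zero
    contribs = jnp.where(idx_row >= 0, jnp.sum(params) * idx_row, 0.0)
    return jnp.sum(contribs)

def aggregated(params, batched_idx):
    per_dev = jax.vmap(per_device, in_axes=(None, 0))(params, batched_idx)
    return jnp.sum(per_dev)  # compiled into an all-reduce across the mesh

n = len(devices)
batched_idx = jax.device_put(np.arange(4 * n).reshape(n, 4), index_sharding)
fn = jax.jit(aggregated, in_shardings=(replicated, index_sharding), out_shardings=replicated)
print(fn(jnp.ones(3), batched_idx))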
@@ -744,7 +1141,7 @@ class DistributedContractor:
     def value_and_grad(
         self,
         params: Tensor,
-        aggregate: bool = True,
+        # aggregate: bool = True,
         op: Optional[Callable[[Tensor], Tensor]] = None,
         output_dtype: Optional[str] = None,
     ) -> Tuple[Tensor, Tensor]:
@@ -753,8 +1150,6 @@ class DistributedContractor:
 
         :param params: Parameters for the `nodes_fn` input
         :type params: Tensor
-        :param aggregate: Whether to aggregate (sum) the results across devices, defaults to True
-        :type aggregate: bool, optional
         :param op: Optional post-processing function for the output, defaults to None (corresponding to `backend.real`)
             op is a cache key, so dont directly pass lambda function for op
         :type op: Optional[Callable[[Tensor], Tensor]], optional
@@ -766,24 +1161,18 @@ class DistributedContractor:
             fn_getter=self._get_device_sum_vg_fn,
             op=op,
             output_dtype=output_dtype,
+            is_grad_fn=True,
         )
 
-
+        total_value, total_grad = compiled_vg_fn(
             self.tree, params, self.batched_slice_indices
         )
-
-        if aggregate:
-            total_value = backend.sum(device_values)
-            total_grad = jaxlib.tree_util.tree_map(
-                lambda x: backend.sum(x, axis=0), device_grads
-            )
-            return total_value, total_grad
-        return device_values, device_grads
+        return total_value, total_grad
 
     def value(
         self,
         params: Tensor,
-        aggregate: bool = True,
+        # aggregate: bool = True,
         op: Optional[Callable[[Tensor], Tensor]] = None,
         output_dtype: Optional[str] = None,
     ) -> Tensor:
@@ -792,8 +1181,6 @@ class DistributedContractor:
 
         :param params: Parameters for the `nodes_fn` input
         :type params: Tensor
-        :param aggregate: Whether to aggregate (sum) the results across devices, defaults to True
-        :type aggregate: bool, optional
         :param op: Optional post-processing function for the output, defaults to None (corresponding to identity)
             op is a cache key, so dont directly pass lambda function for op
         :type op: Optional[Callable[[Tensor], Tensor]], optional
@@ -805,22 +1192,17 @@ class DistributedContractor:
             fn_getter=self._get_device_sum_v_fn,
             op=op,
             output_dtype=output_dtype,
+            is_grad_fn=False,
         )
 
-
-
-        if aggregate:
-            return backend.sum(device_values)
-        return device_values
+        total_value = compiled_v_fn(self.tree, params, self.batched_slice_indices)
+        return total_value
 
     def grad(
         self,
         params: Tensor,
-        aggregate: bool = True,
         op: Optional[Callable[[Tensor], Tensor]] = None,
         output_dtype: Optional[str] = None,
     ) -> Tensor:
-        _, grad = self.value_and_grad(
-            params, aggregate=aggregate, op=op, output_dtype=output_dtype
-        )
+        _, grad = self.value_and_grad(params, op=op, output_dtype=output_dtype)
         return grad
tensorcircuit/interfaces/tensortrans.py
CHANGED

@@ -132,13 +132,17 @@ def general_args_to_backend
         target_backend = backend
     elif isinstance(target_backend, str):
         target_backend = get_backend(target_backend)
+    try:
+        t = backend.tree_map(target_backend.from_dlpack, caps)
+    except TypeError:
+        t = backend.tree_map(target_backend.from_dlpack, args)
+
     if dtype is None:
-        return
+        return t
     if isinstance(dtype, str):
         leaves, treedef = backend.tree_flatten(args)
         dtype = [dtype for _ in range(len(leaves))]
         dtype = backend.tree_unflatten(treedef, dtype)
-    t = backend.tree_map(target_backend.from_dlpack, caps)
     t = backend.tree_map(target_backend.cast, t, dtype)
     return t
 
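Note (editor): the try/except added above handles the two DLPack conventions: older APIs consume an explicit capsule, while newer ones expect the producing tensor itself (via `__dlpack__`) and may reject a raw capsule with `TypeError`. A hedged, standalone illustration of the same pattern using a torch-to-JAX hand-off (the frameworks are only an example pair):

import torch
import jax.dlpack

x = torch.arange(3.0)

try:
    cap = torch.utils.dlpack.to_dlpack(x)  # legacy route: explicit capsule
    y = jax.dlpack.from_dlpack(cap)
except TypeError:
    y = jax.dlpack.from_dlpack(x)          # newer route: pass the tensor itself

print(y)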
tensorcircuit/interfaces/torch.py
CHANGED

@@ -69,12 +69,14 @@ def torch_interface
         @staticmethod
         def forward(ctx: Any, *x: Any) -> Any:  # type: ignore
             # ctx.xdtype = [xi.dtype for xi in x]
-            ctx.
+            ctx.save_for_backward(*x)
+            x_detached = backend.tree_map(lambda s: s.detach(), x)
+            ctx.xdtype = backend.tree_map(lambda s: s.dtype, x_detached)
             # (x, )
             if len(ctx.xdtype) == 1:
                 ctx.xdtype = ctx.xdtype[0]
-            ctx.device = (backend.tree_flatten(
-            x = general_args_to_backend(
+            ctx.device = (backend.tree_flatten(x_detached)[0][0]).device
+            x = general_args_to_backend(x_detached, enable_dlpack=enable_dlpack)
             y = fun(*x)
             ctx.ydtype = backend.tree_map(lambda s: s.dtype, y)
             if len(x) == 1:

@@ -88,6 +90,9 @@ def torch_interface
 
         @staticmethod
         def backward(ctx: Any, *grad_y: Any) -> Any:
+            x = ctx.saved_tensors
+            x_detached = backend.tree_map(lambda s: s.detach(), x)
+            x_backend = general_args_to_backend(x_detached, enable_dlpack=enable_dlpack)
             if len(grad_y) == 1:
                 grad_y = grad_y[0]
             grad_y = backend.tree_map(lambda s: s.contiguous(), grad_y)

@@ -96,7 +101,12 @@ def torch_interface
             )
             # grad_y = general_args_to_numpy(grad_y)
             # grad_y = numpy_args_to_backend(grad_y, dtype=ctx.ydtype)  # backend.dtype
-
+            if len(x_backend) == 1:
+                x_backend_for_vjp = x_backend[0]
+            else:
+                x_backend_for_vjp = x_backend
+
+            _, g = vjp_fun(x_backend_for_vjp, grad_y)
             # a redundency due to current vjp API
 
             r = general_args_to_backend(
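Note (editor): for context, a hedged end-to-end sketch of the interface this file provides; the circuit, shapes, and chosen backend are illustrative, while `torch_interface` itself is the existing public API. With the change above, the backward pass recomputes the VJP from the tensors saved in forward via `ctx.save_for_backward`.

import torch
import tensorcircuit as tc

tc.set_backend("jax")  # assumption: any non-torch backend works here

def energy(params):
    c = tc.Circuit(2)
    c.rx(0, theta=params[0])
    c.ry(1, theta=params[1])
    c.cnot(0, 1)
    return tc.backend.real(c.expectation((tc.gates.z(), [1])))

energy_torch = tc.interfaces.torch_interface(energy, jit=True)

params = torch.ones(2, requires_grad=True)
loss = energy_torch(params)
loss.backward()
print(params.grad)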
{tensorcircuit_nightly-1.4.0.dev20251016.dist-info → tensorcircuit_nightly-1.4.0.dev20251128.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tensorcircuit-nightly
-Version: 1.4.0.dev20251016
+Version: 1.4.0.dev20251128
 Summary: High performance unified quantum computing framework for the NISQ era
 Author-email: TensorCircuit Authors <znfesnpbh@gmail.com>
 License-Expression: Apache-2.0

@@ -70,7 +70,7 @@ TensorCircuit-NG is the only actively maintained official version and a [fully c
 
 Please begin with [Quick Start](/docs/source/quickstart.rst) in the [full documentation](https://tensorcircuit-ng.readthedocs.io/).
 
-For more information on software usage, sota algorithm implementation and engineer paradigm demonstration, please refer to
+For more information on software usage, sota algorithm implementation and engineer paradigm demonstration, please refer to 100+ [example scripts](/examples) and 40+ [tutorial notebooks](https://tensorcircuit-ng.readthedocs.io/en/latest/#tutorials). API docstrings and test cases in [tests](/tests) are also informative. One can also refer to AI-native docs for tensorcircuit-ng: [Devin Deepwiki](https://deepwiki.com/tensorcircuit/tensorcircuit-ng) and [Context7 MCP](https://context7.com/tensorcircuit/tensorcircuit-ng).
 
 For beginners, please refer to [quantum computing lectures with TC-NG](https://github.com/sxzgroup/qc_lecture) to learn both quantum computing basics and representative usage of TensorCircuit-NG.
 

@@ -347,6 +347,7 @@ TensorCircuit-NG is open source, released under the Apache License, Version 2.0.
 <td align="center" valign="top" width="16.66%"><a href="https://github.com/Charlespkuer"><img src="https://avatars.githubusercontent.com/u/112697147?v=4?s=100" width="100px;" alt="Huang"/><br /><sub><b>Huang</b></sub></a><br /><a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=Charlespkuer" title="Code">💻</a> <a href="#example-Charlespkuer" title="Examples">💡</a> <a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=Charlespkuer" title="Tests">⚠️</a></td>
 <td align="center" valign="top" width="16.66%"><a href="https://github.com/Huang-Xu-Yang"><img src="https://avatars.githubusercontent.com/u/227286661?v=4?s=100" width="100px;" alt="Huang-Xu-Yang"/><br /><sub><b>Huang-Xu-Yang</b></sub></a><br /><a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=Huang-Xu-Yang" title="Code">💻</a> <a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=Huang-Xu-Yang" title="Tests">⚠️</a></td>
 <td align="center" valign="top" width="16.66%"><a href="https://github.com/WeiguoMa"><img src="https://avatars.githubusercontent.com/u/108172530?v=4?s=100" width="100px;" alt="Weiguo_M"/><br /><sub><b>Weiguo_M</b></sub></a><br /><a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=WeiguoMa" title="Code">💻</a> <a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=WeiguoMa" title="Tests">⚠️</a> <a href="#example-WeiguoMa" title="Examples">💡</a> <a href="#tutorial-WeiguoMa" title="Tutorials">✅</a></td>
+<td align="center" valign="top" width="16.66%"><a href="https://github.com/QuiXamii"><img src="https://avatars.githubusercontent.com/u/136054857?v=4?s=100" width="100px;" alt="Qixiang WANG"/><br /><sub><b>Qixiang WANG</b></sub></a><br /><a href="#example-QuiXamii" title="Examples">💡</a></td>
 </tr>
 </tbody>
 </table>
{tensorcircuit_nightly-1.4.0.dev20251016.dist-info → tensorcircuit_nightly-1.4.0.dev20251128.dist-info}/RECORD
CHANGED

@@ -1,4 +1,4 @@
-tensorcircuit/__init__.py,sha256=
+tensorcircuit/__init__.py,sha256=FDfcQl270QZ832T4jvsAqjx4s_1pGhl1H8m6FOx212g,2160
 tensorcircuit/about.py,sha256=DazTswU2nAwOmASTaDII3L04PVtaQ7oiWPty5YMI3Wk,5267
 tensorcircuit/abstractcircuit.py,sha256=uDRgaDeH_Ym-6_ZEOZwvxHIDycVLHkGZv4zfaIgaEnc,44235
 tensorcircuit/analogcircuit.py,sha256=4BzIC631MZ2m05CXuk2T6HQ8RTmHBE6NszaOLuxmlEc,15639

@@ -8,7 +8,7 @@ tensorcircuit/channels.py,sha256=CFQxWI-JmkIxexslCBdjp_RSxUbHs6eAJv4LvlXXXCY,286
 tensorcircuit/circuit.py,sha256=lETz1SvUh_60ZMFtvSPMWOF6zWMMyQU4TyB_VwhkVHM,40027
 tensorcircuit/cons.py,sha256=V0wjevtDkESCIWMJaysgPVorQlPAIT0vtRWvIZkEWcE,33065
 tensorcircuit/densitymatrix.py,sha256=C8Q2fHXZ78S9ZaPqCIKl6_v_sILqbBgqBOUYUQ1QmFI,15020
-tensorcircuit/experimental.py,sha256=
+tensorcircuit/experimental.py,sha256=ao7O40vaEzgS7Mbrg2AOZ3rPTdrSW7EbAWKjwqF6MPo,44975
 tensorcircuit/fgs.py,sha256=J1TjAiiqZk9KO1xYX_V0xsgKlYZaUQ7Enm4s5zkRM50,49514
 tensorcircuit/gates.py,sha256=9x1VTEpZWz-FoWVM_YznoU1dbFzXnfXIEJQQVec-2Ko,30504
 tensorcircuit/keras.py,sha256=nMSuu9uZy7haWwuen1g_6GFVwYIirtX9IvejDyoH33M,10129

@@ -71,8 +71,8 @@ tensorcircuit/interfaces/jax.py,sha256=q_nay20gcrPRyY2itvcOtkCjqtvcC4qotbvrgm2a3
 tensorcircuit/interfaces/numpy.py,sha256=T7h64dG9e5xDG0KVOy9O8TXyrt5RWRnTWN9iXf3aGyY,1439
 tensorcircuit/interfaces/scipy.py,sha256=_P2IeqvJiO7cdjTzNCIAFm8Y56Wd3j3jGmWUeeQ1Fw8,3402
 tensorcircuit/interfaces/tensorflow.py,sha256=U4hZjm-yWxOJ5tqmffk8-tNvOkAltYBJ8Z6jYwOtTaM,3355
-tensorcircuit/interfaces/tensortrans.py,sha256=
-tensorcircuit/interfaces/torch.py,sha256=
+tensorcircuit/interfaces/tensortrans.py,sha256=YHTKIINjXE085fqO_AfUJGE-t3OThUH8csk5PFK7Dig,10414
+tensorcircuit/interfaces/torch.py,sha256=J04-bguSvJOiV-uhNVzMH28Pl-pDYPnTOv7wNm-QRZM,5633
 tensorcircuit/results/__init__.py,sha256=3kkIvmjLYQd5ff-emY8l82rpv9mwMZdM2kTLZ9sNfA4,89
 tensorcircuit/results/counts.py,sha256=gJ9x2D09wSZ8bwLB5ZR9lyx-bg6AAoz6JDr9cDAb83w,7267
 tensorcircuit/results/readout_mitigation.py,sha256=dVpNvtFZe7n_fDVczKcqYPEepu3fV2qK3u-SfOpTf68,31746

@@ -89,8 +89,8 @@ tensorcircuit/templates/graphs.py,sha256=cPYrxjoem0xZ-Is9dZKAvEzWZL_FejfIRiCEOTA
 tensorcircuit/templates/hamiltonians.py,sha256=Guvqqi-V47w8xeZDmca4_mU4mW9V4c3AplsBOrRtxFo,6308
 tensorcircuit/templates/lattice.py,sha256=IvFyNgsFMfj82g-tpJraI3lMbI-EIZ0Cghq9v7tZ6Wg,72851
 tensorcircuit/templates/measurements.py,sha256=pzc5Aa9S416Ilg4aOY77Z6ZhUlYcXnAkQNQFTuHjFFs,10943
-tensorcircuit_nightly-1.4.0.
-tensorcircuit_nightly-1.4.0.
-tensorcircuit_nightly-1.4.0.
-tensorcircuit_nightly-1.4.0.
-tensorcircuit_nightly-1.4.0.
+tensorcircuit_nightly-1.4.0.dev20251128.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+tensorcircuit_nightly-1.4.0.dev20251128.dist-info/METADATA,sha256=XvzlNlaR6TyBER37rkHdFO_FCUIAbxTKlr_ZcvQbVZ0,38586
+tensorcircuit_nightly-1.4.0.dev20251128.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+tensorcircuit_nightly-1.4.0.dev20251128.dist-info/top_level.txt,sha256=9dcuK5488dWpVauYz8cdvx743z_La1h7zIQCsEEgu7o,14
+tensorcircuit_nightly-1.4.0.dev20251128.dist-info/RECORD,,