tensorcircuit-nightly 1.4.0.dev20251107__py3-none-any.whl → 1.4.0.dev20251120__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tensorcircuit/__init__.py +1 -1
- tensorcircuit/experimental.py +305 -51
- {tensorcircuit_nightly-1.4.0.dev20251107.dist-info → tensorcircuit_nightly-1.4.0.dev20251120.dist-info}/METADATA +3 -2
- {tensorcircuit_nightly-1.4.0.dev20251107.dist-info → tensorcircuit_nightly-1.4.0.dev20251120.dist-info}/RECORD +7 -7
- {tensorcircuit_nightly-1.4.0.dev20251107.dist-info → tensorcircuit_nightly-1.4.0.dev20251120.dist-info}/WHEEL +0 -0
- {tensorcircuit_nightly-1.4.0.dev20251107.dist-info → tensorcircuit_nightly-1.4.0.dev20251120.dist-info}/licenses/LICENSE +0 -0
- {tensorcircuit_nightly-1.4.0.dev20251107.dist-info → tensorcircuit_nightly-1.4.0.dev20251120.dist-info}/top_level.txt +0 -0
tensorcircuit/__init__.py
CHANGED
tensorcircuit/experimental.py
CHANGED
@@ -7,6 +7,10 @@ Experimental features
 from functools import partial
 import logging
 from typing import Any, Callable, Dict, Optional, Tuple, List, Sequence, Union
+import pickle
+import uuid
+import time
+import os

 import numpy as np

@@ -494,7 +498,49 @@ NamedSharding: Any
 P: Any


-def broadcast_py_object(obj: Any) -> Any:
+def broadcast_py_object(obj: Any, shared_dir: Optional[str] = None) -> Any:
+    """
+    Broadcast a picklable Python object from process 0 to all other processes,
+    with a fallback from gRPC to a file-system-based approach.
+
+    This function first attempts the gRPC-based broadcast. If that fails due to
+    pickling issues, it falls back to a file-system-based approach that is more robust.
+
+    :param obj: The Python object to broadcast. It must be picklable.
+        This object should exist on process 0 and can be None on others.
+    :type obj: Any
+    :param shared_dir: Directory path for the shared-file-system broadcast fallback.
+        If None, uses the current directory. Only used in fallback mode.
+    :type shared_dir: Optional[str], optional
+    :return: The broadcast object, now present on all processes.
+    :rtype: Any
+    """
+    import jax
+    from jax.experimental import multihost_utils
+
+    try:
+        result = broadcast_py_object_jax(obj)
+        return result
+
+    except pickle.UnpicklingError as e:
+        # This block is executed if any process fails during the gRPC attempt.
+
+        multihost_utils.sync_global_devices("grpc_broadcast_failed_fallback_sync")
+
+        if jax.process_index() == 0:
+            border = "=" * 80
+            logger.warning(
+                "\n%s\nJAX gRPC broadcast failed with error: %s\n"
+                "--> Falling back to robust Shared File System broadcast method.\n%s",
+                border,
+                e,
+                border,
+            )
+
+        return broadcast_py_object_fs(obj, shared_dir)
+
+
+def broadcast_py_object_jax(obj: Any) -> Any:
     """
     Broadcast a picklable Python object from process 0 to all other processes
     within the jax distribution system.
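A minimal usage sketch of the new fallback-aware wrapper, assuming a multi-host JAX job already initialized via jax.distributed.initialize(); the payload contents and the shared directory path below are hypothetical:

import jax

from tensorcircuit.experimental import broadcast_py_object

payload = None
if jax.process_index() == 0:
    # Only the coordinator needs to build the (possibly large) object.
    payload = {"path": [(0, 1), (0, 1)], "meta": "anything picklable"}

# All processes make the same call; shared_dir is consulted only if the
# gRPC attempt raises pickle.UnpicklingError and the fs fallback kicks in.
payload = broadcast_py_object(payload, shared_dir="/mnt/shared/tc_broadcast")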
@@ -517,6 +563,10 @@ def broadcast_py_object(obj: Any) -> Any:
         if obj is None:
             raise ValueError("Object to broadcast from process 0 cannot be None.")
         data = pickle.dumps(obj)
+        logger.info(
+            f"--- Size of object to be broadcast: {len(data) / 1024**2:.3f} MB ---"
+        )
+
     else:
         data = b""

@@ -524,29 +574,150 @@
     # We send a single-element int32 array.
     length = np.array([len(data)], dtype=np.int32)
     length = multihost_utils.broadcast_one_to_all(length)
+
     length = int(length[0])  # type: ignore

     # Step 2: Broadcast the actual data.
     # Convert byte string to a uint8 array for broadcasting.
-
+    send_arr_uint8 = np.frombuffer(data, dtype=np.uint8)
+    padded_length = (length + 3) // 4 * 4
+    if send_arr_uint8.size < padded_length:
+        send_arr_uint8 = np.pad(  # type: ignore
+            send_arr_uint8, (0, padded_length - send_arr_uint8.size), mode="constant"
+        )
+    send_arr_int32 = send_arr_uint8.astype(np.int32)
+    # send_arr_int32 = jaxlib.numpy.array(send_arr_int32, dtype=np.int32)
+    send_arr_int32 = jaxlib.device_put(send_arr_int32)
+
+    jaxlib.experimental.multihost_utils.sync_global_devices("bulk_before")

-
-    # to be smaller than `length`. More importantly, other processes create an
-    # empty buffer which must be padded to the correct receiving size.
-    if send_arr.size < length:
-        send_arr = np.pad(send_arr, (0, length - send_arr.size), mode="constant")  # type: ignore
+    received_arr = multihost_utils.broadcast_one_to_all(send_arr_int32)

-
-
+    received_arr = np.array(received_arr)
+    received_arr_uint8 = received_arr.astype(np.uint8)

     # Step 3: Reconstruct the object from the received bytes.
     # Convert the NumPy array back to bytes, truncate any padding, and unpickle.
-    received_data =
-    if jaxlib.process_index() == 0:
-
+    received_data = received_arr_uint8[:length].tobytes()
+    # if jaxlib.process_index() == 0:
+    #     logger.info(f"Broadcasted object {obj}")
     return pickle.loads(received_data)


+def broadcast_py_object_fs(
+    obj: Any, shared_dir: Optional[str] = None, timeout_seconds: int = 300
+) -> Any:
+    """
+    Broadcast a picklable Python object from process 0 to all other processes
+    using a shared file system approach.
+
+    This is a fallback method for when the gRPC-based broadcast fails. It uses UUID-based
+    file communication to share objects between processes through a shared file system.
+
+    :param obj: The Python object to broadcast. Must be picklable.
+        Should exist on process 0, can be None on others.
+    :type obj: Any
+    :param shared_dir: Directory path for shared file system communication.
+        If None, uses the current directory.
+    :type shared_dir: Optional[str], optional
+    :param timeout_seconds: Maximum time to wait for file operations before timing out.
+        Defaults to 300 seconds.
+    :type timeout_seconds: int, optional
+    :return: The broadcast object, now present on all processes.
+    :rtype: Any
+    """
+    # to avoid very subtle bugs for broadcast tree_data on A800 clusters
+    import jax
+    from jax.experimental import multihost_utils
+
+    if shared_dir is None:
+        shared_dir = "."
+    if jax.process_index() == 0:
+        os.makedirs(shared_dir, exist_ok=True)
+
+    id_comm_path = os.path.join(shared_dir, f".broadcast_temp_12318")
+    transfer_id = ""
+
+    if jax.process_index() == 0:
+        transfer_id = str(uuid.uuid4())
+        # print(f"[Process 0] Generated unique transfer ID: {transfer_id}", flush=True)
+        with open(id_comm_path, "w") as f:
+            f.write(transfer_id)
+
+    multihost_utils.sync_global_devices("fs_broadcast_id_written")
+
+    if jax.process_index() != 0:
+        start_time = time.time()
+        while not os.path.exists(id_comm_path):
+            time.sleep(0.1)
+            if time.time() - start_time > timeout_seconds:
+                raise TimeoutError(
+                    f"Process {jax.process_index()} timed out waiting for ID file: {id_comm_path}"
+                )
+        with open(id_comm_path, "r") as f:
+            transfer_id = f.read()
+
+    multihost_utils.sync_global_devices("fs_broadcast_id_read")
+    if jax.process_index() == 0:
+        try:
+            os.remove(id_comm_path)
+        except OSError:
+            pass  # ignore the error if the file was already cleaned up quickly by another process
+
+    # Define the data file and flag file paths used for this transfer
+    data_path = os.path.join(shared_dir, f"{transfer_id}.data")
+    done_path = os.path.join(shared_dir, f"{transfer_id}.done")
+
+    result_obj = None
+
+    if jax.process_index() == 0:
+        if obj is None:
+            raise ValueError("None cannot be broadcasted.")
+
+        # print(f"[Process 0] Pickling object...", flush=True)
+        pickled_data = pickle.dumps(obj)
+        logger.info(
+            f"[Process 0] Writing {len(pickled_data) / 1024**2:.3f} MB to {data_path}"
+        )
+        with open(data_path, "wb") as f:
+            f.write(pickled_data)
+
+        with open(done_path, "w") as f:
+            pass
+        logger.info(f"[Process 0] Write complete.")
+        result_obj = obj
+    else:
+        # print(f"[Process {jax.process_index()}] Waiting for done file: {done_path}", flush=True)
+        start_time = time.time()
+        while not os.path.exists(done_path):
+            time.sleep(0.1)
+            if time.time() - start_time > timeout_seconds:
+                raise TimeoutError(
+                    f"Process {jax.process_index()} timed out waiting for done file: {done_path}"
+                )
+
+        # print(f"[Process {jax.process_index()}] Done file found. Reading data from {data_path}", flush=True)
+        with open(data_path, "rb") as f:
+            pickled_data = f.read()
+
+        result_obj = pickle.loads(pickled_data)
+        logger.info(f"[Process {jax.process_index()}] Object successfully loaded.")
+
+    multihost_utils.sync_global_devices("fs_broadcast_read_complete")
+
+    if jax.process_index() == 0:
+        try:
+            os.remove(data_path)
+            os.remove(done_path)
+            # print(f"[Process 0] Cleaned up temporary files for transfer {transfer_id}.", flush=True)
+        except OSError as e:
+            logger.info(
+                f"[Process 0]: Failed to clean up temporary files: {e}",
+            )
+
+    return result_obj
+
+
 class DistributedContractor:
     """
     A distributed tensor network contractor that parallelizes computations across multiple devices.
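The uint8-to-int32 repacking above is the subtle part of broadcast_py_object_jax, so here is a single-process numpy sketch of the round trip, with multihost_utils.broadcast_one_to_all replaced by an identity step; the sample object is arbitrary:

import pickle

import numpy as np

obj = {"tree": list(range(7))}
data = pickle.dumps(obj)
length = len(data)

# Pad the pickled bytes to a multiple of 4, then widen uint8 -> int32,
# mirroring the send side of broadcast_py_object_jax.
send_arr_uint8 = np.frombuffer(data, dtype=np.uint8)
padded_length = (length + 3) // 4 * 4
if send_arr_uint8.size < padded_length:
    send_arr_uint8 = np.pad(
        send_arr_uint8, (0, padded_length - send_arr_uint8.size), mode="constant"
    )
send_arr_int32 = send_arr_uint8.astype(np.int32)

# In the real code this array goes through multihost_utils.broadcast_one_to_all;
# here the "received" array is simply the sent one.
received_arr_uint8 = np.array(send_arr_int32).astype(np.uint8)

# Narrow back, truncate the padding, and unpickle.
assert pickle.loads(received_arr_uint8[:length].tobytes()) == obj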
@@ -582,6 +753,7 @@ class DistributedContractor:
         cotengra_options: Optional[Dict[str, Any]] = None,
         devices: Optional[List[Any]] = None,  # backward compatibility
         mesh: Optional[Any] = None,
+        tree_data: Optional[Dict[str, Any]] = None,
     ) -> None:
         global jaxlib
         global ctg
@@ -591,6 +763,7 @@ class DistributedContractor:

         logger.info("Initializing DistributedContractor...")
         import cotengra as ctg
+        from cotengra import ContractionTree
         import jax as jaxlib
         from jax.sharding import Mesh, NamedSharding, PartitionSpec as P

@@ -618,51 +791,49 @@
         ] = {}

         logger.info("Running cotengra pathfinder... (This may take a while)")
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        # Process 0 sends its computed `tree_object`.
-        # Other processes send `None`, but receive the object from process 0.
-        logger.info(
-            f"Process {jaxlib.process_index()}: Synchronizing contraction path..."
-        )
-        if jaxlib.process_count() > 1:
-            self.tree = broadcast_py_object(tree_object)
+        if tree_data is None:
+            if params is None:
+                raise ValueError("Please provide specific circuit parameters array.")
+            if jaxlib.process_index() == 0:
+                logger.info("Process 0: Running cotengra pathfinder...")
+                tree_data = self._get_tree_data(
+                    self.nodes_fn, self._params_template, cotengra_options  # type: ignore
+                )
+
+            # Step 2: Use the robust helper function to broadcast the tree object.
+            # Process 0 sends its computed `tree_object`.
+            # Other processes send `None`, but receive the object from process 0.
+
+            if jaxlib.process_count() > 1:
+                # self.tree = broadcast_py_object(tree_object)
+                jaxlib.experimental.multihost_utils.sync_global_devices("tree_before")
+                logger.info(
+                    f"Process {jaxlib.process_index()}: Synchronizing contraction path..."
+                )
+                tree_data = broadcast_py_object(tree_data)
+                jaxlib.experimental.multihost_utils.sync_global_devices("tree_after")
         else:
-
+            logger.info("Using pre-computed contraction path.")
+        if tree_data is None:
+            raise ValueError("Contraction path data is missing.")
+
+        self.tree = ContractionTree.from_path(
+            inputs=tree_data["inputs"],
+            output=tree_data["output"],
+            size_dict=tree_data["size_dict"],
+            path=tree_data["path"],
+        )
+
+        # Restore slicing information
+        for ind, _ in tree_data["sliced_inds"].items():
+            self.tree.remove_ind_(ind)
+
         logger.info(
             f"Process {jaxlib.process_index()}: Contraction path successfully synchronized."
         )
         actual_num_slices = self.tree.nslices

-
-        stats = self.tree.contract_stats()
-        print(f"Path found with {actual_num_slices} slices.")
-        print(
-            f"Arithmetic Intensity (higher is better): {self.tree.arithmetic_intensity():.2f}"
-        )
-        print("flops (TFlops):", stats["flops"] / 2**40 / self.num_devices)
-        print("write (GB):", stats["write"] / 2**27 / actual_num_slices)
-        print("size (GB):", stats["size"] / 2**27)
-        print("-----------------------------\n")
+        self._report_tree_info()

         slices_per_device = int(np.ceil(actual_num_slices / self.num_devices))
         padded_size = slices_per_device * self.num_devices
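The tree_data dictionary deliberately contains only plain Python data (inputs, output, size_dict, path, sliced_inds), which is what makes it safe to pickle and broadcast. A tiny standalone check of the reconstruction step, using a hypothetical three-tensor network:

import cotengra as ctg

inputs = [("a", "b"), ("b", "c"), ("c", "d")]  # index labels of three tensors
output = ("a", "d")
size_dict = {"a": 2, "b": 2, "c": 2, "d": 2}
path = [(0, 1), (0, 1)]  # pairwise contraction order

tree = ctg.ContractionTree.from_path(
    inputs=inputs, output=output, size_dict=size_dict, path=path
)
# Slicing is restored separately, exactly as in __init__ above:
tree.remove_ind_("b")
print(tree.nslices)  # 2, since index "b" has dimension 2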
@@ -692,6 +863,89 @@ class DistributedContractor:

         logger.info("Initialization complete.")

+    def _report_tree_info(self) -> None:
+        print("\n--- Contraction Path Info ---")
+        actual_num_slices = self.tree.nslices
+        stats = self.tree.contract_stats()
+        print(f"Path found with {actual_num_slices} slices.")
+        print(
+            f"Arithmetic Intensity (higher is better): {self.tree.arithmetic_intensity():.2f}"
+        )
+        print("flops (TFlops):", stats["flops"] / 2**40 / self.num_devices)
+        print("write (GB):", stats["write"] / 2**27 / actual_num_slices)
+        print("size (GB):", stats["size"] / 2**27)
+        print("-----------------------------\n")
+
+    @staticmethod
+    def _get_tree_data(
+        nodes_fn: Callable[[Tensor], List[Gate]],
+        params: Tensor,
+        cotengra_options: Optional[Dict[str, Any]] = None,
+    ) -> Dict[str, Any]:
+        global ctg
+
+        import cotengra as ctg
+
+        local_cotengra_options = (cotengra_options or {}).copy()
+
+        nodes = nodes_fn(params)
+        tn_info, _ = get_tn_info(nodes)
+        default_cotengra_options = {
+            "slicing_reconf_opts": {"target_size": 2**28},
+            "max_repeats": 128,
+            "minimize": "write",
+            "parallel": "auto",
+            "progbar": True,
+        }
+        default_cotengra_options.update(local_cotengra_options)
+
+        opt = ctg.ReusableHyperOptimizer(**default_cotengra_options)
+        tree_object = opt.search(*tn_info)
+        tree_data = {
+            "inputs": tree_object.inputs,
+            "output": tree_object.output,
+            "size_dict": tree_object.size_dict,
+            "path": tree_object.get_path(),
+            "sliced_inds": tree_object.sliced_inds,
+        }
+        return tree_data
+
+    @staticmethod
+    def find_path(
+        nodes_fn: Callable[[Tensor], Tensor],
+        params: Tensor,
+        cotengra_options: Optional[Dict[str, Any]] = None,
+        filepath: Optional[str] = None,
+    ) -> None:
+        tree_data = DistributedContractor._get_tree_data(
+            nodes_fn, params, cotengra_options
+        )
+        if filepath is not None:
+            with open(filepath, "wb") as f:
+                pickle.dump(tree_data, f)
+            logger.info(f"Contraction path data successfully saved to '{filepath}'.")
+
+    @classmethod
+    def from_path(
+        cls,
+        filepath: str,
+        nodes_fn: Callable[[Tensor], List[Gate]],
+        devices: Optional[List[Any]] = None,  # backward compatibility
+        mesh: Optional[Any] = None,
+    ) -> "DistributedContractor":
+        with open(filepath, "rb") as f:
+            tree_data = pickle.load(f)
+
+        # Each process loads the file independently. No broadcast is needed.
+        # We pass the loaded `tree_data` directly to __init__ to trigger the second workflow.
+        return cls(
+            nodes_fn=nodes_fn,
+            params=None,
+            mesh=mesh,
+            devices=devices,
+            tree_data=tree_data,
+        )
+
     def _get_single_slice_contraction_fn(
         self, op: Optional[Callable[[Tensor], Tensor]] = None
     ) -> Callable[[Any, Tensor, int], Tensor]:
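Together, find_path and from_path split pathfinding from contraction: the path can be searched once offline and then loaded by every process, skipping both the pathfinder and the cross-process broadcast. A sketch of that two-phase workflow; my_nodes_fn, my_params, and my_mesh are placeholders for user-defined objects:

from tensorcircuit.experimental import DistributedContractor

# Phase 1 (can run on a single host): search once, save the plain-data path.
DistributedContractor.find_path(
    nodes_fn=my_nodes_fn,  # placeholder: params -> tensor network nodes
    params=my_params,      # placeholder: circuit parameter array
    cotengra_options={"max_repeats": 32},
    filepath="contraction_path.pkl",
)

# Phase 2 (every process of the distributed job): load the file independently.
dc = DistributedContractor.from_path(
    "contraction_path.pkl",
    nodes_fn=my_nodes_fn,
    mesh=my_mesh,          # placeholder: a jax.sharding.Mesh
)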
{tensorcircuit_nightly-1.4.0.dev20251107.dist-info → tensorcircuit_nightly-1.4.0.dev20251120.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tensorcircuit-nightly
-Version: 1.4.0.dev20251107
+Version: 1.4.0.dev20251120
 Summary: High performance unified quantum computing framework for the NISQ era
 Author-email: TensorCircuit Authors <znfesnpbh@gmail.com>
 License-Expression: Apache-2.0
@@ -70,7 +70,7 @@ TensorCircuit-NG is the only actively maintained official version and a [fully c

 Please begin with [Quick Start](/docs/source/quickstart.rst) in the [full documentation](https://tensorcircuit-ng.readthedocs.io/).

-For more information on software usage, sota algorithm implementation and engineer paradigm demonstration, please refer to
+For more information on software usage, sota algorithm implementation and engineer paradigm demonstration, please refer to 100+ [example scripts](/examples) and 40+ [tutorial notebooks](https://tensorcircuit-ng.readthedocs.io/en/latest/#tutorials). API docstrings and test cases in [tests](/tests) are also informative. One can also refer to AI-native docs for tensorcircuit-ng: [Devin Deepwiki](https://deepwiki.com/tensorcircuit/tensorcircuit-ng) and [Context7 MCP](https://context7.com/tensorcircuit/tensorcircuit-ng).

 For beginners, please refer to [quantum computing lectures with TC-NG](https://github.com/sxzgroup/qc_lecture) to learn both quantum computing basics and representative usage of TensorCircuit-NG.

@@ -347,6 +347,7 @@ TensorCircuit-NG is open source, released under the Apache License, Version 2.0.
 <td align="center" valign="top" width="16.66%"><a href="https://github.com/Charlespkuer"><img src="https://avatars.githubusercontent.com/u/112697147?v=4?s=100" width="100px;" alt="Huang"/><br /><sub><b>Huang</b></sub></a><br /><a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=Charlespkuer" title="Code">💻</a> <a href="#example-Charlespkuer" title="Examples">💡</a> <a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=Charlespkuer" title="Tests">⚠️</a></td>
 <td align="center" valign="top" width="16.66%"><a href="https://github.com/Huang-Xu-Yang"><img src="https://avatars.githubusercontent.com/u/227286661?v=4?s=100" width="100px;" alt="Huang-Xu-Yang"/><br /><sub><b>Huang-Xu-Yang</b></sub></a><br /><a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=Huang-Xu-Yang" title="Code">💻</a> <a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=Huang-Xu-Yang" title="Tests">⚠️</a></td>
 <td align="center" valign="top" width="16.66%"><a href="https://github.com/WeiguoMa"><img src="https://avatars.githubusercontent.com/u/108172530?v=4?s=100" width="100px;" alt="Weiguo_M"/><br /><sub><b>Weiguo_M</b></sub></a><br /><a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=WeiguoMa" title="Code">💻</a> <a href="https://github.com/tensorcircuit/tensorcircuit-ng/commits?author=WeiguoMa" title="Tests">⚠️</a> <a href="#example-WeiguoMa" title="Examples">💡</a> <a href="#tutorial-WeiguoMa" title="Tutorials">✅</a></td>
+<td align="center" valign="top" width="16.66%"><a href="https://github.com/QuiXamii"><img src="https://avatars.githubusercontent.com/u/136054857?v=4?s=100" width="100px;" alt="Qixiang WANG"/><br /><sub><b>Qixiang WANG</b></sub></a><br /><a href="#example-QuiXamii" title="Examples">💡</a></td>
 </tr>
 </tbody>
 </table>
{tensorcircuit_nightly-1.4.0.dev20251107.dist-info → tensorcircuit_nightly-1.4.0.dev20251120.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-tensorcircuit/__init__.py,sha256=
+tensorcircuit/__init__.py,sha256=lRHI2qX_n2Y56lS6c7g5FyDaLsVag4BWKz186z5ZprY,2160
 tensorcircuit/about.py,sha256=DazTswU2nAwOmASTaDII3L04PVtaQ7oiWPty5YMI3Wk,5267
 tensorcircuit/abstractcircuit.py,sha256=uDRgaDeH_Ym-6_ZEOZwvxHIDycVLHkGZv4zfaIgaEnc,44235
 tensorcircuit/analogcircuit.py,sha256=4BzIC631MZ2m05CXuk2T6HQ8RTmHBE6NszaOLuxmlEc,15639
@@ -8,7 +8,7 @@ tensorcircuit/channels.py,sha256=CFQxWI-JmkIxexslCBdjp_RSxUbHs6eAJv4LvlXXXCY,286
 tensorcircuit/circuit.py,sha256=lETz1SvUh_60ZMFtvSPMWOF6zWMMyQU4TyB_VwhkVHM,40027
 tensorcircuit/cons.py,sha256=V0wjevtDkESCIWMJaysgPVorQlPAIT0vtRWvIZkEWcE,33065
 tensorcircuit/densitymatrix.py,sha256=C8Q2fHXZ78S9ZaPqCIKl6_v_sILqbBgqBOUYUQ1QmFI,15020
-tensorcircuit/experimental.py,sha256=
+tensorcircuit/experimental.py,sha256=sX5-LIGSdEAFGy_-8E_-ugADo0mJ_1weuEWYD1FwuIo,44946
 tensorcircuit/fgs.py,sha256=J1TjAiiqZk9KO1xYX_V0xsgKlYZaUQ7Enm4s5zkRM50,49514
 tensorcircuit/gates.py,sha256=9x1VTEpZWz-FoWVM_YznoU1dbFzXnfXIEJQQVec-2Ko,30504
 tensorcircuit/keras.py,sha256=nMSuu9uZy7haWwuen1g_6GFVwYIirtX9IvejDyoH33M,10129
@@ -89,8 +89,8 @@ tensorcircuit/templates/graphs.py,sha256=cPYrxjoem0xZ-Is9dZKAvEzWZL_FejfIRiCEOTA
 tensorcircuit/templates/hamiltonians.py,sha256=Guvqqi-V47w8xeZDmca4_mU4mW9V4c3AplsBOrRtxFo,6308
 tensorcircuit/templates/lattice.py,sha256=IvFyNgsFMfj82g-tpJraI3lMbI-EIZ0Cghq9v7tZ6Wg,72851
 tensorcircuit/templates/measurements.py,sha256=pzc5Aa9S416Ilg4aOY77Z6ZhUlYcXnAkQNQFTuHjFFs,10943
-tensorcircuit_nightly-1.4.0.
-tensorcircuit_nightly-1.4.0.
-tensorcircuit_nightly-1.4.0.
-tensorcircuit_nightly-1.4.0.
-tensorcircuit_nightly-1.4.0.
+tensorcircuit_nightly-1.4.0.dev20251120.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+tensorcircuit_nightly-1.4.0.dev20251120.dist-info/METADATA,sha256=AvGV3ichbVoH7yzmbZ1VxxixZ2dqwUm5jYZkt-6StTM,38586
+tensorcircuit_nightly-1.4.0.dev20251120.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+tensorcircuit_nightly-1.4.0.dev20251120.dist-info/top_level.txt,sha256=9dcuK5488dWpVauYz8cdvx743z_La1h7zIQCsEEgu7o,14
+tensorcircuit_nightly-1.4.0.dev20251120.dist-info/RECORD,,