orca-sdk 0.1.10__py3-none-any.whl → 0.1.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orca_sdk/_utils/analysis_ui.py +4 -1
- orca_sdk/_utils/data_parsing.py +11 -3
- orca_sdk/_utils/data_parsing_disk_test.py +91 -0
- orca_sdk/_utils/{data_parsing_test.py → data_parsing_torch_test.py} +58 -143
- orca_sdk/_utils/prediction_result_ui.py +4 -1
- orca_sdk/_utils/value_parser.py +44 -17
- orca_sdk/_utils/value_parser_test.py +6 -5
- orca_sdk/async_client.py +78 -18
- orca_sdk/classification_model.py +1 -1
- orca_sdk/classification_model_test.py +69 -22
- orca_sdk/client.py +78 -16
- orca_sdk/conftest.py +87 -7
- orca_sdk/credentials_test.py +5 -8
- orca_sdk/datasource.py +13 -8
- orca_sdk/datasource_test.py +8 -2
- orca_sdk/embedding_model.py +7 -2
- orca_sdk/embedding_model_test.py +29 -0
- orca_sdk/memoryset.py +325 -107
- orca_sdk/memoryset_test.py +87 -178
- orca_sdk/regression_model.py +1 -1
- orca_sdk/regression_model_test.py +44 -0
- orca_sdk/telemetry.py +1 -1
- {orca_sdk-0.1.10.dist-info → orca_sdk-0.1.11.dist-info}/METADATA +3 -5
- orca_sdk-0.1.11.dist-info/RECORD +42 -0
- orca_sdk-0.1.10.dist-info/RECORD +0 -41
- {orca_sdk-0.1.10.dist-info → orca_sdk-0.1.11.dist-info}/WHEEL +0 -0
orca_sdk/memoryset_test.py
CHANGED
|
@@ -59,7 +59,6 @@ def test_create_empty_labeled_memoryset():
|
|
|
59
59
|
|
|
60
60
|
# inserting should work on an empty memoryset
|
|
61
61
|
memoryset.insert(dict(value="i love soup", label=1, key="k1"))
|
|
62
|
-
memoryset.refresh()
|
|
63
62
|
assert memoryset.length == 1
|
|
64
63
|
m = memoryset[0]
|
|
65
64
|
assert isinstance(m, LabeledMemory)
|
|
@@ -104,7 +103,6 @@ def test_create_empty_scored_memoryset():
|
|
|
104
103
|
|
|
105
104
|
# inserting should work on an empty memoryset
|
|
106
105
|
memoryset.insert(dict(value="i love soup", score=0.25, key="k1", label=0))
|
|
107
|
-
memoryset.refresh()
|
|
108
106
|
assert memoryset.length == 1
|
|
109
107
|
m = memoryset[0]
|
|
110
108
|
assert isinstance(m, ScoredMemory)
|
|
@@ -563,155 +561,6 @@ def test_query_memoryset_with_feedback_metrics_sort(classification_model: Classi
|
|
|
563
561
|
assert memories[-1].feedback_metrics["positive"]["avg"] == -1.0
|
|
564
562
|
|
|
565
563
|
|
|
566
|
-
def test_query_memoryset_with_partition_id(readonly_partitioned_memoryset: LabeledMemoryset):
|
|
567
|
-
# Query with partition_id and include_global (default) - includes both p1 and global memories
|
|
568
|
-
memories = readonly_partitioned_memoryset.query(partition_id="p1")
|
|
569
|
-
assert len(memories) == 15 # 8 p1 + 7 global = 15
|
|
570
|
-
# Results should include both p1 and global memories
|
|
571
|
-
partition_ids = {memory.partition_id for memory in memories}
|
|
572
|
-
assert "p1" in partition_ids
|
|
573
|
-
assert None in partition_ids
|
|
574
|
-
|
|
575
|
-
|
|
576
|
-
def test_query_memoryset_with_partition_id_and_exclude_global(readonly_partitioned_memoryset: LabeledMemoryset):
|
|
577
|
-
# Query with partition_id and exclude_global mode - only returns p1 memories
|
|
578
|
-
memories = readonly_partitioned_memoryset.query(partition_id="p1", partition_filter_mode="exclude_global")
|
|
579
|
-
assert len(memories) == 8 # Only 8 p1 memories (no global)
|
|
580
|
-
# All results should be from partition p1 (no global memories)
|
|
581
|
-
assert all(memory.partition_id == "p1" for memory in memories)
|
|
582
|
-
|
|
583
|
-
|
|
584
|
-
def test_query_memoryset_with_partition_id_and_include_global(readonly_partitioned_memoryset: LabeledMemoryset):
|
|
585
|
-
# Query with partition_id and include_global mode (default) - includes both p1 and global
|
|
586
|
-
memories = readonly_partitioned_memoryset.query(partition_id="p1", partition_filter_mode="include_global")
|
|
587
|
-
assert len(memories) == 15 # 8 p1 + 7 global = 15
|
|
588
|
-
# Results should include both p1 and global memories
|
|
589
|
-
partition_ids = {memory.partition_id for memory in memories}
|
|
590
|
-
assert "p1" in partition_ids
|
|
591
|
-
assert None in partition_ids
|
|
592
|
-
|
|
593
|
-
|
|
594
|
-
def test_query_memoryset_with_partition_filter_mode_exclude_global(readonly_partitioned_memoryset: LabeledMemoryset):
|
|
595
|
-
# Query excluding global memories requires a partition_id
|
|
596
|
-
# Test with a specific partition_id
|
|
597
|
-
memories = readonly_partitioned_memoryset.query(partition_id="p1", partition_filter_mode="exclude_global")
|
|
598
|
-
assert len(memories) == 8 # Only p1 memories
|
|
599
|
-
# All results should have a partition_id (not global)
|
|
600
|
-
assert all(memory.partition_id == "p1" for memory in memories)
|
|
601
|
-
|
|
602
|
-
|
|
603
|
-
def test_query_memoryset_with_partition_filter_mode_only_global(readonly_partitioned_memoryset: LabeledMemoryset):
|
|
604
|
-
# Query only in global memories
|
|
605
|
-
memories = readonly_partitioned_memoryset.query(partition_filter_mode="only_global")
|
|
606
|
-
assert len(memories) == 7 # There are 7 global memories in SAMPLE_DATA
|
|
607
|
-
# All results should be global (partition_id is None)
|
|
608
|
-
assert all(memory.partition_id is None for memory in memories)
|
|
609
|
-
|
|
610
|
-
|
|
611
|
-
def test_query_memoryset_with_partition_filter_mode_include_global(readonly_partitioned_memoryset: LabeledMemoryset):
|
|
612
|
-
# Query including global memories - when no partition_id is specified,
|
|
613
|
-
# include_global seems to only return global memories
|
|
614
|
-
memories = readonly_partitioned_memoryset.query(partition_filter_mode="include_global")
|
|
615
|
-
# Based on actual behavior, this returns only global memories
|
|
616
|
-
assert len(memories) == 7
|
|
617
|
-
# All results should be global
|
|
618
|
-
assert all(memory.partition_id is None for memory in memories)
|
|
619
|
-
|
|
620
|
-
|
|
621
|
-
def test_query_memoryset_with_partition_filter_mode_ignore_partitions(readonly_partitioned_memoryset: LabeledMemoryset):
|
|
622
|
-
# Query ignoring partition filtering entirely - returns all memories
|
|
623
|
-
memories = readonly_partitioned_memoryset.query(partition_filter_mode="ignore_partitions", limit=100)
|
|
624
|
-
assert len(memories) == 22 # All 22 memories
|
|
625
|
-
# Results can come from any partition or global
|
|
626
|
-
partition_ids = {memory.partition_id for memory in memories}
|
|
627
|
-
# Should have results from multiple partitions/global
|
|
628
|
-
assert len(partition_ids) >= 1
|
|
629
|
-
# Verify we have p1, p2, and global
|
|
630
|
-
assert "p1" in partition_ids
|
|
631
|
-
assert "p2" in partition_ids
|
|
632
|
-
assert None in partition_ids
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
def test_query_memoryset_with_filters_and_partition_id(readonly_partitioned_memoryset: LabeledMemoryset):
|
|
636
|
-
# Query with filters and partition_id
|
|
637
|
-
memories = readonly_partitioned_memoryset.query(filters=[("label", "==", 0)], partition_id="p1")
|
|
638
|
-
assert len(memories) > 0
|
|
639
|
-
# All results should match the filter and be from partition p1
|
|
640
|
-
assert all(memory.label == 0 for memory in memories)
|
|
641
|
-
assert all(memory.partition_id == "p1" for memory in memories)
|
|
642
|
-
|
|
643
|
-
|
|
644
|
-
def test_query_memoryset_with_filters_and_partition_filter_mode(readonly_partitioned_memoryset: LabeledMemoryset):
|
|
645
|
-
# Query with filters and partition_filter_mode - exclude_global requires partition_id
|
|
646
|
-
memories = readonly_partitioned_memoryset.query(
|
|
647
|
-
filters=[("label", "==", 1)], partition_id="p1", partition_filter_mode="exclude_global"
|
|
648
|
-
)
|
|
649
|
-
assert len(memories) > 0
|
|
650
|
-
# All results should match the filter and be from p1 (not global)
|
|
651
|
-
assert all(memory.label == 1 for memory in memories)
|
|
652
|
-
assert all(memory.partition_id == "p1" for memory in memories)
|
|
653
|
-
|
|
654
|
-
|
|
655
|
-
def test_query_memoryset_with_limit_and_partition_id(readonly_partitioned_memoryset: LabeledMemoryset):
|
|
656
|
-
# Query with limit and partition_id
|
|
657
|
-
memories = readonly_partitioned_memoryset.query(partition_id="p2", limit=3)
|
|
658
|
-
assert len(memories) == 3
|
|
659
|
-
# All results should be from partition p2
|
|
660
|
-
assert all(memory.partition_id == "p2" for memory in memories)
|
|
661
|
-
|
|
662
|
-
|
|
663
|
-
def test_query_memoryset_with_offset_and_partition_id(readonly_partitioned_memoryset: LabeledMemoryset):
|
|
664
|
-
# Query with offset and partition_id - use exclude_global to get only p1 memories
|
|
665
|
-
memories_page1 = readonly_partitioned_memoryset.query(
|
|
666
|
-
partition_id="p1", partition_filter_mode="exclude_global", limit=5
|
|
667
|
-
)
|
|
668
|
-
memories_page2 = readonly_partitioned_memoryset.query(
|
|
669
|
-
partition_id="p1", partition_filter_mode="exclude_global", offset=5, limit=5
|
|
670
|
-
)
|
|
671
|
-
assert len(memories_page1) == 5
|
|
672
|
-
assert len(memories_page2) == 3 # Only 3 remaining p1 memories (8 total - 5 = 3)
|
|
673
|
-
# All results should be from partition p1
|
|
674
|
-
assert all(memory.partition_id == "p1" for memory in memories_page1)
|
|
675
|
-
assert all(memory.partition_id == "p1" for memory in memories_page2)
|
|
676
|
-
# Results should be different (pagination works)
|
|
677
|
-
memory_ids_page1 = {memory.memory_id for memory in memories_page1}
|
|
678
|
-
memory_ids_page2 = {memory.memory_id for memory in memories_page2}
|
|
679
|
-
assert memory_ids_page1.isdisjoint(memory_ids_page2)
|
|
680
|
-
|
|
681
|
-
|
|
682
|
-
def test_query_memoryset_with_partition_id_p2(readonly_partitioned_memoryset: LabeledMemoryset):
|
|
683
|
-
# Query a different partition to verify it works
|
|
684
|
-
# With include_global (default), it includes both p2 and global memories
|
|
685
|
-
memories = readonly_partitioned_memoryset.query(partition_id="p2")
|
|
686
|
-
assert len(memories) == 14 # 7 p2 + 7 global = 14
|
|
687
|
-
# Results should include both p2 and global memories
|
|
688
|
-
partition_ids = {memory.partition_id for memory in memories}
|
|
689
|
-
assert "p2" in partition_ids
|
|
690
|
-
assert None in partition_ids
|
|
691
|
-
|
|
692
|
-
|
|
693
|
-
def test_query_memoryset_with_metadata_filter_and_partition_id(readonly_partitioned_memoryset: LabeledMemoryset):
|
|
694
|
-
# Query with metadata filter and partition_id
|
|
695
|
-
memories = readonly_partitioned_memoryset.query(filters=[("metadata.key", "==", "g1")], partition_id="p1")
|
|
696
|
-
assert len(memories) > 0
|
|
697
|
-
# All results should match the metadata filter and be from partition p1
|
|
698
|
-
assert all(memory.metadata.get("key") == "g1" for memory in memories)
|
|
699
|
-
assert all(memory.partition_id == "p1" for memory in memories)
|
|
700
|
-
|
|
701
|
-
|
|
702
|
-
def test_query_memoryset_with_partition_filter_mode_only_global_and_filters(
|
|
703
|
-
readonly_partitioned_memoryset: LabeledMemoryset,
|
|
704
|
-
):
|
|
705
|
-
# Query only global memories with filters
|
|
706
|
-
memories = readonly_partitioned_memoryset.query(
|
|
707
|
-
filters=[("metadata.key", "==", "g3")], partition_filter_mode="only_global"
|
|
708
|
-
)
|
|
709
|
-
assert len(memories) > 0
|
|
710
|
-
# All results should match the filter and be global
|
|
711
|
-
assert all(memory.metadata.get("key") == "g3" for memory in memories)
|
|
712
|
-
assert all(memory.partition_id is None for memory in memories)
|
|
713
|
-
|
|
714
|
-
|
|
715
564
|
def test_labeled_memory_predictions_property(classification_model: ClassificationModel):
|
|
716
565
|
"""Test that LabeledMemory.predictions() only returns classification predictions."""
|
|
717
566
|
# Given: A classification model with memories
|
|
@@ -850,7 +699,6 @@ def test_memory_predictions_expected_label_filter(classification_model: Classifi
|
|
|
850
699
|
|
|
851
700
|
|
|
852
701
|
def test_insert_memories(writable_memoryset: LabeledMemoryset):
|
|
853
|
-
writable_memoryset.refresh()
|
|
854
702
|
prev_length = writable_memoryset.length
|
|
855
703
|
writable_memoryset.insert(
|
|
856
704
|
[
|
|
@@ -859,10 +707,8 @@ def test_insert_memories(writable_memoryset: LabeledMemoryset):
|
|
|
859
707
|
],
|
|
860
708
|
batch_size=1,
|
|
861
709
|
)
|
|
862
|
-
writable_memoryset.refresh()
|
|
863
710
|
assert writable_memoryset.length == prev_length + 2
|
|
864
711
|
writable_memoryset.insert(dict(value="tomato soup is my favorite", label=0, key="test", source_id="test"))
|
|
865
|
-
writable_memoryset.refresh()
|
|
866
712
|
assert writable_memoryset.length == prev_length + 3
|
|
867
713
|
last_memory = writable_memoryset[-1]
|
|
868
714
|
assert last_memory.value == "tomato soup is my favorite"
|
|
@@ -873,16 +719,16 @@ def test_insert_memories(writable_memoryset: LabeledMemoryset):
|
|
|
873
719
|
|
|
874
720
|
|
|
875
721
|
@skip_in_prod("Production memorysets do not have session consistency guarantees")
|
|
876
|
-
@skip_in_ci("CI environment may not have session consistency guarantees")
|
|
877
722
|
def test_update_memories(writable_memoryset: LabeledMemoryset, hf_dataset: Dataset):
|
|
878
723
|
# We've combined the update tests into one to avoid multiple expensive requests for a writable_memoryset
|
|
879
724
|
|
|
880
725
|
# test updating a single memory
|
|
881
726
|
memory_id = writable_memoryset[0].memory_id
|
|
882
|
-
|
|
727
|
+
updated_count = writable_memoryset.update(dict(memory_id=memory_id, value="i love soup so much"))
|
|
728
|
+
assert updated_count == 1
|
|
729
|
+
updated_memory = writable_memoryset.get(memory_id)
|
|
883
730
|
assert updated_memory.value == "i love soup so much"
|
|
884
731
|
assert updated_memory.label == hf_dataset[0]["label"]
|
|
885
|
-
writable_memoryset.refresh() # Refresh to ensure consistency after update
|
|
886
732
|
assert writable_memoryset.get(memory_id).value == "i love soup so much"
|
|
887
733
|
|
|
888
734
|
# test updating a memory instance
|
|
@@ -894,15 +740,51 @@ def test_update_memories(writable_memoryset: LabeledMemoryset, hf_dataset: Datas
|
|
|
894
740
|
|
|
895
741
|
# test updating multiple memories
|
|
896
742
|
memory_ids = [memory.memory_id for memory in writable_memoryset[:2]]
|
|
897
|
-
|
|
743
|
+
updated_count = writable_memoryset.update(
|
|
898
744
|
[
|
|
899
745
|
dict(memory_id=memory_ids[0], value="i love soup so much"),
|
|
900
746
|
dict(memory_id=memory_ids[1], value="cats are so cute"),
|
|
901
747
|
],
|
|
902
748
|
batch_size=1,
|
|
903
749
|
)
|
|
904
|
-
assert
|
|
905
|
-
assert
|
|
750
|
+
assert updated_count == 2
|
|
751
|
+
assert writable_memoryset.get(memory_ids[0]).value == "i love soup so much"
|
|
752
|
+
assert writable_memoryset.get(memory_ids[1]).value == "cats are so cute"
|
|
753
|
+
|
|
754
|
+
|
|
755
|
+
@skip_in_prod("Production memorysets do not have session consistency guarantees")
|
|
756
|
+
def test_update_memory_metadata(writable_memoryset: LabeledMemoryset):
|
|
757
|
+
memory = writable_memoryset[0]
|
|
758
|
+
assert memory.metadata["key"] == "g1"
|
|
759
|
+
|
|
760
|
+
# Updating label without metadata should preserve existing metadata
|
|
761
|
+
updated = memory.update(label=1)
|
|
762
|
+
assert updated.label == 1
|
|
763
|
+
assert updated.metadata["key"] == "g1", "Metadata should be preserved when not specified"
|
|
764
|
+
|
|
765
|
+
# Updating metadata via top-level keys should update only specified keys
|
|
766
|
+
updated = memory.update(key="updated", new_key="added")
|
|
767
|
+
assert updated.metadata["key"] == "updated", "Existing metadata key should be preserved"
|
|
768
|
+
assert updated.metadata["new_key"] == "added", "New metadata key should be added"
|
|
769
|
+
|
|
770
|
+
# Can explicitly clear metadata by passing metadata={}
|
|
771
|
+
writable_memoryset.update(dict(memory_id=memory.memory_id, metadata={}))
|
|
772
|
+
updated = writable_memoryset.get(memory.memory_id)
|
|
773
|
+
assert updated.metadata == {}, "Metadata should be cleared when explicitly set to {}"
|
|
774
|
+
|
|
775
|
+
|
|
776
|
+
def test_update_memories_by_filter(writable_memoryset: LabeledMemoryset):
|
|
777
|
+
source_ids_to_update = ["s1", "s3"]
|
|
778
|
+
initial_length = len(writable_memoryset)
|
|
779
|
+
updated_count = writable_memoryset.update(
|
|
780
|
+
filters=[("source_id", "in", source_ids_to_update)],
|
|
781
|
+
patch={"label": 1},
|
|
782
|
+
)
|
|
783
|
+
assert updated_count == 2
|
|
784
|
+
assert len(writable_memoryset) == initial_length
|
|
785
|
+
updated_memories = writable_memoryset.query(filters=[("source_id", "in", source_ids_to_update)])
|
|
786
|
+
assert len(updated_memories) == 2
|
|
787
|
+
assert all(memory.label == 1 for memory in updated_memories)
|
|
906
788
|
|
|
907
789
|
|
|
908
790
|
def test_delete_memories(writable_memoryset: LabeledMemoryset):
|
|
@@ -911,17 +793,60 @@ def test_delete_memories(writable_memoryset: LabeledMemoryset):
|
|
|
911
793
|
# test deleting a single memory
|
|
912
794
|
prev_length = writable_memoryset.length
|
|
913
795
|
memory_id = writable_memoryset[0].memory_id
|
|
914
|
-
writable_memoryset.delete(memory_id)
|
|
796
|
+
deleted_count = writable_memoryset.delete(memory_id)
|
|
797
|
+
assert deleted_count == 1
|
|
915
798
|
with pytest.raises(LookupError):
|
|
916
799
|
writable_memoryset.get(memory_id)
|
|
917
800
|
assert writable_memoryset.length == prev_length - 1
|
|
918
801
|
|
|
919
802
|
# test deleting multiple memories
|
|
920
803
|
prev_length = writable_memoryset.length
|
|
921
|
-
writable_memoryset.delete(
|
|
804
|
+
deleted_count = writable_memoryset.delete(
|
|
805
|
+
[writable_memoryset[0].memory_id, writable_memoryset[1].memory_id], batch_size=1
|
|
806
|
+
)
|
|
807
|
+
assert deleted_count == 2
|
|
922
808
|
assert writable_memoryset.length == prev_length - 2
|
|
923
809
|
|
|
924
810
|
|
|
811
|
+
def test_delete_memories_by_filter(writable_memoryset: LabeledMemoryset):
|
|
812
|
+
source_ids_to_delete = ["s1", "s3"]
|
|
813
|
+
initial_length = len(writable_memoryset)
|
|
814
|
+
memories_before = writable_memoryset.query(filters=[("source_id", "in", source_ids_to_delete)])
|
|
815
|
+
assert len(memories_before) == 2
|
|
816
|
+
deleted_count = writable_memoryset.delete(filters=[("source_id", "in", source_ids_to_delete)])
|
|
817
|
+
assert deleted_count == 2
|
|
818
|
+
assert len(writable_memoryset) == initial_length - 2
|
|
819
|
+
memories_after = writable_memoryset.query(filters=[("source_id", "in", source_ids_to_delete)])
|
|
820
|
+
assert len(memories_after) == 0
|
|
821
|
+
|
|
822
|
+
|
|
823
|
+
def test_delete_all_memories(writable_memoryset: LabeledMemoryset):
|
|
824
|
+
initial_count = writable_memoryset.length
|
|
825
|
+
deleted_count = writable_memoryset.truncate()
|
|
826
|
+
assert deleted_count == initial_count
|
|
827
|
+
assert writable_memoryset.length == 0
|
|
828
|
+
|
|
829
|
+
|
|
830
|
+
def test_delete_all_memories_from_partition(writable_memoryset: LabeledMemoryset):
|
|
831
|
+
memories_in_partition = len(writable_memoryset.query(filters=[("partition_id", "==", "p1")]))
|
|
832
|
+
assert memories_in_partition > 0
|
|
833
|
+
deleted_count = writable_memoryset.truncate(partition_id="p1")
|
|
834
|
+
assert deleted_count == memories_in_partition
|
|
835
|
+
memories_in_partition_after = len(writable_memoryset.query(filters=[("partition_id", "==", "p1")]))
|
|
836
|
+
assert memories_in_partition_after == 0
|
|
837
|
+
assert writable_memoryset.length > 0
|
|
838
|
+
|
|
839
|
+
|
|
840
|
+
def test_delete_all_memories_from_global_partition(writable_memoryset: LabeledMemoryset):
|
|
841
|
+
memories_in_global_partition = len(writable_memoryset.query(filters=[("partition_id", "==", None)]))
|
|
842
|
+
assert memories_in_global_partition > 0
|
|
843
|
+
deleted_count = writable_memoryset.truncate(partition_id=None)
|
|
844
|
+
assert deleted_count == memories_in_global_partition
|
|
845
|
+
memories_in_global_partition_after = len(writable_memoryset.query(filters=[("partition_id", "==", None)]))
|
|
846
|
+
assert memories_in_global_partition_after == 0
|
|
847
|
+
assert writable_memoryset.length > 0
|
|
848
|
+
|
|
849
|
+
|
|
925
850
|
def test_clone_memoryset(readonly_memoryset: LabeledMemoryset):
|
|
926
851
|
cloned_memoryset = readonly_memoryset.clone(
|
|
927
852
|
"test_cloned_memoryset", embedding_model=PretrainedEmbeddingModel.DISTILBERT
|
|
@@ -983,7 +908,6 @@ async def test_group_potential_duplicates(writable_memoryset: LabeledMemoryset):
|
|
|
983
908
|
|
|
984
909
|
|
|
985
910
|
def test_get_cascading_edits_suggestions(writable_memoryset: LabeledMemoryset):
|
|
986
|
-
# Insert a memory to test cascading edits
|
|
987
911
|
SOUP = 0
|
|
988
912
|
CATS = 1
|
|
989
913
|
query_text = "i love soup" # from SAMPLE_DATA in conftest.py
|
|
@@ -993,11 +917,7 @@ def test_get_cascading_edits_suggestions(writable_memoryset: LabeledMemoryset):
|
|
|
993
917
|
dict(value=mislabeled_soup_text, label=CATS), # mislabeled soup memory
|
|
994
918
|
]
|
|
995
919
|
)
|
|
996
|
-
|
|
997
|
-
# Fetch the memory to update
|
|
998
920
|
memory = writable_memoryset.query(filters=[("value", "==", query_text)])[0]
|
|
999
|
-
|
|
1000
|
-
# Update the label and get cascading edit suggestions
|
|
1001
921
|
suggestions = writable_memoryset.get_cascading_edits_suggestions(
|
|
1002
922
|
memory=memory,
|
|
1003
923
|
old_label=CATS,
|
|
@@ -1005,8 +925,6 @@ def test_get_cascading_edits_suggestions(writable_memoryset: LabeledMemoryset):
|
|
|
1005
925
|
max_neighbors=10,
|
|
1006
926
|
max_validation_neighbors=5,
|
|
1007
927
|
)
|
|
1008
|
-
|
|
1009
|
-
# Validate the suggestions
|
|
1010
928
|
assert len(suggestions) == 1
|
|
1011
929
|
assert suggestions[0]["neighbor"]["value"] == mislabeled_soup_text
|
|
1012
930
|
|
|
@@ -1075,13 +993,10 @@ def test_update_scored_memory(scored_memoryset: ScoredMemoryset):
|
|
|
1075
993
|
|
|
1076
994
|
@pytest.mark.asyncio
|
|
1077
995
|
async def test_insert_memories_async_single(writable_memoryset: LabeledMemoryset):
|
|
1078
|
-
"""Test async insertion of a single memory"""
|
|
1079
|
-
await writable_memoryset.arefresh()
|
|
1080
996
|
prev_length = writable_memoryset.length
|
|
1081
997
|
|
|
1082
998
|
await writable_memoryset.ainsert(dict(value="async tomato soup is my favorite", label=0, key="async_test"))
|
|
1083
999
|
|
|
1084
|
-
await writable_memoryset.arefresh()
|
|
1085
1000
|
assert writable_memoryset.length == prev_length + 1
|
|
1086
1001
|
last_memory = writable_memoryset[-1]
|
|
1087
1002
|
assert last_memory.value == "async tomato soup is my favorite"
|
|
@@ -1091,8 +1006,6 @@ async def test_insert_memories_async_single(writable_memoryset: LabeledMemoryset
|
|
|
1091
1006
|
|
|
1092
1007
|
@pytest.mark.asyncio
|
|
1093
1008
|
async def test_insert_memories_async_batch(writable_memoryset: LabeledMemoryset):
|
|
1094
|
-
"""Test async insertion of multiple memories"""
|
|
1095
|
-
await writable_memoryset.arefresh()
|
|
1096
1009
|
prev_length = writable_memoryset.length
|
|
1097
1010
|
|
|
1098
1011
|
await writable_memoryset.ainsert(
|
|
@@ -1102,7 +1015,6 @@ async def test_insert_memories_async_batch(writable_memoryset: LabeledMemoryset)
|
|
|
1102
1015
|
]
|
|
1103
1016
|
)
|
|
1104
1017
|
|
|
1105
|
-
await writable_memoryset.arefresh()
|
|
1106
1018
|
assert writable_memoryset.length == prev_length + 2
|
|
1107
1019
|
|
|
1108
1020
|
# Check the inserted memories
|
|
@@ -1121,8 +1033,6 @@ async def test_insert_memories_async_batch(writable_memoryset: LabeledMemoryset)
|
|
|
1121
1033
|
|
|
1122
1034
|
@pytest.mark.asyncio
|
|
1123
1035
|
async def test_insert_memories_async_with_source_id(writable_memoryset: LabeledMemoryset):
|
|
1124
|
-
"""Test async insertion with source_id and metadata"""
|
|
1125
|
-
await writable_memoryset.arefresh()
|
|
1126
1036
|
prev_length = writable_memoryset.length
|
|
1127
1037
|
|
|
1128
1038
|
await writable_memoryset.ainsert(
|
|
@@ -1131,7 +1041,6 @@ async def test_insert_memories_async_with_source_id(writable_memoryset: LabeledM
|
|
|
1131
1041
|
)
|
|
1132
1042
|
)
|
|
1133
1043
|
|
|
1134
|
-
await writable_memoryset.arefresh()
|
|
1135
1044
|
assert writable_memoryset.length == prev_length + 1
|
|
1136
1045
|
last_memory = writable_memoryset[-1]
|
|
1137
1046
|
assert last_memory.value == "async soup with source id"
|
orca_sdk/regression_model.py
CHANGED
|
@@ -212,7 +212,7 @@ class RegressionModel:
|
|
|
212
212
|
List of handles to all regression models in the OrcaCloud
|
|
213
213
|
"""
|
|
214
214
|
client = OrcaClient._resolve_client()
|
|
215
|
-
return [cls(metadata) for metadata in client.GET("/regression_model")]
|
|
215
|
+
return [cls(metadata) for metadata in client.GET("/regression_model", params={})]
|
|
216
216
|
|
|
217
217
|
@classmethod
|
|
218
218
|
def drop(cls, name_or_id: str, if_not_exists: DropMode = "error"):
|
|
@@ -90,6 +90,12 @@ def test_list_models_unauthorized(unauthorized_client, regression_model: Regress
|
|
|
90
90
|
assert RegressionModel.all() == []
|
|
91
91
|
|
|
92
92
|
|
|
93
|
+
def test_memoryset_regression_models_property(regression_model: RegressionModel, scored_memoryset: ScoredMemoryset):
|
|
94
|
+
models = scored_memoryset.regression_models
|
|
95
|
+
assert len(models) > 0
|
|
96
|
+
assert any(model.id == regression_model.id for model in models)
|
|
97
|
+
|
|
98
|
+
|
|
93
99
|
def test_update_model_attributes(regression_model: RegressionModel):
|
|
94
100
|
regression_model.description = "New description"
|
|
95
101
|
assert regression_model.description == "New description"
|
|
@@ -144,6 +150,29 @@ def test_delete_memoryset_before_model_constraint_violation(hf_dataset):
|
|
|
144
150
|
ScoredMemoryset.drop(memoryset.id)
|
|
145
151
|
|
|
146
152
|
|
|
153
|
+
def test_delete_memoryset_with_model_cascade(hf_dataset):
|
|
154
|
+
"""Test that cascade=False prevents deletion and cascade=True allows it."""
|
|
155
|
+
memoryset = ScoredMemoryset.from_hf_dataset("test_memoryset_cascade_delete_regression", hf_dataset)
|
|
156
|
+
model = RegressionModel.create("test_regression_model_cascade_delete", memoryset)
|
|
157
|
+
|
|
158
|
+
# Verify model exists
|
|
159
|
+
assert RegressionModel.open(model.name) is not None
|
|
160
|
+
|
|
161
|
+
# Without cascade, deletion should fail
|
|
162
|
+
with pytest.raises(RuntimeError):
|
|
163
|
+
ScoredMemoryset.drop(memoryset.id, cascade=False)
|
|
164
|
+
|
|
165
|
+
# Model should still exist
|
|
166
|
+
assert RegressionModel.exists(model.name)
|
|
167
|
+
|
|
168
|
+
# With cascade, deletion should succeed
|
|
169
|
+
ScoredMemoryset.drop(memoryset.id, cascade=True)
|
|
170
|
+
|
|
171
|
+
# Model should be deleted along with the memoryset
|
|
172
|
+
assert not RegressionModel.exists(model.name)
|
|
173
|
+
assert not ScoredMemoryset.exists(memoryset.name)
|
|
174
|
+
|
|
175
|
+
|
|
147
176
|
@pytest.mark.parametrize("data_type", ["dataset", "datasource"])
|
|
148
177
|
def test_evaluate(
|
|
149
178
|
regression_model: RegressionModel,
|
|
@@ -593,3 +622,18 @@ def test_drop(regression_model):
|
|
|
593
622
|
name = regression_model.name
|
|
594
623
|
RegressionModel.drop(name)
|
|
595
624
|
assert not RegressionModel.exists(name)
|
|
625
|
+
|
|
626
|
+
|
|
627
|
+
def test_predict_with_empty_partition(fully_partitioned_regression_resources):
|
|
628
|
+
datasource, memoryset, regression_model = fully_partitioned_regression_resources
|
|
629
|
+
|
|
630
|
+
assert memoryset.length == 15
|
|
631
|
+
|
|
632
|
+
with pytest.raises(RuntimeError, match="lookup failed to return the correct number of memories"):
|
|
633
|
+
regression_model.predict("i love cats", partition_filter_mode="only_global")
|
|
634
|
+
|
|
635
|
+
with pytest.raises(RuntimeError, match="lookup failed to return the correct number of memories"):
|
|
636
|
+
regression_model.predict("i love cats", partition_filter_mode="exclude_global", partition_id="p_does_not_exist")
|
|
637
|
+
|
|
638
|
+
with pytest.raises(RuntimeError, match="lookup failed to return the correct number of memories"):
|
|
639
|
+
regression_model.evaluate(datasource, partition_filter_mode="only_global")
|
orca_sdk/telemetry.py
CHANGED
|
@@ -4,7 +4,7 @@ import logging
|
|
|
4
4
|
import os
|
|
5
5
|
from abc import ABC
|
|
6
6
|
from datetime import datetime
|
|
7
|
-
from typing import TYPE_CHECKING, Any, Iterable, Literal, Self
|
|
7
|
+
from typing import TYPE_CHECKING, Any, Iterable, Literal, Self, overload
|
|
8
8
|
|
|
9
9
|
from httpx import Timeout
|
|
10
10
|
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: orca_sdk
|
|
3
|
-
Version: 0.1.10
|
|
3
|
+
Version: 0.1.11
|
|
4
4
|
Summary: SDK for interacting with Orca Services
|
|
5
5
|
License-Expression: Apache-2.0
|
|
6
6
|
Author: Orca DB Inc.
|
|
@@ -11,16 +11,14 @@ Classifier: Programming Language :: Python :: 3.11
|
|
|
11
11
|
Classifier: Programming Language :: Python :: 3.12
|
|
12
12
|
Classifier: Programming Language :: Python :: 3.13
|
|
13
13
|
Classifier: Programming Language :: Python :: 3.14
|
|
14
|
+
Provides-Extra: ui
|
|
14
15
|
Requires-Dist: datasets (>=4.4.0,<5)
|
|
15
|
-
Requires-Dist: gradio (>=6.0.0,<7)
|
|
16
|
+
Requires-Dist: gradio (>=6.0.0,<7) ; extra == "ui"
|
|
16
17
|
Requires-Dist: httpx (>=0.28.1)
|
|
17
18
|
Requires-Dist: httpx-retries (>=0.4.3,<0.5.0)
|
|
18
19
|
Requires-Dist: numpy (>=2.1.0,<3)
|
|
19
|
-
Requires-Dist: pandas (>=2.2.3,<3)
|
|
20
|
-
Requires-Dist: pyarrow (>=22.0.0,<23)
|
|
21
20
|
Requires-Dist: python-dotenv (>=1.1.0)
|
|
22
21
|
Requires-Dist: scikit-learn (>=1.6.1,<2)
|
|
23
|
-
Requires-Dist: torch (>=2.8.0,<3)
|
|
24
22
|
Description-Content-Type: text/markdown
|
|
25
23
|
|
|
26
24
|
<!--
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
orca_sdk/__init__.py,sha256=xyjNwkLQXaX8A-UYgGwYDjv2btOXArT_yiMTfmW7KA8,1003
|
|
2
|
+
orca_sdk/_shared/__init__.py,sha256=3Kt0Hu3QLI5FEp9nqGTxqAm3hAoBJKcagfaGQZ-lbJQ,223
|
|
3
|
+
orca_sdk/_shared/metrics.py,sha256=faeL1B1ftmns1ikfKrIlU3xOn6j0iAGLNUupxvAFza8,24968
|
|
4
|
+
orca_sdk/_shared/metrics_test.py,sha256=vDIXoj8EuuLcdPJz_7EiVPgQ-FXiVT81JG30jxsg9HM,20752
|
|
5
|
+
orca_sdk/_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
6
|
+
orca_sdk/_utils/analysis_ui.py,sha256=gXwCR972kffPjPyajcy7m8HgPARSfhXwBJ0rnOmu_7k,9418
|
|
7
|
+
orca_sdk/_utils/analysis_ui_style.css,sha256=q_ba_-_KtgztepHg829zLzypaxKayl7ySC1-oYDzV3k,836
|
|
8
|
+
orca_sdk/_utils/auth.py,sha256=nC252O171_3_wn4KBAN7kg8GNvoZFiQ5Xtzkrm5dWDo,2645
|
|
9
|
+
orca_sdk/_utils/auth_test.py,sha256=ygVWv1Ex53LaxIP7p2hzPHl8l9qYyBD5IGmEFJMps6s,1056
|
|
10
|
+
orca_sdk/_utils/common.py,sha256=wUm2pNDWytEecC5WiDWd02-yCZw3Akx0bIutG4lHsFA,805
|
|
11
|
+
orca_sdk/_utils/data_parsing.py,sha256=tTZwGJQ1xkl12gyq6gQ0dRIgDFaNDy3Sde8A_SIfmxo,5726
|
|
12
|
+
orca_sdk/_utils/data_parsing_disk_test.py,sha256=0IxyUNnawlNkqFwVEzIfXzuOympYkZRUP0rgxXhUrW4,3781
|
|
13
|
+
orca_sdk/_utils/data_parsing_torch_test.py,sha256=LTCd1H9V9OtBARv_SmyLEYMeAYPohf8IHJjUzxenEC8,5155
|
|
14
|
+
orca_sdk/_utils/pagination.py,sha256=986z0QPZixrZeurJWorF6eMgnTRdDF84AagEA6qNbMw,4245
|
|
15
|
+
orca_sdk/_utils/pagination_test.py,sha256=BUylCrcHnwoKEBmMUzVr0lwLpA35ivcCwdBK4rMw9y8,4887
|
|
16
|
+
orca_sdk/_utils/prediction_result_ui.css,sha256=sqBlkRLnovb5X5EcUDdB6iGpH63nVRlTW4uAmXuD0WM,258
|
|
17
|
+
orca_sdk/_utils/prediction_result_ui.py,sha256=N4Cj7PUEx2UeV-4Mhk-ZaPegilssaKtElSACVhVrx1w,4965
|
|
18
|
+
orca_sdk/_utils/tqdm_file_reader.py,sha256=Lw7Cg1UgNuRUoN6jjqZb-IlV00H-kbRcrZLdudr1GxE,324
|
|
19
|
+
orca_sdk/_utils/value_parser.py,sha256=pw-suYXKuZQ7mGV-QUFcD3-fmp6lJKjnyQ3f_Hb3Gg8,2379
|
|
20
|
+
orca_sdk/_utils/value_parser_test.py,sha256=dKni8W7KV2RgwuwK0ZN1SN-lH-W4DSSYkHdIXz52kys,1210
|
|
21
|
+
orca_sdk/async_client.py,sha256=qaDmmXrCjPql_y-_kZMskFR0OlE8n1Y_pxtC1JwGbF0,138668
|
|
22
|
+
orca_sdk/classification_model.py,sha256=rb1TmjCMrXqhpu4YKzT0ZOmzxjCV4_sc0gI9GONTc2o,50208
|
|
23
|
+
orca_sdk/classification_model_test.py,sha256=OXKv4vfnV3NYVcZypuDztVKiEFB_0sYX3S-iU5wui38,39534
|
|
24
|
+
orca_sdk/client.py,sha256=pm_NChTd3qKIwwCYoFUOj4sjZQvZJKYX1fbuT-H7hEc,137755
|
|
25
|
+
orca_sdk/conftest.py,sha256=sPxOUGHU9kFznPJ_JZ6vZxY4m6e290ygvUf47P4pW6I,16926
|
|
26
|
+
orca_sdk/credentials.py,sha256=2SwC3tq5akP-F_u2s4xMZDp8mlsKMUT1T5T9Z99-eSY,6588
|
|
27
|
+
orca_sdk/credentials_test.py,sha256=K_1aNVCE5eoqX-tfh0G0_Vhqzhui4qcfYqWaDXfaqTA,4020
|
|
28
|
+
orca_sdk/datasource.py,sha256=kPp3wOcjhTJsSwi51oK-y7tNYlz6jDAGKX9R7CoqHXs,22720
|
|
29
|
+
orca_sdk/datasource_test.py,sha256=qoePaetnlgQZAx6y5SvCv9JMdBSvB-0TB1ug0_L0FuY,16786
|
|
30
|
+
orca_sdk/embedding_model.py,sha256=hCl6vWpW7LXaM1ovGP6GzEp7sRdyJECS_sNc8kKBsvQ,28495
|
|
31
|
+
orca_sdk/embedding_model_test.py,sha256=CERI3Lk7U32N3qwZyzip41Mw1Yb4sHWPEGeSulsaY88,9368
|
|
32
|
+
orca_sdk/job.py,sha256=wHwVt-s7i-v8udhLGybB-90Kp4dwOLrY806bE4Tam5Q,13092
|
|
33
|
+
orca_sdk/job_test.py,sha256=nRSWxd_1UIfrj9oMVvrXjt6OBkBpddYAjb2y6P-DTUg,4327
|
|
34
|
+
orca_sdk/memoryset.py,sha256=JhIyusMelyg9ZinkoZKGtZIoxLANRk8XGZPlrVtX5ds,164318
|
|
35
|
+
orca_sdk/memoryset_test.py,sha256=3PsqzYkd-QG2nKQiWyW4qxC7QVPY76ytAmxkJ5EWfUs,46407
|
|
36
|
+
orca_sdk/regression_model.py,sha256=KHDVUZfnY5joRsO4HFg62LPeISH9j_cjyWW1SouuPHU,33971
|
|
37
|
+
orca_sdk/regression_model_test.py,sha256=SIVWS8gSnmolVLEdJ4k6AYCV1bY4Hcjej43Ynw-TDzE,27398
|
|
38
|
+
orca_sdk/telemetry.py,sha256=e_FiN3JFkQV62CKygd78BVQwfQwdClAZvV-XvLDEIGI,27828
|
|
39
|
+
orca_sdk/telemetry_test.py,sha256=eT66C5lFdNg-pQdo2I__BP7Tn5fTc9aTkVo9ZhWwhU0,5519
|
|
40
|
+
orca_sdk-0.1.11.dist-info/METADATA,sha256=54iBK4DRJ-rBkhYTnxTrKlnWQhMd7t9dXCY-iQ_XER8,3638
|
|
41
|
+
orca_sdk-0.1.11.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
|
|
42
|
+
orca_sdk-0.1.11.dist-info/RECORD,,
|
orca_sdk-0.1.10.dist-info/RECORD
DELETED
|
@@ -1,41 +0,0 @@
|
|
|
1
|
-
orca_sdk/__init__.py,sha256=xyjNwkLQXaX8A-UYgGwYDjv2btOXArT_yiMTfmW7KA8,1003
|
|
2
|
-
orca_sdk/_shared/__init__.py,sha256=3Kt0Hu3QLI5FEp9nqGTxqAm3hAoBJKcagfaGQZ-lbJQ,223
|
|
3
|
-
orca_sdk/_shared/metrics.py,sha256=faeL1B1ftmns1ikfKrIlU3xOn6j0iAGLNUupxvAFza8,24968
|
|
4
|
-
orca_sdk/_shared/metrics_test.py,sha256=vDIXoj8EuuLcdPJz_7EiVPgQ-FXiVT81JG30jxsg9HM,20752
|
|
5
|
-
orca_sdk/_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
6
|
-
orca_sdk/_utils/analysis_ui.py,sha256=R0xc4RyJKyBHJEEF_ztI4Dm5w8Y1uF0Wpkn4LQgXqBE,9258
|
|
7
|
-
orca_sdk/_utils/analysis_ui_style.css,sha256=q_ba_-_KtgztepHg829zLzypaxKayl7ySC1-oYDzV3k,836
|
|
8
|
-
orca_sdk/_utils/auth.py,sha256=nC252O171_3_wn4KBAN7kg8GNvoZFiQ5Xtzkrm5dWDo,2645
|
|
9
|
-
orca_sdk/_utils/auth_test.py,sha256=ygVWv1Ex53LaxIP7p2hzPHl8l9qYyBD5IGmEFJMps6s,1056
|
|
10
|
-
orca_sdk/_utils/common.py,sha256=wUm2pNDWytEecC5WiDWd02-yCZw3Akx0bIutG4lHsFA,805
|
|
11
|
-
orca_sdk/_utils/data_parsing.py,sha256=5vaTpvUOS-ldlcgnSARYw7s9mce-imzkU7kA48-pdIM,5396
|
|
12
|
-
orca_sdk/_utils/data_parsing_test.py,sha256=u7BEjxtsU9gMs3tAZI0lJ--vOLlwKwH3hemdCedzxA0,8826
|
|
13
|
-
orca_sdk/_utils/pagination.py,sha256=986z0QPZixrZeurJWorF6eMgnTRdDF84AagEA6qNbMw,4245
|
|
14
|
-
orca_sdk/_utils/pagination_test.py,sha256=BUylCrcHnwoKEBmMUzVr0lwLpA35ivcCwdBK4rMw9y8,4887
|
|
15
|
-
orca_sdk/_utils/prediction_result_ui.css,sha256=sqBlkRLnovb5X5EcUDdB6iGpH63nVRlTW4uAmXuD0WM,258
|
|
16
|
-
orca_sdk/_utils/prediction_result_ui.py,sha256=Ur_FY7dz3oWNmtPiP3Wl3yRlEMgK8q9UfT-SDu9UPxA,4805
|
|
17
|
-
orca_sdk/_utils/tqdm_file_reader.py,sha256=Lw7Cg1UgNuRUoN6jjqZb-IlV00H-kbRcrZLdudr1GxE,324
|
|
18
|
-
orca_sdk/_utils/value_parser.py,sha256=c3qMABCCDQcIjn9N1orYYnlRwDW9JWdGwW_2TDZPLdI,1286
|
|
19
|
-
orca_sdk/_utils/value_parser_test.py,sha256=OybsiC-Obi32RRi9NIuwrVBRAnlyPMV1xVAaevSrb7M,1079
|
|
20
|
-
orca_sdk/async_client.py,sha256=PM7N-ggmtucfcUF1vQGtTZOCJpSNTOgd7l3LDNF5kP4,137192
|
|
21
|
-
orca_sdk/classification_model.py,sha256=C58euWnNvwXnthR9RtVVCOcgPEbxCjjp3sHMb86V6YA,50197
|
|
22
|
-
orca_sdk/classification_model_test.py,sha256=ElqxtR6gNwwk8dNXwfwAhpT7l0ZIP3H4pHmOyFXyTWk,37370
|
|
23
|
-
orca_sdk/client.py,sha256=SKZv3zGG6OwLe_FlB5wL2cxltOLPCcHvoo2CbMwyKgA,136241
|
|
24
|
-
orca_sdk/conftest.py,sha256=0O1VY-SPKNAvi9fBLdY1RMnYVgZvMjP92y99bNAqqiw,12461
|
|
25
|
-
orca_sdk/credentials.py,sha256=2SwC3tq5akP-F_u2s4xMZDp8mlsKMUT1T5T9Z99-eSY,6588
|
|
26
|
-
orca_sdk/credentials_test.py,sha256=TLbXJMz3IlThvtSrHeLM7jRsKnrncA_ahOTpHg15Ei4,4089
|
|
27
|
-
orca_sdk/datasource.py,sha256=Qn5QloE84UXeyPk2wcy1lWe5wmh1iDBS044eWnxck_E,22371
|
|
28
|
-
orca_sdk/datasource_test.py,sha256=sCk3IcQJbDut5oN4Wf7PXhTxyMwalxMuCXJekSxy9wk,16665
|
|
29
|
-
orca_sdk/embedding_model.py,sha256=vLGnlO9I-cN1lklNBl_LxZ8m9oK3vkegFOpvYYw8u8g,28038
|
|
30
|
-
orca_sdk/embedding_model_test.py,sha256=Lc6fZ0ifT0hh6ldkUfjwMPcP6OgN0Umlzu8XDLs7UO4,8144
|
|
31
|
-
orca_sdk/job.py,sha256=wHwVt-s7i-v8udhLGybB-90Kp4dwOLrY806bE4Tam5Q,13092
|
|
32
|
-
orca_sdk/job_test.py,sha256=nRSWxd_1UIfrj9oMVvrXjt6OBkBpddYAjb2y6P-DTUg,4327
|
|
33
|
-
orca_sdk/memoryset.py,sha256=06v34fHabpkEaOv9VCKc0NhpMi_mNZGbQP_9GiW_nuE,157157
|
|
34
|
-
orca_sdk/memoryset_test.py,sha256=O2o42XETtffXtZy0kbLk2b8cUDXU-w2ZAzXLi5-vDPQ,51278
|
|
35
|
-
orca_sdk/regression_model.py,sha256=AXRzJG15sDJQSiDCDfRdcLnZDNkJWORYjhHqKyyL-Fc,33960
|
|
36
|
-
orca_sdk/regression_model_test.py,sha256=90EyrhaMk1kTf87RFkMNz1PTItmeUISs6AvHmyp08DU,25447
|
|
37
|
-
orca_sdk/telemetry.py,sha256=ZyCMiyyo_SchjadWZH55TlLrC4Ucq5S316NbW26LL4Y,27834
|
|
38
|
-
orca_sdk/telemetry_test.py,sha256=eT66C5lFdNg-pQdo2I__BP7Tn5fTc9aTkVo9ZhWwhU0,5519
|
|
39
|
-
orca_sdk-0.1.10.dist-info/METADATA,sha256=j_TIalbL8oztP39lnXjyAI6Aosvb6rnJKUc3gcuxD0k,3710
|
|
40
|
-
orca_sdk-0.1.10.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
|
|
41
|
-
orca_sdk-0.1.10.dist-info/RECORD,,
|
|
File without changes
|