orca-sdk 0.1.3__py3-none-any.whl → 0.1.4__py3-none-any.whl
This diff compares the contents of two publicly released versions of this package as they appear in their respective public registries. The information is provided for informational purposes only.
- orca_sdk/async_client.py +448 -301
- orca_sdk/classification_model.py +53 -17
- orca_sdk/client.py +448 -301
- orca_sdk/datasource.py +45 -2
- orca_sdk/datasource_test.py +120 -0
- orca_sdk/embedding_model.py +32 -24
- orca_sdk/job.py +17 -17
- orca_sdk/memoryset.py +318 -30
- orca_sdk/memoryset_test.py +185 -1
- orca_sdk/regression_model.py +38 -4
- orca_sdk/telemetry.py +52 -13
- {orca_sdk-0.1.3.dist-info → orca_sdk-0.1.4.dist-info}/METADATA +1 -1
- {orca_sdk-0.1.3.dist-info → orca_sdk-0.1.4.dist-info}/RECORD +14 -14
- {orca_sdk-0.1.3.dist-info → orca_sdk-0.1.4.dist-info}/WHEEL +0 -0
orca_sdk/async_client.py
CHANGED
@@ -135,6 +135,8 @@ class ClassificationEvaluationRequest(TypedDict):
     datasource_value_column: str
     record_telemetry: NotRequired[bool]
     telemetry_tags: NotRequired[list[str] | None]
+    subsample: NotRequired[int | float | None]
+    ignore_unlabeled: NotRequired[bool]


 class CleanupResponse(TypedDict):
@@ -163,6 +165,7 @@ class CountPredictionsRequest(TypedDict):
     prediction_ids: NotRequired[list[str] | None]
     start_timestamp: NotRequired[str | None]
     end_timestamp: NotRequired[str | None]
+    memory_id: NotRequired[str | None]


 class CreateApiKeyRequest(TypedDict):
@@ -193,6 +196,12 @@ class CreateOrgPlanRequest(TypedDict):
     tier: Literal["FREE", "PRO", "ENTERPRISE", "CANCELLED"]


+class DatasetFilterItem(TypedDict):
+    field: str
+    op: Literal["==", "!=", ">", ">=", "<", "<=", "in", "not in", "like"]
+    value: Any
+
+
 class DeleteMemoriesRequest(TypedDict):
     memory_ids: list[str]

@@ -210,7 +219,7 @@ class EmbedRequest(TypedDict):
 class EmbeddingEvaluationRequest(TypedDict):
     datasource_name_or_id: str
     eval_datasource_name_or_id: NotRequired[str | None]
-    subsample: NotRequired[int | None]
+    subsample: NotRequired[int | float | None]
     datasource_value_column: NotRequired[str]
     datasource_label_column: NotRequired[str | None]
     datasource_score_column: NotRequired[str | None]
@@ -219,7 +228,7 @@ class EmbeddingEvaluationRequest(TypedDict):
     weigh_memories: NotRequired[bool]


-EmbeddingFinetuningMethod = Literal["classification", "batch_triplet_loss"]
+EmbeddingFinetuningMethod = Literal["classification", "regression", "batch_triplet_loss"]


 class FeedbackMetrics(TypedDict):
@@ -233,7 +242,19 @@ FeedbackType = Literal["CONTINUOUS", "BINARY"]
 class FilterItem(TypedDict):
     field: list
     op: Literal["==", "!=", ">", ">=", "<", "<=", "in", "not in", "like"]
-    value: str | int | float | bool | list[str] | list[int] | list[float] | list[bool] | None
+    value: str | int | float | bool | list[str | None] | list[int] | list[float] | list[bool] | None
+
+
+class GetDatasourceRowCountRequest(TypedDict):
+    filters: NotRequired[list[DatasetFilterItem]]
+
+
+class GetDatasourceRowsRequest(TypedDict):
+    filters: NotRequired[list[DatasetFilterItem]]
+    limit: NotRequired[int]
+    offset: NotRequired[int]
+    shuffle: NotRequired[bool]
+    shuffle_seed: NotRequired[int | None]


 class GetMemoriesRequest(TypedDict):
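For orientation only (not part of the published diff), a minimal sketch of how the new row-filtering request types above might be populated; the import path is assumed from the file shown in this diff, and the "split" column and its value are placeholder example data:

    # Illustrative sketch, not from the package: building a filtered rows request.
    from orca_sdk.async_client import DatasetFilterItem, GetDatasourceRowsRequest

    row_filter: DatasetFilterItem = {"field": "split", "op": "==", "value": "train"}
    rows_request: GetDatasourceRowsRequest = {
        "filters": [row_filter],  # any DatasetFilterItem op from the Literal above
        "limit": 100,
        "offset": 0,
        "shuffle": True,
        "shuffle_seed": 42,
    }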
@@ -254,6 +275,18 @@ class InternalServerErrorResponse(TypedDict):
     request_id: str


+JobStatus = Literal["INITIALIZED", "DISPATCHED", "WAITING", "PROCESSING", "COMPLETED", "FAILED", "ABORTING", "ABORTED"]
+
+
+class JobStatusInfo(TypedDict):
+    status: JobStatus
+    steps_total: int | None
+    steps_completed: int | None
+    exception: str | None
+    updated_at: str
+    created_at: str
+
+
 class LabelClassMetrics(TypedDict):
     label: int | None
     label_name: NotRequired[str | None]
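As an illustration of the new job vocabulary (this is an editor's sketch under assumptions, not code from the package), polling the job status endpoint typed later in this diff could look like the following; the import path and the way the client formats the path template from params are assumed:

    # Illustrative sketch: poll /job/{job_id}/status until a terminal JobStatus is reached.
    import asyncio
    from orca_sdk.async_client import JobStatusInfo, OrcaAsyncClient

    async def wait_for_job(client: OrcaAsyncClient, job_id: str) -> JobStatusInfo:
        while True:
            status: JobStatusInfo = await client.GET(
                "/job/{job_id}/status", params={"job_id": job_id}
            )
            if status["status"] in ("COMPLETED", "FAILED", "ABORTED"):
                return status
            await asyncio.sleep(2)  # back off between polls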
@@ -276,6 +309,7 @@ class LabeledMemoryInsert(TypedDict):
     value: str | bytes
     metadata: NotRequired[dict[str, str | int | float | bool | None]]
     source_id: NotRequired[str | None]
+    partition_id: NotRequired[str | None]
     label: int | None


@@ -340,8 +374,6 @@ class MemorysetClassPatternsMetrics(TypedDict):
 class MemorysetClusterAnalysisConfig(TypedDict):
     min_cluster_size: NotRequired[int | None]
     max_cluster_size: NotRequired[int | None]
-    clustering_method: NotRequired[Literal["density", "graph"]]
-    min_cluster_distance: NotRequired[float]
     partitioning_method: NotRequired[Literal["ng", "rb", "cpm"]]
     resolution: NotRequired[float | None]
     num_iterations: NotRequired[int]
@@ -370,6 +402,7 @@ class MemorysetConceptAnalysisConfig(TypedDict):
     use_generative_naming: NotRequired[bool]
     naming_examples_count: NotRequired[int]
     naming_counterexample_count: NotRequired[int]
+    primary_label_pct_threshold: NotRequired[float]
     seed: NotRequired[int]


@@ -439,7 +472,7 @@ class NotFoundErrorResponse(TypedDict):
         "memory",
         "evaluation",
         "analysis",
-        "
+        "job",
         "pretrained_embedding_model",
         "finetuned_embedding_model",
         "feedback_category",
@@ -553,6 +586,8 @@ class RegressionEvaluationRequest(TypedDict):
     datasource_value_column: str
     record_telemetry: NotRequired[bool]
     telemetry_tags: NotRequired[list[str] | None]
+    subsample: NotRequired[int | float | None]
+    ignore_unlabeled: NotRequired[bool]


 class RegressionMetrics(TypedDict):
@@ -595,12 +630,14 @@ class RegressionPredictionRequest(TypedDict):
     prompt: NotRequired[str | None]
     use_lookup_cache: NotRequired[bool]
     consistency_level: NotRequired[Literal["Bounded", "Session", "Strong", "Eventual"] | None]
+    ignore_unlabeled: NotRequired[bool]


 class ScorePredictionMemoryLookup(TypedDict):
     value: str | bytes
     embedding: list[float]
     source_id: str | None
+    partition_id: str | None
     metadata: dict[str, str | int | float | bool | None]
     memory_id: str
     memory_version: int
@@ -638,6 +675,7 @@ class ScoredMemory(TypedDict):
     value: str | bytes
     embedding: list[float]
     source_id: str | None
+    partition_id: str | None
     metadata: dict[str, str | int | float | bool | None]
     memory_id: str
     memory_version: int
@@ -653,6 +691,7 @@ class ScoredMemoryInsert(TypedDict):
     value: str | bytes
     metadata: NotRequired[dict[str, str | int | float | bool | None]]
     source_id: NotRequired[str | None]
+    partition_id: NotRequired[str | None]
     score: float | None


@@ -660,6 +699,7 @@ class ScoredMemoryLookup(TypedDict):
     value: str | bytes
     embedding: list[float]
     source_id: str | None
+    partition_id: str | None
     metadata: dict[str, str | int | float | bool | None]
     memory_id: str
     memory_version: int
@@ -676,6 +716,7 @@ class ScoredMemoryUpdate(TypedDict):
     value: NotRequired[str | bytes]
     metadata: NotRequired[dict[str, str | int | float | bool | None] | None]
     source_id: NotRequired[str | None]
+    partition_id: NotRequired[str | None]
     metrics: NotRequired[MemoryMetrics | None]
     score: NotRequired[float | None]

@@ -684,6 +725,7 @@ class ScoredMemoryWithFeedbackMetrics(TypedDict):
     value: str | bytes
     embedding: list[float]
     source_id: str | None
+    partition_id: str | None
     metadata: dict[str, str | int | float | bool | None]
     memory_id: str
     memory_version: int
@@ -709,18 +751,6 @@ class SubConceptMetrics(TypedDict):
     memory_count: int


-TaskStatus = Literal["INITIALIZED", "DISPATCHED", "WAITING", "PROCESSING", "COMPLETED", "FAILED", "ABORTING", "ABORTED"]
-
-
-class TaskStatusInfo(TypedDict):
-    status: TaskStatus
-    steps_total: int | None
-    steps_completed: int | None
-    exception: str | None
-    updated_at: str
-    created_at: str
-
-
 TelemetryField = list

@@ -793,6 +823,10 @@ class DeleteMemorysetByNameOrIdParams(TypedDict):
     name_or_id: str


+class PostGpuMemorysetByNameOrIdLookupParams(TypedDict):
+    name_or_id: str
+
+
 class GetMemorysetByNameOrIdMemoryByMemoryIdParams(TypedDict):
     name_or_id: str
     memory_id: str
@@ -825,20 +859,35 @@ class PostMemorysetByNameOrIdMemoriesDeleteParams(TypedDict):
     name_or_id: str


+class PatchGpuMemorysetByNameOrIdMemoryParams(TypedDict):
+    name_or_id: str
+
+
+class PostGpuMemorysetByNameOrIdMemoryParams(TypedDict):
+    name_or_id: str
+
+
+PostGpuMemorysetByNameOrIdMemoryRequest = list[LabeledMemoryInsert] | list[ScoredMemoryInsert]
+
+
+class PatchGpuMemorysetByNameOrIdMemoriesParams(TypedDict):
+    name_or_id: str
+
+
 class PostMemorysetByNameOrIdAnalysisParams(TypedDict):
     name_or_id: str


 class GetMemorysetByNameOrIdAnalysisParams(TypedDict):
     name_or_id: str
-    status: NotRequired[
+    status: NotRequired[JobStatus | None]
     limit: NotRequired[int | None]
     offset: NotRequired[int | None]


-class
+class GetMemorysetByNameOrIdAnalysisByAnalysisJobIdParams(TypedDict):
     name_or_id: str
-
+    analysis_job_id: str


 class PostMemorysetByNameOrIdMemoryByMemoryIdCascadingEditsParams(TypedDict):
@@ -854,34 +903,42 @@ class DeleteFinetunedEmbeddingModelByNameOrIdParams(TypedDict):
     name_or_id: str


-class
+class PostGpuFinetunedEmbeddingModelByNameOrIdEmbeddingParams(TypedDict):
     name_or_id: str


-class
-
-    task_id: str
+class GetPretrainedEmbeddingModelByModelNameParams(TypedDict):
+    model_name: PretrainedEmbeddingModelName


-class
-
-    datasource: NotRequired[str | None]
-    value_column: NotRequired[str | None]
-    label_column: NotRequired[str | None]
-    score_column: NotRequired[str | None]
+class PostGpuPretrainedEmbeddingModelByModelNameEmbeddingParams(TypedDict):
+    model_name: PretrainedEmbeddingModelName


-class
-
+class PostFinetunedEmbeddingModelByNameOrIdEvaluationParams(TypedDict):
+    name_or_id: str


 class PostPretrainedEmbeddingModelByModelNameEvaluationParams(TypedDict):
     model_name: PretrainedEmbeddingModelName


-class
+class GetFinetunedEmbeddingModelByNameOrIdEvaluationByJobIdParams(TypedDict):
+    name_or_id: str
+    job_id: str
+
+
+class GetPretrainedEmbeddingModelByModelNameEvaluationByJobIdParams(TypedDict):
     model_name: PretrainedEmbeddingModelName
-
+    job_id: str
+
+
+class GetFinetunedEmbeddingModelByNameOrIdEvaluationsParams(TypedDict):
+    name_or_id: str
+    datasource: NotRequired[str | None]
+    value_column: NotRequired[str | None]
+    label_column: NotRequired[str | None]
+    score_column: NotRequired[str | None]


 class GetPretrainedEmbeddingModelByModelNameEvaluationsParams(TypedDict):
@@ -911,6 +968,14 @@ class DeleteDatasourceByNameOrIdParams(TypedDict):
     name_or_id: str


+class PostDatasourceByNameOrIdRowsParams(TypedDict):
+    name_or_id: str
+
+
+class PostDatasourceByNameOrIdRowsCountParams(TypedDict):
+    name_or_id: str
+
+
 class GetDatasourceByNameOrIdEmbeddingModelEvaluationsParams(TypedDict):
     name_or_id: str
     value_column: NotRequired[str | None]
@@ -941,36 +1006,42 @@ class DeleteClassificationModelByNameOrIdParams(TypedDict):
     name_or_id: str


-class
-
+class PatchRegressionModelByNameOrIdParams(TypedDict):
+    name_or_id: str


-class
-
+class GetRegressionModelByNameOrIdParams(TypedDict):
+    name_or_id: str


-class
-
-    task_id: str
+class DeleteRegressionModelByNameOrIdParams(TypedDict):
+    name_or_id: str


-class
-
-    task_id: str
+class PostGpuClassificationModelByNameOrIdPredictionParams(TypedDict):
+    name_or_id: str


-class
+class PostClassificationModelByNameOrIdPredictionParams(TypedDict):
     name_or_id: str


-class
+class PostGpuRegressionModelByNameOrIdPredictionParams(TypedDict):
     name_or_id: str


-class
+class PostRegressionModelByNameOrIdPredictionParams(TypedDict):
     name_or_id: str


+class PostClassificationModelByModelNameOrIdEvaluationParams(TypedDict):
+    model_name_or_id: str
+
+
+class GetClassificationModelByModelNameOrIdEvaluationParams(TypedDict):
+    model_name_or_id: str
+
+
 class PostRegressionModelByModelNameOrIdEvaluationParams(TypedDict):
     model_name_or_id: str

@@ -979,26 +1050,36 @@ class GetRegressionModelByModelNameOrIdEvaluationParams(TypedDict):
     model_name_or_id: str


-class
+class GetClassificationModelByModelNameOrIdEvaluationByJobIdParams(TypedDict):
     model_name_or_id: str
-
+    job_id: str


-class
+class DeleteClassificationModelByModelNameOrIdEvaluationByJobIdParams(TypedDict):
     model_name_or_id: str
-
+    job_id: str


-class
-
+class GetRegressionModelByModelNameOrIdEvaluationByJobIdParams(TypedDict):
+    model_name_or_id: str
+    job_id: str


-class
-
+class DeleteRegressionModelByModelNameOrIdEvaluationByJobIdParams(TypedDict):
+    model_name_or_id: str
+    job_id: str
+
+
+class GetJobByJobIdParams(TypedDict):
+    job_id: str
+

+class GetJobByJobIdStatusParams(TypedDict):
+    job_id: str

-
-
+
+class GetJobParams(TypedDict):
+    status: NotRequired[JobStatus | list[JobStatus] | None]
     type: NotRequired[str | list[str] | None]
     limit: NotRequired[int | None]
     offset: NotRequired[int]
@@ -1006,8 +1087,8 @@ class GetTaskParams(TypedDict):
     end_timestamp: NotRequired[str | None]


-class
-
+class DeleteJobByJobIdAbortParams(TypedDict):
+    job_id: str


 class GetWorkerParams(TypedDict):
@@ -1063,43 +1144,8 @@ class DeleteTelemetryFeedbackCategoryByNameOrIdParams(TypedDict):
 PutTelemetryPredictionFeedbackRequest = list[PredictionFeedbackRequest]


-class
-
-
-
-class PostGpuMemorysetByNameOrIdLookupParams(TypedDict):
-    name_or_id: str
-
-
-class PatchGpuMemorysetByNameOrIdMemoryParams(TypedDict):
-    name_or_id: str
-
-
-class PostGpuMemorysetByNameOrIdMemoryParams(TypedDict):
-    name_or_id: str
-
-
-PostGpuMemorysetByNameOrIdMemoryRequest = list[LabeledMemoryInsert] | list[ScoredMemoryInsert]
-
-
-class PatchGpuMemorysetByNameOrIdMemoriesParams(TypedDict):
-    name_or_id: str
-
-
-class PostGpuClassificationModelByNameOrIdPredictionParams(TypedDict):
-    name_or_id: str
-
-
-class PostGpuRegressionModelByNameOrIdPredictionParams(TypedDict):
-    name_or_id: str
-
-
-class PostGpuFinetunedEmbeddingModelByNameOrIdEmbeddingParams(TypedDict):
-    name_or_id: str
-
-
-class PostGpuPretrainedEmbeddingModelByModelNameEmbeddingParams(TypedDict):
-    model_name: PretrainedEmbeddingModelName
+class GetAgentsBootstrapClassificationModelByJobIdParams(TypedDict):
+    job_id: str


 class FieldValidationError(TypedDict):
@@ -1171,6 +1217,7 @@ class ClassificationPredictionRequest(TypedDict):
     prompt: NotRequired[str | None]
     use_lookup_cache: NotRequired[bool]
     consistency_level: NotRequired[Literal["Bounded", "Session", "Strong", "Eventual"] | None]
+    ignore_unlabeled: NotRequired[bool]


 class CloneMemorysetRequest(TypedDict):
@@ -1187,6 +1234,7 @@ class ColumnInfo(TypedDict):
     name: str
     type: ColumnType
     enum_options: NotRequired[list[str] | None]
+    string_values: NotRequired[list[str] | None]
     int_values: NotRequired[list[int] | None]
     contains_nones: NotRequired[bool]

@@ -1233,6 +1281,8 @@ class CreateMemorysetRequest(TypedDict):
     prompt: NotRequired[str]
     hidden: NotRequired[bool]
     batch_size: NotRequired[int]
+    subsample: NotRequired[int | float | None]
+    memory_type: NotRequired[MemoryType]


 class CreateRegressionModelRequest(TypedDict):
@@ -1257,48 +1307,52 @@ class DatasourceMetadata(TypedDict):


 class EmbeddingEvaluationResponse(TypedDict):
-
+    job_id: str
     org_id: str
     finetuned_embedding_model_id: str | None
     pretrained_embedding_model_name: PretrainedEmbeddingModelName | None
     datasource_id: str
-    subsample: int | None
+    subsample: int | float | None
     datasource_value_column: str
     datasource_label_column: NotRequired[str | None]
     datasource_score_column: NotRequired[str | None]
     neighbor_count: int
     weigh_memories: bool
-    status:
+    status: JobStatus
     result: ClassificationMetrics | RegressionMetrics | None
     created_at: str
     updated_at: str
+    task_id: str


 class EvaluationResponse(TypedDict):
-
+    job_id: str
     org_id: str
-    status:
+    status: JobStatus
     result: ClassificationMetrics | RegressionMetrics | None
     created_at: str
     updated_at: str
+    task_id: str


 class EvaluationResponseClassificationMetrics(TypedDict):
-
+    job_id: str
     org_id: str
-    status:
+    status: JobStatus
     result: ClassificationMetrics | None
     created_at: str
     updated_at: str
+    task_id: str


 class EvaluationResponseRegressionMetrics(TypedDict):
-
+    job_id: str
     org_id: str
-    status:
+    status: JobStatus
     result: RegressionMetrics | None
     created_at: str
     updated_at: str
+    task_id: str


 class FinetuneEmbeddingModelRequest(TypedDict):
@@ -1307,7 +1361,8 @@ class FinetuneEmbeddingModelRequest(TypedDict):
     train_memoryset_name_or_id: NotRequired[str | None]
     train_datasource_name_or_id: NotRequired[str | None]
     eval_datasource_name_or_id: NotRequired[str | None]
-    label_column: NotRequired[str]
+    label_column: NotRequired[str | None]
+    score_column: NotRequired[str | None]
     value_column: NotRequired[str]
     training_method: NotRequired[EmbeddingFinetuningMethod]
     training_args: NotRequired[dict[str, str | int | float | bool]]
@@ -1324,8 +1379,9 @@ class FinetunedEmbeddingModelMetadata(TypedDict):
     created_at: str
     updated_at: str
     base_model: PretrainedEmbeddingModelName
+    finetuning_job_id: str
+    finetuning_status: JobStatus
     finetuning_task_id: str
-    finetuning_status: TaskStatus


 class HTTPValidationError(TypedDict):
@@ -1337,10 +1393,28 @@ class InvalidInputErrorResponse(TypedDict):
     validation_issues: list[FieldValidationError]


+class Job(TypedDict):
+    status: JobStatus
+    steps_total: int | None
+    steps_completed: int | None
+    exception: str | None
+    updated_at: str
+    created_at: str
+    id: str
+    org_id: str
+    worker_id: str | None
+    type: str
+    payload: BaseModel
+    result: BaseModel | None
+    depends_on: NotRequired[list[str]]
+    lease_token: str | None
+
+
 class LabelPredictionMemoryLookup(TypedDict):
     value: str | bytes
     embedding: list[float]
     source_id: str | None
+    partition_id: str | None
     metadata: dict[str, str | int | float | bool | None]
     memory_id: str
     memory_version: int
@@ -1382,6 +1456,7 @@ class LabeledMemory(TypedDict):
     value: str | bytes
     embedding: list[float]
     source_id: str | None
+    partition_id: str | None
     metadata: dict[str, str | int | float | bool | None]
     memory_id: str
     memory_version: int
@@ -1397,6 +1472,7 @@ class LabeledMemoryLookup(TypedDict):
     value: str | bytes
     embedding: list[float]
     source_id: str | None
+    partition_id: str | None
     metadata: dict[str, str | int | float | bool | None]
     memory_id: str
     memory_version: int
@@ -1414,6 +1490,7 @@ class LabeledMemoryUpdate(TypedDict):
     value: NotRequired[str | bytes]
     metadata: NotRequired[dict[str, str | int | float | bool | None] | None]
     source_id: NotRequired[str | None]
+    partition_id: NotRequired[str | None]
     metrics: NotRequired[MemoryMetrics | None]
     label: NotRequired[int | None]

@@ -1422,6 +1499,7 @@ class LabeledMemoryWithFeedbackMetrics(TypedDict):
     value: str | bytes
     embedding: list[float]
     source_id: str | None
+    partition_id: str | None
     metadata: dict[str, str | int | float | bool | None]
     memory_id: str
     memory_version: int
@@ -1441,7 +1519,8 @@ class ListPredictionsRequest(TypedDict):
     prediction_ids: NotRequired[list[str] | None]
     start_timestamp: NotRequired[str | None]
     end_timestamp: NotRequired[str | None]
-
+    memory_id: NotRequired[str | None]
+    limit: NotRequired[int]
     offset: NotRequired[int | None]
     sort: NotRequired[PredictionSort]
     expected_label_match: NotRequired[bool | None]
@@ -1480,6 +1559,13 @@ class MemorysetMetrics(TypedDict):
     concepts: NotRequired[MemorysetConceptMetrics | None]


+class PaginatedJob(TypedDict):
+    items: list[Job]
+    total: int
+    offset: int
+    limit: int
+
+
 class PaginatedUnionLabeledMemoryWithFeedbackMetricsScoredMemoryWithFeedbackMetrics(TypedDict):
     items: list[LabeledMemoryWithFeedbackMetrics | ScoredMemoryWithFeedbackMetrics]
     total: int
@@ -1497,23 +1583,6 @@ class PretrainedEmbeddingModelMetadata(TypedDict):
     num_params: int


-class Task(TypedDict):
-    status: TaskStatus
-    steps_total: int | None
-    steps_completed: int | None
-    exception: str | None
-    updated_at: str
-    created_at: str
-    id: str
-    org_id: str
-    worker_id: str | None
-    type: str
-    payload: BaseModel
-    result: BaseModel | None
-    depends_on: list[str]
-    lease_token: str | None
-
-
 class TelemetryMemoriesRequest(TypedDict):
     memoryset_id: str
     offset: NotRequired[int]
@@ -1545,10 +1614,10 @@ class CascadingEditSuggestion(TypedDict):


 class MemorysetAnalysisResponse(TypedDict):
-
+    job_id: str
     org_id: str
     memoryset_id: str
-    status:
+    status: JobStatus
     lookup_count: int
     batch_size: int
     clear_metrics: bool
@@ -1556,6 +1625,7 @@ class MemorysetAnalysisResponse(TypedDict):
     results: MemorysetMetrics | None
     created_at: str
     updated_at: str
+    task_id: str


 class MemorysetMetadata(TypedDict):
@@ -1571,8 +1641,8 @@ class MemorysetMetadata(TypedDict):
     created_at: str
     updated_at: str
     memories_updated_at: str
-
-    insertion_status:
+    insertion_job_id: str
+    insertion_status: JobStatus
     metrics: MemorysetMetrics
     memory_type: MemoryType
     label_names: list[str] | None
@@ -1582,13 +1652,7 @@ class MemorysetMetadata(TypedDict):
     document_prompt_override: str | None
     query_prompt_override: str | None
     hidden: bool
-
-
-class PaginatedTask(TypedDict):
-    items: list[Task]
-    total: int
-    offset: int
-    limit: int
+    insertion_task_id: str


 class PaginatedWorkerInfo(TypedDict):
@@ -1606,11 +1670,12 @@ class BootstrapClassificationModelMeta(TypedDict):


 class BootstrapClassificationModelResponse(TypedDict):
-
+    job_id: str
     org_id: str
-    status:
+    status: JobStatus
     result: BootstrapClassificationModelMeta | None
     input: BootstrapClassificationModelRequest | None
+    task_id: str


 class OrcaAsyncClient(AsyncClient):
@@ -1889,9 +1954,9 @@ class OrcaAsyncClient(AsyncClient):
     @overload
     async def GET(
         self,
-        path: Literal["/memoryset/{name_or_id}/analysis/{
+        path: Literal["/memoryset/{name_or_id}/analysis/{analysis_job_id}"],
         *,
-        params:
+        params: GetMemorysetByNameOrIdAnalysisByAnalysisJobIdParams,
         parse_as: Literal["json"] = "json",
         headers: HeaderTypes | None = None,
         cookies: CookieTypes | None = None,
@@ -1939,9 +2004,9 @@ class OrcaAsyncClient(AsyncClient):
     @overload
     async def GET(
         self,
-        path: Literal["/
+        path: Literal["/pretrained_embedding_model"],
         *,
-        params:
+        params: None = None,
         parse_as: Literal["json"] = "json",
         headers: HeaderTypes | None = None,
         cookies: CookieTypes | None = None,
@@ -1949,16 +2014,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
-        """
+    ) -> list[PretrainedEmbeddingModelMetadata]:
+        """List all available pretrained embedding models."""
         pass

     @overload
     async def GET(
         self,
-        path: Literal["/
+        path: Literal["/pretrained_embedding_model/{model_name}"],
         *,
-        params:
+        params: GetPretrainedEmbeddingModelByModelNameParams,
         parse_as: Literal["json"] = "json",
         headers: HeaderTypes | None = None,
         cookies: CookieTypes | None = None,
@@ -1966,16 +2031,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
-        """
+    ) -> PretrainedEmbeddingModelMetadata:
+        """Get metadata for a specific pretrained embedding model."""
         pass

     @overload
     async def GET(
         self,
-        path: Literal["/
+        path: Literal["/finetuned_embedding_model/{name_or_id}/evaluation/{job_id}"],
         *,
-        params:
+        params: GetFinetunedEmbeddingModelByNameOrIdEvaluationByJobIdParams,
         parse_as: Literal["json"] = "json",
         headers: HeaderTypes | None = None,
         cookies: CookieTypes | None = None,
@@ -1983,16 +2048,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
-        """
+    ) -> EmbeddingEvaluationResponse:
+        """Get evaluation results for a finetuned embedding model by job ID."""
         pass

     @overload
     async def GET(
         self,
-        path: Literal["/pretrained_embedding_model/{model_name}"],
+        path: Literal["/pretrained_embedding_model/{model_name}/evaluation/{job_id}"],
         *,
-        params:
+        params: GetPretrainedEmbeddingModelByModelNameEvaluationByJobIdParams,
         parse_as: Literal["json"] = "json",
         headers: HeaderTypes | None = None,
         cookies: CookieTypes | None = None,
@@ -2000,16 +2065,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
-        """Get
+    ) -> EmbeddingEvaluationResponse:
+        """Get evaluation results for a pretrained embedding model by job ID."""
         pass

     @overload
     async def GET(
         self,
-        path: Literal["/
+        path: Literal["/finetuned_embedding_model/{name_or_id}/evaluations"],
         *,
-        params:
+        params: GetFinetunedEmbeddingModelByNameOrIdEvaluationsParams,
         parse_as: Literal["json"] = "json",
         headers: HeaderTypes | None = None,
         cookies: CookieTypes | None = None,
@@ -2017,8 +2082,8 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) -> EmbeddingEvaluationResponse:
-        """
+    ) -> list[EmbeddingEvaluationResponse]:
+        """List all evaluation results for a finetuned embedding model."""
         pass

     @overload
@@ -2143,7 +2208,7 @@ class OrcaAsyncClient(AsyncClient):
     @overload
     async def GET(
         self,
-        path: Literal["/
+        path: Literal["/classification_model"],
         *,
         params: None = None,
         parse_as: Literal["json"] = "json",
@@ -2153,13 +2218,13 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) -> list[ClassificationModelMetadata
+    ) -> list[ClassificationModelMetadata]:
         pass

     @overload
     async def GET(
         self,
-        path: Literal["/
+        path: Literal["/regression_model"],
         *,
         params: None = None,
         parse_as: Literal["json"] = "json",
@@ -2169,7 +2234,7 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) -> list[
+    ) -> list[RegressionModelMetadata]:
         pass

     @overload
@@ -2191,9 +2256,9 @@ class OrcaAsyncClient(AsyncClient):
     @overload
     async def GET(
         self,
-        path: Literal["/
+        path: Literal["/regression_model/{name_or_id}"],
         *,
-        params:
+        params: GetRegressionModelByNameOrIdParams,
         parse_as: Literal["json"] = "json",
         headers: HeaderTypes | None = None,
         cookies: CookieTypes | None = None,
@@ -2201,15 +2266,15 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> RegressionModelMetadata:
         pass

     @overload
     async def GET(
         self,
-        path: Literal["/
+        path: Literal["/predictive_model"],
         *,
-        params:
+        params: None = None,
         parse_as: Literal["json"] = "json",
         headers: HeaderTypes | None = None,
         cookies: CookieTypes | None = None,
@@ -2217,15 +2282,15 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> list[ClassificationModelMetadata | RegressionModelMetadata]:
         pass

     @overload
     async def GET(
         self,
-        path: Literal["/
+        path: Literal["/classification_model/{model_name_or_id}/evaluation"],
         *,
-        params:
+        params: GetClassificationModelByModelNameOrIdEvaluationParams,
         parse_as: Literal["json"] = "json",
         headers: HeaderTypes | None = None,
         cookies: CookieTypes | None = None,
@@ -2233,15 +2298,15 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) -> list[
+    ) -> list[EvaluationResponseClassificationMetrics]:
         pass

     @overload
     async def GET(
         self,
-        path: Literal["/regression_model/{
+        path: Literal["/regression_model/{model_name_or_id}/evaluation"],
         *,
-        params:
+        params: GetRegressionModelByModelNameOrIdEvaluationParams,
         parse_as: Literal["json"] = "json",
         headers: HeaderTypes | None = None,
         cookies: CookieTypes | None = None,
@@ -2249,15 +2314,15 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> list[EvaluationResponseRegressionMetrics]:
         pass

     @overload
     async def GET(
         self,
-        path: Literal["/
+        path: Literal["/classification_model/{model_name_or_id}/evaluation/{job_id}"],
         *,
-        params:
+        params: GetClassificationModelByModelNameOrIdEvaluationByJobIdParams,
         parse_as: Literal["json"] = "json",
         headers: HeaderTypes | None = None,
         cookies: CookieTypes | None = None,
@@ -2265,15 +2330,15 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> EvaluationResponseClassificationMetrics:
         pass

     @overload
     async def GET(
         self,
-        path: Literal["/regression_model/{model_name_or_id}/evaluation/{
+        path: Literal["/regression_model/{model_name_or_id}/evaluation/{job_id}"],
         *,
-        params:
+        params: GetRegressionModelByModelNameOrIdEvaluationByJobIdParams,
         parse_as: Literal["json"] = "json",
         headers: HeaderTypes | None = None,
         cookies: CookieTypes | None = None,
@@ -2287,9 +2352,9 @@ class OrcaAsyncClient(AsyncClient):
     @overload
     async def GET(
         self,
-        path: Literal["/
+        path: Literal["/job/{job_id}"],
         *,
-        params:
+        params: GetJobByJobIdParams,
         parse_as: Literal["json"] = "json",
         headers: HeaderTypes | None = None,
         cookies: CookieTypes | None = None,
@@ -2297,15 +2362,15 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> Job:
         pass

     @overload
     async def GET(
         self,
-        path: Literal["/
+        path: Literal["/job/{job_id}/status"],
         *,
-        params:
+        params: GetJobByJobIdStatusParams,
         parse_as: Literal["json"] = "json",
         headers: HeaderTypes | None = None,
         cookies: CookieTypes | None = None,
@@ -2313,15 +2378,15 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> JobStatusInfo:
         pass

     @overload
     async def GET(
         self,
-        path: Literal["/
+        path: Literal["/job"],
         *,
-        params:
+        params: GetJobParams,
         parse_as: Literal["json"] = "json",
         headers: HeaderTypes | None = None,
         cookies: CookieTypes | None = None,
@@ -2329,7 +2394,7 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> PaginatedJob:
         pass

     @overload
@@ -2480,9 +2545,9 @@ class OrcaAsyncClient(AsyncClient):
     @overload
     async def GET(
         self,
-        path: Literal["/agents/bootstrap_classification_model/{
+        path: Literal["/agents/bootstrap_classification_model/{job_id}"],
         *,
-        params:
+        params: GetAgentsBootstrapClassificationModelByJobIdParams,
         parse_as: Literal["json"] = "json",
         headers: HeaderTypes | None = None,
         cookies: CookieTypes | None = None,
@@ -2491,7 +2556,7 @@ class OrcaAsyncClient(AsyncClient):
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
     ) -> BootstrapClassificationModelResponse:
-        """Get the status of a bootstrap classification model
+        """Get the status of a bootstrap classification model job"""
         pass

     async def GET(
@@ -2661,9 +2726,9 @@ class OrcaAsyncClient(AsyncClient):
     @overload
     async def DELETE(
         self,
-        path: Literal["/
+        path: Literal["/regression_model/{name_or_id}"],
         *,
-        params:
+        params: DeleteRegressionModelByNameOrIdParams,
         parse_as: Literal["json"] = "json",
         headers: HeaderTypes | None = None,
         cookies: CookieTypes | None = None,
@@ -2677,9 +2742,9 @@ class OrcaAsyncClient(AsyncClient):
     @overload
     async def DELETE(
         self,
-        path: Literal["/
+        path: Literal["/classification_model/{model_name_or_id}/evaluation/{job_id}"],
        *,
-        params:
+        params: DeleteClassificationModelByModelNameOrIdEvaluationByJobIdParams,
         parse_as: Literal["json"] = "json",
         headers: HeaderTypes | None = None,
         cookies: CookieTypes | None = None,
@@ -2693,9 +2758,9 @@ class OrcaAsyncClient(AsyncClient):
     @overload
     async def DELETE(
         self,
-        path: Literal["/regression_model/{model_name_or_id}/evaluation/{
+        path: Literal["/regression_model/{model_name_or_id}/evaluation/{job_id}"],
         *,
-        params:
+        params: DeleteRegressionModelByModelNameOrIdEvaluationByJobIdParams,
         parse_as: Literal["json"] = "json",
         headers: HeaderTypes | None = None,
         cookies: CookieTypes | None = None,
@@ -2709,9 +2774,9 @@ class OrcaAsyncClient(AsyncClient):
     @overload
     async def DELETE(
         self,
-        path: Literal["/
+        path: Literal["/job/{job_id}/abort"],
         *,
-        params:
+        params: DeleteJobByJobIdAbortParams,
         parse_as: Literal["json"] = "json",
         headers: HeaderTypes | None = None,
         cookies: CookieTypes | None = None,
@@ -2872,6 +2937,26 @@ class OrcaAsyncClient(AsyncClient):
     ) -> None:
         pass

+    @overload
+    async def POST(
+        self,
+        path: Literal["/gpu/memoryset/{name_or_id}/lookup"],
+        *,
+        params: PostGpuMemorysetByNameOrIdLookupParams,
+        json: LookupRequest,
+        data: None = None,
+        files: None = None,
+        content: None = None,
+        parse_as: Literal["json"] = "json",
+        headers: HeaderTypes | None = None,
+        cookies: CookieTypes | None = None,
+        auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
+        follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
+        timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
+        extensions: RequestExtensions | None = None,
+    ) -> list[list[LabeledMemoryLookup | ScoredMemoryLookup]]:
+        pass
+
     @overload
     async def POST(
         self,
@@ -2932,6 +3017,26 @@ class OrcaAsyncClient(AsyncClient):
     ) -> None:
         pass

+    @overload
+    async def POST(
+        self,
+        path: Literal["/gpu/memoryset/{name_or_id}/memory"],
+        *,
+        params: PostGpuMemorysetByNameOrIdMemoryParams,
+        json: PostGpuMemorysetByNameOrIdMemoryRequest,
+        data: None = None,
+        files: None = None,
+        content: None = None,
+        parse_as: Literal["json"] = "json",
+        headers: HeaderTypes | None = None,
+        cookies: CookieTypes | None = None,
+        auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
+        follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
+        timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
+        extensions: RequestExtensions | None = None,
+    ) -> list[str]:
+        pass
+
     @overload
     async def POST(
         self,
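As an illustration of the GPU memory route added in the hunk above (an editor's sketch under assumptions, not code from the package), inserting labeled memories with the new partition_id field might look like this; the import path is assumed from the file shown in this diff, and the memoryset name and values are placeholders:

    # Illustrative sketch: insert labeled memories via /gpu/memoryset/{name_or_id}/memory.
    from orca_sdk.async_client import LabeledMemoryInsert, OrcaAsyncClient

    async def add_examples(client: OrcaAsyncClient) -> list[str]:
        memories: list[LabeledMemoryInsert] = [
            {"value": "the fabric feels cheap", "label": 0, "partition_id": "tenant-a"},
            {"value": "fits perfectly, great quality", "label": 1, "partition_id": "tenant-a"},
        ]
        # Per the overload above, the endpoint returns the inserted memory IDs as list[str].
        return await client.POST(
            "/gpu/memoryset/{name_or_id}/memory",
            params={"name_or_id": "product-reviews"},
            json=memories,
        )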
@@ -2993,6 +3098,48 @@ class OrcaAsyncClient(AsyncClient):
         """Create a finetuned embedding model."""
         pass

+    @overload
+    async def POST(
+        self,
+        path: Literal["/gpu/finetuned_embedding_model/{name_or_id}/embedding"],
+        *,
+        params: PostGpuFinetunedEmbeddingModelByNameOrIdEmbeddingParams,
+        json: EmbedRequest,
+        data: None = None,
+        files: None = None,
+        content: None = None,
+        parse_as: Literal["json"] = "json",
+        headers: HeaderTypes | None = None,
+        cookies: CookieTypes | None = None,
+        auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
+        follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
+        timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
+        extensions: RequestExtensions | None = None,
+    ) -> list[list[float]]:
+        """Embed values using a finetuned embedding model."""
+        pass
+
+    @overload
+    async def POST(
+        self,
+        path: Literal["/gpu/pretrained_embedding_model/{model_name}/embedding"],
+        *,
+        params: PostGpuPretrainedEmbeddingModelByModelNameEmbeddingParams,
+        json: EmbedRequest,
+        data: None = None,
+        files: None = None,
+        content: None = None,
+        parse_as: Literal["json"] = "json",
+        headers: HeaderTypes | None = None,
+        cookies: CookieTypes | None = None,
+        auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
+        follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
+        timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
+        extensions: RequestExtensions | None = None,
+    ) -> list[list[float]]:
+        """Embed values using a pretrained embedding model."""
+        pass
+
     @overload
     async def POST(
         self,
@@ -3092,10 +3239,10 @@ class OrcaAsyncClient(AsyncClient):
     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/datasource/{name_or_id}/rows"],
         *,
-        params:
-        json:
+        params: PostDatasourceByNameOrIdRowsParams,
+        json: GetDatasourceRowsRequest,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3106,16 +3253,17 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> list[dict[str, Any]]:
+        """Get rows from a specific datasource with optional filtering."""
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/datasource/{name_or_id}/rows/count"],
         *,
-        params:
-        json:
+        params: PostDatasourceByNameOrIdRowsCountParams,
+        json: GetDatasourceRowCountRequest,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3126,16 +3274,17 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> int:
+        """Get row count from a specific datasource with optional filtering."""
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/classification_model"],
         *,
         params: None = None,
-        json:
+        json: CreateClassificationModelRequest,
         data: None = None,
         files: None = None,
         content: None = None,
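Tying the datasource row endpoints above together (an editor's sketch under assumptions, not code from the package), a combined rows-plus-count call could look like the following; the import path, datasource name, and filter are placeholders:

    # Illustrative sketch: read filtered rows and a row count from a datasource.
    from typing import Any
    from orca_sdk.async_client import GetDatasourceRowsRequest, OrcaAsyncClient

    async def preview_rows(client: OrcaAsyncClient, name: str) -> tuple[int, list[dict[str, Any]]]:
        body: GetDatasourceRowsRequest = {
            "filters": [{"field": "label", "op": "!=", "value": None}],
            "limit": 25,
            "shuffle": True,
        }
        total = await client.POST(
            "/datasource/{name_or_id}/rows/count",
            params={"name_or_id": name},
            json={"filters": body.get("filters", [])},
        )
        rows = await client.POST(
            "/datasource/{name_or_id}/rows", params={"name_or_id": name}, json=body
        )
        return total, rows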
@@ -3146,16 +3295,16 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
3146
3295
|
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
3147
3296
|
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
3148
3297
|
extensions: RequestExtensions | None = None,
|
|
3149
|
-
) ->
|
|
3298
|
+
) -> ClassificationModelMetadata:
|
|
3150
3299
|
pass
|
|
3151
3300
|
|
|
3152
3301
|
@overload
|
|
3153
3302
|
async def POST(
|
|
3154
3303
|
self,
|
|
3155
|
-
path: Literal["/regression_model
|
|
3304
|
+
path: Literal["/regression_model"],
|
|
3156
3305
|
*,
|
|
3157
|
-
params:
|
|
3158
|
-
json:
|
|
3306
|
+
params: None = None,
|
|
3307
|
+
json: CreateRegressionModelRequest,
|
|
3159
3308
|
data: None = None,
|
|
3160
3309
|
files: None = None,
|
|
3161
3310
|
content: None = None,
|
|
@@ -3166,16 +3315,16 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
3166
3315
|
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
3167
3316
|
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
3168
3317
|
extensions: RequestExtensions | None = None,
|
|
3169
|
-
) ->
|
|
3318
|
+
) -> RegressionModelMetadata:
|
|
3170
3319
|
pass
|
|
3171
3320
|
|
|
3172
3321
|
@overload
|
|
3173
3322
|
async def POST(
|
|
3174
3323
|
self,
|
|
3175
|
-
path: Literal["/
|
|
3324
|
+
path: Literal["/gpu/classification_model/{name_or_id}/prediction"],
|
|
3176
3325
|
*,
|
|
3177
|
-
params:
|
|
3178
|
-
json:
|
|
3326
|
+
params: PostGpuClassificationModelByNameOrIdPredictionParams,
|
|
3327
|
+
json: ClassificationPredictionRequest,
|
|
3179
3328
|
data: None = None,
|
|
3180
3329
|
files: None = None,
|
|
3181
3330
|
content: None = None,
|
|
@@ -3186,17 +3335,16 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
3186
3335
|
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
3187
3336
|
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
3188
3337
|
extensions: RequestExtensions | None = None,
|
|
3189
|
-
) -> list[
|
|
3190
|
-
"""List predictions with optional filtering and sorting."""
|
|
3338
|
+
) -> list[BaseLabelPredictionResult]:
|
|
3191
3339
|
pass
|
|
3192
3340
|
|
|
3193
3341
|
@overload
|
|
3194
3342
|
async def POST(
|
|
3195
3343
|
self,
|
|
3196
|
-
path: Literal["/
|
|
3344
|
+
path: Literal["/classification_model/{name_or_id}/prediction"],
|
|
3197
3345
|
*,
|
|
3198
|
-
params:
|
|
3199
|
-
json:
|
|
3346
|
+
params: PostClassificationModelByNameOrIdPredictionParams,
|
|
3347
|
+
json: ClassificationPredictionRequest,
|
|
3200
3348
|
data: None = None,
|
|
3201
3349
|
files: None = None,
|
|
3202
3350
|
content: None = None,
|
|
@@ -3207,17 +3355,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
-        """Count predictions with optional filtering."""
+    ) -> list[BaseLabelPredictionResult]:
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/gpu/regression_model/{name_or_id}/prediction"],
         *,
-        params:
-        json:
+        params: PostGpuRegressionModelByNameOrIdPredictionParams,
+        json: RegressionPredictionRequest,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3228,21 +3375,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
-        """
-        List memories with feedback metrics.
-        **Note**: This endpoint will ONLY return memories that have been used in a prediction.
-        If you want to query ALL memories WITHOUT feedback metrics, use the query_memoryset endpoint.
-        """
+    ) -> list[BaseScorePredictionResult]:
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/regression_model/{name_or_id}/prediction"],
         *,
-        params:
-        json:
+        params: PostRegressionModelByNameOrIdPredictionParams,
+        json: RegressionPredictionRequest,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3253,30 +3395,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
-        """
-        Bootstrap a classification model by creating a memoryset with generated memories and a classification model.
-
-        This endpoint uses the bootstrap_classification_model agent to generate:
-        1. Memoryset configuration with appropriate settings
-        2. Model configuration with optimal parameters
-        3. High-quality training memories for each label
-
-        The process involves:
-        1. Calling the agent to generate configurations and memories
-        2. Creating a datasource from the generated memories
-        3. Creating a memoryset from the datasource
-        4. Creating a classification model from the memoryset
-        """
+    ) -> list[BaseScorePredictionResult]:
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/classification_model/{model_name_or_id}/evaluation"],
         *,
-        params:
-        json:
+        params: PostClassificationModelByModelNameOrIdEvaluationParams,
+        json: ClassificationEvaluationRequest,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3287,16 +3415,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> EvaluationResponse:
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/regression_model/{model_name_or_id}/evaluation"],
         *,
-        params:
-        json:
+        params: PostRegressionModelByModelNameOrIdEvaluationParams,
+        json: RegressionEvaluationRequest,
         data: None = None,
         files: None = None,
         content: None = None,
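
The classification evaluation overload completed above returns an EvaluationResponse. The hedged sketch below uses the subsample and ignore_unlabeled fields that this release adds to ClassificationEvaluationRequest; the datasource keys and the params dict are assumptions, since neither TypedDict is fully expanded in this section.

from typing import cast

from orca_sdk.async_client import (
    OrcaAsyncClient,
    PostClassificationModelByModelNameOrIdEvaluationParams,
)

async def evaluate_classifier(client: OrcaAsyncClient) -> None:
    params = cast(
        PostClassificationModelByModelNameOrIdEvaluationParams,
        {"model_name_or_id": "my-classifier"},  # assumed key
    )
    evaluation = await client.POST(
        "/classification_model/{model_name_or_id}/evaluation",
        params=params,
        json={
            "datasource_name_or_id": "eval-data",  # assumed key
            "datasource_value_column": "text",
            "subsample": 0.25,         # subsample now accepts an int or a float fraction
            "ignore_unlabeled": True,  # new flag in this release
        },
    )
    print(evaluation)  # EvaluationResponse
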
@@ -3307,16 +3435,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> EvaluationResponse:
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/telemetry/prediction"],
         *,
-        params:
-        json:
+        params: None = None,
+        json: ListPredictionsRequest | None = None,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3327,16 +3455,17 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) -> list[
+    ) -> list[LabelPredictionWithMemoriesAndFeedback | ScorePredictionWithMemoriesAndFeedback]:
+        """List predictions with optional filtering and sorting."""
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/telemetry/prediction/count"],
         *,
-        params:
-        json:
+        params: None = None,
+        json: CountPredictionsRequest | None = None,
         data: None = None,
         files: None = None,
         content: None = None,
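
Per the overload above, POST /telemetry/prediction accepts an optional ListPredictionsRequest body and returns a mixed list of label and score predictions with their memories and feedback. A minimal sketch with no filter body follows.

from orca_sdk.async_client import OrcaAsyncClient

async def list_recent_predictions(client: OrcaAsyncClient) -> None:
    # json defaults to None, so the call can be made without a filter body.
    predictions = await client.POST("/telemetry/prediction")
    for prediction in predictions:
        # Each item is a LabelPredictionWithMemoriesAndFeedback or a
        # ScorePredictionWithMemoriesAndFeedback.
        print(prediction)
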
@@ -3347,16 +3476,17 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> int:
+        """Count predictions with optional filtering."""
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/telemetry/memories"],
         *,
-        params:
-        json:
+        params: None = None,
+        json: TelemetryMemoriesRequest,
         data: None = None,
         files: None = None,
         content: None = None,
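
The count overload completed above returns a plain int. This sketch uses the memory_id filter that this release adds to CountPredictionsRequest; omitting json counts all predictions.

from orca_sdk.async_client import OrcaAsyncClient

async def count_predictions_for_memory(client: OrcaAsyncClient, memory_id: str) -> int:
    # memory_id is one of the optional CountPredictionsRequest filters.
    return await client.POST("/telemetry/prediction/count", json={"memory_id": memory_id})
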
@@ -3367,17 +3497,21 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
-        """
+    ) -> PaginatedUnionLabeledMemoryWithFeedbackMetricsScoredMemoryWithFeedbackMetrics:
+        """
+        List memories with feedback metrics.
+        **Note**: This endpoint will ONLY return memories that have been used in a prediction.
+        If you want to query ALL memories WITHOUT feedback metrics, use the query_memoryset endpoint.
+        """
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/agents/bootstrap_classification_model"],
         *,
-        params:
-        json:
+        params: None = None,
+        json: BootstrapClassificationModelRequest,
         data: None = None,
         files: None = None,
         content: None = None,
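
As the relocated docstring above notes, /telemetry/memories only returns memories that have already been used in a prediction. TelemetryMemoriesRequest is not expanded in this section, so the body keys in this sketch are assumptions.

from orca_sdk.async_client import OrcaAsyncClient

async def list_memories_with_feedback(client: OrcaAsyncClient) -> None:
    page = await client.POST(
        "/telemetry/memories",
        json={"memoryset_name_or_id": "my-memoryset", "limit": 50},  # assumed keys
    )
    # Returns a paginated union of labeled/scored memories with feedback metrics.
    print(page)
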
@@ -3388,8 +3522,21 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
-        """
+    ) -> BootstrapClassificationModelResponse:
+        """
+        Bootstrap a classification model by creating a memoryset with generated memories and a classification model.
+
+        This endpoint uses the bootstrap_classification_model agent to generate:
+        1. Memoryset configuration with appropriate settings
+        2. Model configuration with optimal parameters
+        3. High-quality training memories for each label
+
+        The process involves:
+        1. Calling the agent to generate configurations and memories
+        2. Creating a datasource from the generated memories
+        3. Creating a memoryset from the datasource
+        4. Creating a classification model from the memoryset
+        """
         pass

     async def POST(
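
The docstring above describes the four-step bootstrap flow (agent call, datasource, memoryset, model). A hedged sketch of invoking the agent endpoint is below; BootstrapClassificationModelRequest is not shown in this section, so its keys are guesses.

from orca_sdk.async_client import OrcaAsyncClient

async def bootstrap_classifier(client: OrcaAsyncClient) -> None:
    response = await client.POST(
        "/agents/bootstrap_classification_model",
        json={
            "task_description": "Route support tickets to billing, technical, or sales",  # assumed key
            "labels": ["billing", "technical", "sales"],  # assumed key
        },
    )
    print(response)  # BootstrapClassificationModelResponse
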
@@ -3535,10 +3682,10 @@ class OrcaAsyncClient(AsyncClient):
     @overload
     async def PATCH(
         self,
-        path: Literal["/
+        path: Literal["/gpu/memoryset/{name_or_id}/memory"],
         *,
-        params:
-        json:
+        params: PatchGpuMemorysetByNameOrIdMemoryParams,
+        json: PatchGpuMemorysetByNameOrIdMemoryRequest,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3549,16 +3696,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> LabeledMemory | ScoredMemory:
         pass

     @overload
     async def PATCH(
         self,
-        path: Literal["/
+        path: Literal["/gpu/memoryset/{name_or_id}/memories"],
         *,
-        params:
-        json:
+        params: PatchGpuMemorysetByNameOrIdMemoriesParams,
+        json: PatchGpuMemorysetByNameOrIdMemoriesRequest,
         data: None = None,
         files: None = None,
         content: None = None,
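
The single-memory PATCH overload completed above returns either a LabeledMemory or a ScoredMemory, depending on the memoryset. Neither TypedDict referenced in the signature is expanded in this section, so the keys below are guesses based on the path template and the update semantics.

from typing import cast

from orca_sdk.async_client import (
    OrcaAsyncClient,
    PatchGpuMemorysetByNameOrIdMemoryParams,
    PatchGpuMemorysetByNameOrIdMemoryRequest,
)

async def relabel_memory(client: OrcaAsyncClient) -> None:
    params = cast(PatchGpuMemorysetByNameOrIdMemoryParams, {"name_or_id": "my-memoryset"})  # assumed key
    body = cast(PatchGpuMemorysetByNameOrIdMemoryRequest, {"memory_id": "mem-123", "label": 1})  # assumed keys
    memory = await client.PATCH("/gpu/memoryset/{name_or_id}/memory", params=params, json=body)
    print(memory)  # LabeledMemory or ScoredMemory
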
@@ -3569,16 +3716,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> list[LabeledMemory] | list[ScoredMemory]:
         pass

     @overload
     async def PATCH(
         self,
-        path: Literal["/
+        path: Literal["/classification_model/{name_or_id}"],
         *,
-        params:
-        json:
+        params: PatchClassificationModelByNameOrIdParams,
+        json: PredictiveModelUpdate,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3589,17 +3736,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
-        """Update a prediction with new expected values, tags, or memory ID."""
+    ) -> ClassificationModelMetadata:
         pass

     @overload
     async def PATCH(
         self,
-        path: Literal["/
+        path: Literal["/regression_model/{name_or_id}"],
         *,
-        params:
-        json:
+        params: PatchRegressionModelByNameOrIdParams,
+        json: PredictiveModelUpdate,
         data: None = None,
         files: None = None,
         content: None = None,
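
The model PATCH overload completed above takes a PredictiveModelUpdate body and returns the updated ClassificationModelMetadata. The fields in this sketch are assumptions, since PredictiveModelUpdate and the params TypedDict are not expanded here.

from typing import cast

from orca_sdk.async_client import (
    OrcaAsyncClient,
    PatchClassificationModelByNameOrIdParams,
)

async def update_model_description(client: OrcaAsyncClient) -> None:
    params = cast(PatchClassificationModelByNameOrIdParams, {"name_or_id": "my-classifier"})  # assumed key
    metadata = await client.PATCH(
        "/classification_model/{name_or_id}",
        params=params,
        json={"description": "retrained on the February snapshot"},  # assumed PredictiveModelUpdate key
    )
    print(metadata)  # ClassificationModelMetadata
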
@@ -3610,16 +3756,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> RegressionModelMetadata:
         pass

     @overload
     async def PATCH(
         self,
-        path: Literal["/
+        path: Literal["/telemetry/prediction/{prediction_id}"],
         *,
-        params:
-        json:
+        params: PatchTelemetryPredictionByPredictionIdParams,
+        json: UpdatePredictionRequest,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3630,7 +3776,8 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> Any:
+        """Update a prediction with new expected values, tags, or memory ID."""
         pass

     async def PATCH(
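
Per the docstring restored above, a recorded prediction can be updated with new expected values, tags, or a memory ID. The sketch below is hedged: the UpdatePredictionRequest and params keys are assumptions.

from typing import cast

from orca_sdk.async_client import (
    OrcaAsyncClient,
    PatchTelemetryPredictionByPredictionIdParams,
)

async def tag_prediction(client: OrcaAsyncClient, prediction_id: str) -> None:
    params = cast(
        PatchTelemetryPredictionByPredictionIdParams,
        {"prediction_id": prediction_id},  # assumed key
    )
    await client.PATCH(
        "/telemetry/prediction/{prediction_id}",
        params=params,
        json={"tags": ["reviewed"]},  # assumed UpdatePredictionRequest key
    )
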