orca-sdk 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl
This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two published versions.
- orca_sdk/_shared/metrics.py +179 -40
- orca_sdk/_shared/metrics_test.py +99 -6
- orca_sdk/_utils/data_parsing_test.py +1 -1
- orca_sdk/async_client.py +462 -301
- orca_sdk/classification_model.py +156 -41
- orca_sdk/classification_model_test.py +327 -8
- orca_sdk/client.py +462 -301
- orca_sdk/conftest.py +140 -21
- orca_sdk/datasource.py +45 -2
- orca_sdk/datasource_test.py +120 -0
- orca_sdk/embedding_model.py +32 -24
- orca_sdk/job.py +17 -17
- orca_sdk/memoryset.py +459 -56
- orca_sdk/memoryset_test.py +435 -2
- orca_sdk/regression_model.py +110 -19
- orca_sdk/regression_model_test.py +213 -0
- orca_sdk/telemetry.py +52 -13
- {orca_sdk-0.1.3.dist-info → orca_sdk-0.1.5.dist-info}/METADATA +1 -1
- {orca_sdk-0.1.3.dist-info → orca_sdk-0.1.5.dist-info}/RECORD +20 -20
- {orca_sdk-0.1.3.dist-info → orca_sdk-0.1.5.dist-info}/WHEEL +0 -0
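
The most visible API change in this release is partition support across memoryset and prediction requests (`partition_id`, `partition_ids`, `partition_filter_mode`, with modes such as `include_global`), alongside the rename of task endpoints and types to job equivalents (`TaskStatus` → `JobStatus`, `/task/...` → `/job/...`). A minimal sketch of calling the new partition-aware GPU lookup overload through the generated async client is shown below; the client instance, memoryset name, and partition values are illustrative assumptions, not part of this diff.

    from orca_sdk.async_client import LookupRequest, OrcaAsyncClient

    async def partition_scoped_lookup(client: OrcaAsyncClient) -> None:
        # Hypothetical memoryset name and partition ids, for illustration only.
        request: LookupRequest = {
            "query": ["how do I reset my password?"],
            "count": 5,
            # New in 0.1.5: scope the lookup to specific partitions; None selects unpartitioned memories.
            "partition_id": ["customer-a", None],
            "partition_filter_mode": "include_global",
        }
        results = await client.POST(
            "/gpu/memoryset/{name_or_id}/lookup",
            params={"name_or_id": "support_memoryset"},
            json=request,
        )
        # One ranked list of labeled or scored memory lookups per query string.
        print(len(results[0]))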
orca_sdk/async_client.py
CHANGED
|
@@ -135,6 +135,10 @@ class ClassificationEvaluationRequest(TypedDict):
|
|
|
135
135
|
datasource_value_column: str
|
|
136
136
|
record_telemetry: NotRequired[bool]
|
|
137
137
|
telemetry_tags: NotRequired[list[str] | None]
|
|
138
|
+
subsample: NotRequired[int | float | None]
|
|
139
|
+
ignore_unlabeled: NotRequired[bool]
|
|
140
|
+
datasource_partition_column: NotRequired[str | None]
|
|
141
|
+
partition_filter_mode: NotRequired[Literal["ignore_partitions", "include_global", "exclude_global", "only_global"]]
|
|
138
142
|
|
|
139
143
|
|
|
140
144
|
class CleanupResponse(TypedDict):
|
|
@@ -163,6 +167,7 @@ class CountPredictionsRequest(TypedDict):
|
|
|
163
167
|
prediction_ids: NotRequired[list[str] | None]
|
|
164
168
|
start_timestamp: NotRequired[str | None]
|
|
165
169
|
end_timestamp: NotRequired[str | None]
|
|
170
|
+
memory_id: NotRequired[str | None]
|
|
166
171
|
|
|
167
172
|
|
|
168
173
|
class CreateApiKeyRequest(TypedDict):
|
|
@@ -193,6 +198,12 @@ class CreateOrgPlanRequest(TypedDict):
|
|
|
193
198
|
tier: Literal["FREE", "PRO", "ENTERPRISE", "CANCELLED"]
|
|
194
199
|
|
|
195
200
|
|
|
201
|
+
class DatasetFilterItem(TypedDict):
|
|
202
|
+
field: str
|
|
203
|
+
op: Literal["==", "!=", ">", ">=", "<", "<=", "in", "not in", "like"]
|
|
204
|
+
value: Any
|
|
205
|
+
|
|
206
|
+
|
|
196
207
|
class DeleteMemoriesRequest(TypedDict):
|
|
197
208
|
memory_ids: list[str]
|
|
198
209
|
|
|
@@ -210,7 +221,7 @@ class EmbedRequest(TypedDict):
|
|
|
210
221
|
class EmbeddingEvaluationRequest(TypedDict):
|
|
211
222
|
datasource_name_or_id: str
|
|
212
223
|
eval_datasource_name_or_id: NotRequired[str | None]
|
|
213
|
-
subsample: NotRequired[int | None]
|
|
224
|
+
subsample: NotRequired[int | float | None]
|
|
214
225
|
datasource_value_column: NotRequired[str]
|
|
215
226
|
datasource_label_column: NotRequired[str | None]
|
|
216
227
|
datasource_score_column: NotRequired[str | None]
|
|
@@ -219,7 +230,7 @@ class EmbeddingEvaluationRequest(TypedDict):
|
|
|
219
230
|
weigh_memories: NotRequired[bool]
|
|
220
231
|
|
|
221
232
|
|
|
222
|
-
EmbeddingFinetuningMethod = Literal["classification", "batch_triplet_loss"]
|
|
233
|
+
EmbeddingFinetuningMethod = Literal["classification", "regression", "batch_triplet_loss"]
|
|
223
234
|
|
|
224
235
|
|
|
225
236
|
class FeedbackMetrics(TypedDict):
|
|
@@ -233,7 +244,19 @@ FeedbackType = Literal["CONTINUOUS", "BINARY"]
|
|
|
233
244
|
class FilterItem(TypedDict):
|
|
234
245
|
field: list
|
|
235
246
|
op: Literal["==", "!=", ">", ">=", "<", "<=", "in", "not in", "like"]
|
|
236
|
-
value: str | int | float | bool | list[str] | list[int] | list[float] | list[bool] | None
|
|
247
|
+
value: str | int | float | bool | list[str | None] | list[int] | list[float] | list[bool] | None
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
class GetDatasourceRowCountRequest(TypedDict):
|
|
251
|
+
filters: NotRequired[list[DatasetFilterItem]]
|
|
252
|
+
|
|
253
|
+
|
|
254
|
+
class GetDatasourceRowsRequest(TypedDict):
|
|
255
|
+
filters: NotRequired[list[DatasetFilterItem]]
|
|
256
|
+
limit: NotRequired[int]
|
|
257
|
+
offset: NotRequired[int]
|
|
258
|
+
shuffle: NotRequired[bool]
|
|
259
|
+
shuffle_seed: NotRequired[int | None]
|
|
237
260
|
|
|
238
261
|
|
|
239
262
|
class GetMemoriesRequest(TypedDict):
|
|
@@ -254,6 +277,18 @@ class InternalServerErrorResponse(TypedDict):
|
|
|
254
277
|
request_id: str
|
|
255
278
|
|
|
256
279
|
|
|
280
|
+
JobStatus = Literal["INITIALIZED", "DISPATCHED", "WAITING", "PROCESSING", "COMPLETED", "FAILED", "ABORTING", "ABORTED"]
|
|
281
|
+
|
|
282
|
+
|
|
283
|
+
class JobStatusInfo(TypedDict):
|
|
284
|
+
status: JobStatus
|
|
285
|
+
steps_total: int | None
|
|
286
|
+
steps_completed: int | None
|
|
287
|
+
exception: str | None
|
|
288
|
+
updated_at: str
|
|
289
|
+
created_at: str
|
|
290
|
+
|
|
291
|
+
|
|
257
292
|
class LabelClassMetrics(TypedDict):
|
|
258
293
|
label: int | None
|
|
259
294
|
label_name: NotRequired[str | None]
|
|
@@ -276,6 +311,7 @@ class LabeledMemoryInsert(TypedDict):
|
|
|
276
311
|
value: str | bytes
|
|
277
312
|
metadata: NotRequired[dict[str, str | int | float | bool | None]]
|
|
278
313
|
source_id: NotRequired[str | None]
|
|
314
|
+
partition_id: NotRequired[str | None]
|
|
279
315
|
label: int | None
|
|
280
316
|
|
|
281
317
|
|
|
@@ -283,12 +319,16 @@ class ListMemoriesRequest(TypedDict):
|
|
|
283
319
|
offset: NotRequired[int]
|
|
284
320
|
limit: NotRequired[int]
|
|
285
321
|
filters: NotRequired[list[FilterItem]]
|
|
322
|
+
partition_id: NotRequired[str | None]
|
|
323
|
+
partition_filter_mode: NotRequired[Literal["ignore_partitions", "include_global", "exclude_global", "only_global"]]
|
|
286
324
|
|
|
287
325
|
|
|
288
326
|
class LookupRequest(TypedDict):
|
|
289
327
|
query: list[str]
|
|
290
328
|
count: NotRequired[int]
|
|
291
329
|
prompt: NotRequired[str | None]
|
|
330
|
+
partition_id: NotRequired[str | list[str | None] | None]
|
|
331
|
+
partition_filter_mode: NotRequired[Literal["ignore_partitions", "include_global", "exclude_global", "only_global"]]
|
|
292
332
|
|
|
293
333
|
|
|
294
334
|
class LookupScoreMetrics(TypedDict):
|
|
@@ -340,8 +380,6 @@ class MemorysetClassPatternsMetrics(TypedDict):
|
|
|
340
380
|
class MemorysetClusterAnalysisConfig(TypedDict):
|
|
341
381
|
min_cluster_size: NotRequired[int | None]
|
|
342
382
|
max_cluster_size: NotRequired[int | None]
|
|
343
|
-
clustering_method: NotRequired[Literal["density", "graph"]]
|
|
344
|
-
min_cluster_distance: NotRequired[float]
|
|
345
383
|
partitioning_method: NotRequired[Literal["ng", "rb", "cpm"]]
|
|
346
384
|
resolution: NotRequired[float | None]
|
|
347
385
|
num_iterations: NotRequired[int]
|
|
@@ -370,6 +408,7 @@ class MemorysetConceptAnalysisConfig(TypedDict):
|
|
|
370
408
|
use_generative_naming: NotRequired[bool]
|
|
371
409
|
naming_examples_count: NotRequired[int]
|
|
372
410
|
naming_counterexample_count: NotRequired[int]
|
|
411
|
+
primary_label_pct_threshold: NotRequired[float]
|
|
373
412
|
seed: NotRequired[int]
|
|
374
413
|
|
|
375
414
|
|
|
@@ -439,7 +478,7 @@ class NotFoundErrorResponse(TypedDict):
|
|
|
439
478
|
"memory",
|
|
440
479
|
"evaluation",
|
|
441
480
|
"analysis",
|
|
442
|
-
"
|
|
481
|
+
"job",
|
|
443
482
|
"pretrained_embedding_model",
|
|
444
483
|
"finetuned_embedding_model",
|
|
445
484
|
"feedback_category",
|
|
@@ -553,6 +592,10 @@ class RegressionEvaluationRequest(TypedDict):
|
|
|
553
592
|
datasource_value_column: str
|
|
554
593
|
record_telemetry: NotRequired[bool]
|
|
555
594
|
telemetry_tags: NotRequired[list[str] | None]
|
|
595
|
+
subsample: NotRequired[int | float | None]
|
|
596
|
+
ignore_unlabeled: NotRequired[bool]
|
|
597
|
+
datasource_partition_column: NotRequired[str | None]
|
|
598
|
+
partition_filter_mode: NotRequired[Literal["ignore_partitions", "include_global", "exclude_global", "only_global"]]
|
|
556
599
|
|
|
557
600
|
|
|
558
601
|
class RegressionMetrics(TypedDict):
|
|
@@ -595,12 +638,16 @@ class RegressionPredictionRequest(TypedDict):
|
|
|
595
638
|
prompt: NotRequired[str | None]
|
|
596
639
|
use_lookup_cache: NotRequired[bool]
|
|
597
640
|
consistency_level: NotRequired[Literal["Bounded", "Session", "Strong", "Eventual"] | None]
|
|
641
|
+
ignore_unlabeled: NotRequired[bool]
|
|
642
|
+
partition_ids: NotRequired[str | list[str | None] | None]
|
|
643
|
+
partition_filter_mode: NotRequired[Literal["ignore_partitions", "include_global", "exclude_global", "only_global"]]
|
|
598
644
|
|
|
599
645
|
|
|
600
646
|
class ScorePredictionMemoryLookup(TypedDict):
|
|
601
647
|
value: str | bytes
|
|
602
648
|
embedding: list[float]
|
|
603
649
|
source_id: str | None
|
|
650
|
+
partition_id: str | None
|
|
604
651
|
metadata: dict[str, str | int | float | bool | None]
|
|
605
652
|
memory_id: str
|
|
606
653
|
memory_version: int
|
|
@@ -638,6 +685,7 @@ class ScoredMemory(TypedDict):
|
|
|
638
685
|
value: str | bytes
|
|
639
686
|
embedding: list[float]
|
|
640
687
|
source_id: str | None
|
|
688
|
+
partition_id: str | None
|
|
641
689
|
metadata: dict[str, str | int | float | bool | None]
|
|
642
690
|
memory_id: str
|
|
643
691
|
memory_version: int
|
|
@@ -653,6 +701,7 @@ class ScoredMemoryInsert(TypedDict):
|
|
|
653
701
|
value: str | bytes
|
|
654
702
|
metadata: NotRequired[dict[str, str | int | float | bool | None]]
|
|
655
703
|
source_id: NotRequired[str | None]
|
|
704
|
+
partition_id: NotRequired[str | None]
|
|
656
705
|
score: float | None
|
|
657
706
|
|
|
658
707
|
|
|
@@ -660,6 +709,7 @@ class ScoredMemoryLookup(TypedDict):
|
|
|
660
709
|
value: str | bytes
|
|
661
710
|
embedding: list[float]
|
|
662
711
|
source_id: str | None
|
|
712
|
+
partition_id: str | None
|
|
663
713
|
metadata: dict[str, str | int | float | bool | None]
|
|
664
714
|
memory_id: str
|
|
665
715
|
memory_version: int
|
|
@@ -676,6 +726,7 @@ class ScoredMemoryUpdate(TypedDict):
|
|
|
676
726
|
value: NotRequired[str | bytes]
|
|
677
727
|
metadata: NotRequired[dict[str, str | int | float | bool | None] | None]
|
|
678
728
|
source_id: NotRequired[str | None]
|
|
729
|
+
partition_id: NotRequired[str | None]
|
|
679
730
|
metrics: NotRequired[MemoryMetrics | None]
|
|
680
731
|
score: NotRequired[float | None]
|
|
681
732
|
|
|
@@ -684,6 +735,7 @@ class ScoredMemoryWithFeedbackMetrics(TypedDict):
|
|
|
684
735
|
value: str | bytes
|
|
685
736
|
embedding: list[float]
|
|
686
737
|
source_id: str | None
|
|
738
|
+
partition_id: str | None
|
|
687
739
|
metadata: dict[str, str | int | float | bool | None]
|
|
688
740
|
memory_id: str
|
|
689
741
|
memory_version: int
|
|
@@ -709,18 +761,6 @@ class SubConceptMetrics(TypedDict):
|
|
|
709
761
|
memory_count: int
|
|
710
762
|
|
|
711
763
|
|
|
712
|
-
TaskStatus = Literal["INITIALIZED", "DISPATCHED", "WAITING", "PROCESSING", "COMPLETED", "FAILED", "ABORTING", "ABORTED"]
|
|
713
|
-
|
|
714
|
-
|
|
715
|
-
class TaskStatusInfo(TypedDict):
|
|
716
|
-
status: TaskStatus
|
|
717
|
-
steps_total: int | None
|
|
718
|
-
steps_completed: int | None
|
|
719
|
-
exception: str | None
|
|
720
|
-
updated_at: str
|
|
721
|
-
created_at: str
|
|
722
|
-
|
|
723
|
-
|
|
724
764
|
TelemetryField = list
|
|
725
765
|
|
|
726
766
|
|
|
@@ -793,6 +833,10 @@ class DeleteMemorysetByNameOrIdParams(TypedDict):
|
|
|
793
833
|
name_or_id: str
|
|
794
834
|
|
|
795
835
|
|
|
836
|
+
class PostGpuMemorysetByNameOrIdLookupParams(TypedDict):
|
|
837
|
+
name_or_id: str
|
|
838
|
+
|
|
839
|
+
|
|
796
840
|
class GetMemorysetByNameOrIdMemoryByMemoryIdParams(TypedDict):
|
|
797
841
|
name_or_id: str
|
|
798
842
|
memory_id: str
|
|
@@ -825,20 +869,35 @@ class PostMemorysetByNameOrIdMemoriesDeleteParams(TypedDict):
|
|
|
825
869
|
name_or_id: str
|
|
826
870
|
|
|
827
871
|
|
|
872
|
+
class PatchGpuMemorysetByNameOrIdMemoryParams(TypedDict):
|
|
873
|
+
name_or_id: str
|
|
874
|
+
|
|
875
|
+
|
|
876
|
+
class PostGpuMemorysetByNameOrIdMemoryParams(TypedDict):
|
|
877
|
+
name_or_id: str
|
|
878
|
+
|
|
879
|
+
|
|
880
|
+
PostGpuMemorysetByNameOrIdMemoryRequest = list[LabeledMemoryInsert] | list[ScoredMemoryInsert]
|
|
881
|
+
|
|
882
|
+
|
|
883
|
+
class PatchGpuMemorysetByNameOrIdMemoriesParams(TypedDict):
|
|
884
|
+
name_or_id: str
|
|
885
|
+
|
|
886
|
+
|
|
828
887
|
class PostMemorysetByNameOrIdAnalysisParams(TypedDict):
|
|
829
888
|
name_or_id: str
|
|
830
889
|
|
|
831
890
|
|
|
832
891
|
class GetMemorysetByNameOrIdAnalysisParams(TypedDict):
|
|
833
892
|
name_or_id: str
|
|
834
|
-
status: NotRequired[
|
|
893
|
+
status: NotRequired[JobStatus | None]
|
|
835
894
|
limit: NotRequired[int | None]
|
|
836
895
|
offset: NotRequired[int | None]
|
|
837
896
|
|
|
838
897
|
|
|
839
|
-
class
|
|
898
|
+
class GetMemorysetByNameOrIdAnalysisByAnalysisJobIdParams(TypedDict):
|
|
840
899
|
name_or_id: str
|
|
841
|
-
|
|
900
|
+
analysis_job_id: str
|
|
842
901
|
|
|
843
902
|
|
|
844
903
|
class PostMemorysetByNameOrIdMemoryByMemoryIdCascadingEditsParams(TypedDict):
|
|
@@ -854,34 +913,42 @@ class DeleteFinetunedEmbeddingModelByNameOrIdParams(TypedDict):
|
|
|
854
913
|
name_or_id: str
|
|
855
914
|
|
|
856
915
|
|
|
857
|
-
class
|
|
916
|
+
class PostGpuFinetunedEmbeddingModelByNameOrIdEmbeddingParams(TypedDict):
|
|
858
917
|
name_or_id: str
|
|
859
918
|
|
|
860
919
|
|
|
861
|
-
class
|
|
862
|
-
|
|
863
|
-
task_id: str
|
|
920
|
+
class GetPretrainedEmbeddingModelByModelNameParams(TypedDict):
|
|
921
|
+
model_name: PretrainedEmbeddingModelName
|
|
864
922
|
|
|
865
923
|
|
|
866
|
-
class
|
|
867
|
-
|
|
868
|
-
datasource: NotRequired[str | None]
|
|
869
|
-
value_column: NotRequired[str | None]
|
|
870
|
-
label_column: NotRequired[str | None]
|
|
871
|
-
score_column: NotRequired[str | None]
|
|
924
|
+
class PostGpuPretrainedEmbeddingModelByModelNameEmbeddingParams(TypedDict):
|
|
925
|
+
model_name: PretrainedEmbeddingModelName
|
|
872
926
|
|
|
873
927
|
|
|
874
|
-
class
|
|
875
|
-
|
|
928
|
+
class PostFinetunedEmbeddingModelByNameOrIdEvaluationParams(TypedDict):
|
|
929
|
+
name_or_id: str
|
|
876
930
|
|
|
877
931
|
|
|
878
932
|
class PostPretrainedEmbeddingModelByModelNameEvaluationParams(TypedDict):
|
|
879
933
|
model_name: PretrainedEmbeddingModelName
|
|
880
934
|
|
|
881
935
|
|
|
882
|
-
class
|
|
936
|
+
class GetFinetunedEmbeddingModelByNameOrIdEvaluationByJobIdParams(TypedDict):
|
|
937
|
+
name_or_id: str
|
|
938
|
+
job_id: str
|
|
939
|
+
|
|
940
|
+
|
|
941
|
+
class GetPretrainedEmbeddingModelByModelNameEvaluationByJobIdParams(TypedDict):
|
|
883
942
|
model_name: PretrainedEmbeddingModelName
|
|
884
|
-
|
|
943
|
+
job_id: str
|
|
944
|
+
|
|
945
|
+
|
|
946
|
+
class GetFinetunedEmbeddingModelByNameOrIdEvaluationsParams(TypedDict):
|
|
947
|
+
name_or_id: str
|
|
948
|
+
datasource: NotRequired[str | None]
|
|
949
|
+
value_column: NotRequired[str | None]
|
|
950
|
+
label_column: NotRequired[str | None]
|
|
951
|
+
score_column: NotRequired[str | None]
|
|
885
952
|
|
|
886
953
|
|
|
887
954
|
class GetPretrainedEmbeddingModelByModelNameEvaluationsParams(TypedDict):
|
|
@@ -911,6 +978,14 @@ class DeleteDatasourceByNameOrIdParams(TypedDict):
|
|
|
911
978
|
name_or_id: str
|
|
912
979
|
|
|
913
980
|
|
|
981
|
+
class PostDatasourceByNameOrIdRowsParams(TypedDict):
|
|
982
|
+
name_or_id: str
|
|
983
|
+
|
|
984
|
+
|
|
985
|
+
class PostDatasourceByNameOrIdRowsCountParams(TypedDict):
|
|
986
|
+
name_or_id: str
|
|
987
|
+
|
|
988
|
+
|
|
914
989
|
class GetDatasourceByNameOrIdEmbeddingModelEvaluationsParams(TypedDict):
|
|
915
990
|
name_or_id: str
|
|
916
991
|
value_column: NotRequired[str | None]
|
|
@@ -941,36 +1016,42 @@ class DeleteClassificationModelByNameOrIdParams(TypedDict):
|
|
|
941
1016
|
name_or_id: str
|
|
942
1017
|
|
|
943
1018
|
|
|
944
|
-
class
|
|
945
|
-
|
|
1019
|
+
class PatchRegressionModelByNameOrIdParams(TypedDict):
|
|
1020
|
+
name_or_id: str
|
|
946
1021
|
|
|
947
1022
|
|
|
948
|
-
class
|
|
949
|
-
|
|
1023
|
+
class GetRegressionModelByNameOrIdParams(TypedDict):
|
|
1024
|
+
name_or_id: str
|
|
950
1025
|
|
|
951
1026
|
|
|
952
|
-
class
|
|
953
|
-
|
|
954
|
-
task_id: str
|
|
1027
|
+
class DeleteRegressionModelByNameOrIdParams(TypedDict):
|
|
1028
|
+
name_or_id: str
|
|
955
1029
|
|
|
956
1030
|
|
|
957
|
-
class
|
|
958
|
-
|
|
959
|
-
task_id: str
|
|
1031
|
+
class PostGpuClassificationModelByNameOrIdPredictionParams(TypedDict):
|
|
1032
|
+
name_or_id: str
|
|
960
1033
|
|
|
961
1034
|
|
|
962
|
-
class
|
|
1035
|
+
class PostClassificationModelByNameOrIdPredictionParams(TypedDict):
|
|
963
1036
|
name_or_id: str
|
|
964
1037
|
|
|
965
1038
|
|
|
966
|
-
class
|
|
1039
|
+
class PostGpuRegressionModelByNameOrIdPredictionParams(TypedDict):
|
|
967
1040
|
name_or_id: str
|
|
968
1041
|
|
|
969
1042
|
|
|
970
|
-
class
|
|
1043
|
+
class PostRegressionModelByNameOrIdPredictionParams(TypedDict):
|
|
971
1044
|
name_or_id: str
|
|
972
1045
|
|
|
973
1046
|
|
|
1047
|
+
class PostClassificationModelByModelNameOrIdEvaluationParams(TypedDict):
|
|
1048
|
+
model_name_or_id: str
|
|
1049
|
+
|
|
1050
|
+
|
|
1051
|
+
class GetClassificationModelByModelNameOrIdEvaluationParams(TypedDict):
|
|
1052
|
+
model_name_or_id: str
|
|
1053
|
+
|
|
1054
|
+
|
|
974
1055
|
class PostRegressionModelByModelNameOrIdEvaluationParams(TypedDict):
|
|
975
1056
|
model_name_or_id: str
|
|
976
1057
|
|
|
@@ -979,26 +1060,36 @@ class GetRegressionModelByModelNameOrIdEvaluationParams(TypedDict):
|
|
|
979
1060
|
model_name_or_id: str
|
|
980
1061
|
|
|
981
1062
|
|
|
982
|
-
class
|
|
1063
|
+
class GetClassificationModelByModelNameOrIdEvaluationByJobIdParams(TypedDict):
|
|
983
1064
|
model_name_or_id: str
|
|
984
|
-
|
|
1065
|
+
job_id: str
|
|
985
1066
|
|
|
986
1067
|
|
|
987
|
-
class
|
|
1068
|
+
class DeleteClassificationModelByModelNameOrIdEvaluationByJobIdParams(TypedDict):
|
|
988
1069
|
model_name_or_id: str
|
|
989
|
-
|
|
1070
|
+
job_id: str
|
|
990
1071
|
|
|
991
1072
|
|
|
992
|
-
class
|
|
993
|
-
|
|
1073
|
+
class GetRegressionModelByModelNameOrIdEvaluationByJobIdParams(TypedDict):
|
|
1074
|
+
model_name_or_id: str
|
|
1075
|
+
job_id: str
|
|
994
1076
|
|
|
995
1077
|
|
|
996
|
-
class
|
|
997
|
-
|
|
1078
|
+
class DeleteRegressionModelByModelNameOrIdEvaluationByJobIdParams(TypedDict):
|
|
1079
|
+
model_name_or_id: str
|
|
1080
|
+
job_id: str
|
|
1081
|
+
|
|
1082
|
+
|
|
1083
|
+
class GetJobByJobIdParams(TypedDict):
|
|
1084
|
+
job_id: str
|
|
1085
|
+
|
|
998
1086
|
|
|
1087
|
+
class GetJobByJobIdStatusParams(TypedDict):
|
|
1088
|
+
job_id: str
|
|
999
1089
|
|
|
1000
|
-
|
|
1001
|
-
|
|
1090
|
+
|
|
1091
|
+
class GetJobParams(TypedDict):
|
|
1092
|
+
status: NotRequired[JobStatus | list[JobStatus] | None]
|
|
1002
1093
|
type: NotRequired[str | list[str] | None]
|
|
1003
1094
|
limit: NotRequired[int | None]
|
|
1004
1095
|
offset: NotRequired[int]
|
|
@@ -1006,8 +1097,8 @@ class GetTaskParams(TypedDict):
|
|
|
1006
1097
|
end_timestamp: NotRequired[str | None]
|
|
1007
1098
|
|
|
1008
1099
|
|
|
1009
|
-
class
|
|
1010
|
-
|
|
1100
|
+
class DeleteJobByJobIdAbortParams(TypedDict):
|
|
1101
|
+
job_id: str
|
|
1011
1102
|
|
|
1012
1103
|
|
|
1013
1104
|
class GetWorkerParams(TypedDict):
|
|
@@ -1063,43 +1154,8 @@ class DeleteTelemetryFeedbackCategoryByNameOrIdParams(TypedDict):
|
|
|
1063
1154
|
PutTelemetryPredictionFeedbackRequest = list[PredictionFeedbackRequest]
|
|
1064
1155
|
|
|
1065
1156
|
|
|
1066
|
-
class
|
|
1067
|
-
|
|
1068
|
-
|
|
1069
|
-
|
|
1070
|
-
class PostGpuMemorysetByNameOrIdLookupParams(TypedDict):
|
|
1071
|
-
name_or_id: str
|
|
1072
|
-
|
|
1073
|
-
|
|
1074
|
-
class PatchGpuMemorysetByNameOrIdMemoryParams(TypedDict):
|
|
1075
|
-
name_or_id: str
|
|
1076
|
-
|
|
1077
|
-
|
|
1078
|
-
class PostGpuMemorysetByNameOrIdMemoryParams(TypedDict):
|
|
1079
|
-
name_or_id: str
|
|
1080
|
-
|
|
1081
|
-
|
|
1082
|
-
PostGpuMemorysetByNameOrIdMemoryRequest = list[LabeledMemoryInsert] | list[ScoredMemoryInsert]
|
|
1083
|
-
|
|
1084
|
-
|
|
1085
|
-
class PatchGpuMemorysetByNameOrIdMemoriesParams(TypedDict):
|
|
1086
|
-
name_or_id: str
|
|
1087
|
-
|
|
1088
|
-
|
|
1089
|
-
class PostGpuClassificationModelByNameOrIdPredictionParams(TypedDict):
|
|
1090
|
-
name_or_id: str
|
|
1091
|
-
|
|
1092
|
-
|
|
1093
|
-
class PostGpuRegressionModelByNameOrIdPredictionParams(TypedDict):
|
|
1094
|
-
name_or_id: str
|
|
1095
|
-
|
|
1096
|
-
|
|
1097
|
-
class PostGpuFinetunedEmbeddingModelByNameOrIdEmbeddingParams(TypedDict):
|
|
1098
|
-
name_or_id: str
|
|
1099
|
-
|
|
1100
|
-
|
|
1101
|
-
class PostGpuPretrainedEmbeddingModelByModelNameEmbeddingParams(TypedDict):
|
|
1102
|
-
model_name: PretrainedEmbeddingModelName
|
|
1157
|
+
class GetAgentsBootstrapClassificationModelByJobIdParams(TypedDict):
|
|
1158
|
+
job_id: str
|
|
1103
1159
|
|
|
1104
1160
|
|
|
1105
1161
|
class FieldValidationError(TypedDict):
|
|
@@ -1171,6 +1227,9 @@ class ClassificationPredictionRequest(TypedDict):
|
|
|
1171
1227
|
prompt: NotRequired[str | None]
|
|
1172
1228
|
use_lookup_cache: NotRequired[bool]
|
|
1173
1229
|
consistency_level: NotRequired[Literal["Bounded", "Session", "Strong", "Eventual"] | None]
|
|
1230
|
+
ignore_unlabeled: NotRequired[bool]
|
|
1231
|
+
partition_ids: NotRequired[str | list[str | None] | None]
|
|
1232
|
+
partition_filter_mode: NotRequired[Literal["ignore_partitions", "include_global", "exclude_global", "only_global"]]
|
|
1174
1233
|
|
|
1175
1234
|
|
|
1176
1235
|
class CloneMemorysetRequest(TypedDict):
|
|
@@ -1187,6 +1246,7 @@ class ColumnInfo(TypedDict):
|
|
|
1187
1246
|
name: str
|
|
1188
1247
|
type: ColumnType
|
|
1189
1248
|
enum_options: NotRequired[list[str] | None]
|
|
1249
|
+
string_values: NotRequired[list[str] | None]
|
|
1190
1250
|
int_values: NotRequired[list[int] | None]
|
|
1191
1251
|
contains_nones: NotRequired[bool]
|
|
1192
1252
|
|
|
@@ -1223,6 +1283,7 @@ class CreateMemorysetRequest(TypedDict):
|
|
|
1223
1283
|
datasource_score_column: NotRequired[str | None]
|
|
1224
1284
|
datasource_value_column: str
|
|
1225
1285
|
datasource_source_id_column: NotRequired[str | None]
|
|
1286
|
+
datasource_partition_id_column: NotRequired[str | None]
|
|
1226
1287
|
remove_duplicates: NotRequired[bool]
|
|
1227
1288
|
pretrained_embedding_model_name: NotRequired[PretrainedEmbeddingModelName | None]
|
|
1228
1289
|
finetuned_embedding_model_name_or_id: NotRequired[str | None]
|
|
@@ -1233,6 +1294,8 @@ class CreateMemorysetRequest(TypedDict):
|
|
|
1233
1294
|
prompt: NotRequired[str]
|
|
1234
1295
|
hidden: NotRequired[bool]
|
|
1235
1296
|
batch_size: NotRequired[int]
|
|
1297
|
+
subsample: NotRequired[int | float | None]
|
|
1298
|
+
memory_type: NotRequired[MemoryType]
|
|
1236
1299
|
|
|
1237
1300
|
|
|
1238
1301
|
class CreateRegressionModelRequest(TypedDict):
|
|
@@ -1257,48 +1320,52 @@ class DatasourceMetadata(TypedDict):
|
|
|
1257
1320
|
|
|
1258
1321
|
|
|
1259
1322
|
class EmbeddingEvaluationResponse(TypedDict):
|
|
1260
|
-
|
|
1323
|
+
job_id: str
|
|
1261
1324
|
org_id: str
|
|
1262
1325
|
finetuned_embedding_model_id: str | None
|
|
1263
1326
|
pretrained_embedding_model_name: PretrainedEmbeddingModelName | None
|
|
1264
1327
|
datasource_id: str
|
|
1265
|
-
subsample: int | None
|
|
1328
|
+
subsample: int | float | None
|
|
1266
1329
|
datasource_value_column: str
|
|
1267
1330
|
datasource_label_column: NotRequired[str | None]
|
|
1268
1331
|
datasource_score_column: NotRequired[str | None]
|
|
1269
1332
|
neighbor_count: int
|
|
1270
1333
|
weigh_memories: bool
|
|
1271
|
-
status:
|
|
1334
|
+
status: JobStatus
|
|
1272
1335
|
result: ClassificationMetrics | RegressionMetrics | None
|
|
1273
1336
|
created_at: str
|
|
1274
1337
|
updated_at: str
|
|
1338
|
+
task_id: str
|
|
1275
1339
|
|
|
1276
1340
|
|
|
1277
1341
|
class EvaluationResponse(TypedDict):
|
|
1278
|
-
|
|
1342
|
+
job_id: str
|
|
1279
1343
|
org_id: str
|
|
1280
|
-
status:
|
|
1344
|
+
status: JobStatus
|
|
1281
1345
|
result: ClassificationMetrics | RegressionMetrics | None
|
|
1282
1346
|
created_at: str
|
|
1283
1347
|
updated_at: str
|
|
1348
|
+
task_id: str
|
|
1284
1349
|
|
|
1285
1350
|
|
|
1286
1351
|
class EvaluationResponseClassificationMetrics(TypedDict):
|
|
1287
|
-
|
|
1352
|
+
job_id: str
|
|
1288
1353
|
org_id: str
|
|
1289
|
-
status:
|
|
1354
|
+
status: JobStatus
|
|
1290
1355
|
result: ClassificationMetrics | None
|
|
1291
1356
|
created_at: str
|
|
1292
1357
|
updated_at: str
|
|
1358
|
+
task_id: str
|
|
1293
1359
|
|
|
1294
1360
|
|
|
1295
1361
|
class EvaluationResponseRegressionMetrics(TypedDict):
|
|
1296
|
-
|
|
1362
|
+
job_id: str
|
|
1297
1363
|
org_id: str
|
|
1298
|
-
status:
|
|
1364
|
+
status: JobStatus
|
|
1299
1365
|
result: RegressionMetrics | None
|
|
1300
1366
|
created_at: str
|
|
1301
1367
|
updated_at: str
|
|
1368
|
+
task_id: str
|
|
1302
1369
|
|
|
1303
1370
|
|
|
1304
1371
|
class FinetuneEmbeddingModelRequest(TypedDict):
|
|
@@ -1307,7 +1374,8 @@ class FinetuneEmbeddingModelRequest(TypedDict):
|
|
|
1307
1374
|
train_memoryset_name_or_id: NotRequired[str | None]
|
|
1308
1375
|
train_datasource_name_or_id: NotRequired[str | None]
|
|
1309
1376
|
eval_datasource_name_or_id: NotRequired[str | None]
|
|
1310
|
-
label_column: NotRequired[str]
|
|
1377
|
+
label_column: NotRequired[str | None]
|
|
1378
|
+
score_column: NotRequired[str | None]
|
|
1311
1379
|
value_column: NotRequired[str]
|
|
1312
1380
|
training_method: NotRequired[EmbeddingFinetuningMethod]
|
|
1313
1381
|
training_args: NotRequired[dict[str, str | int | float | bool]]
|
|
@@ -1324,8 +1392,9 @@ class FinetunedEmbeddingModelMetadata(TypedDict):
|
|
|
1324
1392
|
created_at: str
|
|
1325
1393
|
updated_at: str
|
|
1326
1394
|
base_model: PretrainedEmbeddingModelName
|
|
1395
|
+
finetuning_job_id: str
|
|
1396
|
+
finetuning_status: JobStatus
|
|
1327
1397
|
finetuning_task_id: str
|
|
1328
|
-
finetuning_status: TaskStatus
|
|
1329
1398
|
|
|
1330
1399
|
|
|
1331
1400
|
class HTTPValidationError(TypedDict):
|
|
@@ -1337,10 +1406,28 @@ class InvalidInputErrorResponse(TypedDict):
|
|
|
1337
1406
|
validation_issues: list[FieldValidationError]
|
|
1338
1407
|
|
|
1339
1408
|
|
|
1409
|
+
class Job(TypedDict):
|
|
1410
|
+
status: JobStatus
|
|
1411
|
+
steps_total: int | None
|
|
1412
|
+
steps_completed: int | None
|
|
1413
|
+
exception: str | None
|
|
1414
|
+
updated_at: str
|
|
1415
|
+
created_at: str
|
|
1416
|
+
id: str
|
|
1417
|
+
org_id: str
|
|
1418
|
+
worker_id: str | None
|
|
1419
|
+
type: str
|
|
1420
|
+
payload: BaseModel
|
|
1421
|
+
result: BaseModel | None
|
|
1422
|
+
depends_on: NotRequired[list[str]]
|
|
1423
|
+
lease_token: str | None
|
|
1424
|
+
|
|
1425
|
+
|
|
1340
1426
|
class LabelPredictionMemoryLookup(TypedDict):
|
|
1341
1427
|
value: str | bytes
|
|
1342
1428
|
embedding: list[float]
|
|
1343
1429
|
source_id: str | None
|
|
1430
|
+
partition_id: str | None
|
|
1344
1431
|
metadata: dict[str, str | int | float | bool | None]
|
|
1345
1432
|
memory_id: str
|
|
1346
1433
|
memory_version: int
|
|
@@ -1382,6 +1469,7 @@ class LabeledMemory(TypedDict):
|
|
|
1382
1469
|
value: str | bytes
|
|
1383
1470
|
embedding: list[float]
|
|
1384
1471
|
source_id: str | None
|
|
1472
|
+
partition_id: str | None
|
|
1385
1473
|
metadata: dict[str, str | int | float | bool | None]
|
|
1386
1474
|
memory_id: str
|
|
1387
1475
|
memory_version: int
|
|
@@ -1397,6 +1485,7 @@ class LabeledMemoryLookup(TypedDict):
|
|
|
1397
1485
|
value: str | bytes
|
|
1398
1486
|
embedding: list[float]
|
|
1399
1487
|
source_id: str | None
|
|
1488
|
+
partition_id: str | None
|
|
1400
1489
|
metadata: dict[str, str | int | float | bool | None]
|
|
1401
1490
|
memory_id: str
|
|
1402
1491
|
memory_version: int
|
|
@@ -1414,6 +1503,7 @@ class LabeledMemoryUpdate(TypedDict):
|
|
|
1414
1503
|
value: NotRequired[str | bytes]
|
|
1415
1504
|
metadata: NotRequired[dict[str, str | int | float | bool | None] | None]
|
|
1416
1505
|
source_id: NotRequired[str | None]
|
|
1506
|
+
partition_id: NotRequired[str | None]
|
|
1417
1507
|
metrics: NotRequired[MemoryMetrics | None]
|
|
1418
1508
|
label: NotRequired[int | None]
|
|
1419
1509
|
|
|
@@ -1422,6 +1512,7 @@ class LabeledMemoryWithFeedbackMetrics(TypedDict):
|
|
|
1422
1512
|
value: str | bytes
|
|
1423
1513
|
embedding: list[float]
|
|
1424
1514
|
source_id: str | None
|
|
1515
|
+
partition_id: str | None
|
|
1425
1516
|
metadata: dict[str, str | int | float | bool | None]
|
|
1426
1517
|
memory_id: str
|
|
1427
1518
|
memory_version: int
|
|
@@ -1441,7 +1532,8 @@ class ListPredictionsRequest(TypedDict):
|
|
|
1441
1532
|
prediction_ids: NotRequired[list[str] | None]
|
|
1442
1533
|
start_timestamp: NotRequired[str | None]
|
|
1443
1534
|
end_timestamp: NotRequired[str | None]
|
|
1444
|
-
|
|
1535
|
+
memory_id: NotRequired[str | None]
|
|
1536
|
+
limit: NotRequired[int]
|
|
1445
1537
|
offset: NotRequired[int | None]
|
|
1446
1538
|
sort: NotRequired[PredictionSort]
|
|
1447
1539
|
expected_label_match: NotRequired[bool | None]
|
|
@@ -1462,6 +1554,7 @@ class MemorysetAnalysisRequest(TypedDict):
|
|
|
1462
1554
|
batch_size: NotRequired[int]
|
|
1463
1555
|
clear_metrics: NotRequired[bool]
|
|
1464
1556
|
configs: MemorysetAnalysisConfigs
|
|
1557
|
+
partition_filter_mode: NotRequired[Literal["ignore_partitions", "include_global", "exclude_global", "only_global"]]
|
|
1465
1558
|
|
|
1466
1559
|
|
|
1467
1560
|
class MemorysetConceptMetrics(TypedDict):
|
|
@@ -1480,6 +1573,13 @@ class MemorysetMetrics(TypedDict):
|
|
|
1480
1573
|
concepts: NotRequired[MemorysetConceptMetrics | None]
|
|
1481
1574
|
|
|
1482
1575
|
|
|
1576
|
+
class PaginatedJob(TypedDict):
|
|
1577
|
+
items: list[Job]
|
|
1578
|
+
total: int
|
|
1579
|
+
offset: int
|
|
1580
|
+
limit: int
|
|
1581
|
+
|
|
1582
|
+
|
|
1483
1583
|
class PaginatedUnionLabeledMemoryWithFeedbackMetricsScoredMemoryWithFeedbackMetrics(TypedDict):
|
|
1484
1584
|
items: list[LabeledMemoryWithFeedbackMetrics | ScoredMemoryWithFeedbackMetrics]
|
|
1485
1585
|
total: int
|
|
@@ -1497,23 +1597,6 @@ class PretrainedEmbeddingModelMetadata(TypedDict):
|
|
|
1497
1597
|
num_params: int
|
|
1498
1598
|
|
|
1499
1599
|
|
|
1500
|
-
class Task(TypedDict):
|
|
1501
|
-
status: TaskStatus
|
|
1502
|
-
steps_total: int | None
|
|
1503
|
-
steps_completed: int | None
|
|
1504
|
-
exception: str | None
|
|
1505
|
-
updated_at: str
|
|
1506
|
-
created_at: str
|
|
1507
|
-
id: str
|
|
1508
|
-
org_id: str
|
|
1509
|
-
worker_id: str | None
|
|
1510
|
-
type: str
|
|
1511
|
-
payload: BaseModel
|
|
1512
|
-
result: BaseModel | None
|
|
1513
|
-
depends_on: list[str]
|
|
1514
|
-
lease_token: str | None
|
|
1515
|
-
|
|
1516
|
-
|
|
1517
1600
|
class TelemetryMemoriesRequest(TypedDict):
|
|
1518
1601
|
memoryset_id: str
|
|
1519
1602
|
offset: NotRequired[int]
|
|
@@ -1545,10 +1628,10 @@ class CascadingEditSuggestion(TypedDict):
|
|
|
1545
1628
|
|
|
1546
1629
|
|
|
1547
1630
|
class MemorysetAnalysisResponse(TypedDict):
|
|
1548
|
-
|
|
1631
|
+
job_id: str
|
|
1549
1632
|
org_id: str
|
|
1550
1633
|
memoryset_id: str
|
|
1551
|
-
status:
|
|
1634
|
+
status: JobStatus
|
|
1552
1635
|
lookup_count: int
|
|
1553
1636
|
batch_size: int
|
|
1554
1637
|
clear_metrics: bool
|
|
@@ -1556,6 +1639,7 @@ class MemorysetAnalysisResponse(TypedDict):
|
|
|
1556
1639
|
results: MemorysetMetrics | None
|
|
1557
1640
|
created_at: str
|
|
1558
1641
|
updated_at: str
|
|
1642
|
+
task_id: str
|
|
1559
1643
|
|
|
1560
1644
|
|
|
1561
1645
|
class MemorysetMetadata(TypedDict):
|
|
@@ -1571,8 +1655,8 @@ class MemorysetMetadata(TypedDict):
|
|
|
1571
1655
|
created_at: str
|
|
1572
1656
|
updated_at: str
|
|
1573
1657
|
memories_updated_at: str
|
|
1574
|
-
|
|
1575
|
-
insertion_status:
|
|
1658
|
+
insertion_job_id: str
|
|
1659
|
+
insertion_status: JobStatus
|
|
1576
1660
|
metrics: MemorysetMetrics
|
|
1577
1661
|
memory_type: MemoryType
|
|
1578
1662
|
label_names: list[str] | None
|
|
@@ -1582,13 +1666,7 @@ class MemorysetMetadata(TypedDict):
|
|
|
1582
1666
|
document_prompt_override: str | None
|
|
1583
1667
|
query_prompt_override: str | None
|
|
1584
1668
|
hidden: bool
|
|
1585
|
-
|
|
1586
|
-
|
|
1587
|
-
class PaginatedTask(TypedDict):
|
|
1588
|
-
items: list[Task]
|
|
1589
|
-
total: int
|
|
1590
|
-
offset: int
|
|
1591
|
-
limit: int
|
|
1669
|
+
insertion_task_id: str
|
|
1592
1670
|
|
|
1593
1671
|
|
|
1594
1672
|
class PaginatedWorkerInfo(TypedDict):
|
|
@@ -1606,11 +1684,12 @@ class BootstrapClassificationModelMeta(TypedDict):
|
|
|
1606
1684
|
|
|
1607
1685
|
|
|
1608
1686
|
class BootstrapClassificationModelResponse(TypedDict):
|
|
1609
|
-
|
|
1687
|
+
job_id: str
|
|
1610
1688
|
org_id: str
|
|
1611
|
-
status:
|
|
1689
|
+
status: JobStatus
|
|
1612
1690
|
result: BootstrapClassificationModelMeta | None
|
|
1613
1691
|
input: BootstrapClassificationModelRequest | None
|
|
1692
|
+
task_id: str
|
|
1614
1693
|
|
|
1615
1694
|
|
|
1616
1695
|
class OrcaAsyncClient(AsyncClient):
|
|
@@ -1889,9 +1968,9 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
1889
1968
|
@overload
|
|
1890
1969
|
async def GET(
|
|
1891
1970
|
self,
|
|
1892
|
-
path: Literal["/memoryset/{name_or_id}/analysis/{
|
|
1971
|
+
path: Literal["/memoryset/{name_or_id}/analysis/{analysis_job_id}"],
|
|
1893
1972
|
*,
|
|
1894
|
-
params:
|
|
1973
|
+
params: GetMemorysetByNameOrIdAnalysisByAnalysisJobIdParams,
|
|
1895
1974
|
parse_as: Literal["json"] = "json",
|
|
1896
1975
|
headers: HeaderTypes | None = None,
|
|
1897
1976
|
cookies: CookieTypes | None = None,
|
|
@@ -1939,9 +2018,9 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
1939
2018
|
@overload
|
|
1940
2019
|
async def GET(
|
|
1941
2020
|
self,
|
|
1942
|
-
path: Literal["/
|
|
2021
|
+
path: Literal["/pretrained_embedding_model"],
|
|
1943
2022
|
*,
|
|
1944
|
-
params:
|
|
2023
|
+
params: None = None,
|
|
1945
2024
|
parse_as: Literal["json"] = "json",
|
|
1946
2025
|
headers: HeaderTypes | None = None,
|
|
1947
2026
|
cookies: CookieTypes | None = None,
|
|
@@ -1949,16 +2028,16 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
1949
2028
|
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
1950
2029
|
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
1951
2030
|
extensions: RequestExtensions | None = None,
|
|
1952
|
-
) ->
|
|
1953
|
-
"""
|
|
2031
|
+
) -> list[PretrainedEmbeddingModelMetadata]:
|
|
2032
|
+
"""List all available pretrained embedding models."""
|
|
1954
2033
|
pass
|
|
1955
2034
|
|
|
1956
2035
|
@overload
|
|
1957
2036
|
async def GET(
|
|
1958
2037
|
self,
|
|
1959
|
-
path: Literal["/
|
|
2038
|
+
path: Literal["/pretrained_embedding_model/{model_name}"],
|
|
1960
2039
|
*,
|
|
1961
|
-
params:
|
|
2040
|
+
params: GetPretrainedEmbeddingModelByModelNameParams,
|
|
1962
2041
|
parse_as: Literal["json"] = "json",
|
|
1963
2042
|
headers: HeaderTypes | None = None,
|
|
1964
2043
|
cookies: CookieTypes | None = None,
|
|
@@ -1966,16 +2045,16 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
1966
2045
|
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
1967
2046
|
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
1968
2047
|
extensions: RequestExtensions | None = None,
|
|
1969
|
-
) ->
|
|
1970
|
-
"""
|
|
2048
|
+
) -> PretrainedEmbeddingModelMetadata:
|
|
2049
|
+
"""Get metadata for a specific pretrained embedding model."""
|
|
1971
2050
|
pass
|
|
1972
2051
|
|
|
1973
2052
|
@overload
|
|
1974
2053
|
async def GET(
|
|
1975
2054
|
self,
|
|
1976
|
-
path: Literal["/
|
|
2055
|
+
path: Literal["/finetuned_embedding_model/{name_or_id}/evaluation/{job_id}"],
|
|
1977
2056
|
*,
|
|
1978
|
-
params:
|
|
2057
|
+
params: GetFinetunedEmbeddingModelByNameOrIdEvaluationByJobIdParams,
|
|
1979
2058
|
parse_as: Literal["json"] = "json",
|
|
1980
2059
|
headers: HeaderTypes | None = None,
|
|
1981
2060
|
cookies: CookieTypes | None = None,
|
|
@@ -1983,16 +2062,16 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
1983
2062
|
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
1984
2063
|
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
1985
2064
|
extensions: RequestExtensions | None = None,
|
|
1986
|
-
) ->
|
|
1987
|
-
"""
|
|
2065
|
+
) -> EmbeddingEvaluationResponse:
|
|
2066
|
+
"""Get evaluation results for a finetuned embedding model by job ID."""
|
|
1988
2067
|
pass
|
|
1989
2068
|
|
|
1990
2069
|
@overload
|
|
1991
2070
|
async def GET(
|
|
1992
2071
|
self,
|
|
1993
|
-
path: Literal["/pretrained_embedding_model/{model_name}"],
|
|
2072
|
+
path: Literal["/pretrained_embedding_model/{model_name}/evaluation/{job_id}"],
|
|
1994
2073
|
*,
|
|
1995
|
-
params:
|
|
2074
|
+
params: GetPretrainedEmbeddingModelByModelNameEvaluationByJobIdParams,
|
|
1996
2075
|
parse_as: Literal["json"] = "json",
|
|
1997
2076
|
headers: HeaderTypes | None = None,
|
|
1998
2077
|
cookies: CookieTypes | None = None,
|
|
@@ -2000,16 +2079,16 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2000
2079
|
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2001
2080
|
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2002
2081
|
extensions: RequestExtensions | None = None,
|
|
2003
|
-
) ->
|
|
2004
|
-
"""Get
|
|
2082
|
+
) -> EmbeddingEvaluationResponse:
|
|
2083
|
+
"""Get evaluation results for a pretrained embedding model by job ID."""
|
|
2005
2084
|
pass
|
|
2006
2085
|
|
|
2007
2086
|
@overload
|
|
2008
2087
|
async def GET(
|
|
2009
2088
|
self,
|
|
2010
|
-
path: Literal["/
|
|
2089
|
+
path: Literal["/finetuned_embedding_model/{name_or_id}/evaluations"],
|
|
2011
2090
|
*,
|
|
2012
|
-
params:
|
|
2091
|
+
params: GetFinetunedEmbeddingModelByNameOrIdEvaluationsParams,
|
|
2013
2092
|
parse_as: Literal["json"] = "json",
|
|
2014
2093
|
headers: HeaderTypes | None = None,
|
|
2015
2094
|
cookies: CookieTypes | None = None,
|
|
@@ -2017,8 +2096,8 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2017
2096
|
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2018
2097
|
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2019
2098
|
extensions: RequestExtensions | None = None,
|
|
2020
|
-
) -> EmbeddingEvaluationResponse:
|
|
2021
|
-
"""
|
|
2099
|
+
) -> list[EmbeddingEvaluationResponse]:
|
|
2100
|
+
"""List all evaluation results for a finetuned embedding model."""
|
|
2022
2101
|
pass
|
|
2023
2102
|
|
|
2024
2103
|
@overload
|
|
@@ -2143,7 +2222,7 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2143
2222
|
@overload
|
|
2144
2223
|
async def GET(
|
|
2145
2224
|
self,
|
|
2146
|
-
path: Literal["/
|
|
2225
|
+
path: Literal["/classification_model"],
|
|
2147
2226
|
*,
|
|
2148
2227
|
params: None = None,
|
|
2149
2228
|
parse_as: Literal["json"] = "json",
|
|
@@ -2153,13 +2232,13 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2153
2232
|
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2154
2233
|
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2155
2234
|
extensions: RequestExtensions | None = None,
|
|
2156
|
-
) -> list[ClassificationModelMetadata
|
|
2235
|
+
) -> list[ClassificationModelMetadata]:
|
|
2157
2236
|
pass
|
|
2158
2237
|
|
|
2159
2238
|
@overload
|
|
2160
2239
|
async def GET(
|
|
2161
2240
|
self,
|
|
2162
|
-
path: Literal["/
|
|
2241
|
+
path: Literal["/regression_model"],
|
|
2163
2242
|
*,
|
|
2164
2243
|
params: None = None,
|
|
2165
2244
|
parse_as: Literal["json"] = "json",
|
|
@@ -2169,7 +2248,7 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2169
2248
|
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2170
2249
|
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2171
2250
|
extensions: RequestExtensions | None = None,
|
|
2172
|
-
) -> list[
|
|
2251
|
+
) -> list[RegressionModelMetadata]:
|
|
2173
2252
|
pass
|
|
2174
2253
|
|
|
2175
2254
|
@overload
|
|
@@ -2191,9 +2270,9 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2191
2270
|
@overload
|
|
2192
2271
|
async def GET(
|
|
2193
2272
|
self,
|
|
2194
|
-
path: Literal["/
|
|
2273
|
+
path: Literal["/regression_model/{name_or_id}"],
|
|
2195
2274
|
*,
|
|
2196
|
-
params:
|
|
2275
|
+
params: GetRegressionModelByNameOrIdParams,
|
|
2197
2276
|
parse_as: Literal["json"] = "json",
|
|
2198
2277
|
headers: HeaderTypes | None = None,
|
|
2199
2278
|
cookies: CookieTypes | None = None,
|
|
@@ -2201,15 +2280,15 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2201
2280
|
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2202
2281
|
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2203
2282
|
extensions: RequestExtensions | None = None,
|
|
2204
|
-
) ->
|
|
2283
|
+
) -> RegressionModelMetadata:
|
|
2205
2284
|
pass
|
|
2206
2285
|
|
|
2207
2286
|
@overload
|
|
2208
2287
|
async def GET(
|
|
2209
2288
|
self,
|
|
2210
|
-
path: Literal["/
|
|
2289
|
+
path: Literal["/predictive_model"],
|
|
2211
2290
|
*,
|
|
2212
|
-
params:
|
|
2291
|
+
params: None = None,
|
|
2213
2292
|
parse_as: Literal["json"] = "json",
|
|
2214
2293
|
headers: HeaderTypes | None = None,
|
|
2215
2294
|
cookies: CookieTypes | None = None,
|
|
@@ -2217,15 +2296,15 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2217
2296
|
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2218
2297
|
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2219
2298
|
extensions: RequestExtensions | None = None,
|
|
2220
|
-
) ->
|
|
2299
|
+
) -> list[ClassificationModelMetadata | RegressionModelMetadata]:
|
|
2221
2300
|
pass
|
|
2222
2301
|
|
|
2223
2302
|
@overload
|
|
2224
2303
|
async def GET(
|
|
2225
2304
|
self,
|
|
2226
|
-
path: Literal["/
|
|
2305
|
+
path: Literal["/classification_model/{model_name_or_id}/evaluation"],
|
|
2227
2306
|
*,
|
|
2228
|
-
params:
|
|
2307
|
+
params: GetClassificationModelByModelNameOrIdEvaluationParams,
|
|
2229
2308
|
parse_as: Literal["json"] = "json",
|
|
2230
2309
|
headers: HeaderTypes | None = None,
|
|
2231
2310
|
cookies: CookieTypes | None = None,
|
|
@@ -2233,15 +2312,15 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2233
2312
|
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2234
2313
|
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2235
2314
|
extensions: RequestExtensions | None = None,
|
|
2236
|
-
) -> list[
|
|
2315
|
+
) -> list[EvaluationResponseClassificationMetrics]:
|
|
2237
2316
|
pass
|
|
2238
2317
|
|
|
2239
2318
|
@overload
|
|
2240
2319
|
async def GET(
|
|
2241
2320
|
self,
|
|
2242
|
-
path: Literal["/regression_model/{
|
|
2321
|
+
path: Literal["/regression_model/{model_name_or_id}/evaluation"],
|
|
2243
2322
|
*,
|
|
2244
|
-
params:
|
|
2323
|
+
params: GetRegressionModelByModelNameOrIdEvaluationParams,
|
|
2245
2324
|
parse_as: Literal["json"] = "json",
|
|
2246
2325
|
headers: HeaderTypes | None = None,
|
|
2247
2326
|
cookies: CookieTypes | None = None,
|
|
@@ -2249,15 +2328,15 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2249
2328
|
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2250
2329
|
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2251
2330
|
extensions: RequestExtensions | None = None,
|
|
2252
|
-
) ->
|
|
2331
|
+
) -> list[EvaluationResponseRegressionMetrics]:
|
|
2253
2332
|
pass
|
|
2254
2333
|
|
|
2255
2334
|
@overload
|
|
2256
2335
|
async def GET(
|
|
2257
2336
|
self,
|
|
2258
|
-
path: Literal["/
|
|
2337
|
+
path: Literal["/classification_model/{model_name_or_id}/evaluation/{job_id}"],
|
|
2259
2338
|
*,
|
|
2260
|
-
params:
|
|
2339
|
+
params: GetClassificationModelByModelNameOrIdEvaluationByJobIdParams,
|
|
2261
2340
|
parse_as: Literal["json"] = "json",
|
|
2262
2341
|
headers: HeaderTypes | None = None,
|
|
2263
2342
|
cookies: CookieTypes | None = None,
|
|
@@ -2265,15 +2344,15 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2265
2344
|
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2266
2345
|
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2267
2346
|
extensions: RequestExtensions | None = None,
|
|
2268
|
-
) ->
|
|
2347
|
+
) -> EvaluationResponseClassificationMetrics:
|
|
2269
2348
|
pass
|
|
2270
2349
|
|
|
2271
2350
|
@overload
|
|
2272
2351
|
async def GET(
|
|
2273
2352
|
self,
|
|
2274
|
-
path: Literal["/regression_model/{model_name_or_id}/evaluation/{
|
|
2353
|
+
path: Literal["/regression_model/{model_name_or_id}/evaluation/{job_id}"],
|
|
2275
2354
|
*,
|
|
2276
|
-
params:
|
|
2355
|
+
params: GetRegressionModelByModelNameOrIdEvaluationByJobIdParams,
|
|
2277
2356
|
parse_as: Literal["json"] = "json",
|
|
2278
2357
|
headers: HeaderTypes | None = None,
|
|
2279
2358
|
cookies: CookieTypes | None = None,
|
|
@@ -2287,9 +2366,9 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2287
2366
|
@overload
|
|
2288
2367
|
async def GET(
|
|
2289
2368
|
self,
|
|
2290
|
-
path: Literal["/
|
|
2369
|
+
path: Literal["/job/{job_id}"],
|
|
2291
2370
|
*,
|
|
2292
|
-
params:
|
|
2371
|
+
params: GetJobByJobIdParams,
|
|
2293
2372
|
parse_as: Literal["json"] = "json",
|
|
2294
2373
|
headers: HeaderTypes | None = None,
|
|
2295
2374
|
cookies: CookieTypes | None = None,
|
|
@@ -2297,15 +2376,15 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2297
2376
|
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2298
2377
|
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2299
2378
|
extensions: RequestExtensions | None = None,
|
|
2300
|
-
) ->
|
|
2379
|
+
) -> Job:
|
|
2301
2380
|
pass
|
|
2302
2381
|
|
|
2303
2382
|
@overload
|
|
2304
2383
|
async def GET(
|
|
2305
2384
|
self,
|
|
2306
|
-
path: Literal["/
|
|
2385
|
+
path: Literal["/job/{job_id}/status"],
|
|
2307
2386
|
*,
|
|
2308
|
-
params:
|
|
2387
|
+
params: GetJobByJobIdStatusParams,
|
|
2309
2388
|
parse_as: Literal["json"] = "json",
|
|
2310
2389
|
headers: HeaderTypes | None = None,
|
|
2311
2390
|
cookies: CookieTypes | None = None,
|
|
@@ -2313,15 +2392,15 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2313
2392
|
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2314
2393
|
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2315
2394
|
extensions: RequestExtensions | None = None,
|
|
2316
|
-
) ->
|
|
2395
|
+
) -> JobStatusInfo:
|
|
2317
2396
|
pass
|
|
2318
2397
|
|
|
2319
2398
|
@overload
|
|
2320
2399
|
async def GET(
|
|
2321
2400
|
self,
|
|
2322
|
-
path: Literal["/
|
|
2401
|
+
path: Literal["/job"],
|
|
2323
2402
|
*,
|
|
2324
|
-
params:
|
|
2403
|
+
params: GetJobParams,
|
|
2325
2404
|
parse_as: Literal["json"] = "json",
|
|
2326
2405
|
headers: HeaderTypes | None = None,
|
|
2327
2406
|
cookies: CookieTypes | None = None,
|
|
@@ -2329,7 +2408,7 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2329
2408
|
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2330
2409
|
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2331
2410
|
extensions: RequestExtensions | None = None,
|
|
2332
|
-
) ->
|
|
2411
|
+
) -> PaginatedJob:
|
|
2333
2412
|
pass
|
|
2334
2413
|
|
|
2335
2414
|
@overload
|
|
@@ -2480,9 +2559,9 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2480
2559
|
@overload
|
|
2481
2560
|
async def GET(
|
|
2482
2561
|
self,
|
|
2483
|
-
path: Literal["/agents/bootstrap_classification_model/{
|
|
2562
|
+
path: Literal["/agents/bootstrap_classification_model/{job_id}"],
|
|
2484
2563
|
*,
|
|
2485
|
-
params:
|
|
2564
|
+
params: GetAgentsBootstrapClassificationModelByJobIdParams,
|
|
2486
2565
|
parse_as: Literal["json"] = "json",
|
|
2487
2566
|
headers: HeaderTypes | None = None,
|
|
2488
2567
|
cookies: CookieTypes | None = None,
|
|
@@ -2491,7 +2570,7 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2491
2570
|
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2492
2571
|
extensions: RequestExtensions | None = None,
|
|
2493
2572
|
) -> BootstrapClassificationModelResponse:
|
|
2494
|
-
"""Get the status of a bootstrap classification model
|
|
2573
|
+
"""Get the status of a bootstrap classification model job"""
|
|
2495
2574
|
pass
|
|
2496
2575
|
|
|
2497
2576
|
async def GET(
|
|
@@ -2661,9 +2740,9 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2661
2740
|
@overload
|
|
2662
2741
|
async def DELETE(
|
|
2663
2742
|
self,
|
|
2664
|
-
path: Literal["/
|
|
2743
|
+
path: Literal["/regression_model/{name_or_id}"],
|
|
2665
2744
|
*,
|
|
2666
|
-
params:
|
|
2745
|
+
params: DeleteRegressionModelByNameOrIdParams,
|
|
2667
2746
|
parse_as: Literal["json"] = "json",
|
|
2668
2747
|
headers: HeaderTypes | None = None,
|
|
2669
2748
|
cookies: CookieTypes | None = None,
|
|
@@ -2677,9 +2756,9 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2677
2756
|
@overload
|
|
2678
2757
|
async def DELETE(
|
|
2679
2758
|
self,
|
|
2680
|
-
path: Literal["/
|
|
2759
|
+
path: Literal["/classification_model/{model_name_or_id}/evaluation/{job_id}"],
|
|
2681
2760
|
*,
|
|
2682
|
-
params:
|
|
2761
|
+
params: DeleteClassificationModelByModelNameOrIdEvaluationByJobIdParams,
|
|
2683
2762
|
parse_as: Literal["json"] = "json",
|
|
2684
2763
|
headers: HeaderTypes | None = None,
|
|
2685
2764
|
cookies: CookieTypes | None = None,
|
|
@@ -2693,9 +2772,9 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2693
2772
|
@overload
|
|
2694
2773
|
async def DELETE(
|
|
2695
2774
|
self,
|
|
2696
|
-
path: Literal["/regression_model/{model_name_or_id}/evaluation/{
|
|
2775
|
+
path: Literal["/regression_model/{model_name_or_id}/evaluation/{job_id}"],
|
|
2697
2776
|
*,
|
|
2698
|
-
params:
|
|
2777
|
+
params: DeleteRegressionModelByModelNameOrIdEvaluationByJobIdParams,
|
|
2699
2778
|
parse_as: Literal["json"] = "json",
|
|
2700
2779
|
headers: HeaderTypes | None = None,
|
|
2701
2780
|
cookies: CookieTypes | None = None,
|
|
@@ -2709,9 +2788,9 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2709
2788
|
@overload
|
|
2710
2789
|
async def DELETE(
|
|
2711
2790
|
self,
|
|
2712
|
-
path: Literal["/
|
|
2791
|
+
path: Literal["/job/{job_id}/abort"],
|
|
2713
2792
|
*,
|
|
2714
|
-
params:
|
|
2793
|
+
params: DeleteJobByJobIdAbortParams,
|
|
2715
2794
|
parse_as: Literal["json"] = "json",
|
|
2716
2795
|
headers: HeaderTypes | None = None,
|
|
2717
2796
|
cookies: CookieTypes | None = None,
|
|
@@ -2872,6 +2951,26 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2872
2951
|
) -> None:
|
|
2873
2952
|
pass
|
|
2874
2953
|
|
|
2954
|
+
@overload
|
|
2955
|
+
async def POST(
|
|
2956
|
+
self,
|
|
2957
|
+
path: Literal["/gpu/memoryset/{name_or_id}/lookup"],
|
|
2958
|
+
*,
|
|
2959
|
+
params: PostGpuMemorysetByNameOrIdLookupParams,
|
|
2960
|
+
json: LookupRequest,
|
|
2961
|
+
data: None = None,
|
|
2962
|
+
files: None = None,
|
|
2963
|
+
content: None = None,
|
|
2964
|
+
parse_as: Literal["json"] = "json",
|
|
2965
|
+
headers: HeaderTypes | None = None,
|
|
2966
|
+
cookies: CookieTypes | None = None,
|
|
2967
|
+
auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2968
|
+
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2969
|
+
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
2970
|
+
extensions: RequestExtensions | None = None,
|
|
2971
|
+
) -> list[list[LabeledMemoryLookup | ScoredMemoryLookup]]:
|
|
2972
|
+
pass
|
|
2973
|
+
|
|
2875
2974
|
@overload
|
|
2876
2975
|
async def POST(
|
|
2877
2976
|
self,
|
|
@@ -2932,6 +3031,26 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2932
3031
|
) -> None:
|
|
2933
3032
|
pass
|
|
2934
3033
|
|
|
3034
|
+
@overload
|
|
3035
|
+
async def POST(
|
|
3036
|
+
self,
|
|
3037
|
+
path: Literal["/gpu/memoryset/{name_or_id}/memory"],
|
|
3038
|
+
*,
|
|
3039
|
+
params: PostGpuMemorysetByNameOrIdMemoryParams,
|
|
3040
|
+
json: PostGpuMemorysetByNameOrIdMemoryRequest,
|
|
3041
|
+
data: None = None,
|
|
3042
|
+
files: None = None,
|
|
3043
|
+
content: None = None,
|
|
3044
|
+
parse_as: Literal["json"] = "json",
|
|
3045
|
+
headers: HeaderTypes | None = None,
|
|
3046
|
+
cookies: CookieTypes | None = None,
|
|
3047
|
+
auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
3048
|
+
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
3049
|
+
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
3050
|
+
extensions: RequestExtensions | None = None,
|
|
3051
|
+
) -> list[str]:
|
|
3052
|
+
pass
|
|
3053
|
+
|
|
2935
3054
|
@overload
|
|
2936
3055
|
async def POST(
|
|
2937
3056
|
self,
|
|
@@ -2993,6 +3112,48 @@ class OrcaAsyncClient(AsyncClient):
|
|
|
2993
3112
|
"""Create a finetuned embedding model."""
|
|
2994
3113
|
pass
|
|
2995
3114
|
|
|
3115
|
+
@overload
|
|
3116
|
+
async def POST(
|
|
3117
|
+
self,
|
|
3118
|
+
path: Literal["/gpu/finetuned_embedding_model/{name_or_id}/embedding"],
|
|
3119
|
+
*,
|
|
3120
|
+
params: PostGpuFinetunedEmbeddingModelByNameOrIdEmbeddingParams,
|
|
3121
|
+
json: EmbedRequest,
|
|
3122
|
+
data: None = None,
|
|
3123
|
+
files: None = None,
|
|
3124
|
+
content: None = None,
|
|
3125
|
+
parse_as: Literal["json"] = "json",
|
|
3126
|
+
headers: HeaderTypes | None = None,
|
|
3127
|
+
cookies: CookieTypes | None = None,
|
|
3128
|
+
auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
3129
|
+
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
3130
|
+
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
3131
|
+
extensions: RequestExtensions | None = None,
|
|
3132
|
+
) -> list[list[float]]:
|
|
3133
|
+
"""Embed values using a finetuned embedding model."""
|
|
3134
|
+
pass
|
|
3135
|
+
|
|
3136
|
+
@overload
|
|
3137
|
+
async def POST(
|
|
3138
|
+
self,
|
|
3139
|
+
path: Literal["/gpu/pretrained_embedding_model/{model_name}/embedding"],
|
|
3140
|
+
*,
|
|
3141
|
+
params: PostGpuPretrainedEmbeddingModelByModelNameEmbeddingParams,
|
|
3142
|
+
json: EmbedRequest,
|
|
3143
|
+
data: None = None,
|
|
3144
|
+
files: None = None,
|
|
3145
|
+
content: None = None,
|
|
3146
|
+
parse_as: Literal["json"] = "json",
|
|
3147
|
+
headers: HeaderTypes | None = None,
|
|
3148
|
+
cookies: CookieTypes | None = None,
|
|
3149
|
+
auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
3150
|
+
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
3151
|
+
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
|
|
3152
|
+
extensions: RequestExtensions | None = None,
|
|
3153
|
+
) -> list[list[float]]:
|
|
3154
|
+
"""Embed values using a pretrained embedding model."""
|
|
3155
|
+
pass
|
|
3156
|
+
|
|
2996
3157
|
@overload
|
|
2997
3158
|
async def POST(
|
|
2998
3159
|
self,
|
|
@@ -3092,10 +3253,10 @@ class OrcaAsyncClient(AsyncClient):
     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/datasource/{name_or_id}/rows"],
         *,
-        params:
-        json:
+        params: PostDatasourceByNameOrIdRowsParams,
+        json: GetDatasourceRowsRequest,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3106,16 +3267,17 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> list[dict[str, Any]]:
+        """Get rows from a specific datasource with optional filtering."""
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/datasource/{name_or_id}/rows/count"],
         *,
-        params:
-        json:
+        params: PostDatasourceByNameOrIdRowsCountParams,
+        json: GetDatasourceRowCountRequest,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3126,16 +3288,17 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> int:
+        """Get row count from a specific datasource with optional filtering."""
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/classification_model"],
         *,
         params: None = None,
-        json:
+        json: CreateClassificationModelRequest,
         data: None = None,
         files: None = None,
         content: None = None,
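
A hedged sketch of the new datasource row endpoints above, which return rows as list[dict[str, Any]] and the matching count as an int. The filter shape follows the DatasetFilterItem TypedDict added elsewhere in this file; the wrapper keys in GetDatasourceRowsRequest / GetDatasourceRowCountRequest ("filters", "limit") are assumptions.

    # Hedged sketch; only the endpoint paths and return types come from the overloads above.
    from typing import Any

    from orca_sdk.async_client import OrcaAsyncClient

    async def preview_rows(client: OrcaAsyncClient, datasource: str) -> tuple[list[dict[str, Any]], int]:
        filters = [{"field": "label", "op": "==", "value": 1}]  # DatasetFilterItem-shaped filter
        rows = await client.POST(
            "/datasource/{name_or_id}/rows",
            params={"name_or_id": datasource},
            json={"filters": filters, "limit": 10},  # assumed request fields
        )
        count = await client.POST(
            "/datasource/{name_or_id}/rows/count",
            params={"name_or_id": datasource},
            json={"filters": filters},
        )
        return rows, count
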
@@ -3146,16 +3309,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> ClassificationModelMetadata:
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/regression_model
+        path: Literal["/regression_model"],
         *,
-        params:
-        json:
+        params: None = None,
+        json: CreateRegressionModelRequest,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3166,16 +3329,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> RegressionModelMetadata:
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/gpu/classification_model/{name_or_id}/prediction"],
         *,
-        params:
-        json:
+        params: PostGpuClassificationModelByNameOrIdPredictionParams,
+        json: ClassificationPredictionRequest,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3186,17 +3349,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) -> list[
-        """List predictions with optional filtering and sorting."""
+    ) -> list[BaseLabelPredictionResult]:
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/classification_model/{name_or_id}/prediction"],
         *,
-        params:
-        json:
+        params: PostClassificationModelByNameOrIdPredictionParams,
+        json: ClassificationPredictionRequest,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3207,17 +3369,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
-        """Count predictions with optional filtering."""
+    ) -> list[BaseLabelPredictionResult]:
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/gpu/regression_model/{name_or_id}/prediction"],
         *,
-        params:
-        json:
+        params: PostGpuRegressionModelByNameOrIdPredictionParams,
+        json: RegressionPredictionRequest,
         data: None = None,
         files: None = None,
         content: None = None,
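
A hedged sketch of the classification prediction overloads above; the CPU and GPU routes share the same request and response types. ClassificationPredictionRequest is not expanded in this diff, so the payload key below is an assumption; only the path and the list[BaseLabelPredictionResult] return type come from the signatures.

    # Hedged sketch; "input_values" is an assumed ClassificationPredictionRequest field name.
    from orca_sdk.async_client import OrcaAsyncClient

    async def classify(client: OrcaAsyncClient, model: str, texts: list[str]):
        # Each returned item is a BaseLabelPredictionResult per the overload's return annotation.
        return await client.POST(
            "/classification_model/{name_or_id}/prediction",
            params={"name_or_id": model},
            json={"input_values": texts},
        )
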
@@ -3228,21 +3389,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
-        """
-        List memories with feedback metrics.
-        **Note**: This endpoint will ONLY return memories that have been used in a prediction.
-        If you want to query ALL memories WITHOUT feedback metrics, use the query_memoryset endpoint.
-        """
+    ) -> list[BaseScorePredictionResult]:
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/regression_model/{name_or_id}/prediction"],
         *,
-        params:
-        json:
+        params: PostRegressionModelByNameOrIdPredictionParams,
+        json: RegressionPredictionRequest,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3253,30 +3409,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
-        """
-        Bootstrap a classification model by creating a memoryset with generated memories and a classification model.
-
-        This endpoint uses the bootstrap_classification_model agent to generate:
-        1. Memoryset configuration with appropriate settings
-        2. Model configuration with optimal parameters
-        3. High-quality training memories for each label
-
-        The process involves:
-        1. Calling the agent to generate configurations and memories
-        2. Creating a datasource from the generated memories
-        3. Creating a memoryset from the datasource
-        4. Creating a classification model from the memoryset
-        """
+    ) -> list[BaseScorePredictionResult]:
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/classification_model/{model_name_or_id}/evaluation"],
         *,
-        params:
-        json:
+        params: PostClassificationModelByModelNameOrIdEvaluationParams,
+        json: ClassificationEvaluationRequest,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3287,16 +3429,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> EvaluationResponse:
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/regression_model/{model_name_or_id}/evaluation"],
         *,
-        params:
-        json:
+        params: PostRegressionModelByModelNameOrIdEvaluationParams,
+        json: RegressionEvaluationRequest,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3307,16 +3449,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> EvaluationResponse:
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/telemetry/prediction"],
         *,
-        params:
-        json:
+        params: None = None,
+        json: ListPredictionsRequest | None = None,
         data: None = None,
         files: None = None,
         content: None = None,
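
A hedged sketch of starting a classification evaluation through the overload above. Only "datasource_value_column" and "subsample" are confirmed ClassificationEvaluationRequest fields in this diff; "datasource_name_or_id" and the example values are assumptions, and the EvaluationResponse shape is not expanded here.

    # Hedged sketch of the evaluation endpoint; see the lead-in for which fields are assumed.
    from orca_sdk.async_client import OrcaAsyncClient

    async def evaluate_classifier(client: OrcaAsyncClient, model: str, datasource: str):
        return await client.POST(
            "/classification_model/{model_name_or_id}/evaluation",
            params={"model_name_or_id": model},
            json={
                "datasource_name_or_id": datasource,  # assumed field name
                "datasource_value_column": "text",
                "subsample": 0.1,  # int or float per the TypedDict; 10% of rows here
            },
        )
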
@@ -3327,16 +3469,17 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) -> list[
+    ) -> list[LabelPredictionWithMemoriesAndFeedback | ScorePredictionWithMemoriesAndFeedback]:
+        """List predictions with optional filtering and sorting."""
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/telemetry/prediction/count"],
         *,
-        params:
-        json:
+        params: None = None,
+        json: CountPredictionsRequest | None = None,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3347,16 +3490,17 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> int:
+        """Count predictions with optional filtering."""
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/telemetry/memories"],
         *,
-        params:
-        json:
+        params: None = None,
+        json: TelemetryMemoriesRequest,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3367,17 +3511,21 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
-        """
+    ) -> PaginatedUnionLabeledMemoryWithFeedbackMetricsScoredMemoryWithFeedbackMetrics:
+        """
+        List memories with feedback metrics.
+        **Note**: This endpoint will ONLY return memories that have been used in a prediction.
+        If you want to query ALL memories WITHOUT feedback metrics, use the query_memoryset endpoint.
+        """
         pass

     @overload
     async def POST(
         self,
-        path: Literal["/
+        path: Literal["/agents/bootstrap_classification_model"],
         *,
-        params:
-        json:
+        params: None = None,
+        json: BootstrapClassificationModelRequest,
         data: None = None,
         files: None = None,
         content: None = None,
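
A hedged sketch of the relocated telemetry endpoints above. CountPredictionsRequest (with optional prediction_ids, start_timestamp, end_timestamp, and memory_id filters) is defined earlier in this file; ListPredictionsRequest is not expanded here, so the list call below passes no body and relies on the optional json default.

    # Hedged sketch of listing and counting recorded predictions.
    from orca_sdk.async_client import OrcaAsyncClient

    async def prediction_telemetry(client: OrcaAsyncClient) -> int:
        # json defaults to None for the list endpoint, so this fetches unfiltered results.
        recent = await client.POST("/telemetry/prediction")
        print(f"fetched {len(recent)} predictions with memories and feedback")
        # CountPredictionsRequest supports time-window filters per its TypedDict definition.
        return await client.POST(
            "/telemetry/prediction/count",
            json={"start_timestamp": "2025-01-01T00:00:00Z"},
        )
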
@@ -3388,8 +3536,21 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
-        """
+    ) -> BootstrapClassificationModelResponse:
+        """
+        Bootstrap a classification model by creating a memoryset with generated memories and a classification model.
+
+        This endpoint uses the bootstrap_classification_model agent to generate:
+        1. Memoryset configuration with appropriate settings
+        2. Model configuration with optimal parameters
+        3. High-quality training memories for each label
+
+        The process involves:
+        1. Calling the agent to generate configurations and memories
+        2. Creating a datasource from the generated memories
+        3. Creating a memoryset from the datasource
+        4. Creating a classification model from the memoryset
+        """
         pass

     async def POST(
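
A hedged sketch of the new bootstrap agent endpoint documented above. BootstrapClassificationModelRequest is not expanded in this diff, so every payload key below is an illustrative assumption rather than the confirmed schema.

    # Hedged sketch; all request fields are assumptions.
    from orca_sdk.async_client import OrcaAsyncClient

    async def bootstrap_sentiment_model(client: OrcaAsyncClient):
        # Per the docstring above, the agent generates the memoryset config, model config,
        # and training memories, then creates the datasource, memoryset, and model.
        return await client.POST(
            "/agents/bootstrap_classification_model",
            json={
                "model_name": "sentiment-demo",                   # assumed field
                "task_description": "Classify review sentiment",  # assumed field
                "label_names": ["negative", "positive"],          # assumed field
            },
        )
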
@@ -3535,10 +3696,10 @@ class OrcaAsyncClient(AsyncClient):
     @overload
     async def PATCH(
         self,
-        path: Literal["/
+        path: Literal["/gpu/memoryset/{name_or_id}/memory"],
         *,
-        params:
-        json:
+        params: PatchGpuMemorysetByNameOrIdMemoryParams,
+        json: PatchGpuMemorysetByNameOrIdMemoryRequest,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3549,16 +3710,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> LabeledMemory | ScoredMemory:
         pass

     @overload
     async def PATCH(
         self,
-        path: Literal["/
+        path: Literal["/gpu/memoryset/{name_or_id}/memories"],
         *,
-        params:
-        json:
+        params: PatchGpuMemorysetByNameOrIdMemoriesParams,
+        json: PatchGpuMemorysetByNameOrIdMemoriesRequest,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3569,16 +3730,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> list[LabeledMemory] | list[ScoredMemory]:
         pass

     @overload
     async def PATCH(
         self,
-        path: Literal["/
+        path: Literal["/classification_model/{name_or_id}"],
         *,
-        params:
-        json:
+        params: PatchClassificationModelByNameOrIdParams,
+        json: PredictiveModelUpdate,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3589,17 +3750,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
-        """Update a prediction with new expected values, tags, or memory ID."""
+    ) -> ClassificationModelMetadata:
         pass

     @overload
     async def PATCH(
         self,
-        path: Literal["/
+        path: Literal["/regression_model/{name_or_id}"],
         *,
-        params:
-        json:
+        params: PatchRegressionModelByNameOrIdParams,
+        json: PredictiveModelUpdate,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3610,16 +3770,16 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> RegressionModelMetadata:
         pass

     @overload
     async def PATCH(
         self,
-        path: Literal["/
+        path: Literal["/telemetry/prediction/{prediction_id}"],
         *,
-        params:
-        json:
+        params: PatchTelemetryPredictionByPredictionIdParams,
+        json: UpdatePredictionRequest,
         data: None = None,
         files: None = None,
         content: None = None,
@@ -3630,7 +3790,8 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> Any:
+        """Update a prediction with new expected values, tags, or memory ID."""
         pass

     async def PATCH(
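
Finally, a hedged sketch of the prediction-update PATCH overload above. The docstring names expected values, tags, and memory ID as updatable; the exact UpdatePredictionRequest keys below are assumptions.

    # Hedged sketch; "expected_label" and "tags" are assumed UpdatePredictionRequest fields.
    from orca_sdk.async_client import OrcaAsyncClient

    async def correct_prediction(client: OrcaAsyncClient, prediction_id: str) -> None:
        await client.PATCH(
            "/telemetry/prediction/{prediction_id}",
            params={"prediction_id": prediction_id},
            json={"expected_label": 1, "tags": ["reviewed"]},
        )
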