orca-sdk 0.1.3__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two package versions as they appear in their public registries.
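The stubs below rename the background "task" API to "job" (for example, /task/{task_id}/status becomes /job/{job_id}/status) and add new datasource row endpoints. As an illustrative sketch only, assuming the generated OrcaClient is constructed like an httpx client (constructor and auth details are not part of this diff; only the paths, params, and TypedDict shapes shown here appear in the stubs), the renamed and added endpoints might be called like this:

    from orca_sdk.client import OrcaClient

    # Constructor arguments are assumptions for illustration.
    client = OrcaClient(base_url="https://api.example.invalid")

    # Poll a background job (formerly GET /task/{task_id}/status -> TaskStatusInfo).
    status = client.GET("/job/{job_id}/status", params={"job_id": "..."})
    print(status["status"], status["steps_completed"], status["steps_total"])

    # Fetch filtered rows via the new POST /datasource/{name_or_id}/rows endpoint,
    # using the new DatasetFilterItem / GetDatasourceRowsRequest shapes.
    # The datasource name and the filter field/value are placeholders.
    rows = client.POST(
        "/datasource/{name_or_id}/rows",
        params={"name_or_id": "my-datasource"},
        json={"filters": [{"field": "label", "op": "==", "value": 1}], "limit": 100},
    )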
orca_sdk/client.py CHANGED
@@ -133,6 +133,8 @@ class ClassificationEvaluationRequest(TypedDict):
  datasource_value_column: str
  record_telemetry: NotRequired[bool]
  telemetry_tags: NotRequired[list[str] | None]
+ subsample: NotRequired[int | float | None]
+ ignore_unlabeled: NotRequired[bool]


  class CleanupResponse(TypedDict):
@@ -161,6 +163,7 @@ class CountPredictionsRequest(TypedDict):
  prediction_ids: NotRequired[list[str] | None]
  start_timestamp: NotRequired[str | None]
  end_timestamp: NotRequired[str | None]
+ memory_id: NotRequired[str | None]


  class CreateApiKeyRequest(TypedDict):
@@ -191,6 +194,12 @@ class CreateOrgPlanRequest(TypedDict):
  tier: Literal["FREE", "PRO", "ENTERPRISE", "CANCELLED"]


+ class DatasetFilterItem(TypedDict):
+ field: str
+ op: Literal["==", "!=", ">", ">=", "<", "<=", "in", "not in", "like"]
+ value: Any
+
+
  class DeleteMemoriesRequest(TypedDict):
  memory_ids: list[str]

@@ -208,7 +217,7 @@ class EmbedRequest(TypedDict):
  class EmbeddingEvaluationRequest(TypedDict):
  datasource_name_or_id: str
  eval_datasource_name_or_id: NotRequired[str | None]
- subsample: NotRequired[int | None]
+ subsample: NotRequired[int | float | None]
  datasource_value_column: NotRequired[str]
  datasource_label_column: NotRequired[str | None]
  datasource_score_column: NotRequired[str | None]
@@ -217,7 +226,7 @@ class EmbeddingEvaluationRequest(TypedDict):
  weigh_memories: NotRequired[bool]


- EmbeddingFinetuningMethod = Literal["classification", "batch_triplet_loss"]
+ EmbeddingFinetuningMethod = Literal["classification", "regression", "batch_triplet_loss"]


  class FeedbackMetrics(TypedDict):
@@ -231,7 +240,19 @@ FeedbackType = Literal["CONTINUOUS", "BINARY"]
  class FilterItem(TypedDict):
  field: list
  op: Literal["==", "!=", ">", ">=", "<", "<=", "in", "not in", "like"]
- value: str | int | float | bool | list[str] | list[int] | list[float] | list[bool] | None
+ value: str | int | float | bool | list[str | None] | list[int] | list[float] | list[bool] | None
+
+
+ class GetDatasourceRowCountRequest(TypedDict):
+ filters: NotRequired[list[DatasetFilterItem]]
+
+
+ class GetDatasourceRowsRequest(TypedDict):
+ filters: NotRequired[list[DatasetFilterItem]]
+ limit: NotRequired[int]
+ offset: NotRequired[int]
+ shuffle: NotRequired[bool]
+ shuffle_seed: NotRequired[int | None]


  class GetMemoriesRequest(TypedDict):
@@ -252,6 +273,18 @@ class InternalServerErrorResponse(TypedDict):
  request_id: str


+ JobStatus = Literal["INITIALIZED", "DISPATCHED", "WAITING", "PROCESSING", "COMPLETED", "FAILED", "ABORTING", "ABORTED"]
+
+
+ class JobStatusInfo(TypedDict):
+ status: JobStatus
+ steps_total: int | None
+ steps_completed: int | None
+ exception: str | None
+ updated_at: str
+ created_at: str
+
+
  class LabelClassMetrics(TypedDict):
  label: int | None
  label_name: NotRequired[str | None]
@@ -274,6 +307,7 @@ class LabeledMemoryInsert(TypedDict):
  value: str | bytes
  metadata: NotRequired[dict[str, str | int | float | bool | None]]
  source_id: NotRequired[str | None]
+ partition_id: NotRequired[str | None]
  label: int | None


@@ -338,8 +372,6 @@ class MemorysetClassPatternsMetrics(TypedDict):
  class MemorysetClusterAnalysisConfig(TypedDict):
  min_cluster_size: NotRequired[int | None]
  max_cluster_size: NotRequired[int | None]
- clustering_method: NotRequired[Literal["density", "graph"]]
- min_cluster_distance: NotRequired[float]
  partitioning_method: NotRequired[Literal["ng", "rb", "cpm"]]
  resolution: NotRequired[float | None]
  num_iterations: NotRequired[int]
@@ -368,6 +400,7 @@ class MemorysetConceptAnalysisConfig(TypedDict):
  use_generative_naming: NotRequired[bool]
  naming_examples_count: NotRequired[int]
  naming_counterexample_count: NotRequired[int]
+ primary_label_pct_threshold: NotRequired[float]
  seed: NotRequired[int]


@@ -437,7 +470,7 @@ class NotFoundErrorResponse(TypedDict):
  "memory",
  "evaluation",
  "analysis",
- "task",
+ "job",
  "pretrained_embedding_model",
  "finetuned_embedding_model",
  "feedback_category",
@@ -551,6 +584,8 @@ class RegressionEvaluationRequest(TypedDict):
  datasource_value_column: str
  record_telemetry: NotRequired[bool]
  telemetry_tags: NotRequired[list[str] | None]
+ subsample: NotRequired[int | float | None]
+ ignore_unlabeled: NotRequired[bool]


  class RegressionMetrics(TypedDict):
@@ -593,12 +628,14 @@ class RegressionPredictionRequest(TypedDict):
  prompt: NotRequired[str | None]
  use_lookup_cache: NotRequired[bool]
  consistency_level: NotRequired[Literal["Bounded", "Session", "Strong", "Eventual"] | None]
+ ignore_unlabeled: NotRequired[bool]


  class ScorePredictionMemoryLookup(TypedDict):
  value: str | bytes
  embedding: list[float]
  source_id: str | None
+ partition_id: str | None
  metadata: dict[str, str | int | float | bool | None]
  memory_id: str
  memory_version: int
@@ -636,6 +673,7 @@ class ScoredMemory(TypedDict):
  value: str | bytes
  embedding: list[float]
  source_id: str | None
+ partition_id: str | None
  metadata: dict[str, str | int | float | bool | None]
  memory_id: str
  memory_version: int
@@ -651,6 +689,7 @@ class ScoredMemoryInsert(TypedDict):
  value: str | bytes
  metadata: NotRequired[dict[str, str | int | float | bool | None]]
  source_id: NotRequired[str | None]
+ partition_id: NotRequired[str | None]
  score: float | None


@@ -658,6 +697,7 @@ class ScoredMemoryLookup(TypedDict):
  value: str | bytes
  embedding: list[float]
  source_id: str | None
+ partition_id: str | None
  metadata: dict[str, str | int | float | bool | None]
  memory_id: str
  memory_version: int
@@ -674,6 +714,7 @@ class ScoredMemoryUpdate(TypedDict):
  value: NotRequired[str | bytes]
  metadata: NotRequired[dict[str, str | int | float | bool | None] | None]
  source_id: NotRequired[str | None]
+ partition_id: NotRequired[str | None]
  metrics: NotRequired[MemoryMetrics | None]
  score: NotRequired[float | None]

@@ -682,6 +723,7 @@ class ScoredMemoryWithFeedbackMetrics(TypedDict):
  value: str | bytes
  embedding: list[float]
  source_id: str | None
+ partition_id: str | None
  metadata: dict[str, str | int | float | bool | None]
  memory_id: str
  memory_version: int
@@ -707,18 +749,6 @@ class SubConceptMetrics(TypedDict):
  memory_count: int


- TaskStatus = Literal["INITIALIZED", "DISPATCHED", "WAITING", "PROCESSING", "COMPLETED", "FAILED", "ABORTING", "ABORTED"]
-
-
- class TaskStatusInfo(TypedDict):
- status: TaskStatus
- steps_total: int | None
- steps_completed: int | None
- exception: str | None
- updated_at: str
- created_at: str
-
-
  TelemetryField = list


@@ -791,6 +821,10 @@ class DeleteMemorysetByNameOrIdParams(TypedDict):
  name_or_id: str


+ class PostGpuMemorysetByNameOrIdLookupParams(TypedDict):
+ name_or_id: str
+
+
  class GetMemorysetByNameOrIdMemoryByMemoryIdParams(TypedDict):
  name_or_id: str
  memory_id: str
@@ -823,20 +857,35 @@ class PostMemorysetByNameOrIdMemoriesDeleteParams(TypedDict):
  name_or_id: str


+ class PatchGpuMemorysetByNameOrIdMemoryParams(TypedDict):
+ name_or_id: str
+
+
+ class PostGpuMemorysetByNameOrIdMemoryParams(TypedDict):
+ name_or_id: str
+
+
+ PostGpuMemorysetByNameOrIdMemoryRequest = list[LabeledMemoryInsert] | list[ScoredMemoryInsert]
+
+
+ class PatchGpuMemorysetByNameOrIdMemoriesParams(TypedDict):
+ name_or_id: str
+
+
  class PostMemorysetByNameOrIdAnalysisParams(TypedDict):
  name_or_id: str


  class GetMemorysetByNameOrIdAnalysisParams(TypedDict):
  name_or_id: str
- status: NotRequired[TaskStatus | None]
+ status: NotRequired[JobStatus | None]
  limit: NotRequired[int | None]
  offset: NotRequired[int | None]


- class GetMemorysetByNameOrIdAnalysisByAnalysisTaskIdParams(TypedDict):
+ class GetMemorysetByNameOrIdAnalysisByAnalysisJobIdParams(TypedDict):
  name_or_id: str
- analysis_task_id: str
+ analysis_job_id: str


  class PostMemorysetByNameOrIdMemoryByMemoryIdCascadingEditsParams(TypedDict):
@@ -852,34 +901,42 @@ class DeleteFinetunedEmbeddingModelByNameOrIdParams(TypedDict):
  name_or_id: str


- class PostFinetunedEmbeddingModelByNameOrIdEvaluationParams(TypedDict):
+ class PostGpuFinetunedEmbeddingModelByNameOrIdEmbeddingParams(TypedDict):
  name_or_id: str


- class GetFinetunedEmbeddingModelByNameOrIdEvaluationByTaskIdParams(TypedDict):
- name_or_id: str
- task_id: str
+ class GetPretrainedEmbeddingModelByModelNameParams(TypedDict):
+ model_name: PretrainedEmbeddingModelName


- class GetFinetunedEmbeddingModelByNameOrIdEvaluationsParams(TypedDict):
- name_or_id: str
- datasource: NotRequired[str | None]
- value_column: NotRequired[str | None]
- label_column: NotRequired[str | None]
- score_column: NotRequired[str | None]
+ class PostGpuPretrainedEmbeddingModelByModelNameEmbeddingParams(TypedDict):
+ model_name: PretrainedEmbeddingModelName


- class GetPretrainedEmbeddingModelByModelNameParams(TypedDict):
- model_name: PretrainedEmbeddingModelName
+ class PostFinetunedEmbeddingModelByNameOrIdEvaluationParams(TypedDict):
+ name_or_id: str


  class PostPretrainedEmbeddingModelByModelNameEvaluationParams(TypedDict):
  model_name: PretrainedEmbeddingModelName


- class GetPretrainedEmbeddingModelByModelNameEvaluationByTaskIdParams(TypedDict):
+ class GetFinetunedEmbeddingModelByNameOrIdEvaluationByJobIdParams(TypedDict):
+ name_or_id: str
+ job_id: str
+
+
+ class GetPretrainedEmbeddingModelByModelNameEvaluationByJobIdParams(TypedDict):
  model_name: PretrainedEmbeddingModelName
- task_id: str
+ job_id: str
+
+
+ class GetFinetunedEmbeddingModelByNameOrIdEvaluationsParams(TypedDict):
+ name_or_id: str
+ datasource: NotRequired[str | None]
+ value_column: NotRequired[str | None]
+ label_column: NotRequired[str | None]
+ score_column: NotRequired[str | None]


  class GetPretrainedEmbeddingModelByModelNameEvaluationsParams(TypedDict):
@@ -909,6 +966,14 @@ class DeleteDatasourceByNameOrIdParams(TypedDict):
  name_or_id: str


+ class PostDatasourceByNameOrIdRowsParams(TypedDict):
+ name_or_id: str
+
+
+ class PostDatasourceByNameOrIdRowsCountParams(TypedDict):
+ name_or_id: str
+
+
  class GetDatasourceByNameOrIdEmbeddingModelEvaluationsParams(TypedDict):
  name_or_id: str
  value_column: NotRequired[str | None]
@@ -939,36 +1004,42 @@ class DeleteClassificationModelByNameOrIdParams(TypedDict):
  name_or_id: str


- class PostClassificationModelByModelNameOrIdEvaluationParams(TypedDict):
- model_name_or_id: str
+ class PatchRegressionModelByNameOrIdParams(TypedDict):
+ name_or_id: str


- class GetClassificationModelByModelNameOrIdEvaluationParams(TypedDict):
- model_name_or_id: str
+ class GetRegressionModelByNameOrIdParams(TypedDict):
+ name_or_id: str


- class GetClassificationModelByModelNameOrIdEvaluationByTaskIdParams(TypedDict):
- model_name_or_id: str
- task_id: str
+ class DeleteRegressionModelByNameOrIdParams(TypedDict):
+ name_or_id: str


- class DeleteClassificationModelByModelNameOrIdEvaluationByTaskIdParams(TypedDict):
- model_name_or_id: str
- task_id: str
+ class PostGpuClassificationModelByNameOrIdPredictionParams(TypedDict):
+ name_or_id: str


- class PatchRegressionModelByNameOrIdParams(TypedDict):
+ class PostClassificationModelByNameOrIdPredictionParams(TypedDict):
  name_or_id: str


- class GetRegressionModelByNameOrIdParams(TypedDict):
+ class PostGpuRegressionModelByNameOrIdPredictionParams(TypedDict):
  name_or_id: str


- class DeleteRegressionModelByNameOrIdParams(TypedDict):
+ class PostRegressionModelByNameOrIdPredictionParams(TypedDict):
  name_or_id: str


+ class PostClassificationModelByModelNameOrIdEvaluationParams(TypedDict):
+ model_name_or_id: str
+
+
+ class GetClassificationModelByModelNameOrIdEvaluationParams(TypedDict):
+ model_name_or_id: str
+
+
  class PostRegressionModelByModelNameOrIdEvaluationParams(TypedDict):
  model_name_or_id: str

@@ -977,26 +1048,36 @@ class GetRegressionModelByModelNameOrIdEvaluationParams(TypedDict):
  model_name_or_id: str


- class GetRegressionModelByModelNameOrIdEvaluationByTaskIdParams(TypedDict):
+ class GetClassificationModelByModelNameOrIdEvaluationByJobIdParams(TypedDict):
  model_name_or_id: str
- task_id: str
+ job_id: str


- class DeleteRegressionModelByModelNameOrIdEvaluationByTaskIdParams(TypedDict):
+ class DeleteClassificationModelByModelNameOrIdEvaluationByJobIdParams(TypedDict):
  model_name_or_id: str
- task_id: str
+ job_id: str


- class GetTaskByTaskIdParams(TypedDict):
- task_id: str
+ class GetRegressionModelByModelNameOrIdEvaluationByJobIdParams(TypedDict):
+ model_name_or_id: str
+ job_id: str


- class GetTaskByTaskIdStatusParams(TypedDict):
- task_id: str
+ class DeleteRegressionModelByModelNameOrIdEvaluationByJobIdParams(TypedDict):
+ model_name_or_id: str
+ job_id: str
+
+
+ class GetJobByJobIdParams(TypedDict):
+ job_id: str
+

+ class GetJobByJobIdStatusParams(TypedDict):
+ job_id: str

- class GetTaskParams(TypedDict):
- status: NotRequired[TaskStatus | list[TaskStatus] | None]
+
+ class GetJobParams(TypedDict):
+ status: NotRequired[JobStatus | list[JobStatus] | None]
  type: NotRequired[str | list[str] | None]
  limit: NotRequired[int | None]
  offset: NotRequired[int]
@@ -1004,8 +1085,8 @@ class GetTaskParams(TypedDict):
  end_timestamp: NotRequired[str | None]


- class DeleteTaskByTaskIdAbortParams(TypedDict):
- task_id: str
+ class DeleteJobByJobIdAbortParams(TypedDict):
+ job_id: str


  class GetWorkerParams(TypedDict):
@@ -1061,43 +1142,8 @@ class DeleteTelemetryFeedbackCategoryByNameOrIdParams(TypedDict):
  PutTelemetryPredictionFeedbackRequest = list[PredictionFeedbackRequest]


- class GetAgentsBootstrapClassificationModelByTaskIdParams(TypedDict):
- task_id: str
-
-
- class PostGpuMemorysetByNameOrIdLookupParams(TypedDict):
- name_or_id: str
-
-
- class PatchGpuMemorysetByNameOrIdMemoryParams(TypedDict):
- name_or_id: str
-
-
- class PostGpuMemorysetByNameOrIdMemoryParams(TypedDict):
- name_or_id: str
-
-
- PostGpuMemorysetByNameOrIdMemoryRequest = list[LabeledMemoryInsert] | list[ScoredMemoryInsert]
-
-
- class PatchGpuMemorysetByNameOrIdMemoriesParams(TypedDict):
- name_or_id: str
-
-
- class PostGpuClassificationModelByNameOrIdPredictionParams(TypedDict):
- name_or_id: str
-
-
- class PostGpuRegressionModelByNameOrIdPredictionParams(TypedDict):
- name_or_id: str
-
-
- class PostGpuFinetunedEmbeddingModelByNameOrIdEmbeddingParams(TypedDict):
- name_or_id: str
-
-
- class PostGpuPretrainedEmbeddingModelByModelNameEmbeddingParams(TypedDict):
- model_name: PretrainedEmbeddingModelName
+ class GetAgentsBootstrapClassificationModelByJobIdParams(TypedDict):
+ job_id: str


  class FieldValidationError(TypedDict):
@@ -1169,6 +1215,7 @@ class ClassificationPredictionRequest(TypedDict):
  prompt: NotRequired[str | None]
  use_lookup_cache: NotRequired[bool]
  consistency_level: NotRequired[Literal["Bounded", "Session", "Strong", "Eventual"] | None]
+ ignore_unlabeled: NotRequired[bool]


  class CloneMemorysetRequest(TypedDict):
@@ -1185,6 +1232,7 @@ class ColumnInfo(TypedDict):
  name: str
  type: ColumnType
  enum_options: NotRequired[list[str] | None]
+ string_values: NotRequired[list[str] | None]
  int_values: NotRequired[list[int] | None]
  contains_nones: NotRequired[bool]

@@ -1231,6 +1279,8 @@ class CreateMemorysetRequest(TypedDict):
  prompt: NotRequired[str]
  hidden: NotRequired[bool]
  batch_size: NotRequired[int]
+ subsample: NotRequired[int | float | None]
+ memory_type: NotRequired[MemoryType]


  class CreateRegressionModelRequest(TypedDict):
@@ -1255,48 +1305,52 @@ class DatasourceMetadata(TypedDict):


  class EmbeddingEvaluationResponse(TypedDict):
- task_id: str
+ job_id: str
  org_id: str
  finetuned_embedding_model_id: str | None
  pretrained_embedding_model_name: PretrainedEmbeddingModelName | None
  datasource_id: str
- subsample: int | None
+ subsample: int | float | None
  datasource_value_column: str
  datasource_label_column: NotRequired[str | None]
  datasource_score_column: NotRequired[str | None]
  neighbor_count: int
  weigh_memories: bool
- status: TaskStatus
+ status: JobStatus
  result: ClassificationMetrics | RegressionMetrics | None
  created_at: str
  updated_at: str
+ task_id: str


  class EvaluationResponse(TypedDict):
- task_id: str
+ job_id: str
  org_id: str
- status: TaskStatus
+ status: JobStatus
  result: ClassificationMetrics | RegressionMetrics | None
  created_at: str
  updated_at: str
+ task_id: str


  class EvaluationResponseClassificationMetrics(TypedDict):
- task_id: str
+ job_id: str
  org_id: str
- status: TaskStatus
+ status: JobStatus
  result: ClassificationMetrics | None
  created_at: str
  updated_at: str
+ task_id: str


  class EvaluationResponseRegressionMetrics(TypedDict):
- task_id: str
+ job_id: str
  org_id: str
- status: TaskStatus
+ status: JobStatus
  result: RegressionMetrics | None
  created_at: str
  updated_at: str
+ task_id: str


  class FinetuneEmbeddingModelRequest(TypedDict):
@@ -1305,7 +1359,8 @@ class FinetuneEmbeddingModelRequest(TypedDict):
  train_memoryset_name_or_id: NotRequired[str | None]
  train_datasource_name_or_id: NotRequired[str | None]
  eval_datasource_name_or_id: NotRequired[str | None]
- label_column: NotRequired[str]
+ label_column: NotRequired[str | None]
+ score_column: NotRequired[str | None]
  value_column: NotRequired[str]
  training_method: NotRequired[EmbeddingFinetuningMethod]
  training_args: NotRequired[dict[str, str | int | float | bool]]
@@ -1322,8 +1377,9 @@ class FinetunedEmbeddingModelMetadata(TypedDict):
  created_at: str
  updated_at: str
  base_model: PretrainedEmbeddingModelName
+ finetuning_job_id: str
+ finetuning_status: JobStatus
  finetuning_task_id: str
- finetuning_status: TaskStatus


  class HTTPValidationError(TypedDict):
@@ -1335,10 +1391,28 @@ class InvalidInputErrorResponse(TypedDict):
  validation_issues: list[FieldValidationError]


+ class Job(TypedDict):
+ status: JobStatus
+ steps_total: int | None
+ steps_completed: int | None
+ exception: str | None
+ updated_at: str
+ created_at: str
+ id: str
+ org_id: str
+ worker_id: str | None
+ type: str
+ payload: BaseModel
+ result: BaseModel | None
+ depends_on: NotRequired[list[str]]
+ lease_token: str | None
+
+
  class LabelPredictionMemoryLookup(TypedDict):
  value: str | bytes
  embedding: list[float]
  source_id: str | None
+ partition_id: str | None
  metadata: dict[str, str | int | float | bool | None]
  memory_id: str
  memory_version: int
@@ -1380,6 +1454,7 @@ class LabeledMemory(TypedDict):
  value: str | bytes
  embedding: list[float]
  source_id: str | None
+ partition_id: str | None
  metadata: dict[str, str | int | float | bool | None]
  memory_id: str
  memory_version: int
@@ -1395,6 +1470,7 @@ class LabeledMemoryLookup(TypedDict):
  value: str | bytes
  embedding: list[float]
  source_id: str | None
+ partition_id: str | None
  metadata: dict[str, str | int | float | bool | None]
  memory_id: str
  memory_version: int
@@ -1412,6 +1488,7 @@ class LabeledMemoryUpdate(TypedDict):
  value: NotRequired[str | bytes]
  metadata: NotRequired[dict[str, str | int | float | bool | None] | None]
  source_id: NotRequired[str | None]
+ partition_id: NotRequired[str | None]
  metrics: NotRequired[MemoryMetrics | None]
  label: NotRequired[int | None]

@@ -1420,6 +1497,7 @@ class LabeledMemoryWithFeedbackMetrics(TypedDict):
  value: str | bytes
  embedding: list[float]
  source_id: str | None
+ partition_id: str | None
  metadata: dict[str, str | int | float | bool | None]
  memory_id: str
  memory_version: int
@@ -1439,7 +1517,8 @@ class ListPredictionsRequest(TypedDict):
  prediction_ids: NotRequired[list[str] | None]
  start_timestamp: NotRequired[str | None]
  end_timestamp: NotRequired[str | None]
- limit: NotRequired[int | None]
+ memory_id: NotRequired[str | None]
+ limit: NotRequired[int]
  offset: NotRequired[int | None]
  sort: NotRequired[PredictionSort]
  expected_label_match: NotRequired[bool | None]
@@ -1478,6 +1557,13 @@ class MemorysetMetrics(TypedDict):
  concepts: NotRequired[MemorysetConceptMetrics | None]


+ class PaginatedJob(TypedDict):
+ items: list[Job]
+ total: int
+ offset: int
+ limit: int
+
+
  class PaginatedUnionLabeledMemoryWithFeedbackMetricsScoredMemoryWithFeedbackMetrics(TypedDict):
  items: list[LabeledMemoryWithFeedbackMetrics | ScoredMemoryWithFeedbackMetrics]
  total: int
@@ -1495,23 +1581,6 @@ class PretrainedEmbeddingModelMetadata(TypedDict):
  num_params: int


- class Task(TypedDict):
- status: TaskStatus
- steps_total: int | None
- steps_completed: int | None
- exception: str | None
- updated_at: str
- created_at: str
- id: str
- org_id: str
- worker_id: str | None
- type: str
- payload: BaseModel
- result: BaseModel | None
- depends_on: list[str]
- lease_token: str | None
-
-
  class TelemetryMemoriesRequest(TypedDict):
  memoryset_id: str
  offset: NotRequired[int]
@@ -1543,10 +1612,10 @@ class CascadingEditSuggestion(TypedDict):


  class MemorysetAnalysisResponse(TypedDict):
- task_id: str
+ job_id: str
  org_id: str
  memoryset_id: str
- status: TaskStatus
+ status: JobStatus
  lookup_count: int
  batch_size: int
  clear_metrics: bool
@@ -1554,6 +1623,7 @@ class MemorysetAnalysisResponse(TypedDict):
  results: MemorysetMetrics | None
  created_at: str
  updated_at: str
+ task_id: str


  class MemorysetMetadata(TypedDict):
@@ -1569,8 +1639,8 @@ class MemorysetMetadata(TypedDict):
  created_at: str
  updated_at: str
  memories_updated_at: str
- insertion_task_id: str
- insertion_status: TaskStatus
+ insertion_job_id: str
+ insertion_status: JobStatus
  metrics: MemorysetMetrics
  memory_type: MemoryType
  label_names: list[str] | None
@@ -1580,13 +1650,7 @@ class MemorysetMetadata(TypedDict):
  document_prompt_override: str | None
  query_prompt_override: str | None
  hidden: bool
-
-
- class PaginatedTask(TypedDict):
- items: list[Task]
- total: int
- offset: int
- limit: int
+ insertion_task_id: str


  class PaginatedWorkerInfo(TypedDict):
@@ -1604,11 +1668,12 @@ class BootstrapClassificationModelMeta(TypedDict):


  class BootstrapClassificationModelResponse(TypedDict):
- task_id: str
+ job_id: str
  org_id: str
- status: TaskStatus
+ status: JobStatus
  result: BootstrapClassificationModelMeta | None
  input: BootstrapClassificationModelRequest | None
+ task_id: str


  class OrcaClient(Client):
@@ -1887,9 +1952,9 @@ class OrcaClient(Client):
1887
1952
  @overload
1888
1953
  def GET(
1889
1954
  self,
1890
- path: Literal["/memoryset/{name_or_id}/analysis/{analysis_task_id}"],
1955
+ path: Literal["/memoryset/{name_or_id}/analysis/{analysis_job_id}"],
1891
1956
  *,
1892
- params: GetMemorysetByNameOrIdAnalysisByAnalysisTaskIdParams,
1957
+ params: GetMemorysetByNameOrIdAnalysisByAnalysisJobIdParams,
1893
1958
  parse_as: Literal["json"] = "json",
1894
1959
  headers: HeaderTypes | None = None,
1895
1960
  cookies: CookieTypes | None = None,
@@ -1937,9 +2002,9 @@ class OrcaClient(Client):
1937
2002
  @overload
1938
2003
  def GET(
1939
2004
  self,
1940
- path: Literal["/finetuned_embedding_model/{name_or_id}/evaluation/{task_id}"],
2005
+ path: Literal["/pretrained_embedding_model"],
1941
2006
  *,
1942
- params: GetFinetunedEmbeddingModelByNameOrIdEvaluationByTaskIdParams,
2007
+ params: None = None,
1943
2008
  parse_as: Literal["json"] = "json",
1944
2009
  headers: HeaderTypes | None = None,
1945
2010
  cookies: CookieTypes | None = None,
@@ -1947,16 +2012,16 @@ class OrcaClient(Client):
1947
2012
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
1948
2013
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
1949
2014
  extensions: RequestExtensions | None = None,
1950
- ) -> EmbeddingEvaluationResponse:
1951
- """Get evaluation results for a finetuned embedding model by task ID."""
2015
+ ) -> list[PretrainedEmbeddingModelMetadata]:
2016
+ """List all available pretrained embedding models."""
1952
2017
  pass
1953
2018
 
1954
2019
  @overload
1955
2020
  def GET(
1956
2021
  self,
1957
- path: Literal["/finetuned_embedding_model/{name_or_id}/evaluations"],
2022
+ path: Literal["/pretrained_embedding_model/{model_name}"],
1958
2023
  *,
1959
- params: GetFinetunedEmbeddingModelByNameOrIdEvaluationsParams,
2024
+ params: GetPretrainedEmbeddingModelByModelNameParams,
1960
2025
  parse_as: Literal["json"] = "json",
1961
2026
  headers: HeaderTypes | None = None,
1962
2027
  cookies: CookieTypes | None = None,
@@ -1964,16 +2029,16 @@ class OrcaClient(Client):
1964
2029
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
1965
2030
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
1966
2031
  extensions: RequestExtensions | None = None,
1967
- ) -> list[EmbeddingEvaluationResponse]:
1968
- """List all evaluation results for a finetuned embedding model."""
2032
+ ) -> PretrainedEmbeddingModelMetadata:
2033
+ """Get metadata for a specific pretrained embedding model."""
1969
2034
  pass
1970
2035
 
1971
2036
  @overload
1972
2037
  def GET(
1973
2038
  self,
1974
- path: Literal["/pretrained_embedding_model"],
2039
+ path: Literal["/finetuned_embedding_model/{name_or_id}/evaluation/{job_id}"],
1975
2040
  *,
1976
- params: None = None,
2041
+ params: GetFinetunedEmbeddingModelByNameOrIdEvaluationByJobIdParams,
1977
2042
  parse_as: Literal["json"] = "json",
1978
2043
  headers: HeaderTypes | None = None,
1979
2044
  cookies: CookieTypes | None = None,
@@ -1981,16 +2046,16 @@ class OrcaClient(Client):
1981
2046
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
1982
2047
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
1983
2048
  extensions: RequestExtensions | None = None,
1984
- ) -> list[PretrainedEmbeddingModelMetadata]:
1985
- """List all available pretrained embedding models."""
2049
+ ) -> EmbeddingEvaluationResponse:
2050
+ """Get evaluation results for a finetuned embedding model by job ID."""
1986
2051
  pass
1987
2052
 
1988
2053
  @overload
1989
2054
  def GET(
1990
2055
  self,
1991
- path: Literal["/pretrained_embedding_model/{model_name}"],
2056
+ path: Literal["/pretrained_embedding_model/{model_name}/evaluation/{job_id}"],
1992
2057
  *,
1993
- params: GetPretrainedEmbeddingModelByModelNameParams,
2058
+ params: GetPretrainedEmbeddingModelByModelNameEvaluationByJobIdParams,
1994
2059
  parse_as: Literal["json"] = "json",
1995
2060
  headers: HeaderTypes | None = None,
1996
2061
  cookies: CookieTypes | None = None,
@@ -1998,16 +2063,16 @@ class OrcaClient(Client):
1998
2063
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
1999
2064
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
2000
2065
  extensions: RequestExtensions | None = None,
2001
- ) -> PretrainedEmbeddingModelMetadata:
2002
- """Get metadata for a specific pretrained embedding model."""
2066
+ ) -> EmbeddingEvaluationResponse:
2067
+ """Get evaluation results for a pretrained embedding model by job ID."""
2003
2068
  pass
2004
2069
 
2005
2070
  @overload
2006
2071
  def GET(
2007
2072
  self,
2008
- path: Literal["/pretrained_embedding_model/{model_name}/evaluation/{task_id}"],
2073
+ path: Literal["/finetuned_embedding_model/{name_or_id}/evaluations"],
2009
2074
  *,
2010
- params: GetPretrainedEmbeddingModelByModelNameEvaluationByTaskIdParams,
2075
+ params: GetFinetunedEmbeddingModelByNameOrIdEvaluationsParams,
2011
2076
  parse_as: Literal["json"] = "json",
2012
2077
  headers: HeaderTypes | None = None,
2013
2078
  cookies: CookieTypes | None = None,
@@ -2015,8 +2080,8 @@ class OrcaClient(Client):
2015
2080
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
2016
2081
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
2017
2082
  extensions: RequestExtensions | None = None,
2018
- ) -> EmbeddingEvaluationResponse:
2019
- """Get evaluation results for a pretrained embedding model by task ID."""
2083
+ ) -> list[EmbeddingEvaluationResponse]:
2084
+ """List all evaluation results for a finetuned embedding model."""
2020
2085
  pass
2021
2086
 
2022
2087
  @overload
@@ -2141,7 +2206,7 @@ class OrcaClient(Client):
2141
2206
  @overload
2142
2207
  def GET(
2143
2208
  self,
2144
- path: Literal["/predictive_model"],
2209
+ path: Literal["/classification_model"],
2145
2210
  *,
2146
2211
  params: None = None,
2147
2212
  parse_as: Literal["json"] = "json",
@@ -2151,13 +2216,13 @@ class OrcaClient(Client):
2151
2216
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
2152
2217
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
2153
2218
  extensions: RequestExtensions | None = None,
2154
- ) -> list[ClassificationModelMetadata | RegressionModelMetadata]:
2219
+ ) -> list[ClassificationModelMetadata]:
2155
2220
  pass
2156
2221
 
2157
2222
  @overload
2158
2223
  def GET(
2159
2224
  self,
2160
- path: Literal["/classification_model"],
2225
+ path: Literal["/regression_model"],
2161
2226
  *,
2162
2227
  params: None = None,
2163
2228
  parse_as: Literal["json"] = "json",
@@ -2167,7 +2232,7 @@ class OrcaClient(Client):
2167
2232
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
2168
2233
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
2169
2234
  extensions: RequestExtensions | None = None,
2170
- ) -> list[ClassificationModelMetadata]:
2235
+ ) -> list[RegressionModelMetadata]:
2171
2236
  pass
2172
2237
 
2173
2238
  @overload
@@ -2189,9 +2254,9 @@ class OrcaClient(Client):
2189
2254
  @overload
2190
2255
  def GET(
2191
2256
  self,
2192
- path: Literal["/classification_model/{model_name_or_id}/evaluation"],
2257
+ path: Literal["/regression_model/{name_or_id}"],
2193
2258
  *,
2194
- params: GetClassificationModelByModelNameOrIdEvaluationParams,
2259
+ params: GetRegressionModelByNameOrIdParams,
2195
2260
  parse_as: Literal["json"] = "json",
2196
2261
  headers: HeaderTypes | None = None,
2197
2262
  cookies: CookieTypes | None = None,
@@ -2199,15 +2264,15 @@ class OrcaClient(Client):
2199
2264
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
2200
2265
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
2201
2266
  extensions: RequestExtensions | None = None,
2202
- ) -> list[EvaluationResponseClassificationMetrics]:
2267
+ ) -> RegressionModelMetadata:
2203
2268
  pass
2204
2269
 
2205
2270
  @overload
2206
2271
  def GET(
2207
2272
  self,
2208
- path: Literal["/classification_model/{model_name_or_id}/evaluation/{task_id}"],
2273
+ path: Literal["/predictive_model"],
2209
2274
  *,
2210
- params: GetClassificationModelByModelNameOrIdEvaluationByTaskIdParams,
2275
+ params: None = None,
2211
2276
  parse_as: Literal["json"] = "json",
2212
2277
  headers: HeaderTypes | None = None,
2213
2278
  cookies: CookieTypes | None = None,
@@ -2215,15 +2280,15 @@ class OrcaClient(Client):
2215
2280
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
2216
2281
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
2217
2282
  extensions: RequestExtensions | None = None,
2218
- ) -> EvaluationResponseClassificationMetrics:
2283
+ ) -> list[ClassificationModelMetadata | RegressionModelMetadata]:
2219
2284
  pass
2220
2285
 
2221
2286
  @overload
2222
2287
  def GET(
2223
2288
  self,
2224
- path: Literal["/regression_model"],
2289
+ path: Literal["/classification_model/{model_name_or_id}/evaluation"],
2225
2290
  *,
2226
- params: None = None,
2291
+ params: GetClassificationModelByModelNameOrIdEvaluationParams,
2227
2292
  parse_as: Literal["json"] = "json",
2228
2293
  headers: HeaderTypes | None = None,
2229
2294
  cookies: CookieTypes | None = None,
@@ -2231,15 +2296,15 @@ class OrcaClient(Client):
2231
2296
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
2232
2297
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
2233
2298
  extensions: RequestExtensions | None = None,
2234
- ) -> list[RegressionModelMetadata]:
2299
+ ) -> list[EvaluationResponseClassificationMetrics]:
2235
2300
  pass
2236
2301
 
2237
2302
  @overload
2238
2303
  def GET(
2239
2304
  self,
2240
- path: Literal["/regression_model/{name_or_id}"],
2305
+ path: Literal["/regression_model/{model_name_or_id}/evaluation"],
2241
2306
  *,
2242
- params: GetRegressionModelByNameOrIdParams,
2307
+ params: GetRegressionModelByModelNameOrIdEvaluationParams,
2243
2308
  parse_as: Literal["json"] = "json",
2244
2309
  headers: HeaderTypes | None = None,
2245
2310
  cookies: CookieTypes | None = None,
@@ -2247,15 +2312,15 @@ class OrcaClient(Client):
2247
2312
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
2248
2313
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
2249
2314
  extensions: RequestExtensions | None = None,
2250
- ) -> RegressionModelMetadata:
2315
+ ) -> list[EvaluationResponseRegressionMetrics]:
2251
2316
  pass
2252
2317
 
2253
2318
  @overload
2254
2319
  def GET(
2255
2320
  self,
2256
- path: Literal["/regression_model/{model_name_or_id}/evaluation"],
2321
+ path: Literal["/classification_model/{model_name_or_id}/evaluation/{job_id}"],
2257
2322
  *,
2258
- params: GetRegressionModelByModelNameOrIdEvaluationParams,
2323
+ params: GetClassificationModelByModelNameOrIdEvaluationByJobIdParams,
2259
2324
  parse_as: Literal["json"] = "json",
2260
2325
  headers: HeaderTypes | None = None,
2261
2326
  cookies: CookieTypes | None = None,
@@ -2263,15 +2328,15 @@ class OrcaClient(Client):
2263
2328
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
2264
2329
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
2265
2330
  extensions: RequestExtensions | None = None,
2266
- ) -> list[EvaluationResponseRegressionMetrics]:
2331
+ ) -> EvaluationResponseClassificationMetrics:
2267
2332
  pass
2268
2333
 
2269
2334
  @overload
2270
2335
  def GET(
2271
2336
  self,
2272
- path: Literal["/regression_model/{model_name_or_id}/evaluation/{task_id}"],
2337
+ path: Literal["/regression_model/{model_name_or_id}/evaluation/{job_id}"],
2273
2338
  *,
2274
- params: GetRegressionModelByModelNameOrIdEvaluationByTaskIdParams,
2339
+ params: GetRegressionModelByModelNameOrIdEvaluationByJobIdParams,
2275
2340
  parse_as: Literal["json"] = "json",
2276
2341
  headers: HeaderTypes | None = None,
2277
2342
  cookies: CookieTypes | None = None,
@@ -2285,9 +2350,9 @@ class OrcaClient(Client):
2285
2350
  @overload
2286
2351
  def GET(
2287
2352
  self,
2288
- path: Literal["/task/{task_id}"],
2353
+ path: Literal["/job/{job_id}"],
2289
2354
  *,
2290
- params: GetTaskByTaskIdParams,
2355
+ params: GetJobByJobIdParams,
2291
2356
  parse_as: Literal["json"] = "json",
2292
2357
  headers: HeaderTypes | None = None,
2293
2358
  cookies: CookieTypes | None = None,
@@ -2295,15 +2360,15 @@ class OrcaClient(Client):
2295
2360
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
2296
2361
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
2297
2362
  extensions: RequestExtensions | None = None,
2298
- ) -> Task:
2363
+ ) -> Job:
2299
2364
  pass
2300
2365
 
2301
2366
  @overload
2302
2367
  def GET(
2303
2368
  self,
2304
- path: Literal["/task/{task_id}/status"],
2369
+ path: Literal["/job/{job_id}/status"],
2305
2370
  *,
2306
- params: GetTaskByTaskIdStatusParams,
2371
+ params: GetJobByJobIdStatusParams,
2307
2372
  parse_as: Literal["json"] = "json",
2308
2373
  headers: HeaderTypes | None = None,
2309
2374
  cookies: CookieTypes | None = None,
@@ -2311,15 +2376,15 @@ class OrcaClient(Client):
2311
2376
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
2312
2377
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
2313
2378
  extensions: RequestExtensions | None = None,
2314
- ) -> TaskStatusInfo:
2379
+ ) -> JobStatusInfo:
2315
2380
  pass
2316
2381
 
2317
2382
  @overload
2318
2383
  def GET(
2319
2384
  self,
2320
- path: Literal["/task"],
2385
+ path: Literal["/job"],
2321
2386
  *,
2322
- params: GetTaskParams,
2387
+ params: GetJobParams,
2323
2388
  parse_as: Literal["json"] = "json",
2324
2389
  headers: HeaderTypes | None = None,
2325
2390
  cookies: CookieTypes | None = None,
@@ -2327,7 +2392,7 @@ class OrcaClient(Client):
2327
2392
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
2328
2393
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
2329
2394
  extensions: RequestExtensions | None = None,
2330
- ) -> PaginatedTask:
2395
+ ) -> PaginatedJob:
2331
2396
  pass
2332
2397
 
2333
2398
  @overload
@@ -2478,9 +2543,9 @@ class OrcaClient(Client):
2478
2543
  @overload
2479
2544
  def GET(
2480
2545
  self,
2481
- path: Literal["/agents/bootstrap_classification_model/{task_id}"],
2546
+ path: Literal["/agents/bootstrap_classification_model/{job_id}"],
2482
2547
  *,
2483
- params: GetAgentsBootstrapClassificationModelByTaskIdParams,
2548
+ params: GetAgentsBootstrapClassificationModelByJobIdParams,
2484
2549
  parse_as: Literal["json"] = "json",
2485
2550
  headers: HeaderTypes | None = None,
2486
2551
  cookies: CookieTypes | None = None,
@@ -2489,7 +2554,7 @@ class OrcaClient(Client):
2489
2554
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
2490
2555
  extensions: RequestExtensions | None = None,
2491
2556
  ) -> BootstrapClassificationModelResponse:
2492
- """Get the status of a bootstrap classification model task"""
2557
+ """Get the status of a bootstrap classification model job"""
2493
2558
  pass
2494
2559
 
2495
2560
  def GET(
@@ -2659,9 +2724,9 @@ class OrcaClient(Client):
2659
2724
  @overload
2660
2725
  def DELETE(
2661
2726
  self,
2662
- path: Literal["/classification_model/{model_name_or_id}/evaluation/{task_id}"],
2727
+ path: Literal["/regression_model/{name_or_id}"],
2663
2728
  *,
2664
- params: DeleteClassificationModelByModelNameOrIdEvaluationByTaskIdParams,
2729
+ params: DeleteRegressionModelByNameOrIdParams,
2665
2730
  parse_as: Literal["json"] = "json",
2666
2731
  headers: HeaderTypes | None = None,
2667
2732
  cookies: CookieTypes | None = None,
@@ -2675,9 +2740,9 @@ class OrcaClient(Client):
2675
2740
  @overload
2676
2741
  def DELETE(
2677
2742
  self,
2678
- path: Literal["/regression_model/{name_or_id}"],
2743
+ path: Literal["/classification_model/{model_name_or_id}/evaluation/{job_id}"],
2679
2744
  *,
2680
- params: DeleteRegressionModelByNameOrIdParams,
2745
+ params: DeleteClassificationModelByModelNameOrIdEvaluationByJobIdParams,
2681
2746
  parse_as: Literal["json"] = "json",
2682
2747
  headers: HeaderTypes | None = None,
2683
2748
  cookies: CookieTypes | None = None,
@@ -2691,9 +2756,9 @@ class OrcaClient(Client):
2691
2756
  @overload
2692
2757
  def DELETE(
2693
2758
  self,
2694
- path: Literal["/regression_model/{model_name_or_id}/evaluation/{task_id}"],
2759
+ path: Literal["/regression_model/{model_name_or_id}/evaluation/{job_id}"],
2695
2760
  *,
2696
- params: DeleteRegressionModelByModelNameOrIdEvaluationByTaskIdParams,
2761
+ params: DeleteRegressionModelByModelNameOrIdEvaluationByJobIdParams,
2697
2762
  parse_as: Literal["json"] = "json",
2698
2763
  headers: HeaderTypes | None = None,
2699
2764
  cookies: CookieTypes | None = None,
@@ -2707,9 +2772,9 @@ class OrcaClient(Client):
2707
2772
  @overload
2708
2773
  def DELETE(
2709
2774
  self,
2710
- path: Literal["/task/{task_id}/abort"],
2775
+ path: Literal["/job/{job_id}/abort"],
2711
2776
  *,
2712
- params: DeleteTaskByTaskIdAbortParams,
2777
+ params: DeleteJobByJobIdAbortParams,
2713
2778
  parse_as: Literal["json"] = "json",
2714
2779
  headers: HeaderTypes | None = None,
2715
2780
  cookies: CookieTypes | None = None,
@@ -2870,6 +2935,26 @@ class OrcaClient(Client):
2870
2935
  ) -> None:
2871
2936
  pass
2872
2937
 
2938
+ @overload
2939
+ def POST(
2940
+ self,
2941
+ path: Literal["/gpu/memoryset/{name_or_id}/lookup"],
2942
+ *,
2943
+ params: PostGpuMemorysetByNameOrIdLookupParams,
2944
+ json: LookupRequest,
2945
+ data: None = None,
2946
+ files: None = None,
2947
+ content: None = None,
2948
+ parse_as: Literal["json"] = "json",
2949
+ headers: HeaderTypes | None = None,
2950
+ cookies: CookieTypes | None = None,
2951
+ auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
2952
+ follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
2953
+ timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
2954
+ extensions: RequestExtensions | None = None,
2955
+ ) -> list[list[LabeledMemoryLookup | ScoredMemoryLookup]]:
2956
+ pass
2957
+
2873
2958
  @overload
2874
2959
  def POST(
2875
2960
  self,
@@ -2930,6 +3015,26 @@ class OrcaClient(Client):
2930
3015
  ) -> None:
2931
3016
  pass
2932
3017
 
3018
+ @overload
3019
+ def POST(
3020
+ self,
3021
+ path: Literal["/gpu/memoryset/{name_or_id}/memory"],
3022
+ *,
3023
+ params: PostGpuMemorysetByNameOrIdMemoryParams,
3024
+ json: PostGpuMemorysetByNameOrIdMemoryRequest,
3025
+ data: None = None,
3026
+ files: None = None,
3027
+ content: None = None,
3028
+ parse_as: Literal["json"] = "json",
3029
+ headers: HeaderTypes | None = None,
3030
+ cookies: CookieTypes | None = None,
3031
+ auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
3032
+ follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
3033
+ timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
3034
+ extensions: RequestExtensions | None = None,
3035
+ ) -> list[str]:
3036
+ pass
3037
+
2933
3038
  @overload
2934
3039
  def POST(
2935
3040
  self,
@@ -2991,6 +3096,48 @@ class OrcaClient(Client):
2991
3096
  """Create a finetuned embedding model."""
2992
3097
  pass
2993
3098
 
3099
+ @overload
3100
+ def POST(
3101
+ self,
3102
+ path: Literal["/gpu/finetuned_embedding_model/{name_or_id}/embedding"],
3103
+ *,
3104
+ params: PostGpuFinetunedEmbeddingModelByNameOrIdEmbeddingParams,
3105
+ json: EmbedRequest,
3106
+ data: None = None,
3107
+ files: None = None,
3108
+ content: None = None,
3109
+ parse_as: Literal["json"] = "json",
3110
+ headers: HeaderTypes | None = None,
3111
+ cookies: CookieTypes | None = None,
3112
+ auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
3113
+ follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
3114
+ timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
3115
+ extensions: RequestExtensions | None = None,
3116
+ ) -> list[list[float]]:
3117
+ """Embed values using a finetuned embedding model."""
3118
+ pass
3119
+
3120
+ @overload
3121
+ def POST(
3122
+ self,
3123
+ path: Literal["/gpu/pretrained_embedding_model/{model_name}/embedding"],
3124
+ *,
3125
+ params: PostGpuPretrainedEmbeddingModelByModelNameEmbeddingParams,
3126
+ json: EmbedRequest,
3127
+ data: None = None,
3128
+ files: None = None,
3129
+ content: None = None,
3130
+ parse_as: Literal["json"] = "json",
3131
+ headers: HeaderTypes | None = None,
3132
+ cookies: CookieTypes | None = None,
3133
+ auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
3134
+ follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
3135
+ timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
3136
+ extensions: RequestExtensions | None = None,
3137
+ ) -> list[list[float]]:
3138
+ """Embed values using a pretrained embedding model."""
3139
+ pass
3140
+
2994
3141
  @overload
2995
3142
  def POST(
2996
3143
  self,
@@ -3090,10 +3237,10 @@ class OrcaClient(Client):
3090
3237
  @overload
3091
3238
  def POST(
3092
3239
  self,
3093
- path: Literal["/classification_model"],
3240
+ path: Literal["/datasource/{name_or_id}/rows"],
3094
3241
  *,
3095
- params: None = None,
3096
- json: CreateClassificationModelRequest,
3242
+ params: PostDatasourceByNameOrIdRowsParams,
3243
+ json: GetDatasourceRowsRequest,
3097
3244
  data: None = None,
3098
3245
  files: None = None,
3099
3246
  content: None = None,
@@ -3104,16 +3251,17 @@ class OrcaClient(Client):
3104
3251
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
3105
3252
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
3106
3253
  extensions: RequestExtensions | None = None,
3107
- ) -> ClassificationModelMetadata:
3254
+ ) -> list[dict[str, Any]]:
3255
+ """Get rows from a specific datasource with optional filtering."""
3108
3256
  pass
3109
3257
 
3110
3258
  @overload
3111
3259
  def POST(
3112
3260
  self,
3113
- path: Literal["/classification_model/{model_name_or_id}/evaluation"],
3261
+ path: Literal["/datasource/{name_or_id}/rows/count"],
3114
3262
  *,
3115
- params: PostClassificationModelByModelNameOrIdEvaluationParams,
3116
- json: ClassificationEvaluationRequest,
3263
+ params: PostDatasourceByNameOrIdRowsCountParams,
3264
+ json: GetDatasourceRowCountRequest,
3117
3265
  data: None = None,
3118
3266
  files: None = None,
3119
3267
  content: None = None,
@@ -3124,16 +3272,17 @@ class OrcaClient(Client):
3124
3272
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
3125
3273
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
3126
3274
  extensions: RequestExtensions | None = None,
3127
- ) -> EvaluationResponse:
3275
+ ) -> int:
3276
+ """Get row count from a specific datasource with optional filtering."""
3128
3277
  pass
3129
3278
 
3130
3279
  @overload
3131
3280
  def POST(
3132
3281
  self,
3133
- path: Literal["/regression_model"],
3282
+ path: Literal["/classification_model"],
3134
3283
  *,
3135
3284
  params: None = None,
3136
- json: CreateRegressionModelRequest,
3285
+ json: CreateClassificationModelRequest,
3137
3286
  data: None = None,
3138
3287
  files: None = None,
3139
3288
  content: None = None,
@@ -3144,16 +3293,16 @@ class OrcaClient(Client):
3144
3293
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
3145
3294
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
3146
3295
  extensions: RequestExtensions | None = None,
3147
- ) -> RegressionModelMetadata:
3296
+ ) -> ClassificationModelMetadata:
3148
3297
  pass
3149
3298
 
3150
3299
  @overload
3151
3300
  def POST(
3152
3301
  self,
3153
- path: Literal["/regression_model/{model_name_or_id}/evaluation"],
3302
+ path: Literal["/regression_model"],
3154
3303
  *,
3155
- params: PostRegressionModelByModelNameOrIdEvaluationParams,
3156
- json: RegressionEvaluationRequest,
3304
+ params: None = None,
3305
+ json: CreateRegressionModelRequest,
3157
3306
  data: None = None,
3158
3307
  files: None = None,
3159
3308
  content: None = None,
@@ -3164,16 +3313,16 @@ class OrcaClient(Client):
3164
3313
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
3165
3314
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
3166
3315
  extensions: RequestExtensions | None = None,
3167
- ) -> EvaluationResponse:
3316
+ ) -> RegressionModelMetadata:
3168
3317
  pass
3169
3318
 
3170
3319
  @overload
3171
3320
  def POST(
3172
3321
  self,
3173
- path: Literal["/telemetry/prediction"],
3322
+ path: Literal["/gpu/classification_model/{name_or_id}/prediction"],
3174
3323
  *,
3175
- params: None = None,
3176
- json: ListPredictionsRequest | None = None,
3324
+ params: PostGpuClassificationModelByNameOrIdPredictionParams,
3325
+ json: ClassificationPredictionRequest,
3177
3326
  data: None = None,
3178
3327
  files: None = None,
3179
3328
  content: None = None,
@@ -3184,17 +3333,16 @@ class OrcaClient(Client):
3184
3333
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
3185
3334
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
3186
3335
  extensions: RequestExtensions | None = None,
3187
- ) -> list[LabelPredictionWithMemoriesAndFeedback | ScorePredictionWithMemoriesAndFeedback]:
3188
- """List predictions with optional filtering and sorting."""
3336
+ ) -> list[BaseLabelPredictionResult]:
3189
3337
  pass
3190
3338
 
3191
3339
  @overload
3192
3340
  def POST(
3193
3341
  self,
3194
- path: Literal["/telemetry/prediction/count"],
3342
+ path: Literal["/classification_model/{name_or_id}/prediction"],
3195
3343
  *,
3196
- params: None = None,
3197
- json: CountPredictionsRequest | None = None,
3344
+ params: PostClassificationModelByNameOrIdPredictionParams,
3345
+ json: ClassificationPredictionRequest,
3198
3346
  data: None = None,
3199
3347
  files: None = None,
3200
3348
  content: None = None,
@@ -3205,17 +3353,16 @@ class OrcaClient(Client):
3205
3353
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
3206
3354
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
3207
3355
  extensions: RequestExtensions | None = None,
3208
- ) -> int:
3209
- """Count predictions with optional filtering."""
3356
+ ) -> list[BaseLabelPredictionResult]:
3210
3357
  pass
3211
3358
 
3212
3359
  @overload
3213
3360
  def POST(
3214
3361
  self,
3215
- path: Literal["/telemetry/memories"],
3362
+ path: Literal["/gpu/regression_model/{name_or_id}/prediction"],
3216
3363
  *,
3217
- params: None = None,
3218
- json: TelemetryMemoriesRequest,
3364
+ params: PostGpuRegressionModelByNameOrIdPredictionParams,
3365
+ json: RegressionPredictionRequest,
3219
3366
  data: None = None,
3220
3367
  files: None = None,
3221
3368
  content: None = None,
@@ -3226,21 +3373,16 @@ class OrcaClient(Client):
3226
3373
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
3227
3374
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
3228
3375
  extensions: RequestExtensions | None = None,
3229
- ) -> PaginatedUnionLabeledMemoryWithFeedbackMetricsScoredMemoryWithFeedbackMetrics:
3230
- """
3231
- List memories with feedback metrics.
3232
- **Note**: This endpoint will ONLY return memories that have been used in a prediction.
3233
- If you want to query ALL memories WITHOUT feedback metrics, use the query_memoryset endpoint.
3234
- """
3376
+ ) -> list[BaseScorePredictionResult]:
3235
3377
  pass
3236
3378
 
3237
3379
  @overload
3238
3380
  def POST(
3239
3381
  self,
3240
- path: Literal["/agents/bootstrap_classification_model"],
3382
+ path: Literal["/regression_model/{name_or_id}/prediction"],
3241
3383
  *,
3242
- params: None = None,
3243
- json: BootstrapClassificationModelRequest,
3384
+ params: PostRegressionModelByNameOrIdPredictionParams,
3385
+ json: RegressionPredictionRequest,
3244
3386
  data: None = None,
3245
3387
  files: None = None,
3246
3388
  content: None = None,
@@ -3251,30 +3393,16 @@ class OrcaClient(Client):
3251
3393
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
3252
3394
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
3253
3395
  extensions: RequestExtensions | None = None,
3254
- ) -> BootstrapClassificationModelResponse:
3255
- """
3256
- Bootstrap a classification model by creating a memoryset with generated memories and a classification model.
3257
-
3258
- This endpoint uses the bootstrap_classification_model agent to generate:
3259
- 1. Memoryset configuration with appropriate settings
3260
- 2. Model configuration with optimal parameters
3261
- 3. High-quality training memories for each label
3262
-
3263
- The process involves:
3264
- 1. Calling the agent to generate configurations and memories
3265
- 2. Creating a datasource from the generated memories
3266
- 3. Creating a memoryset from the datasource
3267
- 4. Creating a classification model from the memoryset
3268
- """
3396
+ ) -> list[BaseScorePredictionResult]:
3269
3397
  pass
3270
3398
 
3271
3399
  @overload
  def POST(
  self,
- path: Literal["/gpu/memoryset/{name_or_id}/lookup"],
+ path: Literal["/classification_model/{model_name_or_id}/evaluation"],
  *,
- params: PostGpuMemorysetByNameOrIdLookupParams,
- json: LookupRequest,
+ params: PostClassificationModelByModelNameOrIdEvaluationParams,
+ json: ClassificationEvaluationRequest,
  data: None = None,
  files: None = None,
  content: None = None,
@@ -3285,16 +3413,16 @@ class OrcaClient(Client):
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
  extensions: RequestExtensions | None = None,
- ) -> list[list[LabeledMemoryLookup | ScoredMemoryLookup]]:
+ ) -> EvaluationResponse:
  pass

  @overload
  def POST(
  self,
- path: Literal["/gpu/memoryset/{name_or_id}/memory"],
+ path: Literal["/regression_model/{model_name_or_id}/evaluation"],
  *,
- params: PostGpuMemorysetByNameOrIdMemoryParams,
- json: PostGpuMemorysetByNameOrIdMemoryRequest,
+ params: PostRegressionModelByModelNameOrIdEvaluationParams,
+ json: RegressionEvaluationRequest,
  data: None = None,
  files: None = None,
  content: None = None,
@@ -3305,16 +3433,16 @@ class OrcaClient(Client):
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
  extensions: RequestExtensions | None = None,
- ) -> list[str]:
+ ) -> EvaluationResponse:
  pass

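The two evaluation overloads above route ClassificationEvaluationRequest and RegressionEvaluationRequest bodies to the classification and regression evaluation endpoints, both typed to return an EvaluationResponse. A sketch for the classification case; treat every request key below as an assumption rather than the verified TypedDict shape.

    # Sketch: evaluate a classification model against a datasource.
    # All request fields below are assumptions for illustration.
    evaluation = client.POST(
        "/classification_model/{model_name_or_id}/evaluation",
        params={"model_name_or_id": "my-classifier"},  # assumed path parameter
        json={
            "datasource_name_or_id": "holdout-set",    # assumed field
            "subsample": 0.1,                          # assumed field: evaluate on a fraction of rows
        },
    )
    # Both evaluation overloads are typed to return an EvaluationResponse.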
  @overload
  def POST(
  self,
- path: Literal["/gpu/classification_model/{name_or_id}/prediction"],
+ path: Literal["/telemetry/prediction"],
  *,
- params: PostGpuClassificationModelByNameOrIdPredictionParams,
- json: ClassificationPredictionRequest,
+ params: None = None,
+ json: ListPredictionsRequest | None = None,
  data: None = None,
  files: None = None,
  content: None = None,
@@ -3325,16 +3453,17 @@ class OrcaClient(Client):
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
  extensions: RequestExtensions | None = None,
- ) -> list[BaseLabelPredictionResult]:
+ ) -> list[LabelPredictionWithMemoriesAndFeedback | ScorePredictionWithMemoriesAndFeedback]:
+ """List predictions with optional filtering and sorting."""
  pass

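The /telemetry/prediction overload above accepts an optional ListPredictionsRequest body and returns predictions together with their memories and feedback. A sketch; how the server treats an omitted body is an assumption based on the json parameter being optional.

    # Sketch: list recorded predictions. Passing no body is assumed to return an
    # unfiltered result, since json is typed as ListPredictionsRequest | None.
    predictions = client.POST("/telemetry/prediction")
    for prediction in predictions:
        # Each item is a LabelPredictionWithMemoriesAndFeedback or a
        # ScorePredictionWithMemoriesAndFeedback, depending on the model type.
        print(prediction)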
  @overload
  def POST(
  self,
- path: Literal["/gpu/regression_model/{name_or_id}/prediction"],
+ path: Literal["/telemetry/prediction/count"],
  *,
- params: PostGpuRegressionModelByNameOrIdPredictionParams,
- json: RegressionPredictionRequest,
+ params: None = None,
+ json: CountPredictionsRequest | None = None,
  data: None = None,
  files: None = None,
  content: None = None,
@@ -3345,16 +3474,17 @@ class OrcaClient(Client):
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
  extensions: RequestExtensions | None = None,
- ) -> list[BaseScorePredictionResult]:
+ ) -> int:
+ """Count predictions with optional filtering."""
  pass

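/telemetry/prediction/count takes an optional CountPredictionsRequest and returns a bare int, which pairs naturally with the listing overload above. The filter keys in the sketch are assumptions.

    # Sketch: count predictions in a time window. The timestamp keys are assumed
    # field names on CountPredictionsRequest and may not match the real TypedDict.
    total = client.POST(
        "/telemetry/prediction/count",
        json={
            "start_timestamp": "2024-01-01T00:00:00Z",  # assumed field
            "end_timestamp": "2024-02-01T00:00:00Z",    # assumed field
        },
    )
    print(f"{total} predictions recorded in January")   # total is typed as int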
  @overload
  def POST(
  self,
- path: Literal["/gpu/finetuned_embedding_model/{name_or_id}/embedding"],
+ path: Literal["/telemetry/memories"],
  *,
- params: PostGpuFinetunedEmbeddingModelByNameOrIdEmbeddingParams,
- json: EmbedRequest,
+ params: None = None,
+ json: TelemetryMemoriesRequest,
  data: None = None,
  files: None = None,
  content: None = None,
@@ -3365,17 +3495,21 @@ class OrcaClient(Client):
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
  extensions: RequestExtensions | None = None,
- ) -> list[list[float]]:
- """Embed values using a finetuned embedding model."""
+ ) -> PaginatedUnionLabeledMemoryWithFeedbackMetricsScoredMemoryWithFeedbackMetrics:
+ """
+ List memories with feedback metrics.
+ **Note**: This endpoint will ONLY return memories that have been used in a prediction.
+ If you want to query ALL memories WITHOUT feedback metrics, use the query_memoryset endpoint.
+ """
  pass

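As the docstring above notes, /telemetry/memories only returns memories that have participated in a prediction, packaged with feedback metrics in a paginated union type. A sketch; the request key is a guess, since TelemetryMemoriesRequest's fields are not visible in this diff.

    # Sketch: fetch memories with feedback metrics. The body key below is a placeholder.
    page = client.POST(
        "/telemetry/memories",
        json={"memoryset_name_or_id": "my-memoryset"},  # assumed field
    )
    # `page` is a PaginatedUnionLabeledMemoryWithFeedbackMetricsScoredMemoryWithFeedbackMetrics.
    # Memories never used in a prediction will not appear here; query the memoryset directly instead.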
  @overload
  def POST(
  self,
- path: Literal["/gpu/pretrained_embedding_model/{model_name}/embedding"],
+ path: Literal["/agents/bootstrap_classification_model"],
  *,
- params: PostGpuPretrainedEmbeddingModelByModelNameEmbeddingParams,
- json: EmbedRequest,
+ params: None = None,
+ json: BootstrapClassificationModelRequest,
  data: None = None,
  files: None = None,
  content: None = None,
@@ -3386,8 +3520,21 @@ class OrcaClient(Client):
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
  extensions: RequestExtensions | None = None,
- ) -> list[list[float]]:
- """Embed values using a pretrained embedding model."""
+ ) -> BootstrapClassificationModelResponse:
+ """
+ Bootstrap a classification model by creating a memoryset with generated memories and a classification model.
+
+ This endpoint uses the bootstrap_classification_model agent to generate:
+ 1. Memoryset configuration with appropriate settings
+ 2. Model configuration with optimal parameters
+ 3. High-quality training memories for each label
+
+ The process involves:
+ 1. Calling the agent to generate configurations and memories
+ 2. Creating a datasource from the generated memories
+ 3. Creating a memoryset from the datasource
+ 4. Creating a classification model from the memoryset
+ """
  pass

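The bootstrap overload above wraps the whole agent-driven flow described in its docstring (generate configurations and memories, create a datasource, a memoryset, and finally a model) behind a single call. A sketch; the request fields are invented for illustration and are not part of this diff.

    # Sketch: kick off the bootstrap agent. BootstrapClassificationModelRequest's
    # fields are not visible here; "task_description" and "labels" are assumptions.
    response = client.POST(
        "/agents/bootstrap_classification_model",
        json={
            "task_description": "Route support tickets by topic",  # assumed field
            "labels": ["billing", "bug", "how-to"],                 # assumed field
        },
    )
    # response is a BootstrapClassificationModelResponse describing the created
    # datasource, memoryset, and classification model.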
  def POST(
@@ -3533,10 +3680,10 @@ class OrcaClient(Client):
  @overload
  def PATCH(
  self,
- path: Literal["/classification_model/{name_or_id}"],
+ path: Literal["/gpu/memoryset/{name_or_id}/memory"],
  *,
- params: PatchClassificationModelByNameOrIdParams,
- json: PredictiveModelUpdate,
+ params: PatchGpuMemorysetByNameOrIdMemoryParams,
+ json: PatchGpuMemorysetByNameOrIdMemoryRequest,
  data: None = None,
  files: None = None,
  content: None = None,
@@ -3547,16 +3694,16 @@ class OrcaClient(Client):
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
  extensions: RequestExtensions | None = None,
- ) -> ClassificationModelMetadata:
+ ) -> LabeledMemory | ScoredMemory:
  pass

  @overload
  def PATCH(
  self,
- path: Literal["/regression_model/{name_or_id}"],
+ path: Literal["/gpu/memoryset/{name_or_id}/memories"],
  *,
- params: PatchRegressionModelByNameOrIdParams,
- json: PredictiveModelUpdate,
+ params: PatchGpuMemorysetByNameOrIdMemoriesParams,
+ json: PatchGpuMemorysetByNameOrIdMemoriesRequest,
  data: None = None,
  files: None = None,
  content: None = None,
@@ -3567,16 +3714,16 @@ class OrcaClient(Client):
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
  extensions: RequestExtensions | None = None,
- ) -> RegressionModelMetadata:
+ ) -> list[LabeledMemory] | list[ScoredMemory]:
  pass

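The two PATCH overloads above update either a single memory (/gpu/memoryset/{name_or_id}/memory) or a batch (/gpu/memoryset/{name_or_id}/memories) and return the updated LabeledMemory/ScoredMemory objects. A sketch for the single-memory case; the params and body keys are assumptions.

    # Sketch: correct the label on one memory. The memory_id/label keys are assumed;
    # the real PatchGpuMemorysetByNameOrIdMemoryRequest may differ.
    updated = client.PATCH(
        "/gpu/memoryset/{name_or_id}/memory",
        params={"name_or_id": "my-memoryset", "memory_id": "mem_123"},  # assumed params fields
        json={"label": 2},                                               # assumed body field
    )
    # `updated` is typed as LabeledMemory | ScoredMemory; the batch route returns a list instead.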
  @overload
  def PATCH(
  self,
- path: Literal["/telemetry/prediction/{prediction_id}"],
+ path: Literal["/classification_model/{name_or_id}"],
  *,
- params: PatchTelemetryPredictionByPredictionIdParams,
- json: UpdatePredictionRequest,
+ params: PatchClassificationModelByNameOrIdParams,
+ json: PredictiveModelUpdate,
  data: None = None,
  files: None = None,
  content: None = None,
@@ -3587,17 +3734,16 @@ class OrcaClient(Client):
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
  extensions: RequestExtensions | None = None,
- ) -> Any:
- """Update a prediction with new expected values, tags, or memory ID."""
+ ) -> ClassificationModelMetadata:
  pass

  @overload
  def PATCH(
  self,
- path: Literal["/gpu/memoryset/{name_or_id}/memory"],
+ path: Literal["/regression_model/{name_or_id}"],
  *,
- params: PatchGpuMemorysetByNameOrIdMemoryParams,
- json: PatchGpuMemorysetByNameOrIdMemoryRequest,
+ params: PatchRegressionModelByNameOrIdParams,
+ json: PredictiveModelUpdate,
  data: None = None,
  files: None = None,
  content: None = None,
@@ -3608,16 +3754,16 @@ class OrcaClient(Client):
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
  extensions: RequestExtensions | None = None,
- ) -> LabeledMemory | ScoredMemory:
+ ) -> RegressionModelMetadata:
  pass

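Both model PATCH overloads above share a PredictiveModelUpdate body and return the refreshed metadata (ClassificationModelMetadata or RegressionModelMetadata). A sketch; the update keys are illustrative, not the verified TypedDict fields.

    # Sketch: rename a classification model. PredictiveModelUpdate's real fields are
    # not part of this diff, so "name" and "description" are assumptions.
    metadata = client.PATCH(
        "/classification_model/{name_or_id}",
        params={"name_or_id": "my-classifier"},  # assumed path parameter
        json={"name": "ticket-router", "description": "routes support tickets"},
    )
    # The same shape against /regression_model/{name_or_id} returns RegressionModelMetadata.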
  @overload
  def PATCH(
  self,
- path: Literal["/gpu/memoryset/{name_or_id}/memories"],
+ path: Literal["/telemetry/prediction/{prediction_id}"],
  *,
- params: PatchGpuMemorysetByNameOrIdMemoriesParams,
- json: PatchGpuMemorysetByNameOrIdMemoriesRequest,
+ params: PatchTelemetryPredictionByPredictionIdParams,
+ json: UpdatePredictionRequest,
  data: None = None,
  files: None = None,
  content: None = None,
@@ -3628,7 +3774,8 @@ class OrcaClient(Client):
  follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
  timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
  extensions: RequestExtensions | None = None,
- ) -> list[LabeledMemory] | list[ScoredMemory]:
+ ) -> Any:
+ """Update a prediction with new expected values, tags, or memory ID."""
  pass

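Finally, the /telemetry/prediction/{prediction_id} PATCH above applies an UpdatePredictionRequest; per its docstring it can attach expected values, tags, or a memory ID to a recorded prediction, and the response is left untyped (Any). The keys below are assumptions inferred from that wording.

    # Sketch: tag a prediction and record the label it should have produced.
    # Field names are assumed, not taken from the actual TypedDict definition.
    client.PATCH(
        "/telemetry/prediction/{prediction_id}",
        params={"prediction_id": "pred_abc123"},  # assumed params field
        json={
            "expected_label": 1,                   # assumed field
            "tags": ["mislabeled", "reviewed"],    # assumed field
        },
    )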
  def PATCH(