orca-sdk 0.1.8__py3-none-any.whl → 0.1.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orca_sdk/_utils/analysis_ui.py +1 -1
- orca_sdk/_utils/data_parsing.py +16 -12
- orca_sdk/_utils/data_parsing_test.py +8 -8
- orca_sdk/async_client.py +96 -28
- orca_sdk/classification_model.py +184 -104
- orca_sdk/classification_model_test.py +8 -4
- orca_sdk/client.py +96 -28
- orca_sdk/credentials.py +8 -10
- orca_sdk/datasource.py +3 -3
- orca_sdk/memoryset.py +64 -38
- orca_sdk/memoryset_test.py +5 -3
- orca_sdk/regression_model.py +124 -67
- orca_sdk/regression_model_test.py +8 -4
- {orca_sdk-0.1.8.dist-info → orca_sdk-0.1.10.dist-info}/METADATA +4 -4
- {orca_sdk-0.1.8.dist-info → orca_sdk-0.1.10.dist-info}/RECORD +16 -16
- {orca_sdk-0.1.8.dist-info → orca_sdk-0.1.10.dist-info}/WHEEL +0 -0
orca_sdk/_utils/analysis_ui.py
CHANGED
@@ -111,7 +111,7 @@ def display_suggested_memory_relabels(memoryset: LabeledMemoryset):
     @gr.render(
         inputs=[memory_relabel_map, all_approved],
-        triggers=[demo.load, refresh.change, all_approved.change, memory_relabel_map.change],
+        triggers=[demo.load, refresh.change, all_approved.change, memory_relabel_map.change],  # type: ignore[arg-type]
     )
     def render_table(current_memory_relabel_map, current_all_approved):
         if len(current_memory_relabel_map):
orca_sdk/_utils/data_parsing.py
CHANGED
@@ -1,10 +1,10 @@
 import pickle
 from dataclasses import asdict, is_dataclass
 from os import PathLike
-from tempfile import TemporaryDirectory
 from typing import Any, cast
 
 from datasets import Dataset
+from datasets.exceptions import DatasetGenerationError
 from torch.utils.data import DataLoader as TorchDataLoader
 from torch.utils.data import Dataset as TorchDataset
 
@@ -15,7 +15,10 @@ def parse_dict_like(item: Any, column_names: list[str] | None = None) -> dict:
 
     if isinstance(item, tuple):
         if column_names is not None:
-
+            if len(item) != len(column_names):
+                raise ValueError(
+                    f"Tuple length ({len(item)}) does not match number of column names ({len(column_names)})"
+                )
             return {column_names[i]: item[i] for i in range(len(item))}
     elif hasattr(item, "_fields") and all(isinstance(field, str) for field in item._fields):  # type: ignore
         return {field: getattr(item, field) for field in item._fields}  # type: ignore
@@ -42,7 +45,8 @@ def parse_batch(batch: Any, column_names: list[str] | None = None) -> list[dict]
 
 
 def hf_dataset_from_torch(
-    torch_data: TorchDataLoader | TorchDataset,
+    torch_data: TorchDataLoader | TorchDataset,
+    column_names: list[str] | None = None,
 ) -> Dataset:
     """
     Create a HuggingFace Dataset from a PyTorch DataLoader or Dataset.
@@ -55,7 +59,6 @@ def hf_dataset_from_torch(
         torch_data: A PyTorch DataLoader or Dataset object to create the HuggingFace Dataset from.
         column_names: Optional list of column names to use for the dataset. If not provided,
             the column names will be inferred from the data.
-        ignore_cache: If True, the dataset will not be cached on disk.
     Returns:
         A HuggingFace Dataset object containing the data from the PyTorch DataLoader or Dataset.
     """
@@ -64,18 +67,19 @@ def hf_dataset_from_torch(
     else:
         dataloader = TorchDataLoader(torch_data, batch_size=1, collate_fn=lambda x: x)
 
-
+    # Collect data from the dataloader into a list to avoid serialization issues
+    # with Dataset.from_generator in Python 3.14 (see datasets issue #7839)
+    data_list = []
+    try:
         for batch in dataloader:
-
+            data_list.extend(parse_batch(batch, column_names=column_names))
+    except ValueError as e:
+        raise DatasetGenerationError(str(e)) from e
 
-
-        with TemporaryDirectory() as temp_dir:
-            ds = Dataset.from_generator(generator, cache_dir=temp_dir)
-    else:
-        ds = Dataset.from_generator(generator)
+    ds = Dataset.from_list(data_list)
 
     if not isinstance(ds, Dataset):
-        raise ValueError(f"Failed to create dataset from
+        raise ValueError(f"Failed to create dataset from list: {type(ds)}")
     return ds
 
 
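Note: with this change, hf_dataset_from_torch materializes the PyTorch data eagerly via Dataset.from_list instead of Dataset.from_generator (sidestepping the Python 3.14 serialization issue referenced in the comment above), and a tuple/column-name length mismatch now surfaces as DatasetGenerationError. A minimal usage sketch against the private module, using a hypothetical toy dataset class:

    from datasets import Dataset
    from datasets.exceptions import DatasetGenerationError
    from torch.utils.data import Dataset as TorchDataset

    from orca_sdk._utils.data_parsing import hf_dataset_from_torch


    class PairDataset(TorchDataset):  # hypothetical dataset yielding (value, label) tuples
        def __len__(self):
            return 3

        def __getitem__(self, idx):
            return (f"text {idx}", idx % 2)


    ds = hf_dataset_from_torch(PairDataset(), column_names=["value", "label"])
    assert isinstance(ds, Dataset) and len(ds) == 3

    try:
        hf_dataset_from_torch(PairDataset(), column_names=["value"])  # too few column names
    except DatasetGenerationError as err:
        print(err)  # wraps the underlying ValueError from parse_dict_like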
orca_sdk/_utils/data_parsing_test.py
CHANGED
@@ -29,7 +29,7 @@ class PytorchDictDataset(TorchDataset):
 def test_hf_dataset_from_torch_dict():
     # Given a Pytorch dataset that returns a dictionary for each item
     dataset = PytorchDictDataset()
-    hf_dataset = hf_dataset_from_torch(dataset
+    hf_dataset = hf_dataset_from_torch(dataset)
     # Then the HF dataset should be created successfully
     assert isinstance(hf_dataset, Dataset)
     assert len(hf_dataset) == len(dataset)
@@ -51,7 +51,7 @@ def test_hf_dataset_from_torch_tuple():
     # Given a Pytorch dataset that returns a tuple for each item
     dataset = PytorchTupleDataset()
     # And the correct number of column names passed in
-    hf_dataset = hf_dataset_from_torch(dataset, column_names=["value", "label"]
+    hf_dataset = hf_dataset_from_torch(dataset, column_names=["value", "label"])
     # Then the HF dataset should be created successfully
     assert isinstance(hf_dataset, Dataset)
     assert len(hf_dataset) == len(dataset)
@@ -63,7 +63,7 @@ def test_hf_dataset_from_torch_tuple_error():
     dataset = PytorchTupleDataset()
     # Then the HF dataset should raise an error if no column names are passed in
     with pytest.raises(DatasetGenerationError):
-        hf_dataset_from_torch(dataset
+        hf_dataset_from_torch(dataset)
 
 
 def test_hf_dataset_from_torch_tuple_error_not_enough_columns():
@@ -71,7 +71,7 @@ def test_hf_dataset_from_torch_tuple_error_not_enough_columns():
     dataset = PytorchTupleDataset()
     # Then the HF dataset should raise an error if not enough column names are passed in
     with pytest.raises(DatasetGenerationError):
-        hf_dataset_from_torch(dataset, column_names=["value"]
+        hf_dataset_from_torch(dataset, column_names=["value"])
 
 
 DatasetTuple = namedtuple("DatasetTuple", ["value", "label"])
@@ -92,7 +92,7 @@ def test_hf_dataset_from_torch_named_tuple():
     # Given a Pytorch dataset that returns a namedtuple for each item
     dataset = PytorchNamedTupleDataset()
     # And no column names are passed in
-    hf_dataset = hf_dataset_from_torch(dataset
+    hf_dataset = hf_dataset_from_torch(dataset)
     # Then the HF dataset should be created successfully
     assert isinstance(hf_dataset, Dataset)
     assert len(hf_dataset) == len(dataset)
@@ -119,7 +119,7 @@ class PytorchDataclassDataset(TorchDataset):
 def test_hf_dataset_from_torch_dataclass():
     # Given a Pytorch dataset that returns a dataclass for each item
     dataset = PytorchDataclassDataset()
-    hf_dataset = hf_dataset_from_torch(dataset
+    hf_dataset = hf_dataset_from_torch(dataset)
     # Then the HF dataset should be created successfully
     assert isinstance(hf_dataset, Dataset)
     assert len(hf_dataset) == len(dataset)
@@ -142,7 +142,7 @@ def test_hf_dataset_from_torch_invalid_dataset():
     dataset = PytorchInvalidDataset()
     # Then the HF dataset should raise an error
     with pytest.raises(DatasetGenerationError):
-        hf_dataset_from_torch(dataset
+        hf_dataset_from_torch(dataset)
 
 
 def test_hf_dataset_from_torchdataloader():
@@ -153,7 +153,7 @@ def test_hf_dataset_from_torchdataloader():
         return {"value": [item["value"] for item in x], "label": [item["label"] for item in x]}
 
     dataloader = TorchDataLoader(dataset, batch_size=3, collate_fn=collate_fn)
-    hf_dataset = hf_dataset_from_torch(dataloader
+    hf_dataset = hf_dataset_from_torch(dataloader)
     # Then the HF dataset should be created successfully
     assert isinstance(hf_dataset, Dataset)
     assert len(hf_dataset) == len(dataset)
orca_sdk/async_client.py
CHANGED
@@ -17,6 +17,7 @@ from typing import (
     Mapping,
     NotRequired,
     Self,
+    TypeAlias,
     TypedDict,
     cast,
     overload,
@@ -155,11 +156,14 @@ class ClusterMetrics(TypedDict):
     memory_count: int
 
 
-ColumnType = Literal["STRING", "FLOAT", "INT", "BOOL", "ENUM", "IMAGE", "OTHER"]
+ColumnType: TypeAlias = Literal["STRING", "FLOAT", "INT", "BOOL", "ENUM", "IMAGE", "OTHER"]
+"""
+The type of a column in a datasource
+"""
 
 
 class ConstraintViolationErrorResponse(TypedDict):
-    status_code:
+    status_code: Literal[409]
     constraint: str
 
 
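Note: the recurring change in this file is annotating module-level aliases with TypeAlias (PEP 613), which tells type checkers that the assignment defines a type rather than a plain variable. A standalone illustration of the pattern, not SDK code:

    from typing import Literal, TypeAlias

    # With the annotation, checkers accept ColumnType wherever a type is expected;
    # without it, some infer an ordinary str-valued assignment.
    ColumnType: TypeAlias = Literal["STRING", "FLOAT", "INT", "BOOL", "ENUM", "IMAGE", "OTHER"]


    def describe(column_type: ColumnType) -> str:
        return f"column of type {column_type}"


    print(describe("STRING"))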
@@ -170,6 +174,7 @@ class CountPredictionsRequest(TypedDict):
     start_timestamp: NotRequired[str | None]
     end_timestamp: NotRequired[str | None]
     memory_id: NotRequired[str | None]
+    expected_label_match: NotRequired[bool | None]
 
 
 class CreateApiKeyRequest(TypedDict):
@@ -232,7 +237,7 @@ class EmbeddingEvaluationRequest(TypedDict):
     weigh_memories: NotRequired[bool]
 
 
-EmbeddingFinetuningMethod = Literal["classification", "regression", "batch_triplet_loss"]
+EmbeddingFinetuningMethod: TypeAlias = Literal["classification", "regression", "batch_triplet_loss"]
 
 
 class FeedbackMetrics(TypedDict):
@@ -240,13 +245,55 @@ class FeedbackMetrics(TypedDict):
     count: int
 
 
-FeedbackType = Literal["CONTINUOUS", "BINARY"]
+FeedbackType: TypeAlias = Literal["CONTINUOUS", "BINARY"]
 
 
 class FilterItem(TypedDict):
-    field:
-
+    field: (
+        tuple[
+            Literal[
+                "memory_id",
+                "value",
+                "label",
+                "metadata",
+                "source_id",
+                "partition_id",
+                "created_at",
+                "updated_at",
+                "edited_at",
+                "metrics",
+                "score",
+                "labels",
+            ]
+        ]
+        | tuple[Literal["metadata"], str]
+        | tuple[
+            Literal["metrics"],
+            Literal[
+                "cluster",
+                "embedding_2d",
+                "is_duplicate",
+                "duplicate_memory_ids",
+                "has_potential_duplicates",
+                "potential_duplicate_memory_ids",
+                "anomaly_score",
+                "neighbor_label_logits",
+                "neighbor_predicted_label",
+                "neighbor_predicted_label_ambiguity",
+                "neighbor_predicted_label_confidence",
+                "current_label_neighbor_confidence",
+                "normalized_neighbor_label_entropy",
+                "neighbor_predicted_label_matches_current_label",
+                "spread",
+                "uniformity",
+                "concept_id",
+                "subconcept_id",
+            ],
+        ]
+    )
+    op: Literal["==", "!=", ">", ">=", "<", "<=", "in", "not in", "like", "contains all", "contains any"]
     value: str | int | float | bool | list[str | None] | list[int] | list[float] | list[bool] | None
+    transform: NotRequired[Literal["length"]]
 
 
 class GetDatasourceRowCountRequest(TypedDict):
 
 
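Note: FilterItem now enumerates the accepted field paths instead of leaving field untyped, and gains an optional transform. A sketch of filters that would conform to this definition, assuming FilterItem is imported from orca_sdk.async_client and using made-up values:

    anomaly_filter: FilterItem = {
        "field": ("metrics", "anomaly_score"),  # nested metrics field
        "op": ">=",
        "value": 0.9,
    }

    length_filter: FilterItem = {
        "field": ("value",),  # top-level fields are one-element tuples
        "op": ">",
        "value": 100,
        "transform": "length",  # presumably compares the length of the value
    }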
@@ -274,12 +321,17 @@ class HealthyResponse(TypedDict):
 
 
 class InternalServerErrorResponse(TypedDict):
-    status_code:
+    status_code: Literal[500]
     message: str
     request_id: str
 
 
-JobStatus = Literal[
+JobStatus: TypeAlias = Literal[
+    "INITIALIZED", "DISPATCHED", "WAITING", "PROCESSING", "COMPLETED", "FAILED", "ABORTING", "ABORTED"
+]
+"""
+Status of job in the job queue
+"""
 
 
 class JobStatusInfo(TypedDict):
@@ -346,7 +398,7 @@ class MemoryMetrics(TypedDict):
     has_potential_duplicates: NotRequired[bool]
     potential_duplicate_memory_ids: NotRequired[list[str] | None]
     cluster: NotRequired[int]
-    embedding_2d: NotRequired[
+    embedding_2d: NotRequired[tuple[float, float]]
     anomaly_score: NotRequired[float]
     neighbor_label_logits: NotRequired[list[float] | None]
     neighbor_predicted_label: NotRequired[int | None]
@@ -361,7 +413,7 @@ class MemoryMetrics(TypedDict):
     subconcept_id: NotRequired[int | None]
 
 
-MemoryType = Literal["LABELED", "SCORED"]
+MemoryType: TypeAlias = Literal["LABELED", "SCORED"]
 
 
 class MemorysetClassPatternsAnalysisConfig(TypedDict):
@@ -467,7 +519,7 @@ class MemorysetUpdate(TypedDict):
 
 
 class NotFoundErrorResponse(TypedDict):
-    status_code:
+    status_code: Literal[404]
     resource: (
         Literal[
            "org",
@@ -547,7 +599,7 @@ class PredictionFeedbackResult(TypedDict):
     new_category_ids: list[str]
 
 
-PredictionSort = list[
+PredictionSort: TypeAlias = list[tuple[Literal["timestamp", "confidence", "anomaly_score"], Literal["asc", "desc"]]]
 
 
 class PredictiveModelUpdate(TypedDict):
@@ -556,15 +608,18 @@ class PredictiveModelUpdate(TypedDict):
     locked: NotRequired[bool]
 
 
-PretrainedEmbeddingModelName = Literal[
+PretrainedEmbeddingModelName: TypeAlias = Literal[
     "CLIP_BASE", "GTE_BASE", "CDE_SMALL", "DISTILBERT", "GTE_SMALL", "MXBAI_LARGE", "E5_LARGE", "BGE_BASE", "GIST_LARGE"
 ]
+"""
+Names of pretrained embedding models that are supported by OrcaCloud
+"""
 
 
-RACHeadType = Literal["KNN", "MMOE", "FF", "BMMOE"]
+RACHeadType: TypeAlias = Literal["KNN", "MMOE", "FF", "BMMOE"]
 
 
-RARHeadType = Literal["MMOE", "KNN"]
+RARHeadType: TypeAlias = Literal["MMOE", "KNN"]
 
 
 class ROCCurve(TypedDict):
@@ -671,6 +726,7 @@ class ScorePredictionWithMemoriesAndFeedback(TypedDict):
     tags: list[str]
     explanation: str | None
     memory_id: str | None
+    is_in_dense_neighborhood: NotRequired[bool | None]
     feedbacks: list[PredictionFeedback]
 
 
@@ -742,7 +798,7 @@ class ScoredMemoryWithFeedbackMetrics(TypedDict):
 
 
 class ServiceUnavailableErrorResponse(TypedDict):
-    status_code:
+    status_code: Literal[503]
     service: str
 
 
@@ -754,7 +810,9 @@ class SubConceptMetrics(TypedDict):
     memory_count: int
 
 
-TelemetryField =
+TelemetryField: TypeAlias = (
+    tuple[Literal["feedback_metrics"], str, Literal["avg", "count"]] | tuple[Literal["lookup"], Literal["count"]]
+)
 
 
 class TelemetryFilterItem(TypedDict):
 
 
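Note: TelemetryField is now spelled out as a union of two tuple shapes. Values that would satisfy the alias (the feedback category name is made up):

    avg_thumbs_up: TelemetryField = ("feedback_metrics", "thumbs_up", "avg")
    lookup_count: TelemetryField = ("lookup", "count")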
@@ -769,11 +827,11 @@ class TelemetrySortOptions(TypedDict):
 
 
 class UnauthenticatedErrorResponse(TypedDict):
-    status_code:
+    status_code: Literal[401]
 
 
 class UnauthorizedErrorResponse(TypedDict):
-    status_code:
+    status_code: Literal[403]
     reason: str
 
 
@@ -794,7 +852,10 @@ class ValidationError(TypedDict):
     type: str
 
 
-WorkerStatus = Literal["IDLE", "BUSY", "DRAINING", "SHUTDOWN", "CRASHED"]
+WorkerStatus: TypeAlias = Literal["IDLE", "BUSY", "DRAINING", "SHUTDOWN", "CRASHED"]
+"""
+Status of worker in the worker pool
+"""
 
 
 class GetTestErrorByStatusCodeParams(TypedDict):
@@ -870,7 +931,7 @@ class PostGpuMemorysetByNameOrIdMemoryParams(TypedDict):
     name_or_id: str
 
 
-PostGpuMemorysetByNameOrIdMemoryRequest = list[LabeledMemoryInsert] | list[ScoredMemoryInsert]
+PostGpuMemorysetByNameOrIdMemoryRequest: TypeAlias = list[LabeledMemoryInsert] | list[ScoredMemoryInsert]
 
 
 class PatchGpuMemorysetByNameOrIdMemoriesParams(TypedDict):
@@ -1106,6 +1167,10 @@ class GetWorkerByWorkerIdParams(TypedDict):
 
 
 class GetTelemetryPredictionByPredictionIdParams(TypedDict):
     prediction_id: str
+    calc_neighborhood_density: NotRequired[bool]
+    """
+    Calculate neighborhood density
+    """
 
 
 class PatchTelemetryPredictionByPredictionIdParams(TypedDict):
@@ -1144,7 +1209,7 @@ class DeleteTelemetryFeedbackCategoryByNameOrIdParams(TypedDict):
     name_or_id: str
 
 
-PutTelemetryPredictionFeedbackRequest = list[PredictionFeedbackRequest]
+PutTelemetryPredictionFeedbackRequest: TypeAlias = list[PredictionFeedbackRequest]
 
 
 class GetAgentsBootstrapClassificationModelByJobIdParams(TypedDict):
@@ -1197,6 +1262,8 @@ class ClassificationMetrics(TypedDict):
     pr_auc: NotRequired[float | None]
     pr_curve: NotRequired[PRCurve | None]
     roc_curve: NotRequired[ROCCurve | None]
+    confusion_matrix: NotRequired[list[list[int]] | None]
+    warnings: NotRequired[list[str]]
 
 
 class ClassificationModelMetadata(TypedDict):
@@ -1420,7 +1487,7 @@ class HTTPValidationError(TypedDict):
 
 
 class InvalidInputErrorResponse(TypedDict):
-    status_code:
+    status_code: Literal[422]
     validation_issues: list[FieldValidationError]
 
 
@@ -1480,6 +1547,7 @@ class LabelPredictionWithMemoriesAndFeedback(TypedDict):
     tags: list[str]
     explanation: str | None
     memory_id: str | None
+    is_in_dense_neighborhood: NotRequired[bool | None]
     feedbacks: list[PredictionFeedback]
 
 
@@ -1551,10 +1619,10 @@ class ListPredictionsRequest(TypedDict):
     start_timestamp: NotRequired[str | None]
     end_timestamp: NotRequired[str | None]
     memory_id: NotRequired[str | None]
+    expected_label_match: NotRequired[bool | None]
     limit: NotRequired[int]
     offset: NotRequired[int | None]
     sort: NotRequired[PredictionSort]
-    expected_label_match: NotRequired[bool | None]
 
 
 class MemorysetAnalysisConfigs(TypedDict):
 
 
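Note: expected_label_match is now accepted by both CountPredictionsRequest and ListPredictionsRequest (in the latter it also moved up next to the other filter fields). A sketch of a list request under these definitions, with illustrative values and only optional keys shown:

    failed_predictions: ListPredictionsRequest = {
        "memory_id": "mem_123",  # made-up id
        "expected_label_match": False,  # only predictions that missed their expected label
        "limit": 50,
        "sort": [("timestamp", "desc")],  # a PredictionSort entry
    }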
@@ -1633,10 +1701,10 @@ class WorkerInfo(TypedDict):
     config: dict[str, str | float | int | bool | dict[str, str] | None]
 
 
-PatchGpuMemorysetByNameOrIdMemoryRequest = LabeledMemoryUpdate | ScoredMemoryUpdate
+PatchGpuMemorysetByNameOrIdMemoryRequest: TypeAlias = LabeledMemoryUpdate | ScoredMemoryUpdate
 
 
-PatchGpuMemorysetByNameOrIdMemoriesRequest = list[LabeledMemoryUpdate] | list[ScoredMemoryUpdate]
+PatchGpuMemorysetByNameOrIdMemoriesRequest: TypeAlias = list[LabeledMemoryUpdate] | list[ScoredMemoryUpdate]
 
 
 class CascadingEditSuggestion(TypedDict):
@@ -1864,7 +1932,7 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> Literal[True]:
         """Return true only when called with a valid root API key; otherwise 401 Unauthenticated."""
         pass
 
@@ -1898,7 +1966,7 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) ->
+    ) -> Literal[True]:
         """Returns true if the api key header is valid for the org (will be false for admin api key)"""
         pass
 
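Note: the `) -> Literal[True]:` annotations encode that these auth checks either return True or fail with an error response; they can never return False. A standalone sketch of the pattern, not the SDK's actual method:

    from typing import Literal


    def check_api_key(key: str) -> Literal[True]:
        # Either the key is valid and we return True, or we raise; callers can
        # rely on any returned value being True.
        if not key.startswith("sk_"):  # hypothetical key format
            raise PermissionError("invalid API key")
        return True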