orca-sdk 0.1.7__tar.gz → 0.1.9__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/PKG-INFO +6 -5
  2. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/_utils/analysis_ui.py +1 -1
  3. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/_utils/data_parsing.py +16 -12
  4. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/_utils/data_parsing_test.py +8 -8
  5. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/async_client.py +96 -28
  6. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/classification_model.py +184 -104
  7. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/classification_model_test.py +8 -4
  8. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/client.py +96 -28
  9. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/datasource.py +3 -3
  10. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/memoryset.py +64 -38
  11. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/memoryset_test.py +5 -3
  12. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/regression_model.py +124 -67
  13. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/regression_model_test.py +8 -4
  14. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/pyproject.toml +7 -6
  15. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/README.md +0 -0
  16. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/__init__.py +0 -0
  17. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/_shared/__init__.py +0 -0
  18. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/_shared/metrics.py +0 -0
  19. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/_shared/metrics_test.py +0 -0
  20. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/_utils/__init__.py +0 -0
  21. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/_utils/analysis_ui_style.css +0 -0
  22. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/_utils/auth.py +0 -0
  23. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/_utils/auth_test.py +0 -0
  24. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/_utils/common.py +0 -0
  25. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/_utils/pagination.py +0 -0
  26. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/_utils/pagination_test.py +0 -0
  27. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/_utils/prediction_result_ui.css +0 -0
  28. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/_utils/prediction_result_ui.py +0 -0
  29. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/_utils/tqdm_file_reader.py +0 -0
  30. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/_utils/value_parser.py +0 -0
  31. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/_utils/value_parser_test.py +0 -0
  32. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/conftest.py +0 -0
  33. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/credentials.py +0 -0
  34. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/credentials_test.py +0 -0
  35. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/datasource_test.py +0 -0
  36. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/embedding_model.py +0 -0
  37. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/embedding_model_test.py +0 -0
  38. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/job.py +0 -0
  39. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/job_test.py +0 -0
  40. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/telemetry.py +0 -0
  41. {orca_sdk-0.1.7 → orca_sdk-0.1.9}/orca_sdk/telemetry_test.py +0 -0
@@ -1,22 +1,23 @@
 Metadata-Version: 2.4
 Name: orca_sdk
-Version: 0.1.7
+Version: 0.1.9
 Summary: SDK for interacting with Orca Services
 License-Expression: Apache-2.0
 Author: Orca DB Inc.
 Author-email: dev-rel@orcadb.ai
-Requires-Python: >=3.11,<3.14
+Requires-Python: >=3.11,<3.15
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
-Requires-Dist: datasets (>=3.1.0,<4)
-Requires-Dist: gradio (>=5.44.1,<6)
+Classifier: Programming Language :: Python :: 3.14
+Requires-Dist: datasets (>=4.4.0,<5)
+Requires-Dist: gradio (>=6.0.0,<7)
 Requires-Dist: httpx (>=0.28.1)
 Requires-Dist: httpx-retries (>=0.4.3,<0.5.0)
 Requires-Dist: numpy (>=2.1.0,<3)
 Requires-Dist: pandas (>=2.2.3,<3)
-Requires-Dist: pyarrow (>=18.0.0,<19)
+Requires-Dist: pyarrow (>=22.0.0,<23)
 Requires-Dist: python-dotenv (>=1.1.0)
 Requires-Dist: scikit-learn (>=1.6.1,<2)
 Requires-Dist: torch (>=2.8.0,<3)
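
The headline change here is Python 3.14 support: the Requires-Python ceiling moves from <3.14 to <3.15, a 3.14 classifier is added, and the datasets, gradio, and pyarrow majors are bumped to match. A minimal sketch of a guard for the new interpreter bound (the check itself is illustrative, not part of the SDK):

import sys

# orca_sdk 0.1.9 declares Requires-Python >=3.11,<3.15
if not ((3, 11) <= sys.version_info[:2] < (3, 15)):
    raise RuntimeError(f"orca_sdk 0.1.9 requires Python >=3.11,<3.15, got {sys.version}")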
@@ -111,7 +111,7 @@ def display_suggested_memory_relabels(memoryset: LabeledMemoryset):
 
     @gr.render(
         inputs=[memory_relabel_map, all_approved],
-        triggers=[demo.load, refresh.change, all_approved.change, memory_relabel_map.change],
+        triggers=[demo.load, refresh.change, all_approved.change, memory_relabel_map.change],  # type: ignore[arg-type]
     )
     def render_table(current_memory_relabel_map, current_all_approved):
         if len(current_memory_relabel_map):
@@ -1,10 +1,10 @@
 import pickle
 from dataclasses import asdict, is_dataclass
 from os import PathLike
-from tempfile import TemporaryDirectory
 from typing import Any, cast
 
 from datasets import Dataset
+from datasets.exceptions import DatasetGenerationError
 from torch.utils.data import DataLoader as TorchDataLoader
 from torch.utils.data import Dataset as TorchDataset
 
@@ -15,7 +15,10 @@ def parse_dict_like(item: Any, column_names: list[str] | None = None) -> dict:
 
     if isinstance(item, tuple):
         if column_names is not None:
-            assert len(item) == len(column_names)
+            if len(item) != len(column_names):
+                raise ValueError(
+                    f"Tuple length ({len(item)}) does not match number of column names ({len(column_names)})"
+                )
             return {column_names[i]: item[i] for i in range(len(item))}
         elif hasattr(item, "_fields") and all(isinstance(field, str) for field in item._fields):  # type: ignore
             return {field: getattr(item, field) for field in item._fields}  # type: ignore
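
With the assert replaced by a ValueError, the length check survives python -O and carries a descriptive message. A minimal sketch of the new behavior, assuming parse_dict_like is importable from the module shown in the file list:

from orca_sdk._utils.data_parsing import parse_dict_like

# Matching lengths: tuple items are zipped with the column names.
assert parse_dict_like((0.5, 1), column_names=["value", "label"]) == {"value": 0.5, "label": 1}

# A mismatched tuple now raises ValueError instead of a bare AssertionError.
try:
    parse_dict_like((0.5, 1, "extra"), column_names=["value", "label"])
except ValueError as e:
    print(e)  # Tuple length (3) does not match number of column names (2)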
@@ -42,7 +45,8 @@ def parse_batch(batch: Any, column_names: list[str] | None = None) -> list[dict]
 
 
 def hf_dataset_from_torch(
-    torch_data: TorchDataLoader | TorchDataset, column_names: list[str] | None = None, ignore_cache=False
+    torch_data: TorchDataLoader | TorchDataset,
+    column_names: list[str] | None = None,
 ) -> Dataset:
     """
     Create a HuggingFace Dataset from a PyTorch DataLoader or Dataset.
@@ -55,7 +59,6 @@ def hf_dataset_from_torch(
         torch_data: A PyTorch DataLoader or Dataset object to create the HuggingFace Dataset from.
         column_names: Optional list of column names to use for the dataset. If not provided,
             the column names will be inferred from the data.
-        ignore_cache: If True, the dataset will not be cached on disk.
     Returns:
         A HuggingFace Dataset object containing the data from the PyTorch DataLoader or Dataset.
     """
@@ -64,18 +67,19 @@ def hf_dataset_from_torch(
     else:
         dataloader = TorchDataLoader(torch_data, batch_size=1, collate_fn=lambda x: x)
 
-    def generator():
+    # Collect data from the dataloader into a list to avoid serialization issues
+    # with Dataset.from_generator in Python 3.14 (see datasets issue #7839)
+    data_list = []
+    try:
         for batch in dataloader:
-            yield from parse_batch(batch, column_names=column_names)
+            data_list.extend(parse_batch(batch, column_names=column_names))
+    except ValueError as e:
+        raise DatasetGenerationError(str(e)) from e
 
-    if ignore_cache:
-        with TemporaryDirectory() as temp_dir:
-            ds = Dataset.from_generator(generator, cache_dir=temp_dir)
-    else:
-        ds = Dataset.from_generator(generator)
+    ds = Dataset.from_list(data_list)
 
     if not isinstance(ds, Dataset):
-        raise ValueError(f"Failed to create dataset from generator: {type(ds)}")
+        raise ValueError(f"Failed to create dataset from list: {type(ds)}")
     return ds
 
 
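
The ignore_cache flag is gone because Dataset.from_list builds the dataset in memory and never touches the generator cache, so there is nothing to opt out of. A usage sketch with a stand-in dataset (ToyDictDataset is illustrative, not part of the SDK):

from datasets import Dataset
from torch.utils.data import Dataset as TorchDataset

from orca_sdk._utils.data_parsing import hf_dataset_from_torch


class ToyDictDataset(TorchDataset):
    # Stand-in PyTorch dataset that yields a dict per item.
    def __len__(self) -> int:
        return 3

    def __getitem__(self, idx: int) -> dict:
        return {"value": float(idx), "label": idx % 2}


hf_dataset = hf_dataset_from_torch(ToyDictDataset())
assert isinstance(hf_dataset, Dataset)
assert len(hf_dataset) == 3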
@@ -29,7 +29,7 @@ class PytorchDictDataset(TorchDataset):
 def test_hf_dataset_from_torch_dict():
     # Given a Pytorch dataset that returns a dictionary for each item
     dataset = PytorchDictDataset()
-    hf_dataset = hf_dataset_from_torch(dataset, ignore_cache=True)
+    hf_dataset = hf_dataset_from_torch(dataset)
     # Then the HF dataset should be created successfully
     assert isinstance(hf_dataset, Dataset)
     assert len(hf_dataset) == len(dataset)
@@ -51,7 +51,7 @@ def test_hf_dataset_from_torch_tuple():
     # Given a Pytorch dataset that returns a tuple for each item
     dataset = PytorchTupleDataset()
     # And the correct number of column names passed in
-    hf_dataset = hf_dataset_from_torch(dataset, column_names=["value", "label"], ignore_cache=True)
+    hf_dataset = hf_dataset_from_torch(dataset, column_names=["value", "label"])
     # Then the HF dataset should be created successfully
     assert isinstance(hf_dataset, Dataset)
     assert len(hf_dataset) == len(dataset)
@@ -63,7 +63,7 @@ def test_hf_dataset_from_torch_tuple_error():
     dataset = PytorchTupleDataset()
     # Then the HF dataset should raise an error if no column names are passed in
     with pytest.raises(DatasetGenerationError):
-        hf_dataset_from_torch(dataset, ignore_cache=True)
+        hf_dataset_from_torch(dataset)
 
 
 def test_hf_dataset_from_torch_tuple_error_not_enough_columns():
@@ -71,7 +71,7 @@ def test_hf_dataset_from_torch_tuple_error_not_enough_columns():
     dataset = PytorchTupleDataset()
     # Then the HF dataset should raise an error if not enough column names are passed in
     with pytest.raises(DatasetGenerationError):
-        hf_dataset_from_torch(dataset, column_names=["value"], ignore_cache=True)
+        hf_dataset_from_torch(dataset, column_names=["value"])
 
 
 DatasetTuple = namedtuple("DatasetTuple", ["value", "label"])
@@ -92,7 +92,7 @@ def test_hf_dataset_from_torch_named_tuple():
     # Given a Pytorch dataset that returns a namedtuple for each item
     dataset = PytorchNamedTupleDataset()
     # And no column names are passed in
-    hf_dataset = hf_dataset_from_torch(dataset, ignore_cache=True)
+    hf_dataset = hf_dataset_from_torch(dataset)
     # Then the HF dataset should be created successfully
     assert isinstance(hf_dataset, Dataset)
     assert len(hf_dataset) == len(dataset)
@@ -119,7 +119,7 @@ class PytorchDataclassDataset(TorchDataset):
 def test_hf_dataset_from_torch_dataclass():
     # Given a Pytorch dataset that returns a dataclass for each item
     dataset = PytorchDataclassDataset()
-    hf_dataset = hf_dataset_from_torch(dataset, ignore_cache=True)
+    hf_dataset = hf_dataset_from_torch(dataset)
     # Then the HF dataset should be created successfully
     assert isinstance(hf_dataset, Dataset)
     assert len(hf_dataset) == len(dataset)
@@ -142,7 +142,7 @@ def test_hf_dataset_from_torch_invalid_dataset():
     dataset = PytorchInvalidDataset()
     # Then the HF dataset should raise an error
     with pytest.raises(DatasetGenerationError):
-        hf_dataset_from_torch(dataset, ignore_cache=True)
+        hf_dataset_from_torch(dataset)
 
 
 def test_hf_dataset_from_torchdataloader():
@@ -153,7 +153,7 @@ def test_hf_dataset_from_torchdataloader():
         return {"value": [item["value"] for item in x], "label": [item["label"] for item in x]}
 
     dataloader = TorchDataLoader(dataset, batch_size=3, collate_fn=collate_fn)
-    hf_dataset = hf_dataset_from_torch(dataloader, ignore_cache=True)
+    hf_dataset = hf_dataset_from_torch(dataloader)
     # Then the HF dataset should be created successfully
     assert isinstance(hf_dataset, Dataset)
     assert len(hf_dataset) == len(dataset)
@@ -17,6 +17,7 @@ from typing import (
     Mapping,
     NotRequired,
     Self,
+    TypeAlias,
     TypedDict,
     cast,
     overload,
@@ -155,11 +156,14 @@ class ClusterMetrics(TypedDict):
     memory_count: int
 
 
-ColumnType = Literal["STRING", "FLOAT", "INT", "BOOL", "ENUM", "IMAGE", "OTHER"]
+ColumnType: TypeAlias = Literal["STRING", "FLOAT", "INT", "BOOL", "ENUM", "IMAGE", "OTHER"]
+"""
+The type of a column in a datasource
+"""
 
 
 class ConstraintViolationErrorResponse(TypedDict):
-    status_code: NotRequired[int]
+    status_code: Literal[409]
     constraint: str
 
 
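
The recurring Foo = Literal[...] → Foo: TypeAlias = Literal[...] change throughout this file follows PEP 613: the explicit annotation marks each assignment as an intentional type alias rather than an ordinary variable. A minimal sketch mirroring the pattern (describe is a hypothetical consumer, not SDK code):

from typing import Literal, TypeAlias

ColumnType: TypeAlias = Literal["STRING", "FLOAT", "INT", "BOOL", "ENUM", "IMAGE", "OTHER"]


def describe(name: str, column_type: ColumnType) -> str:
    return f"{name}: {column_type}"


describe("age", "INT")       # accepted
# describe("age", "DOUBLE")  # rejected by a type checker: not a member of ColumnType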
@@ -170,6 +174,7 @@ class CountPredictionsRequest(TypedDict):
     start_timestamp: NotRequired[str | None]
     end_timestamp: NotRequired[str | None]
     memory_id: NotRequired[str | None]
+    expected_label_match: NotRequired[bool | None]
 
 
 class CreateApiKeyRequest(TypedDict):
@@ -232,7 +237,7 @@ class EmbeddingEvaluationRequest(TypedDict):
     weigh_memories: NotRequired[bool]
 
 
-EmbeddingFinetuningMethod = Literal["classification", "regression", "batch_triplet_loss"]
+EmbeddingFinetuningMethod: TypeAlias = Literal["classification", "regression", "batch_triplet_loss"]
 
 
 class FeedbackMetrics(TypedDict):
@@ -240,13 +245,55 @@ class FeedbackMetrics(TypedDict):
     count: int
 
 
-FeedbackType = Literal["CONTINUOUS", "BINARY"]
+FeedbackType: TypeAlias = Literal["CONTINUOUS", "BINARY"]
 
 
 class FilterItem(TypedDict):
-    field: list
-    op: Literal["==", "!=", ">", ">=", "<", "<=", "in", "not in", "like"]
+    field: (
+        tuple[
+            Literal[
+                "memory_id",
+                "value",
+                "label",
+                "metadata",
+                "source_id",
+                "partition_id",
+                "created_at",
+                "updated_at",
+                "edited_at",
+                "metrics",
+                "score",
+                "labels",
+            ]
+        ]
+        | tuple[Literal["metadata"], str]
+        | tuple[
+            Literal["metrics"],
+            Literal[
+                "cluster",
+                "embedding_2d",
+                "is_duplicate",
+                "duplicate_memory_ids",
+                "has_potential_duplicates",
+                "potential_duplicate_memory_ids",
+                "anomaly_score",
+                "neighbor_label_logits",
+                "neighbor_predicted_label",
+                "neighbor_predicted_label_ambiguity",
+                "neighbor_predicted_label_confidence",
+                "current_label_neighbor_confidence",
+                "normalized_neighbor_label_entropy",
+                "neighbor_predicted_label_matches_current_label",
+                "spread",
+                "uniformity",
+                "concept_id",
+                "subconcept_id",
+            ],
+        ]
+    )
+    op: Literal["==", "!=", ">", ">=", "<", "<=", "in", "not in", "like", "contains all", "contains any"]
     value: str | int | float | bool | list[str | None] | list[int] | list[float] | list[bool] | None
+    transform: NotRequired[Literal["length"]]
 
 
 class GetDatasourceRowCountRequest(TypedDict):
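
FilterItem.field tightens from an untyped list to a union of typed tuples, so filters can be validated statically. A sketch of filters the new shape accepts, assuming the TypedDict is importable from the client module in the file list (the values are illustrative):

from orca_sdk.client import FilterItem

# Top-level field filter: label must be one of the given values.
by_label: FilterItem = {"field": ("label",), "op": "in", "value": [0, 1]}

# Nested metrics filter, now a typed ("metrics", <metric name>) tuple.
by_anomaly: FilterItem = {"field": ("metrics", "anomaly_score"), "op": ">", "value": 0.9}

# The new "length" transform filters on the length of a field's value.
by_value_length: FilterItem = {"field": ("value",), "op": ">=", "value": 10, "transform": "length"}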
@@ -274,12 +321,17 @@ class HealthyResponse(TypedDict):
 
 
 class InternalServerErrorResponse(TypedDict):
-    status_code: NotRequired[int]
+    status_code: Literal[500]
     message: str
     request_id: str
 
 
-JobStatus = Literal["INITIALIZED", "DISPATCHED", "WAITING", "PROCESSING", "COMPLETED", "FAILED", "ABORTING", "ABORTED"]
+JobStatus: TypeAlias = Literal[
+    "INITIALIZED", "DISPATCHED", "WAITING", "PROCESSING", "COMPLETED", "FAILED", "ABORTING", "ABORTED"
+]
+"""
+Status of job in the job queue
+"""
 
 
 class JobStatusInfo(TypedDict):
@@ -346,7 +398,7 @@ class MemoryMetrics(TypedDict):
     has_potential_duplicates: NotRequired[bool]
     potential_duplicate_memory_ids: NotRequired[list[str] | None]
     cluster: NotRequired[int]
-    embedding_2d: NotRequired[list]
+    embedding_2d: NotRequired[tuple[float, float]]
     anomaly_score: NotRequired[float]
     neighbor_label_logits: NotRequired[list[float] | None]
     neighbor_predicted_label: NotRequired[int | None]
@@ -361,7 +413,7 @@ class MemoryMetrics(TypedDict):
     subconcept_id: NotRequired[int | None]
 
 
-MemoryType = Literal["LABELED", "SCORED"]
+MemoryType: TypeAlias = Literal["LABELED", "SCORED"]
 
 
 class MemorysetClassPatternsAnalysisConfig(TypedDict):
@@ -467,7 +519,7 @@ class MemorysetUpdate(TypedDict):
 
 
 class NotFoundErrorResponse(TypedDict):
-    status_code: NotRequired[int]
+    status_code: Literal[404]
     resource: (
         Literal[
             "org",
@@ -547,7 +599,7 @@ class PredictionFeedbackResult(TypedDict):
     new_category_ids: list[str]
 
 
-PredictionSort = list[list]
+PredictionSort: TypeAlias = list[tuple[Literal["timestamp", "confidence", "anomaly_score"], Literal["asc", "desc"]]]
 
 
 class PredictiveModelUpdate(TypedDict):
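
PredictionSort tightens from list[list] to a list of (field, direction) tuples, so an invalid sort key fails type checking instead of surfacing as a server error. A sketch under the same import assumption as above:

from orca_sdk.client import PredictionSort

sort: PredictionSort = [("timestamp", "desc"), ("confidence", "asc")]
# [("accuracy", "desc")] would be rejected: "accuracy" is not a valid sort field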
@@ -556,15 +608,18 @@ class PredictiveModelUpdate(TypedDict):
     locked: NotRequired[bool]
 
 
-PretrainedEmbeddingModelName = Literal[
+PretrainedEmbeddingModelName: TypeAlias = Literal[
     "CLIP_BASE", "GTE_BASE", "CDE_SMALL", "DISTILBERT", "GTE_SMALL", "MXBAI_LARGE", "E5_LARGE", "BGE_BASE", "GIST_LARGE"
 ]
+"""
+Names of pretrained embedding models that are supported by OrcaCloud
+"""
 
 
-RACHeadType = Literal["KNN", "MMOE", "FF", "BMMOE"]
+RACHeadType: TypeAlias = Literal["KNN", "MMOE", "FF", "BMMOE"]
 
 
-RARHeadType = Literal["MMOE", "KNN"]
+RARHeadType: TypeAlias = Literal["MMOE", "KNN"]
 
 
 class ROCCurve(TypedDict):
@@ -671,6 +726,7 @@ class ScorePredictionWithMemoriesAndFeedback(TypedDict):
     tags: list[str]
     explanation: str | None
     memory_id: str | None
+    is_in_dense_neighborhood: NotRequired[bool | None]
     feedbacks: list[PredictionFeedback]
 
 
@@ -742,7 +798,7 @@ class ScoredMemoryWithFeedbackMetrics(TypedDict):
 
 
 class ServiceUnavailableErrorResponse(TypedDict):
-    status_code: NotRequired[int]
+    status_code: Literal[503]
     service: str
 
 
@@ -754,7 +810,9 @@ class SubConceptMetrics(TypedDict):
     memory_count: int
 
 
-TelemetryField = list
+TelemetryField: TypeAlias = (
+    tuple[Literal["feedback_metrics"], str, Literal["avg", "count"]] | tuple[Literal["lookup"], Literal["count"]]
+)
 
 
 class TelemetryFilterItem(TypedDict):
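
TelemetryField likewise goes from a bare list to two concrete tuple shapes: a feedback-metric aggregation and a lookup count. A sketch of both forms ("thumbs_up" is a hypothetical metric name; the import path is assumed as above):

from orca_sdk.client import TelemetryField

avg_feedback: TelemetryField = ("feedback_metrics", "thumbs_up", "avg")
lookup_count: TelemetryField = ("lookup", "count")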
@@ -769,11 +827,11 @@ class TelemetrySortOptions(TypedDict):
 
 
 class UnauthenticatedErrorResponse(TypedDict):
-    status_code: NotRequired[int]
+    status_code: Literal[401]
 
 
 class UnauthorizedErrorResponse(TypedDict):
-    status_code: NotRequired[int]
+    status_code: Literal[403]
     reason: str
 
 
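
Pinning each error response's status_code to a Literal turns the responses into a discriminated union: comparing status_code narrows the type. A self-contained sketch with simplified shapes (not the SDK's full definitions):

from typing import Literal, TypedDict


class NotFound(TypedDict):
    status_code: Literal[404]
    resource: str


class Unauthorized(TypedDict):
    status_code: Literal[403]
    reason: str


def describe_error(err: NotFound | Unauthorized) -> str:
    # The Literal-typed discriminator tells the checker which keys
    # exist in each branch.
    if err["status_code"] == 404:
        return f"missing: {err['resource']}"
    return f"forbidden: {err['reason']}"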
@@ -794,7 +852,10 @@ class ValidationError(TypedDict):
     type: str
 
 
-WorkerStatus = Literal["IDLE", "BUSY", "DRAINING", "SHUTDOWN", "CRASHED"]
+WorkerStatus: TypeAlias = Literal["IDLE", "BUSY", "DRAINING", "SHUTDOWN", "CRASHED"]
+"""
+Status of worker in the worker pool
+"""
 
 
 class GetTestErrorByStatusCodeParams(TypedDict):
@@ -870,7 +931,7 @@ class PostGpuMemorysetByNameOrIdMemoryParams(TypedDict):
     name_or_id: str
 
 
-PostGpuMemorysetByNameOrIdMemoryRequest = list[LabeledMemoryInsert] | list[ScoredMemoryInsert]
+PostGpuMemorysetByNameOrIdMemoryRequest: TypeAlias = list[LabeledMemoryInsert] | list[ScoredMemoryInsert]
 
 
 class PatchGpuMemorysetByNameOrIdMemoriesParams(TypedDict):
@@ -1106,6 +1167,10 @@ class GetWorkerByWorkerIdParams(TypedDict):
 
 class GetTelemetryPredictionByPredictionIdParams(TypedDict):
     prediction_id: str
+    calc_neighborhood_density: NotRequired[bool]
+    """
+    Calculate neighborhood density
+    """
 
 
 class PatchTelemetryPredictionByPredictionIdParams(TypedDict):
@@ -1144,7 +1209,7 @@ class DeleteTelemetryFeedbackCategoryByNameOrIdParams(TypedDict):
     name_or_id: str
 
 
-PutTelemetryPredictionFeedbackRequest = list[PredictionFeedbackRequest]
+PutTelemetryPredictionFeedbackRequest: TypeAlias = list[PredictionFeedbackRequest]
 
 
 class GetAgentsBootstrapClassificationModelByJobIdParams(TypedDict):
@@ -1197,6 +1262,8 @@ class ClassificationMetrics(TypedDict):
     pr_auc: NotRequired[float | None]
     pr_curve: NotRequired[PRCurve | None]
     roc_curve: NotRequired[ROCCurve | None]
+    confusion_matrix: NotRequired[list[list[int]] | None]
+    warnings: NotRequired[list[str]]
 
 
 class ClassificationModelMetadata(TypedDict):
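
ClassificationMetrics gains an optional confusion_matrix plus a warnings list. A sketch of consuming the new fields (the metrics dict is fabricated, and the row/column orientation is an assumption the diff does not state):

metrics = {"confusion_matrix": [[48, 2], [5, 45]], "warnings": []}

cm = metrics.get("confusion_matrix")
if cm is not None:
    # Assuming rows are true labels and columns are predicted labels.
    total = sum(sum(row) for row in cm)
    correct = sum(cm[i][i] for i in range(len(cm)))
    print(f"accuracy from confusion matrix: {correct / total:.2%}")

for warning in metrics.get("warnings", []):
    print(f"metrics warning: {warning}")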
@@ -1420,7 +1487,7 @@ class HTTPValidationError(TypedDict):
 
 
 class InvalidInputErrorResponse(TypedDict):
-    status_code: NotRequired[int]
+    status_code: Literal[422]
     validation_issues: list[FieldValidationError]
 
 
@@ -1480,6 +1547,7 @@ class LabelPredictionWithMemoriesAndFeedback(TypedDict):
     tags: list[str]
     explanation: str | None
     memory_id: str | None
+    is_in_dense_neighborhood: NotRequired[bool | None]
     feedbacks: list[PredictionFeedback]
 
 
@@ -1551,10 +1619,10 @@ class ListPredictionsRequest(TypedDict):
     start_timestamp: NotRequired[str | None]
     end_timestamp: NotRequired[str | None]
     memory_id: NotRequired[str | None]
+    expected_label_match: NotRequired[bool | None]
     limit: NotRequired[int]
     offset: NotRequired[int | None]
     sort: NotRequired[PredictionSort]
-    expected_label_match: NotRequired[bool | None]
 
 
 class MemorysetAnalysisConfigs(TypedDict):
@@ -1633,10 +1701,10 @@ class WorkerInfo(TypedDict):
     config: dict[str, str | float | int | bool | dict[str, str] | None]
 
 
-PatchGpuMemorysetByNameOrIdMemoryRequest = LabeledMemoryUpdate | ScoredMemoryUpdate
+PatchGpuMemorysetByNameOrIdMemoryRequest: TypeAlias = LabeledMemoryUpdate | ScoredMemoryUpdate
 
 
-PatchGpuMemorysetByNameOrIdMemoriesRequest = list[LabeledMemoryUpdate] | list[ScoredMemoryUpdate]
+PatchGpuMemorysetByNameOrIdMemoriesRequest: TypeAlias = list[LabeledMemoryUpdate] | list[ScoredMemoryUpdate]
 
 
 class CascadingEditSuggestion(TypedDict):
@@ -1864,7 +1932,7 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) -> bool:
+    ) -> Literal[True]:
         """Return true only when called with a valid root API key; otherwise 401 Unauthenticated."""
         pass
 
@@ -1898,7 +1966,7 @@ class OrcaAsyncClient(AsyncClient):
         follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
         timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
         extensions: RequestExtensions | None = None,
-    ) -> bool:
+    ) -> Literal[True]:
         """Returns true if the api key header is valid for the org (will be false for admin api key)"""
         pass
 
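
The bool → Literal[True] return annotation encodes that a successful call always yields True; per the first docstring, an invalid key surfaces as a 401 error rather than a False return. A minimal sketch of the idiom (verify_api_key is a stand-in, not the SDK method):

from typing import Literal


def verify_api_key(key: str) -> Literal[True]:
    # Either succeed with True or raise; there is no False branch
    # for callers to handle, and the return type says so.
    if not key:
        raise PermissionError("401 Unauthenticated")
    return True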