orca-sdk 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orca_sdk/__init__.py +10 -4
- orca_sdk/_shared/__init__.py +10 -0
- orca_sdk/_shared/metrics.py +393 -0
- orca_sdk/_shared/metrics_test.py +273 -0
- orca_sdk/_utils/analysis_ui.py +12 -10
- orca_sdk/_utils/analysis_ui_style.css +0 -3
- orca_sdk/_utils/auth.py +31 -29
- orca_sdk/_utils/data_parsing.py +28 -2
- orca_sdk/_utils/data_parsing_test.py +15 -15
- orca_sdk/_utils/pagination.py +126 -0
- orca_sdk/_utils/pagination_test.py +132 -0
- orca_sdk/_utils/prediction_result_ui.py +67 -21
- orca_sdk/_utils/tqdm_file_reader.py +12 -0
- orca_sdk/_utils/value_parser.py +45 -0
- orca_sdk/_utils/value_parser_test.py +39 -0
- orca_sdk/async_client.py +3795 -0
- orca_sdk/classification_model.py +601 -129
- orca_sdk/classification_model_test.py +415 -117
- orca_sdk/client.py +3787 -0
- orca_sdk/conftest.py +184 -38
- orca_sdk/credentials.py +162 -20
- orca_sdk/credentials_test.py +100 -16
- orca_sdk/datasource.py +268 -68
- orca_sdk/datasource_test.py +266 -18
- orca_sdk/embedding_model.py +434 -82
- orca_sdk/embedding_model_test.py +66 -33
- orca_sdk/job.py +343 -0
- orca_sdk/job_test.py +108 -0
- orca_sdk/memoryset.py +1690 -324
- orca_sdk/memoryset_test.py +456 -119
- orca_sdk/regression_model.py +694 -0
- orca_sdk/regression_model_test.py +378 -0
- orca_sdk/telemetry.py +460 -143
- orca_sdk/telemetry_test.py +43 -24
- {orca_sdk-0.1.1.dist-info → orca_sdk-0.1.3.dist-info}/METADATA +34 -16
- orca_sdk-0.1.3.dist-info/RECORD +41 -0
- {orca_sdk-0.1.1.dist-info → orca_sdk-0.1.3.dist-info}/WHEEL +1 -1
- orca_sdk/_generated_api_client/__init__.py +0 -3
- orca_sdk/_generated_api_client/api/__init__.py +0 -193
- orca_sdk/_generated_api_client/api/auth/__init__.py +0 -0
- orca_sdk/_generated_api_client/api/auth/check_authentication_auth_get.py +0 -128
- orca_sdk/_generated_api_client/api/auth/create_api_key_auth_api_key_post.py +0 -170
- orca_sdk/_generated_api_client/api/auth/delete_api_key_auth_api_key_name_or_id_delete.py +0 -156
- orca_sdk/_generated_api_client/api/auth/delete_org_auth_org_delete.py +0 -130
- orca_sdk/_generated_api_client/api/auth/list_api_keys_auth_api_key_get.py +0 -127
- orca_sdk/_generated_api_client/api/classification_model/__init__.py +0 -0
- orca_sdk/_generated_api_client/api/classification_model/create_evaluation_classification_model_model_name_or_id_evaluation_post.py +0 -183
- orca_sdk/_generated_api_client/api/classification_model/create_model_classification_model_post.py +0 -170
- orca_sdk/_generated_api_client/api/classification_model/delete_evaluation_classification_model_model_name_or_id_evaluation_task_id_delete.py +0 -168
- orca_sdk/_generated_api_client/api/classification_model/delete_model_classification_model_name_or_id_delete.py +0 -154
- orca_sdk/_generated_api_client/api/classification_model/get_evaluation_classification_model_model_name_or_id_evaluation_task_id_get.py +0 -170
- orca_sdk/_generated_api_client/api/classification_model/get_model_classification_model_name_or_id_get.py +0 -156
- orca_sdk/_generated_api_client/api/classification_model/list_evaluations_classification_model_model_name_or_id_evaluation_get.py +0 -161
- orca_sdk/_generated_api_client/api/classification_model/list_models_classification_model_get.py +0 -127
- orca_sdk/_generated_api_client/api/classification_model/predict_gpu_classification_model_name_or_id_prediction_post.py +0 -190
- orca_sdk/_generated_api_client/api/datasource/__init__.py +0 -0
- orca_sdk/_generated_api_client/api/datasource/create_datasource_datasource_post.py +0 -167
- orca_sdk/_generated_api_client/api/datasource/delete_datasource_datasource_name_or_id_delete.py +0 -156
- orca_sdk/_generated_api_client/api/datasource/get_datasource_datasource_name_or_id_get.py +0 -156
- orca_sdk/_generated_api_client/api/datasource/list_datasources_datasource_get.py +0 -127
- orca_sdk/_generated_api_client/api/default/__init__.py +0 -0
- orca_sdk/_generated_api_client/api/default/healthcheck_get.py +0 -118
- orca_sdk/_generated_api_client/api/default/healthcheck_gpu_get.py +0 -118
- orca_sdk/_generated_api_client/api/finetuned_embedding_model/__init__.py +0 -0
- orca_sdk/_generated_api_client/api/finetuned_embedding_model/create_finetuned_embedding_model_finetuned_embedding_model_post.py +0 -168
- orca_sdk/_generated_api_client/api/finetuned_embedding_model/delete_finetuned_embedding_model_finetuned_embedding_model_name_or_id_delete.py +0 -156
- orca_sdk/_generated_api_client/api/finetuned_embedding_model/embed_with_finetuned_model_gpu_finetuned_embedding_model_name_or_id_embedding_post.py +0 -189
- orca_sdk/_generated_api_client/api/finetuned_embedding_model/get_finetuned_embedding_model_finetuned_embedding_model_name_or_id_get.py +0 -156
- orca_sdk/_generated_api_client/api/finetuned_embedding_model/list_finetuned_embedding_models_finetuned_embedding_model_get.py +0 -127
- orca_sdk/_generated_api_client/api/memoryset/__init__.py +0 -0
- orca_sdk/_generated_api_client/api/memoryset/clone_memoryset_memoryset_name_or_id_clone_post.py +0 -181
- orca_sdk/_generated_api_client/api/memoryset/create_analysis_memoryset_name_or_id_analysis_post.py +0 -183
- orca_sdk/_generated_api_client/api/memoryset/create_memoryset_memoryset_post.py +0 -168
- orca_sdk/_generated_api_client/api/memoryset/delete_memories_memoryset_name_or_id_memories_delete_post.py +0 -181
- orca_sdk/_generated_api_client/api/memoryset/delete_memory_memoryset_name_or_id_memory_memory_id_delete.py +0 -167
- orca_sdk/_generated_api_client/api/memoryset/delete_memoryset_memoryset_name_or_id_delete.py +0 -156
- orca_sdk/_generated_api_client/api/memoryset/get_analysis_memoryset_name_or_id_analysis_analysis_task_id_get.py +0 -169
- orca_sdk/_generated_api_client/api/memoryset/get_memories_memoryset_name_or_id_memories_get_post.py +0 -188
- orca_sdk/_generated_api_client/api/memoryset/get_memory_memoryset_name_or_id_memory_memory_id_get.py +0 -169
- orca_sdk/_generated_api_client/api/memoryset/get_memoryset_memoryset_name_or_id_get.py +0 -156
- orca_sdk/_generated_api_client/api/memoryset/insert_memories_gpu_memoryset_name_or_id_memory_post.py +0 -184
- orca_sdk/_generated_api_client/api/memoryset/list_analyses_memoryset_name_or_id_analysis_get.py +0 -260
- orca_sdk/_generated_api_client/api/memoryset/list_memorysets_memoryset_get.py +0 -127
- orca_sdk/_generated_api_client/api/memoryset/memoryset_lookup_gpu_memoryset_name_or_id_lookup_post.py +0 -193
- orca_sdk/_generated_api_client/api/memoryset/query_memoryset_memoryset_name_or_id_memories_post.py +0 -188
- orca_sdk/_generated_api_client/api/memoryset/update_memories_gpu_memoryset_name_or_id_memories_patch.py +0 -191
- orca_sdk/_generated_api_client/api/memoryset/update_memory_gpu_memoryset_name_or_id_memory_patch.py +0 -187
- orca_sdk/_generated_api_client/api/pretrained_embedding_model/__init__.py +0 -0
- orca_sdk/_generated_api_client/api/pretrained_embedding_model/embed_with_pretrained_model_gpu_pretrained_embedding_model_model_name_embedding_post.py +0 -188
- orca_sdk/_generated_api_client/api/pretrained_embedding_model/get_pretrained_embedding_model_pretrained_embedding_model_model_name_get.py +0 -157
- orca_sdk/_generated_api_client/api/pretrained_embedding_model/list_pretrained_embedding_models_pretrained_embedding_model_get.py +0 -127
- orca_sdk/_generated_api_client/api/task/__init__.py +0 -0
- orca_sdk/_generated_api_client/api/task/abort_task_task_task_id_abort_delete.py +0 -154
- orca_sdk/_generated_api_client/api/task/get_task_status_task_task_id_status_get.py +0 -156
- orca_sdk/_generated_api_client/api/task/list_tasks_task_get.py +0 -243
- orca_sdk/_generated_api_client/api/telemetry/__init__.py +0 -0
- orca_sdk/_generated_api_client/api/telemetry/drop_feedback_category_with_data_telemetry_feedback_category_name_or_id_delete.py +0 -162
- orca_sdk/_generated_api_client/api/telemetry/get_feedback_category_telemetry_feedback_category_name_or_id_get.py +0 -156
- orca_sdk/_generated_api_client/api/telemetry/get_prediction_telemetry_prediction_prediction_id_get.py +0 -157
- orca_sdk/_generated_api_client/api/telemetry/list_feedback_categories_telemetry_feedback_category_get.py +0 -127
- orca_sdk/_generated_api_client/api/telemetry/list_predictions_telemetry_prediction_post.py +0 -175
- orca_sdk/_generated_api_client/api/telemetry/record_prediction_feedback_telemetry_prediction_feedback_put.py +0 -171
- orca_sdk/_generated_api_client/api/telemetry/update_prediction_telemetry_prediction_prediction_id_patch.py +0 -181
- orca_sdk/_generated_api_client/client.py +0 -216
- orca_sdk/_generated_api_client/errors.py +0 -38
- orca_sdk/_generated_api_client/models/__init__.py +0 -159
- orca_sdk/_generated_api_client/models/analyze_neighbor_labels_result.py +0 -84
- orca_sdk/_generated_api_client/models/api_key_metadata.py +0 -118
- orca_sdk/_generated_api_client/models/base_model.py +0 -55
- orca_sdk/_generated_api_client/models/body_create_datasource_datasource_post.py +0 -176
- orca_sdk/_generated_api_client/models/classification_evaluation_result.py +0 -114
- orca_sdk/_generated_api_client/models/clone_labeled_memoryset_request.py +0 -150
- orca_sdk/_generated_api_client/models/column_info.py +0 -114
- orca_sdk/_generated_api_client/models/column_type.py +0 -14
- orca_sdk/_generated_api_client/models/conflict_error_response.py +0 -80
- orca_sdk/_generated_api_client/models/create_api_key_request.py +0 -99
- orca_sdk/_generated_api_client/models/create_api_key_response.py +0 -126
- orca_sdk/_generated_api_client/models/create_labeled_memoryset_request.py +0 -259
- orca_sdk/_generated_api_client/models/create_rac_model_request.py +0 -209
- orca_sdk/_generated_api_client/models/datasource_metadata.py +0 -142
- orca_sdk/_generated_api_client/models/delete_memories_request.py +0 -70
- orca_sdk/_generated_api_client/models/embed_request.py +0 -127
- orca_sdk/_generated_api_client/models/embedding_finetuning_method.py +0 -9
- orca_sdk/_generated_api_client/models/evaluation_request.py +0 -180
- orca_sdk/_generated_api_client/models/evaluation_response.py +0 -140
- orca_sdk/_generated_api_client/models/feedback_type.py +0 -9
- orca_sdk/_generated_api_client/models/field_validation_error.py +0 -103
- orca_sdk/_generated_api_client/models/filter_item.py +0 -231
- orca_sdk/_generated_api_client/models/filter_item_field_type_0_item.py +0 -15
- orca_sdk/_generated_api_client/models/filter_item_field_type_2_item_type_1.py +0 -16
- orca_sdk/_generated_api_client/models/filter_item_op.py +0 -16
- orca_sdk/_generated_api_client/models/find_duplicates_analysis_result.py +0 -70
- orca_sdk/_generated_api_client/models/finetune_embedding_model_request.py +0 -259
- orca_sdk/_generated_api_client/models/finetune_embedding_model_request_training_args.py +0 -66
- orca_sdk/_generated_api_client/models/finetuned_embedding_model_metadata.py +0 -166
- orca_sdk/_generated_api_client/models/get_memories_request.py +0 -70
- orca_sdk/_generated_api_client/models/internal_server_error_response.py +0 -80
- orca_sdk/_generated_api_client/models/label_class_metrics.py +0 -108
- orca_sdk/_generated_api_client/models/label_prediction_memory_lookup.py +0 -274
- orca_sdk/_generated_api_client/models/label_prediction_memory_lookup_metadata.py +0 -68
- orca_sdk/_generated_api_client/models/label_prediction_result.py +0 -101
- orca_sdk/_generated_api_client/models/label_prediction_with_memories_and_feedback.py +0 -232
- orca_sdk/_generated_api_client/models/labeled_memory.py +0 -197
- orca_sdk/_generated_api_client/models/labeled_memory_insert.py +0 -108
- orca_sdk/_generated_api_client/models/labeled_memory_insert_metadata.py +0 -68
- orca_sdk/_generated_api_client/models/labeled_memory_lookup.py +0 -258
- orca_sdk/_generated_api_client/models/labeled_memory_lookup_metadata.py +0 -68
- orca_sdk/_generated_api_client/models/labeled_memory_metadata.py +0 -68
- orca_sdk/_generated_api_client/models/labeled_memory_metrics.py +0 -277
- orca_sdk/_generated_api_client/models/labeled_memory_update.py +0 -171
- orca_sdk/_generated_api_client/models/labeled_memory_update_metadata_type_0.py +0 -68
- orca_sdk/_generated_api_client/models/labeled_memoryset_metadata.py +0 -195
- orca_sdk/_generated_api_client/models/list_analyses_memoryset_name_or_id_analysis_get_type_type_0.py +0 -9
- orca_sdk/_generated_api_client/models/list_memories_request.py +0 -104
- orca_sdk/_generated_api_client/models/list_predictions_request.py +0 -234
- orca_sdk/_generated_api_client/models/list_predictions_request_sort_item_item_type_0.py +0 -9
- orca_sdk/_generated_api_client/models/list_predictions_request_sort_item_item_type_1.py +0 -9
- orca_sdk/_generated_api_client/models/lookup_request.py +0 -81
- orca_sdk/_generated_api_client/models/memoryset_analysis_request.py +0 -83
- orca_sdk/_generated_api_client/models/memoryset_analysis_request_type.py +0 -9
- orca_sdk/_generated_api_client/models/memoryset_analysis_response.py +0 -180
- orca_sdk/_generated_api_client/models/memoryset_analysis_response_config.py +0 -66
- orca_sdk/_generated_api_client/models/memoryset_analysis_response_type.py +0 -9
- orca_sdk/_generated_api_client/models/not_found_error_response.py +0 -100
- orca_sdk/_generated_api_client/models/not_found_error_response_resource_type_0.py +0 -20
- orca_sdk/_generated_api_client/models/prediction_feedback.py +0 -157
- orca_sdk/_generated_api_client/models/prediction_feedback_category.py +0 -115
- orca_sdk/_generated_api_client/models/prediction_feedback_request.py +0 -122
- orca_sdk/_generated_api_client/models/prediction_feedback_result.py +0 -102
- orca_sdk/_generated_api_client/models/prediction_request.py +0 -169
- orca_sdk/_generated_api_client/models/pretrained_embedding_model_metadata.py +0 -97
- orca_sdk/_generated_api_client/models/pretrained_embedding_model_name.py +0 -11
- orca_sdk/_generated_api_client/models/rac_head_type.py +0 -11
- orca_sdk/_generated_api_client/models/rac_model_metadata.py +0 -191
- orca_sdk/_generated_api_client/models/service_unavailable_error_response.py +0 -80
- orca_sdk/_generated_api_client/models/task.py +0 -198
- orca_sdk/_generated_api_client/models/task_status.py +0 -14
- orca_sdk/_generated_api_client/models/task_status_info.py +0 -133
- orca_sdk/_generated_api_client/models/unauthenticated_error_response.py +0 -72
- orca_sdk/_generated_api_client/models/unauthorized_error_response.py +0 -80
- orca_sdk/_generated_api_client/models/unprocessable_input_error_response.py +0 -94
- orca_sdk/_generated_api_client/models/update_prediction_request.py +0 -93
- orca_sdk/_generated_api_client/py.typed +0 -1
- orca_sdk/_generated_api_client/types.py +0 -56
- orca_sdk/_utils/task.py +0 -73
- orca_sdk-0.1.1.dist-info/RECORD +0 -175
orca_sdk/_utils/pagination.py (new file)

```diff
@@ -0,0 +1,126 @@
+from __future__ import annotations
+
+from typing import Callable, Generic, Iterator, TypedDict, TypeVar, cast, overload
+
+T = TypeVar("T")
+R = TypeVar("R")
+
+
+class Page(TypedDict, Generic[T]):
+    items: list[T]
+    count: int
+
+
+class _PagedIterable(Generic[T, R]):
+    def __init__(
+        self,
+        fetch: Callable[[int, int], Page[T]],
+        *,
+        transform: Callable[[T], R] | None = None,
+        page_size: int = 100,
+    ) -> None:
+        """
+        Iterate over a paginated endpoint.
+
+        Parameters:
+            fetch: function to fetch a page from the endpoint `(limit: int, offset: int) -> TypedDict[{items: list[T], count: int}]`
+            transform: optional function to transform item types `(item: T) -> R`, defaults to identity
+            page_size: maximum number of items to fetch per page
+        """
+        self.fetch = fetch
+        self.transform = transform or (lambda x: cast(R, x))
+        self.page_size = page_size
+        self.offset = 0  # tracks how much has been yielded, not fetched
+        self.page = fetch(self.page_size, self.offset)  # fetch first page to populate count
+        self.count = self.page["count"]
+
+    def __iter__(self) -> Iterator[R]:
+        if self.offset >= self.count:
+            self.offset = 0
+            if len(self.page["items"]) < self.count:
+                # refetch the first page unless the cached page already contains all items
+                self.page = self.fetch(self.page_size, self.offset)
+
+        # yield prefetched first page
+        if self.offset == 0:
+            yield from map(self.transform, self.page["items"])
+            self.offset += len(self.page["items"])
+
+        # yield remaining pages one by one
+        while self.offset < self.count:
+            self.page = self.fetch(self.page_size, self.offset)
+            yield from map(self.transform, self.page["items"])
+            self.offset += len(self.page["items"])
+
+    @overload
+    def __getitem__(self, key: int) -> R:
+        pass
+
+    @overload
+    def __getitem__(self, key: slice) -> list[R]:
+        pass
+
+    def __getitem__(self, key: int | slice) -> R | list[R]:
+        if isinstance(key, int):
+            effective_key = key
+            if effective_key < 0:
+                effective_key += self.count
+            if not 0 <= effective_key < self.count:
+                raise IndexError(f"Index {key} out of range")
+            # if key is on current page, return item
+            if self.offset <= effective_key < self.offset + len(self.page["items"]):
+                return self.transform(self.page["items"][effective_key - self.offset])
+            # otherwise, fetch and return the single item
+            return self.transform(self.fetch(1, effective_key)["items"][0])
+
+        elif isinstance(key, slice):
+            start, stop, step = key.indices(self.count)
+            if step != 1:
+                raise ValueError("Stepped slicing is not supported")
+            start = start + self.count if start < 0 else start or 0
+            stop = stop + self.count if stop < 0 else stop or self.count
+            if start >= self.count or stop > self.count:
+                raise IndexError(f"Slice {key} out of range")
+            limit = min(self.page_size, stop - start)
+            if limit <= 0:
+                return []
+            items = []
+            for i in range(start, stop, limit):
+                page = self.fetch(limit, i)
+                items.extend(map(self.transform, page["items"]))
+            return items
+
+    def __len__(self) -> int:
+        return self.count
+
+
+# type checking workaround until python 3.13 allows declaring the class as PagedIterable[T, R = T]
+
+
+@overload
+def PagedIterable(
+    fetch: Callable[[int, int], Page[T]],
+    *,
+    transform: None = None,
+    page_size: int = 100,
+) -> _PagedIterable[T, T]:
+    pass
+
+
+@overload
+def PagedIterable(
+    fetch: Callable[[int, int], Page[T]],
+    *,
+    transform: Callable[[T], R],
+    page_size: int = 100,
+) -> _PagedIterable[T, R]:
+    pass
+
+
+def PagedIterable(
+    fetch: Callable[[int, int], Page[T]],
+    *,
+    transform: Callable[[T], R] | None = None,
+    page_size: int = 100,
+) -> _PagedIterable[T, R]:
+    return _PagedIterable(fetch, transform=transform, page_size=page_size)
```
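For orientation, here is a minimal sketch of how such a helper could wrap a paginated list endpoint. Everything specific in it is made up for illustration: `list_widgets`, its record shape, and the total of 250 are hypothetical, and `orca_sdk._utils.pagination` is an internal module, so the import path is shown only to make the example self-contained. Only the `(limit, offset) -> {items, count}` contract and the `PagedIterable` API come from the code above.

```python
from __future__ import annotations

from orca_sdk._utils.pagination import Page, PagedIterable

TOTAL = 250  # hypothetical number of records on the server


def list_widgets(limit: int, offset: int) -> Page[dict]:
    # Stand-in for an HTTP call such as GET /widgets?limit=...&offset=...
    ids = range(offset, min(offset + limit, TOTAL))
    return {"items": [{"id": i} for i in ids], "count": TOTAL}


# Wrap the endpoint; pages are fetched lazily, one request per page.
widgets = PagedIterable(list_widgets, transform=lambda raw: raw["id"], page_size=100)

print(len(widgets))      # 250 -- read from the "count" of the prefetched first page
print(widgets[0])        # 0   -- served from the page that is already in memory
print(widgets[120:123])  # [120, 121, 122] -- fetches only the pages the slice needs
assert sum(1 for _ in widgets) == TOTAL  # iteration walks the remaining pages
```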
orca_sdk/_utils/pagination_test.py (new file)

```diff
@@ -0,0 +1,132 @@
+import pytest
+
+from .pagination import Page, PagedIterable
+
+
+class MockEndpoint:
+    """Mock paginated endpoint for testing"""
+
+    def __init__(self, total_items: int):
+        self.items = list(range(total_items))
+        self.fetch_count = 0
+
+    def fetch(self, limit: int, offset: int) -> Page[int]:
+        self.fetch_count += 1
+        end_index = min(offset + limit, len(self.items))
+        items = self.items[offset:end_index]
+        return {"items": items, "count": len(self.items)}
+
+
+def test_basic_pagination():
+    # Given a mock endpoint with 5 items
+    endpoint = MockEndpoint(5)
+    # When doing a paginated iteration
+    paginated = PagedIterable(endpoint.fetch, page_size=2)
+    # Then we should be able to iterate through all items
+    assert list(paginated) == [0, 1, 2, 3, 4]
+    # And the length should be correct
+    assert len(paginated) == 5
+    # And 3 requests: [0,1], [2,3], [4] should have been made, one for each page
+    assert endpoint.fetch_count == 3
+
+
+def test_empty_results():
+    # Given an empty mock endpoint
+    endpoint = MockEndpoint(0)
+    # When doing a paginated iteration
+    paginated = PagedIterable(endpoint.fetch, page_size=5)
+    # Then we should get an empty list
+    assert list(paginated) == []
+    # And the length should be 0
+    assert len(paginated) == 0
+    # And only one request should have been made, for the first page
+    assert endpoint.fetch_count == 1
+
+
+def test_transform_function():
+    # Given a mock endpoint with 4 items
+    endpoint = MockEndpoint(4)
+    # And a transform function that doubles the items
+    transform = lambda x: f"2x={2*x}"
+    # When doing a paginated iteration with a transform function
+    paginated = PagedIterable(endpoint.fetch, transform=transform, page_size=2)
+    # Then we should get the transformed items
+    assert list(paginated) == ["2x=0", "2x=2", "2x=4", "2x=6"]
+
+
+def test_multiple_iterations():
+    # Given a mock endpoint with 5 items
+    endpoint = MockEndpoint(5)
+    # When we do 2 paginated iterations
+    paginated = PagedIterable(endpoint.fetch, page_size=2)
+    result1 = list(paginated)
+    result2 = list(paginated)
+    # Then we should get the same items twice
+    assert result1 == result2 == [0, 1, 2, 3, 4]
+    # And 6 requests should have been made, 3 for each iteration
+    assert endpoint.fetch_count == 6
+
+
+def test_single_page_optimization():
+    # Given a mock endpoint with 5 items
+    endpoint = MockEndpoint(5)
+    # When doing a paginated iteration with a limit that is greater than the number of items
+    paginated = PagedIterable(endpoint.fetch, page_size=10)
+    # Then we should get all items
+    assert list(paginated) == [0, 1, 2, 3, 4]
+    # And the length should be 5
+    assert len(paginated) == 5
+    # And only one request should have been made
+    assert endpoint.fetch_count == 1
+    # And a second iteration should not make any additional requests
+    assert list(paginated) == [0, 1, 2, 3, 4]
+    assert endpoint.fetch_count == 1
+
+
+def test_indexing():
+    # Given a mock endpoint with 7 items
+    endpoint = MockEndpoint(7)
+    # When creating a paginated iterable with page size 3
+    paginated = PagedIterable(endpoint.fetch, page_size=3)
+    # Then we should be able to access items by index
+    assert paginated[0] == 0
+    assert paginated[2] == 2
+    assert paginated[6] == 6
+    # And negative indices should work
+    assert paginated[-1] == 6
+    # And accessing out of bounds should raise IndexError
+    with pytest.raises(IndexError):
+        paginated[7]
+    with pytest.raises(IndexError):
+        paginated[-8]
+    # And transforms are applied
+    assert PagedIterable(endpoint.fetch, transform=lambda x: x * 10, page_size=3)[1] == 10
+
+
+def test_slicing():
+    # Given a mock endpoint with 10 items
+    endpoint = MockEndpoint(10)
+    # When creating a paginated iterable
+    paginated = PagedIterable(endpoint.fetch, page_size=3)
+    # Then we should be able to slice it
+    assert list(paginated[2:5]) == [2, 3, 4]
+    assert list(paginated[:3]) == [0, 1, 2]
+    assert list(paginated[7:]) == [7, 8, 9]
+    # And negative indices should work
+    assert list(paginated[:-5]) == [0, 1, 2, 3, 4, 5]
+    assert list(paginated[-3:]) == [7, 8, 9]
+    assert list(paginated[-5:-2]) == [5, 6, 7]
+    # And empty slices should work
+    assert list(paginated[5:5]) == []
+    # And slicing with a start and stop that are out of bounds should raise IndexError
+    with pytest.raises(IndexError):
+        list(paginated[20:25])
+    # And slicing with a step other than 1 should raise ValueError
+    with pytest.raises(ValueError):
+        list(paginated[::2])
+    with pytest.raises(ValueError):
+        list(paginated[1:8:3])
+    with pytest.raises(ValueError):
+        list(paginated[::-1])
+    # And transforms are applied
+    assert list(PagedIterable(endpoint.fetch, transform=lambda x: x * 10, page_size=3)[1:3]) == [10, 20]
```
orca_sdk/_utils/prediction_result_ui.py

```diff
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import logging
 import re
 from pathlib import Path
@@ -5,14 +7,13 @@ from typing import TYPE_CHECKING
 
 import gradio as gr
 
-from ..memoryset import LabeledMemoryLookup
+from ..memoryset import LabeledMemoryLookup, LabeledMemoryset, ScoredMemoryLookup
 
 if TYPE_CHECKING:
-    from ..telemetry import LabelPrediction
+    from ..telemetry import PredictionBase
 
 
-def inspect_prediction_result(prediction_result: "LabelPrediction"):
-    label_names = prediction_result.memoryset.label_names
+def inspect_prediction_result(prediction_result: PredictionBase):
 
     def update_label(val: str, memory: LabeledMemoryLookup, progress=gr.Progress(track_tqdm=True)):
         progress(0)
@@ -26,6 +27,12 @@ def inspect_prediction_result(prediction_result: "LabelPrediction"):
         else:
             logging.error(f"Invalid label format: {val}")
 
+    def update_score(val: float, memory: ScoredMemoryLookup, progress=gr.Progress(track_tqdm=True)):
+        progress(0)
+        memory.update(score=val)
+        progress(1)
+        return "✅ Changes saved"
+
     with gr.Blocks(
         fill_width=True,
         title="Prediction Results",
@@ -33,32 +40,71 @@ def inspect_prediction_result(prediction_result: "LabelPrediction"):
     ) as prediction_result_ui:
         gr.Markdown("# Prediction Results")
         gr.Markdown(f"**Input:** {prediction_result.input_value}")
-
+
+        if isinstance(prediction_result.memoryset, LabeledMemoryset) and prediction_result.label is not None:
+            label_names = prediction_result.memoryset.label_names
+            gr.Markdown(f"**Prediction:** {label_names[prediction_result.label]} ({prediction_result.label})")
+        else:
+            gr.Markdown(f"**Prediction:** {prediction_result.score:.2f}")
+
         gr.Markdown("### Memory Lookups")
 
         with gr.Row(equal_height=True, variant="panel"):
             with gr.Column(scale=7):
                 gr.Markdown("**Value**")
             with gr.Column(scale=3, min_width=150):
-                gr.Markdown("**Label**")
+                gr.Markdown("**Label**" if prediction_result.label is not None else "**Score**")
+
         for i, mem_lookup in enumerate(prediction_result.memory_lookups):
             with gr.Row(equal_height=True, variant="panel", elem_classes="white" if i % 2 == 0 else None):
                 with gr.Column(scale=7):
-                    gr.Markdown(
-
-
-
-
-
-
-
-                    )
-                    changes_saved = gr.HTML(lambda: "", elem_classes="success no-padding", every=15)
-                    dropdown.change(
-                        lambda val, mem_lookup=mem_lookup: update_label(val, mem_lookup),
-                        inputs=[dropdown],
-                        outputs=[changes_saved],
-                        show_progress="full",
+                    gr.Markdown(
+                        (
+                            mem_lookup.value
+                            if isinstance(mem_lookup.value, str)
+                            else "Time series data" if isinstance(mem_lookup.value, list) else "Image data"
+                        ),
+                        label="Value",
+                        height=50,
                     )
+                with gr.Column(scale=3, min_width=150):
+                    if (
+                        isinstance(prediction_result.memoryset, LabeledMemoryset)
+                        and prediction_result.label is not None
+                        and isinstance(mem_lookup, LabeledMemoryLookup)
+                    ):
+                        label_names = prediction_result.memoryset.label_names
+                        dropdown = gr.Dropdown(
+                            choices=[f"{label_name} ({i})" for i, label_name in enumerate(label_names)],
+                            label="Label",
+                            value=(
+                                f"{label_names[mem_lookup.label]} ({mem_lookup.label})"
+                                if mem_lookup.label is not None
+                                else "None"
+                            ),
+                            interactive=True,
+                            container=False,
+                        )
+                        changes_saved = gr.HTML(lambda: "", elem_classes="success no-padding", every=15)
+                        dropdown.change(
+                            lambda val, mem=mem_lookup: update_label(val, mem),
+                            inputs=[dropdown],
+                            outputs=[changes_saved],
+                            show_progress="full",
+                        )
+                    elif prediction_result.score is not None and isinstance(mem_lookup, ScoredMemoryLookup):
+                        input = gr.Number(
+                            value=mem_lookup.score,
+                            label="Score",
+                            interactive=True,
+                            container=False,
+                        )
+                        changes_saved = gr.HTML(lambda: "", elem_classes="success no-padding", every=15)
+                        input.change(
+                            lambda val, mem=mem_lookup: update_score(val, mem),
+                            inputs=[input],
+                            outputs=[changes_saved],
+                            show_progress="full",
+                        )
 
         prediction_result_ui.launch()
```
orca_sdk/_utils/tqdm_file_reader.py (new file)

```diff
@@ -0,0 +1,12 @@
+class TqdmFileReader:
+    def __init__(self, file_obj, pbar):
+        self.file_obj = file_obj
+        self.pbar = pbar
+
+    def read(self, size=-1):
+        data = self.file_obj.read(size)
+        self.pbar.update(len(data))
+        return data
+
+    def __getattr__(self, attr):
+        return getattr(self.file_obj, attr)
```
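A plausible use for this wrapper is reporting upload progress while a file is streamed in chunks. The sketch below only relies on `tqdm` and the class above; the file name and the chunked consumer loop are placeholders for whatever actually reads the file (for example an HTTP upload), and `orca_sdk._utils.tqdm_file_reader` is an internal module shown only to keep the example self-contained.

```python
import os

from tqdm import tqdm

from orca_sdk._utils.tqdm_file_reader import TqdmFileReader

path = "training_data.jsonl"  # hypothetical local file being uploaded

with open(path, "rb") as f, tqdm(
    total=os.path.getsize(path), unit="B", unit_scale=True, desc="Uploading"
) as pbar:
    reader = TqdmFileReader(f, pbar)  # every read() call advances the progress bar
    # Any consumer that reads in chunks now reports progress; a real upload
    # would hand `reader` to an HTTP client instead of this placeholder loop.
    while chunk := reader.read(1024 * 1024):
        pass
```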
orca_sdk/_utils/value_parser.py (new file)

```diff
@@ -0,0 +1,45 @@
+import base64
+import io
+from typing import cast
+
+import numpy as np
+from numpy.typing import NDArray
+from PIL import Image as pil
+
+ValueType = str | pil.Image | NDArray[np.float32]
+"""
+The type of a value in a memoryset
+
+- `str`: string
+- `pil.Image`: image
+- `NDArray[np.float32]`: univariate or multivariate timeseries
+"""
+
+
+def decode_value(value: str) -> ValueType:
+    if value.startswith("data:image"):
+        header, data = value.split(",", 1)
+        return pil.open(io.BytesIO(base64.b64decode(data)))
+
+    if value.startswith("data:numpy"):
+        header, data = value.split(",", 1)
+        return np.load(io.BytesIO(base64.b64decode(data)))
+
+    return value
+
+
+def encode_value(value: ValueType) -> str:
+    if isinstance(value, pil.Image):
+        header = f"data:image/{value.format.lower()};base64," if value.format else "data:image;base64,"
+        buffer = io.BytesIO()
+        value.save(buffer, format=value.format)
+        bytes = buffer.getvalue()
+        return header + base64.b64encode(bytes).decode("utf-8")
+
+    if isinstance(value, np.ndarray):
+        header = f"data:numpy/{value.dtype.name};base64,"
+        buffer = io.BytesIO()
+        np.save(buffer, value)
+        return header + base64.b64encode(buffer.getvalue()).decode("utf-8")
+
+    return value
```
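To make the wire format concrete: strings pass through unchanged, while images and numpy arrays are serialized into base64 data URIs whose header records the image format or array dtype. A quick sketch with arbitrary values (the tests below exercise the same round trip; the internal import path is shown only to keep the snippet self-contained):

```python
import numpy as np
from PIL import Image

from orca_sdk._utils.value_parser import decode_value, encode_value

# Strings are returned as-is.
assert encode_value("hello") == "hello"

# Arrays are written with np.save and wrapped in a "data:numpy/<dtype>" URI.
series = np.arange(6, dtype=np.float32).reshape(2, 3)
encoded = encode_value(series)
assert encoded.startswith("data:numpy/float32;base64,")
assert np.array_equal(decode_value(encoded), series)

# Images carry their format in the header when one is set.
img = Image.new("RGB", (4, 4))
img.format = "PNG"
assert encode_value(img).startswith("data:image/png;base64,")
```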
orca_sdk/_utils/value_parser_test.py (new file)

```diff
@@ -0,0 +1,39 @@
+import numpy as np
+from PIL import Image as pil
+
+from .value_parser import decode_value, encode_value
+
+
+def test_string_parsing():
+    encoded = encode_value("hello world")
+    assert encoded == "hello world"
+
+    decoded = decode_value(encoded)
+    assert decoded == "hello world"
+
+
+def test_image_parsing():
+    img = pil.new("RGB", (10, 10), color="red")
+    img.format = "PNG"
+
+    encoded = encode_value(img)
+    assert isinstance(encoded, str)
+    assert encoded.startswith("data:image/png;base64,")
+
+    decoded = decode_value(encoded)
+    assert isinstance(decoded, pil.Image)
+    assert decoded.size == img.size
+
+
+def test_timeseries_parsing():
+    timeseries = np.random.rand(20, 3).astype(np.float32)
+
+    encoded = encode_value(timeseries)
+    assert isinstance(encoded, str)
+    assert encoded.startswith(f"data:numpy/{timeseries.dtype.name};base64,")
+
+    decoded = decode_value(encoded)
+    assert isinstance(decoded, np.ndarray)
+    assert decoded.shape == timeseries.shape
+    assert decoded.dtype == timeseries.dtype
+    assert np.allclose(decoded, timeseries)
```