orca-sdk 0.1.1__py3-none-any.whl → 0.1.2__py3-none-any.whl
This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- orca_sdk/__init__.py +10 -4
- orca_sdk/_shared/__init__.py +10 -0
- orca_sdk/_shared/metrics.py +393 -0
- orca_sdk/_shared/metrics_test.py +273 -0
- orca_sdk/_utils/analysis_ui.py +12 -10
- orca_sdk/_utils/analysis_ui_style.css +0 -3
- orca_sdk/_utils/auth.py +27 -29
- orca_sdk/_utils/data_parsing.py +28 -2
- orca_sdk/_utils/data_parsing_test.py +15 -15
- orca_sdk/_utils/pagination.py +126 -0
- orca_sdk/_utils/pagination_test.py +132 -0
- orca_sdk/_utils/prediction_result_ui.py +67 -21
- orca_sdk/_utils/tqdm_file_reader.py +12 -0
- orca_sdk/_utils/value_parser.py +45 -0
- orca_sdk/_utils/value_parser_test.py +39 -0
- orca_sdk/classification_model.py +439 -129
- orca_sdk/classification_model_test.py +334 -104
- orca_sdk/client.py +3747 -0
- orca_sdk/conftest.py +164 -19
- orca_sdk/credentials.py +120 -18
- orca_sdk/credentials_test.py +20 -0
- orca_sdk/datasource.py +259 -68
- orca_sdk/datasource_test.py +242 -0
- orca_sdk/embedding_model.py +425 -82
- orca_sdk/embedding_model_test.py +39 -13
- orca_sdk/job.py +337 -0
- orca_sdk/job_test.py +108 -0
- orca_sdk/memoryset.py +1341 -305
- orca_sdk/memoryset_test.py +350 -111
- orca_sdk/regression_model.py +684 -0
- orca_sdk/regression_model_test.py +369 -0
- orca_sdk/telemetry.py +449 -143
- orca_sdk/telemetry_test.py +43 -24
- {orca_sdk-0.1.1.dist-info → orca_sdk-0.1.2.dist-info}/METADATA +34 -16
- orca_sdk-0.1.2.dist-info/RECORD +40 -0
- {orca_sdk-0.1.1.dist-info → orca_sdk-0.1.2.dist-info}/WHEEL +1 -1
- orca_sdk/_generated_api_client/__init__.py +0 -3
- orca_sdk/_generated_api_client/api/__init__.py +0 -193
- orca_sdk/_generated_api_client/api/auth/__init__.py +0 -0
- orca_sdk/_generated_api_client/api/auth/check_authentication_auth_get.py +0 -128
- orca_sdk/_generated_api_client/api/auth/create_api_key_auth_api_key_post.py +0 -170
- orca_sdk/_generated_api_client/api/auth/delete_api_key_auth_api_key_name_or_id_delete.py +0 -156
- orca_sdk/_generated_api_client/api/auth/delete_org_auth_org_delete.py +0 -130
- orca_sdk/_generated_api_client/api/auth/list_api_keys_auth_api_key_get.py +0 -127
- orca_sdk/_generated_api_client/api/classification_model/__init__.py +0 -0
- orca_sdk/_generated_api_client/api/classification_model/create_evaluation_classification_model_model_name_or_id_evaluation_post.py +0 -183
- orca_sdk/_generated_api_client/api/classification_model/create_model_classification_model_post.py +0 -170
- orca_sdk/_generated_api_client/api/classification_model/delete_evaluation_classification_model_model_name_or_id_evaluation_task_id_delete.py +0 -168
- orca_sdk/_generated_api_client/api/classification_model/delete_model_classification_model_name_or_id_delete.py +0 -154
- orca_sdk/_generated_api_client/api/classification_model/get_evaluation_classification_model_model_name_or_id_evaluation_task_id_get.py +0 -170
- orca_sdk/_generated_api_client/api/classification_model/get_model_classification_model_name_or_id_get.py +0 -156
- orca_sdk/_generated_api_client/api/classification_model/list_evaluations_classification_model_model_name_or_id_evaluation_get.py +0 -161
- orca_sdk/_generated_api_client/api/classification_model/list_models_classification_model_get.py +0 -127
- orca_sdk/_generated_api_client/api/classification_model/predict_gpu_classification_model_name_or_id_prediction_post.py +0 -190
- orca_sdk/_generated_api_client/api/datasource/__init__.py +0 -0
- orca_sdk/_generated_api_client/api/datasource/create_datasource_datasource_post.py +0 -167
- orca_sdk/_generated_api_client/api/datasource/delete_datasource_datasource_name_or_id_delete.py +0 -156
- orca_sdk/_generated_api_client/api/datasource/get_datasource_datasource_name_or_id_get.py +0 -156
- orca_sdk/_generated_api_client/api/datasource/list_datasources_datasource_get.py +0 -127
- orca_sdk/_generated_api_client/api/default/__init__.py +0 -0
- orca_sdk/_generated_api_client/api/default/healthcheck_get.py +0 -118
- orca_sdk/_generated_api_client/api/default/healthcheck_gpu_get.py +0 -118
- orca_sdk/_generated_api_client/api/finetuned_embedding_model/__init__.py +0 -0
- orca_sdk/_generated_api_client/api/finetuned_embedding_model/create_finetuned_embedding_model_finetuned_embedding_model_post.py +0 -168
- orca_sdk/_generated_api_client/api/finetuned_embedding_model/delete_finetuned_embedding_model_finetuned_embedding_model_name_or_id_delete.py +0 -156
- orca_sdk/_generated_api_client/api/finetuned_embedding_model/embed_with_finetuned_model_gpu_finetuned_embedding_model_name_or_id_embedding_post.py +0 -189
- orca_sdk/_generated_api_client/api/finetuned_embedding_model/get_finetuned_embedding_model_finetuned_embedding_model_name_or_id_get.py +0 -156
- orca_sdk/_generated_api_client/api/finetuned_embedding_model/list_finetuned_embedding_models_finetuned_embedding_model_get.py +0 -127
- orca_sdk/_generated_api_client/api/memoryset/__init__.py +0 -0
- orca_sdk/_generated_api_client/api/memoryset/clone_memoryset_memoryset_name_or_id_clone_post.py +0 -181
- orca_sdk/_generated_api_client/api/memoryset/create_analysis_memoryset_name_or_id_analysis_post.py +0 -183
- orca_sdk/_generated_api_client/api/memoryset/create_memoryset_memoryset_post.py +0 -168
- orca_sdk/_generated_api_client/api/memoryset/delete_memories_memoryset_name_or_id_memories_delete_post.py +0 -181
- orca_sdk/_generated_api_client/api/memoryset/delete_memory_memoryset_name_or_id_memory_memory_id_delete.py +0 -167
- orca_sdk/_generated_api_client/api/memoryset/delete_memoryset_memoryset_name_or_id_delete.py +0 -156
- orca_sdk/_generated_api_client/api/memoryset/get_analysis_memoryset_name_or_id_analysis_analysis_task_id_get.py +0 -169
- orca_sdk/_generated_api_client/api/memoryset/get_memories_memoryset_name_or_id_memories_get_post.py +0 -188
- orca_sdk/_generated_api_client/api/memoryset/get_memory_memoryset_name_or_id_memory_memory_id_get.py +0 -169
- orca_sdk/_generated_api_client/api/memoryset/get_memoryset_memoryset_name_or_id_get.py +0 -156
- orca_sdk/_generated_api_client/api/memoryset/insert_memories_gpu_memoryset_name_or_id_memory_post.py +0 -184
- orca_sdk/_generated_api_client/api/memoryset/list_analyses_memoryset_name_or_id_analysis_get.py +0 -260
- orca_sdk/_generated_api_client/api/memoryset/list_memorysets_memoryset_get.py +0 -127
- orca_sdk/_generated_api_client/api/memoryset/memoryset_lookup_gpu_memoryset_name_or_id_lookup_post.py +0 -193
- orca_sdk/_generated_api_client/api/memoryset/query_memoryset_memoryset_name_or_id_memories_post.py +0 -188
- orca_sdk/_generated_api_client/api/memoryset/update_memories_gpu_memoryset_name_or_id_memories_patch.py +0 -191
- orca_sdk/_generated_api_client/api/memoryset/update_memory_gpu_memoryset_name_or_id_memory_patch.py +0 -187
- orca_sdk/_generated_api_client/api/pretrained_embedding_model/__init__.py +0 -0
- orca_sdk/_generated_api_client/api/pretrained_embedding_model/embed_with_pretrained_model_gpu_pretrained_embedding_model_model_name_embedding_post.py +0 -188
- orca_sdk/_generated_api_client/api/pretrained_embedding_model/get_pretrained_embedding_model_pretrained_embedding_model_model_name_get.py +0 -157
- orca_sdk/_generated_api_client/api/pretrained_embedding_model/list_pretrained_embedding_models_pretrained_embedding_model_get.py +0 -127
- orca_sdk/_generated_api_client/api/task/__init__.py +0 -0
- orca_sdk/_generated_api_client/api/task/abort_task_task_task_id_abort_delete.py +0 -154
- orca_sdk/_generated_api_client/api/task/get_task_status_task_task_id_status_get.py +0 -156
- orca_sdk/_generated_api_client/api/task/list_tasks_task_get.py +0 -243
- orca_sdk/_generated_api_client/api/telemetry/__init__.py +0 -0
- orca_sdk/_generated_api_client/api/telemetry/drop_feedback_category_with_data_telemetry_feedback_category_name_or_id_delete.py +0 -162
- orca_sdk/_generated_api_client/api/telemetry/get_feedback_category_telemetry_feedback_category_name_or_id_get.py +0 -156
- orca_sdk/_generated_api_client/api/telemetry/get_prediction_telemetry_prediction_prediction_id_get.py +0 -157
- orca_sdk/_generated_api_client/api/telemetry/list_feedback_categories_telemetry_feedback_category_get.py +0 -127
- orca_sdk/_generated_api_client/api/telemetry/list_predictions_telemetry_prediction_post.py +0 -175
- orca_sdk/_generated_api_client/api/telemetry/record_prediction_feedback_telemetry_prediction_feedback_put.py +0 -171
- orca_sdk/_generated_api_client/api/telemetry/update_prediction_telemetry_prediction_prediction_id_patch.py +0 -181
- orca_sdk/_generated_api_client/client.py +0 -216
- orca_sdk/_generated_api_client/errors.py +0 -38
- orca_sdk/_generated_api_client/models/__init__.py +0 -159
- orca_sdk/_generated_api_client/models/analyze_neighbor_labels_result.py +0 -84
- orca_sdk/_generated_api_client/models/api_key_metadata.py +0 -118
- orca_sdk/_generated_api_client/models/base_model.py +0 -55
- orca_sdk/_generated_api_client/models/body_create_datasource_datasource_post.py +0 -176
- orca_sdk/_generated_api_client/models/classification_evaluation_result.py +0 -114
- orca_sdk/_generated_api_client/models/clone_labeled_memoryset_request.py +0 -150
- orca_sdk/_generated_api_client/models/column_info.py +0 -114
- orca_sdk/_generated_api_client/models/column_type.py +0 -14
- orca_sdk/_generated_api_client/models/conflict_error_response.py +0 -80
- orca_sdk/_generated_api_client/models/create_api_key_request.py +0 -99
- orca_sdk/_generated_api_client/models/create_api_key_response.py +0 -126
- orca_sdk/_generated_api_client/models/create_labeled_memoryset_request.py +0 -259
- orca_sdk/_generated_api_client/models/create_rac_model_request.py +0 -209
- orca_sdk/_generated_api_client/models/datasource_metadata.py +0 -142
- orca_sdk/_generated_api_client/models/delete_memories_request.py +0 -70
- orca_sdk/_generated_api_client/models/embed_request.py +0 -127
- orca_sdk/_generated_api_client/models/embedding_finetuning_method.py +0 -9
- orca_sdk/_generated_api_client/models/evaluation_request.py +0 -180
- orca_sdk/_generated_api_client/models/evaluation_response.py +0 -140
- orca_sdk/_generated_api_client/models/feedback_type.py +0 -9
- orca_sdk/_generated_api_client/models/field_validation_error.py +0 -103
- orca_sdk/_generated_api_client/models/filter_item.py +0 -231
- orca_sdk/_generated_api_client/models/filter_item_field_type_0_item.py +0 -15
- orca_sdk/_generated_api_client/models/filter_item_field_type_2_item_type_1.py +0 -16
- orca_sdk/_generated_api_client/models/filter_item_op.py +0 -16
- orca_sdk/_generated_api_client/models/find_duplicates_analysis_result.py +0 -70
- orca_sdk/_generated_api_client/models/finetune_embedding_model_request.py +0 -259
- orca_sdk/_generated_api_client/models/finetune_embedding_model_request_training_args.py +0 -66
- orca_sdk/_generated_api_client/models/finetuned_embedding_model_metadata.py +0 -166
- orca_sdk/_generated_api_client/models/get_memories_request.py +0 -70
- orca_sdk/_generated_api_client/models/internal_server_error_response.py +0 -80
- orca_sdk/_generated_api_client/models/label_class_metrics.py +0 -108
- orca_sdk/_generated_api_client/models/label_prediction_memory_lookup.py +0 -274
- orca_sdk/_generated_api_client/models/label_prediction_memory_lookup_metadata.py +0 -68
- orca_sdk/_generated_api_client/models/label_prediction_result.py +0 -101
- orca_sdk/_generated_api_client/models/label_prediction_with_memories_and_feedback.py +0 -232
- orca_sdk/_generated_api_client/models/labeled_memory.py +0 -197
- orca_sdk/_generated_api_client/models/labeled_memory_insert.py +0 -108
- orca_sdk/_generated_api_client/models/labeled_memory_insert_metadata.py +0 -68
- orca_sdk/_generated_api_client/models/labeled_memory_lookup.py +0 -258
- orca_sdk/_generated_api_client/models/labeled_memory_lookup_metadata.py +0 -68
- orca_sdk/_generated_api_client/models/labeled_memory_metadata.py +0 -68
- orca_sdk/_generated_api_client/models/labeled_memory_metrics.py +0 -277
- orca_sdk/_generated_api_client/models/labeled_memory_update.py +0 -171
- orca_sdk/_generated_api_client/models/labeled_memory_update_metadata_type_0.py +0 -68
- orca_sdk/_generated_api_client/models/labeled_memoryset_metadata.py +0 -195
- orca_sdk/_generated_api_client/models/list_analyses_memoryset_name_or_id_analysis_get_type_type_0.py +0 -9
- orca_sdk/_generated_api_client/models/list_memories_request.py +0 -104
- orca_sdk/_generated_api_client/models/list_predictions_request.py +0 -234
- orca_sdk/_generated_api_client/models/list_predictions_request_sort_item_item_type_0.py +0 -9
- orca_sdk/_generated_api_client/models/list_predictions_request_sort_item_item_type_1.py +0 -9
- orca_sdk/_generated_api_client/models/lookup_request.py +0 -81
- orca_sdk/_generated_api_client/models/memoryset_analysis_request.py +0 -83
- orca_sdk/_generated_api_client/models/memoryset_analysis_request_type.py +0 -9
- orca_sdk/_generated_api_client/models/memoryset_analysis_response.py +0 -180
- orca_sdk/_generated_api_client/models/memoryset_analysis_response_config.py +0 -66
- orca_sdk/_generated_api_client/models/memoryset_analysis_response_type.py +0 -9
- orca_sdk/_generated_api_client/models/not_found_error_response.py +0 -100
- orca_sdk/_generated_api_client/models/not_found_error_response_resource_type_0.py +0 -20
- orca_sdk/_generated_api_client/models/prediction_feedback.py +0 -157
- orca_sdk/_generated_api_client/models/prediction_feedback_category.py +0 -115
- orca_sdk/_generated_api_client/models/prediction_feedback_request.py +0 -122
- orca_sdk/_generated_api_client/models/prediction_feedback_result.py +0 -102
- orca_sdk/_generated_api_client/models/prediction_request.py +0 -169
- orca_sdk/_generated_api_client/models/pretrained_embedding_model_metadata.py +0 -97
- orca_sdk/_generated_api_client/models/pretrained_embedding_model_name.py +0 -11
- orca_sdk/_generated_api_client/models/rac_head_type.py +0 -11
- orca_sdk/_generated_api_client/models/rac_model_metadata.py +0 -191
- orca_sdk/_generated_api_client/models/service_unavailable_error_response.py +0 -80
- orca_sdk/_generated_api_client/models/task.py +0 -198
- orca_sdk/_generated_api_client/models/task_status.py +0 -14
- orca_sdk/_generated_api_client/models/task_status_info.py +0 -133
- orca_sdk/_generated_api_client/models/unauthenticated_error_response.py +0 -72
- orca_sdk/_generated_api_client/models/unauthorized_error_response.py +0 -80
- orca_sdk/_generated_api_client/models/unprocessable_input_error_response.py +0 -94
- orca_sdk/_generated_api_client/models/update_prediction_request.py +0 -93
- orca_sdk/_generated_api_client/py.typed +0 -1
- orca_sdk/_generated_api_client/types.py +0 -56
- orca_sdk/_utils/task.py +0 -73
- orca_sdk-0.1.1.dist-info/RECORD +0 -175
orca_sdk/telemetry.py
CHANGED
@@ -1,34 +1,49 @@
 from __future__ import annotations
 
 import logging
+import os
+from abc import ABC
 from datetime import datetime
-from typing import TYPE_CHECKING, Any, Iterable, overload
-
-
-
-
-from .
-    drop_feedback_category_with_data,
-    get_prediction,
-    list_feedback_categories,
-    list_predictions,
-    record_prediction_feedback,
-    update_prediction,
-)
-from ._generated_api_client.models import (
-    FeedbackType,
+from typing import TYPE_CHECKING, Any, Iterable, Literal, Self, overload
+
+from httpx import Timeout
+
+from ._utils.common import UNSET
+from .client import (
     LabelPredictionWithMemoriesAndFeedback,
-    ListPredictionsRequest,
     PredictionFeedbackCategory,
     PredictionFeedbackRequest,
+    ScorePredictionWithMemoriesAndFeedback,
     UpdatePredictionRequest,
+    orca_api,
+)
+from .memoryset import (
+    LabeledMemoryLookup,
+    LabeledMemoryset,
+    ScoredMemoryLookup,
+    ScoredMemoryset,
 )
-from ._generated_api_client.types import UNSET as CLIENT_UNSET
-from ._utils.prediction_result_ui import inspect_prediction_result
-from .memoryset import LabeledMemoryLookup, LabeledMemoryset
 
 if TYPE_CHECKING:
     from .classification_model import ClassificationModel
+    from .regression_model import RegressionModel
+
+TelemetryMode = Literal["off", "on", "sync", "async"]
+"""
+Mode for saving telemetry. One of:
+
+- `"off"`: Do not save telemetry
+- `"on"`: Save telemetry asynchronously unless the `ORCA_SAVE_TELEMETRY_SYNCHRONOUSLY` environment variable is set.
+- `"sync"`: Save telemetry synchronously
+- `"async"`: Save telemetry asynchronously
+"""
+
+
+def _get_telemetry_config(override: TelemetryMode | None = None) -> tuple[bool, bool]:
+    return (
+        override != "off",
+        os.getenv("ORCA_SAVE_TELEMETRY_SYNCHRONOUSLY", "0") != "0" or override == "sync",
+    )
 
 
 def _parse_feedback(feedback: dict[str, Any]) -> PredictionFeedbackRequest:
@@ -38,12 +53,15 @@ def _parse_feedback(feedback: dict[str, Any]) -> PredictionFeedbackRequest:
     prediction_id = feedback.get("prediction_id", None)
     if prediction_id is None:
         raise ValueError("`prediction_id` must be specified")
-
-        prediction_id
-        category_name
-
-
-
+    output: PredictionFeedbackRequest = {
+        "prediction_id": prediction_id,
+        "category_name": category,
+    }
+    if "value" in feedback:
+        output["value"] = feedback["value"]
+    if "comment" in feedback:
+        output["comment"] = feedback["comment"]
+    return output
 
 
 class FeedbackCategory:
@@ -67,11 +85,10 @@ class FeedbackCategory:
     created_at: datetime
 
     def __init__(self, category: PredictionFeedbackCategory):
-
-        self.
-        self.
-        self.
-        self.created_at = category.created_at
+        self.id = category["id"]
+        self.name = category["name"]
+        self.value_type = bool if category["type"] == "BINARY" else float
+        self.created_at = datetime.fromisoformat(category["created_at"])
 
     @classmethod
     def all(cls) -> list[FeedbackCategory]:
@@ -81,7 +98,7 @@ class FeedbackCategory:
         Returns:
             List with information about all existing feedback categories.
         """
-        return [FeedbackCategory(category) for category in
+        return [FeedbackCategory(category) for category in orca_api.GET("/telemetry/feedback_category")]
 
     @classmethod
     def drop(cls, name: str) -> None:
@@ -98,113 +115,159 @@ class FeedbackCategory:
         Raises:
            LookupError: If the category is not found.
        """
-
-        logging.info(f"
+        orca_api.DELETE("/telemetry/feedback_category/{name_or_id}", params={"name_or_id": name})
+        logging.info(f"Deleted feedback category {name} with all associated feedback")
 
    def __repr__(self):
        return "FeedbackCategory({" + f"name: {self.name}, " + f"value_type: {self.value_type}" + "})"
 
 
-class
-
-
+class AddMemorySuggestions:
+    suggestions: list[tuple[str, str]]
+    memoryset_id: str
+    model_id: str
+    prediction_id: str
 
-
-
-
-
-
-        memory_lookups: List of memories used to ground the prediction
-        input_value: Input value that this prediction was for
-        model: Model that was used to make the prediction
-        memoryset: Memoryset that was used to lookup memories to ground the prediction
-        expected_label: Optional expected label that was set for the prediction
-        tags: tags that were set for the prediction
-        feedback: Feedback recorded, mapping from category name to value
-    """
+    def __init__(self, suggestions: list[tuple[str, str]], memoryset_id: str, model_id: str, prediction_id: str):
+        self.suggestions = suggestions
+        self.memoryset_id = memoryset_id
+        self.model_id = model_id
+        self.prediction_id = prediction_id
 
-
-
-
+    def __repr__(self):
+        return (
+            "AddMemorySuggestions({"
+            + f"suggestions: {self.suggestions}, "
+            + f"memoryset_id: {self.memoryset_id}, "
+            + f"model_id: {self.model_id}, "
+            + f"prediction_id: {self.prediction_id}"
+            + "})"
+        )
+
+    def apply(self) -> None:
+        memoryset = LabeledMemoryset.open(self.memoryset_id)
+        label_name_to_label = {label_name: label for label, label_name in enumerate(memoryset.label_names)}
+        memoryset.insert(
+            [{"value": suggestion[0], "label": label_name_to_label[suggestion[1]]} for suggestion in self.suggestions]
+        )
+
+
+class PredictionBase(ABC):
+    prediction_id: str | None
    confidence: float
-
-    model: ClassificationModel
+    anomaly_score: float | None
 
    def __init__(
        self,
-        prediction_id: str,
+        prediction_id: str | None,
        *,
-        label: int,
+        label: int | None,
        label_name: str | None,
+        score: float | None,
        confidence: float,
-
-
-
+        anomaly_score: float | None,
+        memoryset: LabeledMemoryset | ScoredMemoryset,
+        model: ClassificationModel | RegressionModel,
+        telemetry: LabelPredictionWithMemoriesAndFeedback | ScorePredictionWithMemoriesAndFeedback | None = None,
+        logits: list[float] | None = None,
+        input_value: str | None = None,
    ):
-        # for internal use only, do not document
-        from .classification_model import ClassificationModel
-
        self.prediction_id = prediction_id
        self.label = label
        self.label_name = label_name
+        self.score = score
        self.confidence = confidence
-        self.
-        self.
+        self.anomaly_score = anomaly_score
+        self.memoryset = memoryset
+        self.model = model
        self.__telemetry = telemetry if telemetry else None
-
-
-        return (
-            "LabelPrediction({"
-            + f"label: <{self.label_name}: {self.label}>, "
-            + f"confidence: {self.confidence:.2f}, "
-            + f"input_value: '{str(self.input_value)[:100] + '...' if len(str(self.input_value)) > 100 else self.input_value}'"
-            + "})"
-        )
+        self.logits = logits
+        self._input_value = input_value
 
    @property
-    def _telemetry(self) -> LabelPredictionWithMemoriesAndFeedback:
+    def _telemetry(self) -> LabelPredictionWithMemoriesAndFeedback | ScorePredictionWithMemoriesAndFeedback:
        # for internal use only, do not document
        if self.__telemetry is None:
-            self.
+            if self.prediction_id is None:
+                raise ValueError("Cannot fetch telemetry with no prediction ID")
+            self.__telemetry = orca_api.GET(
+                "/telemetry/prediction/{prediction_id}", params={"prediction_id": self.prediction_id}
+            )
        return self.__telemetry
 
    @property
-    def
-
+    def input_value(self) -> str:
+        if self._input_value is not None:
+            return self._input_value
+        assert isinstance(self._telemetry["input_value"], str)
+        return self._telemetry["input_value"]
 
    @property
-    def
-
+    def memory_lookups(self) -> list[LabeledMemoryLookup] | list[ScoredMemoryLookup]:
+        if "label" in self._telemetry:
+            return [
+                LabeledMemoryLookup(self._telemetry["memoryset_id"], lookup) for lookup in self._telemetry["memories"]
+            ]
+        else:
+            return [
+                ScoredMemoryLookup(self._telemetry["memoryset_id"], lookup) for lookup in self._telemetry["memories"]
+            ]
 
    @property
    def feedback(self) -> dict[str, bool | float]:
        return {
-            f
-            f
+            f["category_name"]: (
+                f["value"] if f["category_type"] == "CONTINUOUS" else True if f["value"] == 1 else False
            )
-            for f in self._telemetry
+            for f in self._telemetry["feedbacks"]
        }
 
    @property
-    def
-        return self._telemetry
+    def tags(self) -> set[str]:
+        return set(self._telemetry["tags"])
 
    @property
-    def
-
+    def explanation(self) -> str:
+        if self._telemetry["explanation"] is None:
+            self._telemetry["explanation"] = orca_api.GET(
+                "/telemetry/prediction/{prediction_id}/explanation",
+                params={"prediction_id": self._telemetry["prediction_id"]},
+                parse_as="text",
+                timeout=30,
+            )
+        return self._telemetry["explanation"]
+
+    def explain(self, refresh: bool = False) -> None:
+        """
+        Print an explanation of the prediction as a stream of text.
+
+        Params:
+            refresh: Force the explanation agent to re-run even if an explanation already exists.
+        """
+        if not refresh and self._telemetry["explanation"] is not None:
+            print(self._telemetry["explanation"])
+        else:
+            with orca_api.stream(
+                "GET",
+                f"/telemetry/prediction/{self.prediction_id}/explanation?refresh={refresh}",
+                timeout=Timeout(connect=3, read=None),
+            ) as res:
+                for chunk in res.iter_text():
+                    print(chunk, end="")
+                print()  # final newline
 
    @overload
    @classmethod
-    def get(cls, prediction_id: str) ->
+    def get(cls, prediction_id: str) -> Self:  # type: ignore -- this takes precedence
        pass
 
    @overload
    @classmethod
-    def get(cls, prediction_id: Iterable[str]) -> list[
+    def get(cls, prediction_id: Iterable[str]) -> list[Self]:
        pass
 
    @classmethod
-    def get(cls, prediction_id: str | Iterable[str]) ->
+    def get(cls, prediction_id: str | Iterable[str]) -> Self | list[Self]:
        """
        Fetch a prediction or predictions
 
@@ -223,6 +286,7 @@ class LabelPrediction:
            LabelPrediction({
                label: <positive: 1>,
                confidence: 0.95,
+                anomaly_score: 0.1,
                input_value: "I am happy",
                memoryset: "my_memoryset",
                model: "my_model"
@@ -237,6 +301,7 @@ class LabelPrediction:
            LabelPrediction({
                label: <positive: 1>,
                confidence: 0.95,
+                anomaly_score: 0.1,
                input_value: "I am happy",
                memoryset: "my_memoryset",
                model: "my_model"
@@ -244,68 +309,73 @@ class LabelPrediction:
            LabelPrediction({
                label: <negative: 0>,
                confidence: 0.05,
+                anomaly_score: 0.2,
                input_value: "I am sad",
                memoryset: "my_memoryset", model: "my_model"
            }),
            ]
        """
-
-
+        from .classification_model import ClassificationModel
+        from .regression_model import RegressionModel
+
+        def create_prediction(
+            prediction: LabelPredictionWithMemoriesAndFeedback | ScorePredictionWithMemoriesAndFeedback,
+        ) -> Self:
+
+            if "label" in prediction:
+                memoryset = LabeledMemoryset.open(prediction["memoryset_id"])
+                model = ClassificationModel.open(prediction["model_id"])
+            else:
+                memoryset = ScoredMemoryset.open(prediction["memoryset_id"])
+                model = RegressionModel.open(prediction["model_id"])
+
            return cls(
-                prediction_id=prediction
-                label=prediction.label,
-                label_name=prediction.label_name,
-
-
-
+                prediction_id=prediction["prediction_id"],
+                label=prediction.get("label", None),
+                label_name=prediction.get("label_name", None),
+                score=prediction.get("score", None),
+                confidence=prediction["confidence"],
+                anomaly_score=prediction["anomaly_score"],
+                memoryset=memoryset,
+                model=model,
                telemetry=prediction,
            )
+
+        if isinstance(prediction_id, str):
+            return create_prediction(
+                orca_api.GET("/telemetry/prediction/{prediction_id}", params={"prediction_id": prediction_id})
+            )
        else:
            return [
-
-
-                    label=prediction.label,
-                    label_name=prediction.label_name,
-                    confidence=prediction.confidence,
-                    memoryset=prediction.memoryset_id,
-                    model=prediction.model_id,
-                    telemetry=prediction,
-                )
-                for prediction in list_predictions(body=ListPredictionsRequest(prediction_ids=list(prediction_id)))
+                create_prediction(prediction)
+                for prediction in orca_api.POST("/telemetry/prediction", json={"prediction_ids": list(prediction_id)})
            ]
 
    def refresh(self):
        """Refresh the prediction data from the OrcaCloud"""
-        self.
-
-
-        """Open a UI to inspect the memories used by this prediction"""
-        inspect_prediction_result(self)
-
-    def update(self, *, expected_label: int | None = UNSET, tags: set[str] | None = UNSET) -> None:
-        """
-        Update editable prediction properties.
-
-        Params:
-            expected_label: Value to set for the expected label, defaults to `[UNSET]` if not provided.
-            tags: Value to replace existing tags with, defaults to `[UNSET]` if not provided.
-
-        Examples:
-            Update the expected label:
-            >>> prediction.update(expected_label=1)
-
-            Add a new tag:
-            >>> prediction.update(tags=prediction.tags | {"new_tag"})
+        if self.prediction_id is None:
+            raise ValueError("Cannot refresh prediction with no prediction ID")
+        self.__dict__.update(self.get(self.prediction_id).__dict__)
 
-
-
-
-
-
-
-
-
-        )
+    def _update(
+        self,
+        *,
+        tags: set[str] | None = UNSET,
+        expected_label: int | None = UNSET,
+        expected_score: float | None = UNSET,
+    ) -> None:
+        if self.prediction_id is None:
+            raise ValueError("Cannot update prediction with no prediction ID")
+
+        payload: UpdatePredictionRequest = {}
+        if tags is not UNSET:
+            payload["tags"] = [] if tags is None else list(tags)
+        if expected_label is not UNSET:
+            payload["expected_label"] = expected_label
+        if expected_score is not UNSET:
+            payload["expected_score"] = expected_score
+        orca_api.PATCH(
+            "/telemetry/prediction/{prediction_id}", params={"prediction_id": self.prediction_id}, json=payload
        )
        self.refresh()
 
@@ -316,7 +386,7 @@ class LabelPrediction:
        Params:
            tag: Tag to add to the prediction
        """
-        self.
+        self._update(tags=self.tags | {tag})
 
    def remove_tag(self, tag: str) -> None:
        """
@@ -325,7 +395,7 @@ class LabelPrediction:
        Params:
            tag: Tag to remove from the prediction
        """
-        self.
+        self._update(tags=self.tags - {tag})
 
    def record_feedback(
        self,
@@ -361,12 +431,13 @@ class LabelPrediction:
            ValueError: If the value does not match previous value types for the category, or is a
                [`float`][float] that is not between `-1.0` and `+1.0`.
        """
-
-
+        orca_api.PUT(
+            "/telemetry/prediction/feedback",
+            json=[
                _parse_feedback(
                    {"prediction_id": self.prediction_id, "category": category, "value": value, "comment": comment}
                )
-            ]
+            ],
        )
        self.refresh()
 
@@ -380,7 +451,242 @@ class LabelPrediction:
        Raises:
            ValueError: If the category is not found.
        """
-
-
+        if self.prediction_id is None:
+            raise ValueError("Cannot delete feedback with no prediction ID")
+
+        orca_api.PUT(
+            "/telemetry/prediction/feedback",
+            json=[PredictionFeedbackRequest(prediction_id=self.prediction_id, category_name=category, value=None)],
        )
        self.refresh()
+
+    def inspect(self) -> None:
+        """
+        Display an interactive UI with the details about this prediction
+
+        Note:
+            This method is only available in Jupyter notebooks.
+        """
+        from ._utils.prediction_result_ui import inspect_prediction_result
+
+        inspect_prediction_result(self)
+
+
+class ClassificationPrediction(PredictionBase):
+    """
+    Labeled prediction result from a [`ClassificationModel`][orca_sdk.ClassificationModel]
+
+    Attributes:
+        prediction_id: Unique identifier of this prediction used for feedback
+        label: Label predicted by the model
+        label_name: Human-readable name of the label
+        confidence: Confidence of the prediction
+        anomaly_score: Anomaly score of the input
+        input_value: The input value used for the prediction
+        expected_label: Expected label for the prediction, useful when evaluating the model
+        expected_label_name: Human-readable name of the expected label
+        memory_lookups: Memories used by the model to make the prediction
+        explanation: Natural language explanation of the prediction, only available if the model
+            has the Explain API enabled
+        tags: Tags for the prediction, useful for filtering and grouping predictions
+        model: Model used to make the prediction
+        memoryset: Memoryset that was used to lookup memories to ground the prediction
+    """
+
+    label: int
+    label_name: str
+    logits: list[float] | None
+    model: ClassificationModel
+    memoryset: LabeledMemoryset
+
+    def __repr__(self):
+        return (
+            "ClassificationPrediction({"
+            + f"label: <{self.label_name}: {self.label}>, "
+            + f"confidence: {self.confidence:.2f}, "
+            + (f"anomaly_score: {self.anomaly_score:.2f}, " if self.anomaly_score is not None else "")
+            + f"input_value: '{str(self.input_value)[:100] + '...' if len(str(self.input_value)) > 100 else self.input_value}'"
+            + "})"
+        )
+
+    @property
+    def memory_lookups(self) -> list[LabeledMemoryLookup]:
+        assert "label" in self._telemetry
+        return [LabeledMemoryLookup(self._telemetry["memoryset_id"], lookup) for lookup in self._telemetry["memories"]]
+
+    @property
+    def expected_label(self) -> int | None:
+        assert "label" in self._telemetry
+        return self._telemetry["expected_label"]
+
+    @property
+    def expected_label_name(self) -> str | None:
+        assert "label" in self._telemetry
+        return self._telemetry["expected_label_name"]
+
+    def update(
+        self,
+        *,
+        tags: set[str] | None = UNSET,
+        expected_label: int | None = UNSET,
+    ) -> None:
+        """
+        Update the prediction.
+
+        Note:
+            If a field is not provided, it will default to [UNSET][orca_sdk.UNSET] and not be updated.
+
+        Params:
+            tags: New tags to set for the prediction. Set to `None` to remove all tags.
+            expected_label: New expected label to set for the prediction. Set to `None` to remove.
+        """
+        self._update(tags=tags, expected_label=expected_label)
+
+    def recommend_action(self, *, refresh: bool = False) -> tuple[str, str]:
+        """
+        Get an action recommendation for improving this prediction.
+
+        Analyzes the prediction and suggests the most effective action to improve model
+        performance, such as adding memories, detecting mislabels, removing duplicates,
+        or finetuning.
+
+        Params:
+            refresh: Force the action recommendation agent to re-run even if a recommendation already exists
+
+        Returns:
+            Tuple of (action, rationale) where:
+            - action: The recommended action ("add_memories", "detect_mislabels", "remove_duplicates", or "finetuning") that would resolve the mislabeling
+            - rationale: Explanation for why this action was recommended
+
+        Raises:
+            ValueError: If the prediction has no prediction ID
+            RuntimeError: If the lighthouse API key is not configured
+
+        Examples:
+            Get action recommendation for an incorrect prediction:
+            >>> action, rationale = prediction.recommend_action()
+            >>> print(f"Recommended action: {action}")
+            >>> print(f"Rationale: {rationale}")
+        """
+        if self.prediction_id is None:
+            raise ValueError("Cannot get action recommendation with no prediction ID")
+
+        response = orca_api.GET(
+            "/telemetry/prediction/{prediction_id}/action",
+            params={"prediction_id": self.prediction_id},
+            timeout=30,
+        )
+        return (response["action"], response["rationale"])
+
+    def generate_memory_suggestions(self, *, num_memories: int = 3) -> AddMemorySuggestions:
+        """
+        Generate synthetic memory suggestions to improve this prediction.
+
+        Creates new example memories that are similar to the input but have clearer
+        signals for the expected label. These can be added to the memoryset to improve
+        model performance on similar inputs.
+
+        Params:
+            num_memories: Number of memory suggestions to generate (default: 3)
+
+        Returns:
+            List of dictionaries that can be directly passed to memoryset.insert().
+            Each dictionary contains:
+            - "value": The suggested memory text
+            - "label": The suggested label as an integer
+
+        Raises:
+            ValueError: If the prediction has no prediction ID
+            RuntimeError: If the lighthouse API key is not configured
+
+        Examples:
+            Generate memory suggestions for an incorrect prediction:
+            >>> suggestions = prediction.generate_memory_suggestions(num_memories=3)
+            >>> for suggestion in suggestions:
+            ...     print(f"Value: {suggestion['value']}, Label: {suggestion['label']}")
+            >>>
+            >>> # Add suggestions directly to memoryset
+            >>> model.memoryset.insert(suggestions)
+        """
+        if self.prediction_id is None:
+            raise ValueError("Cannot generate memory suggestions with no prediction ID")
+
+        response = orca_api.GET(
+            "/telemetry/prediction/{prediction_id}/memory_suggestions",
+            params={"prediction_id": self.prediction_id, "num_memories": num_memories},
+            timeout=30,
+        )
+
+        return AddMemorySuggestions(
+            suggestions=[(m["value"], m["label_name"]) for m in response["memories"]],
+            memoryset_id=self.memoryset.id,
+            model_id=self.model.id,
+            prediction_id=self.prediction_id,
+        )
+
+
+class RegressionPrediction(PredictionBase):
+    """
+    Score-based prediction result from a [`RegressionModel`][orca_sdk.RegressionModel]
+
+    Attributes:
+        prediction_id: Unique identifier of this prediction used for feedback
+        score: Score predicted by the model
+        confidence: Confidence of the prediction
+        anomaly_score: Anomaly score of the input
+        input_value: The input value used for the prediction
+        expected_score: Expected score for the prediction, useful when evaluating the model
+        memory_lookups: Memories used by the model to make the prediction
+        explanation: Natural language explanation of the prediction, only available if the model
+            has the Explain API enabled
+        tags: Tags for the prediction, useful for filtering and grouping predictions
+        model: Model used to make the prediction
+        memoryset: Memoryset that was used to lookup memories to ground the prediction
+    """
+
+    score: float
+    model: RegressionModel
+    memoryset: ScoredMemoryset
+
+    def __repr__(self):
+        return (
+            "RegressionPrediction({"
+            + f"score: {self.score:.2f}, "
+            + f"confidence: {self.confidence:.2f}, "
+            + (f"anomaly_score: {self.anomaly_score:.2f}, " if self.anomaly_score is not None else "")
+            + f"input_value: '{str(self.input_value)[:100] + '...' if len(str(self.input_value)) > 100 else self.input_value}'"
+            + "})"
+        )
+
+    @property
+    def memory_lookups(self) -> list[ScoredMemoryLookup]:
+        assert "score" in self._telemetry
+        return [ScoredMemoryLookup(self._telemetry["memoryset_id"], lookup) for lookup in self._telemetry["memories"]]
+
+    @property
+    def explanation(self) -> str:
+        """The explanation for this prediction. Requires `lighthouse_client_api_key` to be set."""
+        raise NotImplementedError("Explanation is not supported for regression predictions")
+
+    @property
+    def expected_score(self) -> float | None:
+        assert "score" in self._telemetry
+        return self._telemetry["expected_score"]
+
+    def update(
+        self,
+        *,
+        tags: set[str] | None = UNSET,
+        expected_score: float | None = UNSET,
+    ) -> None:
+        """
+        Update the prediction.
+
+        Note:
+            If a field is not provided, it will default to [UNSET][orca_sdk.UNSET] and not be updated.
+
+        Params:
+            tags: New tags to set for the prediction. Set to `None` to remove all tags.
+            expected_score: New expected score to set for the prediction. Set to `None` to remove.
+        """
+        self._update(tags=tags, expected_score=expected_score)