arthur-client 1.4.1249__py3-none-any.whl → 1.4.1251__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. arthur_client/api_bindings/__init__.py +5 -0
  2. arthur_client/api_bindings/docs/CreateModelTaskJobSpec.md +2 -0
  3. arthur_client/api_bindings/docs/JobSpec.md +2 -0
  4. arthur_client/api_bindings/docs/MetricResponse.md +36 -0
  5. arthur_client/api_bindings/docs/MetricType.md +14 -0
  6. arthur_client/api_bindings/docs/ModelProblemType.md +2 -0
  7. arthur_client/api_bindings/docs/NewMetricRequest.md +32 -0
  8. arthur_client/api_bindings/docs/RelevanceMetricConfig.md +31 -0
  9. arthur_client/api_bindings/docs/TaskResponse.md +2 -0
  10. arthur_client/api_bindings/docs/TaskType.md +12 -0
  11. arthur_client/api_bindings/models/__init__.py +5 -0
  12. arthur_client/api_bindings/models/create_model_task_job_spec.py +15 -2
  13. arthur_client/api_bindings/models/metric_response.py +113 -0
  14. arthur_client/api_bindings/models/metric_type.py +38 -0
  15. arthur_client/api_bindings/models/model_problem_type.py +1 -0
  16. arthur_client/api_bindings/models/new_metric_request.py +103 -0
  17. arthur_client/api_bindings/models/relevance_metric_config.py +94 -0
  18. arthur_client/api_bindings/models/task_response.py +21 -4
  19. arthur_client/api_bindings/models/task_type.py +37 -0
  20. arthur_client/api_bindings/test/test_create_model_task_job_spec.py +19 -0
  21. arthur_client/api_bindings/test/test_job_spec.py +19 -0
  22. arthur_client/api_bindings/test/test_metric_response.py +64 -0
  23. arthur_client/api_bindings/test/test_metric_type.py +33 -0
  24. arthur_client/api_bindings/test/test_new_metric_request.py +59 -0
  25. arthur_client/api_bindings/test/test_put_task_state_cache_request.py +22 -0
  26. arthur_client/api_bindings/test/test_relevance_metric_config.py +52 -0
  27. arthur_client/api_bindings/test/test_task_read_response.py +11 -0
  28. arthur_client/api_bindings/test/test_task_response.py +13 -0
  29. arthur_client/api_bindings/test/test_task_type.py +33 -0
  30. arthur_client/api_bindings_README.md +5 -0
  31. {arthur_client-1.4.1249.dist-info → arthur_client-1.4.1251.dist-info}/METADATA +1 -1
  32. {arthur_client-1.4.1249.dist-info → arthur_client-1.4.1251.dist-info}/RECORD +33 -18
  33. {arthur_client-1.4.1249.dist-info → arthur_client-1.4.1251.dist-info}/WHEEL +0 -0
arthur_client/api_bindings/__init__.py
@@ -174,6 +174,8 @@ from arthur_client.api_bindings.models.joined_dataset import JoinedDataset
 from arthur_client.api_bindings.models.keywords_config import KeywordsConfig
 from arthur_client.api_bindings.models.list_datasets_job_spec import ListDatasetsJobSpec
 from arthur_client.api_bindings.models.list_type import ListType
+from arthur_client.api_bindings.models.metric_response import MetricResponse
+from arthur_client.api_bindings.models.metric_type import MetricType
 from arthur_client.api_bindings.models.metrics_arg_spec import MetricsArgSpec
 from arthur_client.api_bindings.models.metrics_calculation_job_spec import MetricsCalculationJobSpec
 from arthur_client.api_bindings.models.metrics_column_list_parameter_schema import MetricsColumnListParameterSchema
@@ -192,6 +194,7 @@ from arthur_client.api_bindings.models.model_metric_spec import ModelMetricSpec
 from arthur_client.api_bindings.models.model_metrics_schedule import ModelMetricsSchedule
 from arthur_client.api_bindings.models.model_problem_type import ModelProblemType
 from arthur_client.api_bindings.models.models_sort import ModelsSort
+from arthur_client.api_bindings.models.new_metric_request import NewMetricRequest
 from arthur_client.api_bindings.models.new_rule_request import NewRuleRequest
 from arthur_client.api_bindings.models.not_found_error import NotFoundError
 from arthur_client.api_bindings.models.numeric_metric import NumericMetric
@@ -276,6 +279,7 @@ from arthur_client.api_bindings.models.put_task_state_cache_request import PutTa
 from arthur_client.api_bindings.models.regenerate_task_validation_key_job_spec import RegenerateTaskValidationKeyJobSpec
 from arthur_client.api_bindings.models.regex_config import RegexConfig
 from arthur_client.api_bindings.models.register_user import RegisterUser
+from arthur_client.api_bindings.models.relevance_metric_config import RelevanceMetricConfig
 from arthur_client.api_bindings.models.reported_custom_aggregation import ReportedCustomAggregation
 from arthur_client.api_bindings.models.resource_kind import ResourceKind
 from arthur_client.api_bindings.models.resource_list_aggregation_spec_schema import ResourceListAggregationSpecSchema
@@ -323,6 +327,7 @@ from arthur_client.api_bindings.models.task_connection_info import TaskConnectio
 from arthur_client.api_bindings.models.task_mutation_response import TaskMutationResponse
 from arthur_client.api_bindings.models.task_read_response import TaskReadResponse
 from arthur_client.api_bindings.models.task_response import TaskResponse
+from arthur_client.api_bindings.models.task_type import TaskType
 from arthur_client.api_bindings.models.task_validation_api_key import TaskValidationAPIKey
 from arthur_client.api_bindings.models.task_validation_key_regeneration_response import TaskValidationKeyRegenerationResponse
 from arthur_client.api_bindings.models.toxicity_config import ToxicityConfig
arthur_client/api_bindings/docs/CreateModelTaskJobSpec.md
@@ -10,6 +10,8 @@ Name | Type | Description | Notes
 **task_name** | **str** | The name of the task. |
 **onboarding_identifier** | **str** | | [optional]
 **initial_rules** | [**List[NewRuleRequest]**](NewRuleRequest.md) | The initial rules to apply to the created model. |
+**task_type** | [**TaskType**](TaskType.md) | The type of task to create. | [optional]
+**initial_metrics** | [**List[NewMetricRequest]**](NewMetricRequest.md) | The initial metrics to apply to agentic tasks. |
 
 ## Example
 
arthur_client/api_bindings/docs/JobSpec.md
@@ -21,6 +21,8 @@ Name | Type | Description | Notes
 **task_name** | **str** | The name of the task. |
 **onboarding_identifier** | **str** | | [optional]
 **initial_rules** | [**List[NewRuleRequest]**](NewRuleRequest.md) | The initial rules to apply to the created model. |
+**task_type** | [**TaskType**](TaskType.md) | The type of task to create. | [optional]
+**initial_metrics** | [**List[NewMetricRequest]**](NewMetricRequest.md) | The initial metrics to apply to agentic tasks. |
 **rules_to_enable** | **List[str]** | The list of rule IDs to enable on the task. | [optional]
 **rules_to_disable** | **List[str]** | The list of rule IDs to disable on the task. | [optional]
 **rules_to_archive** | **List[str]** | The list of rule IDs to archive on the task. | [optional]
arthur_client/api_bindings/docs/MetricResponse.md
@@ -0,0 +1,36 @@
+# MetricResponse
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**id** | **str** | ID of the Metric |
+**name** | **str** | Name of the Metric |
+**type** | [**MetricType**](MetricType.md) | Type of the Metric |
+**metric_metadata** | **str** | Metadata of the Metric |
+**config** | **str** | | [optional]
+**created_at** | **datetime** | Time the Metric was created in unix milliseconds |
+**updated_at** | **datetime** | Time the Metric was updated in unix milliseconds |
+**enabled** | **bool** | | [optional]
+
+## Example
+
+```python
+from arthur_client.api_bindings.models.metric_response import MetricResponse
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of MetricResponse from a JSON string
+metric_response_instance = MetricResponse.from_json(json)
+# print the JSON string representation of the object
+print(MetricResponse.to_json())
+
+# convert the object into a dict
+metric_response_dict = metric_response_instance.to_dict()
+# create an instance of MetricResponse from a dict
+metric_response_from_dict = MetricResponse.from_dict(metric_response_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
arthur_client/api_bindings/docs/MetricType.md
@@ -0,0 +1,14 @@
+# MetricType
+
+
+## Enum
+
+* `QUERYRELEVANCE` (value: `'QueryRelevance'`)
+
+* `RESPONSERELEVANCE` (value: `'ResponseRelevance'`)
+
+* `TOOLSELECTION` (value: `'ToolSelection'`)
+
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
arthur_client/api_bindings/docs/ModelProblemType.md
@@ -13,6 +13,8 @@
 
 * `MULTICLASS_CLASSIFICATION` (value: `'multiclass_classification'`)
 
+* `AGENTIC_TRACE` (value: `'agentic_trace'`)
+
 [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
 
 
arthur_client/api_bindings/docs/NewMetricRequest.md
@@ -0,0 +1,32 @@
+# NewMetricRequest
+
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**type** | [**MetricType**](MetricType.md) | Type of the metric. It can only be one of QueryRelevance, ResponseRelevance, ToolSelection |
+**name** | **str** | Name of metric |
+**metric_metadata** | **str** | Additional metadata for the metric |
+**config** | [**RelevanceMetricConfig**](RelevanceMetricConfig.md) | | [optional]
+
+## Example
+
+```python
+from arthur_client.api_bindings.models.new_metric_request import NewMetricRequest
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of NewMetricRequest from a JSON string
+new_metric_request_instance = NewMetricRequest.from_json(json)
+# print the JSON string representation of the object
+print(NewMetricRequest.to_json())
+
+# convert the object into a dict
+new_metric_request_dict = new_metric_request_instance.to_dict()
+# create an instance of NewMetricRequest from a dict
+new_metric_request_from_dict = NewMetricRequest.from_dict(new_metric_request_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
arthur_client/api_bindings/docs/RelevanceMetricConfig.md
@@ -0,0 +1,31 @@
+# RelevanceMetricConfig
+
+Configuration for relevance metrics including QueryRelevance and ResponseRelevance
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**relevance_threshold** | **float** | | [optional]
+**use_llm_judge** | **bool** | Whether to use LLM as a judge for relevance scoring | [optional] [default to True]
+
+## Example
+
+```python
+from arthur_client.api_bindings.models.relevance_metric_config import RelevanceMetricConfig
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of RelevanceMetricConfig from a JSON string
+relevance_metric_config_instance = RelevanceMetricConfig.from_json(json)
+# print the JSON string representation of the object
+print(RelevanceMetricConfig.to_json())
+
+# convert the object into a dict
+relevance_metric_config_dict = relevance_metric_config_instance.to_dict()
+# create an instance of RelevanceMetricConfig from a dict
+relevance_metric_config_from_dict = RelevanceMetricConfig.from_dict(relevance_metric_config_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
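A minimal sketch of how the new request models compose, using only classes introduced in this release; the metric name, metadata, and threshold values are illustrative.

```python
from arthur_client.api_bindings.models.metric_type import MetricType
from arthur_client.api_bindings.models.new_metric_request import NewMetricRequest
from arthur_client.api_bindings.models.relevance_metric_config import RelevanceMetricConfig

# Build a relevance metric request; config is optional and use_llm_judge defaults to True.
metric = NewMetricRequest(
    type=MetricType.RESPONSERELEVANCE,
    name="response_relevance",
    metric_metadata="{}",
    config=RelevanceMetricConfig(relevance_threshold=0.8),
)
assert metric.to_dict()["config"]["use_llm_judge"] is True
print(metric.to_json())
```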
arthur_client/api_bindings/docs/TaskResponse.md
@@ -9,7 +9,9 @@ Name | Type | Description | Notes
 **name** | **str** | Name of the task |
 **created_at** | **int** | Time the task was created in unix milliseconds |
 **updated_at** | **int** | Time the task was created in unix milliseconds |
+**is_agentic** | **bool** | Whether the task is agentic or not |
 **rules** | [**List[RuleResponse]**](RuleResponse.md) | List of all the rules for the task. |
+**metrics** | [**List[MetricResponse]**](MetricResponse.md) | | [optional]
 
 ## Example
 
arthur_client/api_bindings/docs/TaskType.md
@@ -0,0 +1,12 @@
+# TaskType
+
+
+## Enum
+
+* `TRADITIONAL` (value: `'traditional'`)
+
+* `AGENTIC` (value: `'agentic'`)
+
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
arthur_client/api_bindings/models/__init__.py
@@ -131,6 +131,8 @@ from arthur_client.api_bindings.models.joined_dataset import JoinedDataset
 from arthur_client.api_bindings.models.keywords_config import KeywordsConfig
 from arthur_client.api_bindings.models.list_datasets_job_spec import ListDatasetsJobSpec
 from arthur_client.api_bindings.models.list_type import ListType
+from arthur_client.api_bindings.models.metric_response import MetricResponse
+from arthur_client.api_bindings.models.metric_type import MetricType
 from arthur_client.api_bindings.models.metrics_arg_spec import MetricsArgSpec
 from arthur_client.api_bindings.models.metrics_calculation_job_spec import MetricsCalculationJobSpec
 from arthur_client.api_bindings.models.metrics_column_list_parameter_schema import MetricsColumnListParameterSchema
@@ -149,6 +151,7 @@ from arthur_client.api_bindings.models.model_metric_spec import ModelMetricSpec
 from arthur_client.api_bindings.models.model_metrics_schedule import ModelMetricsSchedule
 from arthur_client.api_bindings.models.model_problem_type import ModelProblemType
 from arthur_client.api_bindings.models.models_sort import ModelsSort
+from arthur_client.api_bindings.models.new_metric_request import NewMetricRequest
 from arthur_client.api_bindings.models.new_rule_request import NewRuleRequest
 from arthur_client.api_bindings.models.not_found_error import NotFoundError
 from arthur_client.api_bindings.models.numeric_metric import NumericMetric
@@ -233,6 +236,7 @@ from arthur_client.api_bindings.models.put_task_state_cache_request import PutTa
 from arthur_client.api_bindings.models.regenerate_task_validation_key_job_spec import RegenerateTaskValidationKeyJobSpec
 from arthur_client.api_bindings.models.regex_config import RegexConfig
 from arthur_client.api_bindings.models.register_user import RegisterUser
+from arthur_client.api_bindings.models.relevance_metric_config import RelevanceMetricConfig
 from arthur_client.api_bindings.models.reported_custom_aggregation import ReportedCustomAggregation
 from arthur_client.api_bindings.models.resource_kind import ResourceKind
 from arthur_client.api_bindings.models.resource_list_aggregation_spec_schema import ResourceListAggregationSpecSchema
@@ -280,6 +284,7 @@ from arthur_client.api_bindings.models.task_connection_info import TaskConnectio
 from arthur_client.api_bindings.models.task_mutation_response import TaskMutationResponse
 from arthur_client.api_bindings.models.task_read_response import TaskReadResponse
 from arthur_client.api_bindings.models.task_response import TaskResponse
+from arthur_client.api_bindings.models.task_type import TaskType
 from arthur_client.api_bindings.models.task_validation_api_key import TaskValidationAPIKey
 from arthur_client.api_bindings.models.task_validation_key_regeneration_response import TaskValidationKeyRegenerationResponse
 from arthur_client.api_bindings.models.toxicity_config import ToxicityConfig
arthur_client/api_bindings/models/create_model_task_job_spec.py
@@ -19,7 +19,9 @@ import json
 
 from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator
 from typing import Any, ClassVar, Dict, List, Optional
+from arthur_client.api_bindings.models.new_metric_request import NewMetricRequest
 from arthur_client.api_bindings.models.new_rule_request import NewRuleRequest
+from arthur_client.api_bindings.models.task_type import TaskType
 from typing import Optional, Set
 from typing_extensions import Self
 
@@ -32,7 +34,9 @@ class CreateModelTaskJobSpec(BaseModel):
     task_name: StrictStr = Field(description="The name of the task.")
     onboarding_identifier: Optional[StrictStr] = None
     initial_rules: List[NewRuleRequest] = Field(description="The initial rules to apply to the created model.")
-    __properties: ClassVar[List[str]] = ["job_type", "connector_id", "task_name", "onboarding_identifier", "initial_rules"]
+    task_type: Optional[TaskType] = Field(default=None, description="The type of task to create.")
+    initial_metrics: List[NewMetricRequest] = Field(description="The initial metrics to apply to agentic tasks.")
+    __properties: ClassVar[List[str]] = ["job_type", "connector_id", "task_name", "onboarding_identifier", "initial_rules", "task_type", "initial_metrics"]
 
     @field_validator('job_type')
     def job_type_validate_enum(cls, value):
@@ -90,6 +94,13 @@ class CreateModelTaskJobSpec(BaseModel):
             if _item_initial_rules:
                 _items.append(_item_initial_rules.to_dict())
             _dict['initial_rules'] = _items
+        # override the default output from pydantic by calling `to_dict()` of each item in initial_metrics (list)
+        _items = []
+        if self.initial_metrics:
+            for _item_initial_metrics in self.initial_metrics:
+                if _item_initial_metrics:
+                    _items.append(_item_initial_metrics.to_dict())
+            _dict['initial_metrics'] = _items
         # set to None if onboarding_identifier (nullable) is None
         # and model_fields_set contains the field
         if self.onboarding_identifier is None and "onboarding_identifier" in self.model_fields_set:
@@ -111,7 +122,9 @@ class CreateModelTaskJobSpec(BaseModel):
             "connector_id": obj.get("connector_id"),
             "task_name": obj.get("task_name"),
             "onboarding_identifier": obj.get("onboarding_identifier"),
-            "initial_rules": [NewRuleRequest.from_dict(_item) for _item in obj["initial_rules"]] if obj.get("initial_rules") is not None else None
+            "initial_rules": [NewRuleRequest.from_dict(_item) for _item in obj["initial_rules"]] if obj.get("initial_rules") is not None else None,
+            "task_type": obj.get("task_type"),
+            "initial_metrics": [NewMetricRequest.from_dict(_item) for _item in obj["initial_metrics"]] if obj.get("initial_metrics") is not None else None
         })
         return _obj
 
arthur_client/api_bindings/models/metric_response.py
@@ -0,0 +1,113 @@
+# coding: utf-8
+
+"""
+    Arthur Scope
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: 0.1.0
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from datetime import datetime
+from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
+from typing import Any, ClassVar, Dict, List, Optional
+from arthur_client.api_bindings.models.metric_type import MetricType
+from typing import Optional, Set
+from typing_extensions import Self
+
+class MetricResponse(BaseModel):
+    """
+    MetricResponse
+    """ # noqa: E501
+    id: StrictStr = Field(description="ID of the Metric")
+    name: StrictStr = Field(description="Name of the Metric")
+    type: MetricType = Field(description="Type of the Metric")
+    metric_metadata: StrictStr = Field(description="Metadata of the Metric")
+    config: Optional[StrictStr] = None
+    created_at: datetime = Field(description="Time the Metric was created in unix milliseconds")
+    updated_at: datetime = Field(description="Time the Metric was updated in unix milliseconds")
+    enabled: Optional[StrictBool] = None
+    __properties: ClassVar[List[str]] = ["id", "name", "type", "metric_metadata", "config", "created_at", "updated_at", "enabled"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of MetricResponse from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        # set to None if config (nullable) is None
+        # and model_fields_set contains the field
+        if self.config is None and "config" in self.model_fields_set:
+            _dict['config'] = None
+
+        # set to None if enabled (nullable) is None
+        # and model_fields_set contains the field
+        if self.enabled is None and "enabled" in self.model_fields_set:
+            _dict['enabled'] = None
+
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of MetricResponse from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj)
+
+        _obj = cls.model_validate({
+            "id": obj.get("id"),
+            "name": obj.get("name"),
+            "type": obj.get("type"),
+            "metric_metadata": obj.get("metric_metadata"),
+            "config": obj.get("config"),
+            "created_at": obj.get("created_at"),
+            "updated_at": obj.get("updated_at"),
+            "enabled": obj.get("enabled")
+        })
+        return _obj
+
+
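A short usage sketch for the new response model; the payload below is hypothetical, and the timestamps only need to be in a format pydantic can parse as datetimes.

```python
from arthur_client.api_bindings.models.metric_response import MetricResponse

payload = {
    "id": "11111111-2222-3333-4444-555555555555",
    "name": "query_relevance",
    "type": "QueryRelevance",
    "metric_metadata": "{}",
    "created_at": "2024-01-01T00:00:00Z",
    "updated_at": "2024-01-01T00:00:00Z",
    "enabled": True,
}
metric = MetricResponse.from_dict(payload)
assert metric.type == "QueryRelevance"  # MetricType is a str enum
print(metric.to_str())
```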
arthur_client/api_bindings/models/metric_type.py
@@ -0,0 +1,38 @@
+# coding: utf-8
+
+"""
+    Arthur Scope
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: 0.1.0
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import json
+from enum import Enum
+from typing_extensions import Self
+
+
+class MetricType(str, Enum):
+    """
+    MetricType
+    """
+
+    """
+    allowed enum values
+    """
+    QUERYRELEVANCE = 'QueryRelevance'
+    RESPONSERELEVANCE = 'ResponseRelevance'
+    TOOLSELECTION = 'ToolSelection'
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Self:
+        """Create an instance of MetricType from a JSON string"""
+        return cls(json.loads(json_str))
+
+
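As with the other generated enums, values round-trip from their JSON string form, for example:

```python
from arthur_client.api_bindings.models.metric_type import MetricType

assert MetricType.from_json('"ToolSelection"') is MetricType.TOOLSELECTION
assert MetricType.QUERYRELEVANCE.value == 'QueryRelevance'
```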
arthur_client/api_bindings/models/model_problem_type.py
@@ -31,6 +31,7 @@ class ModelProblemType(str, Enum):
     ARTHUR_SHIELD = 'arthur_shield'
     CUSTOM = 'custom'
     MULTICLASS_CLASSIFICATION = 'multiclass_classification'
+    AGENTIC_TRACE = 'agentic_trace'
 
     @classmethod
     def from_json(cls, json_str: str) -> Self:
arthur_client/api_bindings/models/new_metric_request.py
@@ -0,0 +1,103 @@
+# coding: utf-8
+
+"""
+    Arthur Scope
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: 0.1.0
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field, StrictStr
+from typing import Any, ClassVar, Dict, List, Optional
+from arthur_client.api_bindings.models.metric_type import MetricType
+from arthur_client.api_bindings.models.relevance_metric_config import RelevanceMetricConfig
+from typing import Optional, Set
+from typing_extensions import Self
+
+class NewMetricRequest(BaseModel):
+    """
+    NewMetricRequest
+    """ # noqa: E501
+    type: MetricType = Field(description="Type of the metric. It can only be one of QueryRelevance, ResponseRelevance, ToolSelection")
+    name: StrictStr = Field(description="Name of metric")
+    metric_metadata: StrictStr = Field(description="Additional metadata for the metric")
+    config: Optional[RelevanceMetricConfig] = None
+    __properties: ClassVar[List[str]] = ["type", "name", "metric_metadata", "config"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of NewMetricRequest from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        # override the default output from pydantic by calling `to_dict()` of config
+        if self.config:
+            _dict['config'] = self.config.to_dict()
+        # set to None if config (nullable) is None
+        # and model_fields_set contains the field
+        if self.config is None and "config" in self.model_fields_set:
+            _dict['config'] = None
+
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of NewMetricRequest from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj)
+
+        _obj = cls.model_validate({
+            "type": obj.get("type"),
+            "name": obj.get("name"),
+            "metric_metadata": obj.get("metric_metadata"),
+            "config": RelevanceMetricConfig.from_dict(obj["config"]) if obj.get("config") is not None else None
+        })
+        return _obj
+
+
arthur_client/api_bindings/models/relevance_metric_config.py
@@ -0,0 +1,94 @@
+# coding: utf-8
+
+"""
+    Arthur Scope
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: 0.1.0
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictFloat, StrictInt
+from typing import Any, ClassVar, Dict, List, Optional, Union
+from typing import Optional, Set
+from typing_extensions import Self
+
+class RelevanceMetricConfig(BaseModel):
+    """
+    Configuration for relevance metrics including QueryRelevance and ResponseRelevance
+    """ # noqa: E501
+    relevance_threshold: Optional[Union[StrictFloat, StrictInt]] = None
+    use_llm_judge: Optional[StrictBool] = Field(default=True, description="Whether to use LLM as a judge for relevance scoring")
+    __properties: ClassVar[List[str]] = ["relevance_threshold", "use_llm_judge"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of RelevanceMetricConfig from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        # set to None if relevance_threshold (nullable) is None
+        # and model_fields_set contains the field
+        if self.relevance_threshold is None and "relevance_threshold" in self.model_fields_set:
+            _dict['relevance_threshold'] = None
+
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of RelevanceMetricConfig from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj)
+
+        _obj = cls.model_validate({
+            "relevance_threshold": obj.get("relevance_threshold"),
+            "use_llm_judge": obj.get("use_llm_judge") if obj.get("use_llm_judge") is not None else True
+        })
+        return _obj
+
+
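Note that `from_dict` above substitutes the schema default when `use_llm_judge` is absent; a quick sketch of that behavior:

```python
from arthur_client.api_bindings.models.relevance_metric_config import RelevanceMetricConfig

config = RelevanceMetricConfig.from_dict({})
assert config.use_llm_judge is True
assert config.relevance_threshold is None
print(config.to_dict())  # {'use_llm_judge': True, 'relevance_threshold': None}
```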