arthur-client 0.4.1__tar.gz → 0.6.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {arthur-client-0.4.1 → arthur-client-0.6.0}/PKG-INFO +1 -1
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/admin/models.py +1 -1
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/alerts/models.py +1 -1
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/bench/models.py +1 -1
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/common/models.py +1 -1
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/enrichments/client.py +18 -38
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/enrichments/models.py +178 -182
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/inferences/models.py +1 -1
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/insights/models.py +1 -1
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/metrics/models.py +2 -6
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/model_groups/models.py +1 -1
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/models/models.py +1 -1
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/query/models.py +1 -1
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/version.py +1 -1
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur_client.egg-info/PKG-INFO +1 -1
- {arthur-client-0.4.1 → arthur-client-0.6.0}/LICENSE +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/README.md +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/__init__.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/auth/__init__.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/auth/helpers.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/auth/refresh.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/helpers.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/http/__init__.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/http/helper.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/http/requests.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/http/validation.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/__init__.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/admin/__init__.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/admin/client.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/alerts/__init__.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/alerts/client.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/bench/__init__.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/bench/client.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/client.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/common/__init__.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/common/client.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/enrichments/__init__.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/inferences/__init__.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/inferences/client.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/insights/__init__.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/insights/client.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/metrics/__init__.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/metrics/client.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/model_groups/__init__.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/model_groups/client.py +1 -1
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/models/__init__.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/models/client.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/query/__init__.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/rest/query/client.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/client/types.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/common/__init__.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/common/constants.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/common/exceptions.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur/common/log.py +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur_client.egg-info/SOURCES.txt +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur_client.egg-info/dependency_links.txt +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur_client.egg-info/requires.txt +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/arthur_client.egg-info/top_level.txt +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/client-readme-public.md +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/pyproject.toml +0 -0
- {arthur-client-0.4.1 → arthur-client-0.6.0}/setup.cfg +0 -0
arthur/client/rest/enrichments/client.py

```diff
@@ -8,18 +8,20 @@ from arthur.client.http.requests import HTTPClient
 
 from arthur.client.rest.enrichments.models import (
     AnomalyDetectionEnrichmentConfiguration,
+    AnomalyDetectionEnrichmentResponse,
     BiasConstraintEnum,
     BiasMitigationEnrichmentConfiguration,
-
-
-
-    ExplainabilityEnrichmentConfiguration,
+    BiasMitigationEnrichmentResponse,
+    EnrichmentsRequest,
+    EnrichmentsResponse,
     ExplainabilityEnrichmentMultipartRequestBody,
     ExplainabilityEnrichmentRequest,
+    ExplainabilityEnrichmentResponse,
     ExplainabilityResultOnDemand,
     ExplainabilityResultWhatIf,
     FindHotspotsResponse,
     HotspotsEnrichmentConfiguration,
+    HotspotsEnrichmentResponse,
     PaginatedBiasMitigationCurves,
     WhatIfRequest,
 )
@@ -85,7 +87,7 @@ class ArthurEnrichmentsClient:
         )
         return PaginatedBiasMitigationCurves(**parsed_resp)
 
-    def get_enrichment_configuration(self, model_id: str) ->
+    def get_enrichment_configuration(self, model_id: str) -> EnrichmentsResponse:
         """
         Gets the enrichment configurations for a model
 
@@ -95,19 +97,19 @@ class ArthurEnrichmentsClient:
         parsed_resp: Dict[str, Any] = self.http_client.get(  # type: ignore
             f"/v3/models/{model_id}/enrichments", validation_response_code=200
         )
-        return
+        return EnrichmentsResponse(**parsed_resp)
 
     def update_enrichment_config(
         self,
         model_id: str,
-        json_body:
+        json_body: EnrichmentsRequest,
         multipart_data: ExplainabilityEnrichmentMultipartRequestBody,
     ) -> Response:
         """
         Updates the enrichment configuration for a model
 
         :param model_id:
-        :param json_body: A
+        :param json_body: Configures multiple enrichments. A multipart/form-data body with at least a `configuration` JSON body. If explainability is being enabled for the first time, artifacts must be supplied.
         :param multipart_data: When setting up explainability, a config must always be provided. The explainability enrichment artifact files may be provided all together, but a config must be provided as well, regardless of whether the config has already been set.
         """
 
@@ -122,7 +124,7 @@ class ArthurEnrichmentsClient:
 
     def get_anomaly_detection_config(
         self, model_id: str
-    ) ->
+    ) -> AnomalyDetectionEnrichmentResponse:
         """
 
 
@@ -133,7 +135,7 @@ class ArthurEnrichmentsClient:
             f"/v3/models/{model_id}/enrichments/anomaly_detection",
             validation_response_code=200,
         )
-        return
+        return AnomalyDetectionEnrichmentResponse(**parsed_resp)
 
     def update_anomaly_detection_config(
         self, model_id: str, json_body: AnomalyDetectionEnrichmentConfiguration
@@ -155,7 +157,7 @@ class ArthurEnrichmentsClient:
 
     def get_bias_mitigation_config(
         self, model_id: str
-    ) ->
+    ) -> BiasMitigationEnrichmentResponse:
         """
 
 
@@ -166,7 +168,7 @@ class ArthurEnrichmentsClient:
             f"/v3/models/{model_id}/enrichments/bias_mitigation",
             validation_response_code=200,
         )
-        return
+        return BiasMitigationEnrichmentResponse(**parsed_resp)
 
     def update_bias_mitigation_config(
         self, model_id: str, json_body: BiasMitigationEnrichmentConfiguration
@@ -186,7 +188,7 @@ class ArthurEnrichmentsClient:
         )
         return raw_resp
 
-    def get_hotspots_config(self, model_id: str) ->
+    def get_hotspots_config(self, model_id: str) -> HotspotsEnrichmentResponse:
         """
         Get hotspot enrichment config for a model
 
@@ -196,7 +198,7 @@ class ArthurEnrichmentsClient:
         parsed_resp: Dict[str, Any] = self.http_client.get(  # type: ignore
             f"/v3/models/{model_id}/enrichments/hotspots", validation_response_code=200
         )
-        return
+        return HotspotsEnrichmentResponse(**parsed_resp)
 
     def update_hotspots_config(
         self, model_id: str, json_body: HotspotsEnrichmentConfiguration
@@ -251,7 +253,7 @@ class ArthurEnrichmentsClient:
 
     def get_explainability_config(
         self, model_id: str
-    ) ->
+    ) -> ExplainabilityEnrichmentResponse:
         """
 
 
@@ -262,7 +264,7 @@ class ArthurEnrichmentsClient:
             f"/v3/models/{model_id}/enrichments/explainability",
             validation_response_code=200,
         )
-        return
+        return ExplainabilityEnrichmentResponse(**parsed_resp)
 
     def update_explainability_config(
         self,
@@ -287,28 +289,6 @@ class ArthurEnrichmentsClient:
         )
         return raw_resp
 
-    def update_enrichment_status(
-        self,
-        model_id: str,
-        enrichment_name: EnrichmentName,
-        json_body: EnrichmentStatusUpdate,
-    ) -> Response:
-        """
-        Update status for an enrichment
-
-        :param model_id:
-        :param enrichment_name: Name of enrichment.
-        :param json_body: Updates status for an enrichment. A body with at least a `status` key.
-        """
-
-        raw_resp: Response = self.http_client.patch(  # type: ignore
-            f"/v3/models/{model_id}/enrichments/{enrichment_name}/status",
-            json=json_body.dict(by_alias=True, exclude_none=True),
-            validation_response_code=202,
-            return_raw_response=True,
-        )
-        return raw_resp
-
     def explain_inference(
         self,
         model_id: str,
```
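For callers, the visible API change in `client.py` is that the enrichment getters now return typed models (`EnrichmentsResponse`, `AnomalyDetectionEnrichmentResponse`, and so on), `update_enrichment_config` takes an `EnrichmentsRequest`, and the `update_enrichment_status` helper is gone. Below is a minimal sketch of the 0.6.0 call pattern; `enrichments_client` stands in for an already-constructed `ArthurEnrichmentsClient` and the model ID is a placeholder, neither of which comes from this diff:

```python
from arthur.client.rest.enrichments.models import (
    EnrichmentsConfiguration,
    EnrichmentsRequest,
    EnrichmentsResponse,
    HotspotsEnrichmentConfiguration,
)

# Assumption: `enrichments_client` is an existing ArthurEnrichmentsClient and
# `model_id` is a real model UUID; neither is taken from this diff.
model_id = "00000000-0000-0000-0000-000000000000"

# 0.6.0: the getter returns a parsed EnrichmentsResponse instead of an untyped body.
config: EnrichmentsResponse = enrichments_client.get_enrichment_configuration(model_id)
if config.hotspots is not None:
    print(config.hotspots.enabled, config.hotspots.status)

# 0.6.0: update_enrichment_config expects an EnrichmentsRequest wrapping an
# EnrichmentsConfiguration (the explainability multipart body is still a separate argument).
body = EnrichmentsRequest(
    config=EnrichmentsConfiguration(
        hotspots=HotspotsEnrichmentConfiguration(enabled=True),
    )
)

# The per-enrichment status PATCH (update_enrichment_status) no longer exists on this
# client in 0.6.0; status is read from the typed responses instead.
```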
arthur/client/rest/enrichments/models.py

```diff
@@ -1,6 +1,6 @@
 # generated by datamodel-codegen:
 #   filename: enrichments.yaml
-#   timestamp: 2023-
+#   timestamp: 2023-10-04T15:04:36+00:00
 
 from __future__ import annotations
 
```
```diff
@@ -12,132 +12,164 @@ from arthur.client.types import ByteField
 from pydantic import BaseModel, Extra, Field
 
 
-class
-    enabled: bool
+class Status(str, Enum):
     """
-
+    The enrichment pipeline status
     """
 
-
-
-
-
+    Disabled = 'Disabled'
+    Pending = 'Pending'
+    Training = 'Training'
+    Ready = 'Ready'
+    Failed = 'Failed'
 
 
-class
+class EnrichmentStatus(BaseModel):
+    status: Optional[Status] = Field(None, example='Disabled')
     """
-
+    The enrichment pipeline status
     """
 
-    Disabled = 'disabled'
-    Updating = 'updating'
-    Ready = 'ready'
 
-
-
-    pipeline_status: Optional[PipelineStatus] = None
+class AnomalyDetectionEnrichmentConfiguration(BaseModel):
+    enabled: bool
     """
-
+    Whether or not the enrichment is enabled
     """
 
 
-class AnomalyDetectionEnrichmentConfiguration
+class BiasMitigationEnrichmentConfiguration(AnomalyDetectionEnrichmentConfiguration):
     pass
 
 
-class
-    left: Optional[FindHotspotsNode] = None
-    right: Optional[FindHotspotsNode] = None
-    rules: Dict[str, Any]
+class ExplanationAlgo(str, Enum):
     """
-
+    Explainability algorithm to use in the model server. Current options are "lime" or "shap", default is "lime"
     """
-
+
+
+    LIME = 'lime'
+    SHAP = 'shap'
+
+
+class Config(BaseModel):
+    python_version: Optional[str] = Field(None, example='3.8.17')
     """
-
+    Python version number user project users
     """
-
+    sdk_version: Optional[str] = Field(None, example='3.26.0')
     """
-
+    SDK version number user project uses
     """
-
+    streaming_explainability_enabled: Optional[bool] = Field(None, example=True)
     """
-
+    Set to true if explainability should be calculated for all inferences in a streaming manner and set to false if explainability should only be calculated when requested.
     """
-
+    user_predict_function_import_path: Optional[str] = Field(
+        None, example='path/to/import/function.py'
+    )
     """
-
+    Import path of the user model predict function in the project directory
     """
-
+    shap_expected_values: Optional[List[float]] = Field(None, example=[1, 2, 3])
     """
-
+    If using SHAP, these are the expected values generated when the explainer is created
     """
-
+    model_server_cpu: Optional[str] = Field('2', example='2')
     """
-
+    Number of cpus to assign to explanation server.
     """
-
+    model_server_memory: Optional[str] = Field('1500Mi', example='1Gi')
     """
-
+    Amount of memory to assign explanation server in the format xMi or xGi.
     """
-
+    model_server_max_replicas: Optional[int] = Field(30, example=30)
     """
-
+    Max number of model server instances.
     """
-
+    explanation_nsamples: Optional[int] = Field(2000, example=2000)
     """
-
+    Explainability algorithms create sample data points when explaining inferences. The number of samples created per explanation can be configured here. There is a trade off between accuracy and computing power and time for this configuration.
     """
-
-
-class AnomalyDetectionEnrichmentResponse(AnomalyDetectionEnrichmentConfiguration):
+    explanation_algo: Optional[ExplanationAlgo] = Field('lime', example='lime')
     """
-
+    Explainability algorithm to use in the model server. Current options are "lime" or "shap", default is "lime"
+    """
+    inference_consumer_cpu: Optional[str] = Field('500m', example='500m')
+    """
+    Number of cpus to assign to the inference reader
+    """
+    inference_consumer_memory: Optional[str] = Field('512Mi', example='512Mi')
+    """
+    Amount of memory to assign to the inference reader in the format xMi or xGi.
+    """
+    inference_consumer_score_percent: Optional[float] = Field(1.0, example=0.1)
+    """
+    Sampling rate for inferences to explain
+    """
+    inference_consumer_thread_pool_size: Optional[int] = Field(5, example=5)
+    """
+    Number of threads in the inference consumer pool
     """
-
-    pass
 
 
-class
+class ExplainabilityEnrichmentConfiguration(BaseModel):
     """
-
+    JSON-formatted configuration options for the Explainability Enrichment. See the ExplainabilityEnrichmentConfiguration for schema.
     """
 
-
+    enabled: Optional[bool] = Field(None, example=True)
+    """
+    Whether or not the enrichment is enabled
+    """
+    config: Optional[Config] = None
 
 
-class HotspotsEnrichmentConfiguration(
+class HotspotsEnrichmentConfiguration(AnomalyDetectionEnrichmentConfiguration):
     pass
 
 
-class
-    predicted_attribute_name: str = Field(..., example='feature_a')
-    expected_value: float = Field(..., example=0.12)
-
-
-class TokenObject(BaseModel):
-    token: str = Field(..., example='dog')
+class EnrichmentsConfiguration(BaseModel):
     """
-
+    A JSON-formatted enrichments configuration. See the EnrichmentsConfiguration object for schema
     """
-
+
+    anomaly_detection: Optional[AnomalyDetectionEnrichmentConfiguration] = None
+    bias_mitigation: Optional[BiasMitigationEnrichmentConfiguration] = None
+    hotspots: Optional[HotspotsEnrichmentConfiguration] = None
+    explainability: Optional[ExplainabilityEnrichmentConfiguration] = None
+
+
+class AnomalyDetectionEnrichmentResponse(
+    AnomalyDetectionEnrichmentConfiguration, EnrichmentStatus
+):
     """
-
+    The response object containing configuration and status of an on-by-default enrichment.
     """
-
+
+    pass
+
+
+class BiasMitigationEnrichmentResponse(
+    BiasMitigationEnrichmentConfiguration, EnrichmentStatus
+):
     """
-
+    The response object containing configuration and status of an on-by-default enrichment.
     """
 
+    pass
 
-
-
+
+class ExplainabilityEnrichmentResponse(
+    ExplainabilityEnrichmentConfiguration, EnrichmentStatus
+):
     """
-
+    The response object containing configuration and status of the explainability enrichment.
     """
 
+    pass
+
 
-class HotspotsEnrichmentResponse(HotspotsEnrichmentConfiguration):
+class HotspotsEnrichmentResponse(HotspotsEnrichmentConfiguration, EnrichmentStatus):
     """
     The response object containing configuration and status of an on-by-default enrichment.
     """
```
```diff
@@ -145,93 +177,121 @@ class HotspotsEnrichmentResponse(HotspotsEnrichmentConfiguration):
     pass
 
 
-class
-
-
+class EnrichmentsResponse(BaseModel):
+    """
+    The response object containing configuration and status of all enrichments.
+    """
 
-
+    anomaly_detection: Optional[AnomalyDetectionEnrichmentResponse] = None
+    bias_mitigation: Optional[BiasMitigationEnrichmentResponse] = None
+    hotspots: Optional[HotspotsEnrichmentResponse] = None
+    explainability: Optional[ExplainabilityEnrichmentResponse] = None
 
 
-class
+class ExplainabilityEnrichmentRequest(BaseModel):
     """
-
+    Configures explainability. A multipart/form-data body with at least a `configuration` JSON body. If explainability is being enabled for the first time, artifacts must be supplied.
     """
 
-
-
+    config: Optional[ExplainabilityEnrichmentConfiguration] = None
+    """
+    Explainability enrichment configuration
+    """
 
 
-class
-    python_version: Optional[str] = Field(None, example='3.8.17')
+class EnrichmentsRequest(BaseModel):
     """
-
+    Configures multiple enrichments. A multipart/form-data body with at least a `configuration` JSON body. If explainability is being enabled for the first time, artifacts must be supplied.
     """
-
+
+    config: Optional[EnrichmentsConfiguration] = None
     """
-
+    Enrichments configuration
     """
-
+
+
+class ExplanationValuesWhatIf(BaseModel):
+    attribute_name: str = Field(..., example='feature_a')
+    explanation_value: float = Field(..., example=0.12)
+
+
+class FindHotspotsNode(BaseModel):
+    left: Optional[FindHotspotsNode] = None
+    right: Optional[FindHotspotsNode] = None
+    rules: Dict[str, Any]
     """
-
+    rules for the split on this node
     """
-
-        None, example='path/to/import/function.py'
-    )
+    gt_to_info: Optional[Dict[str, Any]] = None
     """
-
+    info around ground truths at this node
     """
-
+    precision: Optional[float] = None
     """
-
+    precision for this node
     """
-
+    recall: Optional[float] = None
     """
-
+    recall for this node
     """
-
+    f1: Optional[float] = None
     """
-
+    f1 for this node
     """
-
+    accuracy: float
     """
-
+    accuracy for this node
     """
-
+    impurity: float
     """
-
+    impurity for this node
     """
-
+    n_samples: int
     """
-
+    n_samples used for this node
     """
-
+    feature: str
     """
-
+    name of feature this node was cut on
     """
-
+    cutoff: Optional[float] = None
     """
-
+    the cutoff for the node
     """
-
+
+
+class ExpectedValues(BaseModel):
+    predicted_attribute_name: str = Field(..., example='feature_a')
+    expected_value: float = Field(..., example=0.12)
+
+
+class TokenObject(BaseModel):
+    token: str = Field(..., example='dog')
     """
-
+    Token string which is generated from separating the input text by the model's given delimiter.
     """
-
+    position: float = Field(..., example=0)
     """
-
+    Integer representing the location of the token in the input text. 0 refers to the the first token in the input text.
     """
-
-
-class ExplainabilityEnrichmentConfiguration(BaseModel):
+    explanation_value: float = Field(..., example=0.48)
     """
-
+    Float explanation value for the specific token.
     """
 
-
+
+class FindHotspotsResponse(BaseModel):
+    data: List[FindHotspotsNode]
     """
-
+    Contains all hotspots based on input
     """
-
+
+
+class ExplanationInput(BaseModel):
+    class Config:
+        extra = Extra.allow
+
+    __root__: Optional[Dict[str, Dict[str, Any]]] = None
 
 
 class ExplanationValuesOnDemand(BaseModel):
```
```diff
@@ -284,20 +344,15 @@ class WhatIfRequest(BaseModel):
     model_pipeline_input: List[WhatIfAttributeRequest]
 
 
-class
-    Disabled = 'Disabled'
-    Pending = 'Pending'
-    Training = 'Training'
-    Ready = 'Ready'
-    Failed = 'Failed'
-
-
-class EnrichmentStatusUpdate(BaseModel):
+class EnrichmentStatusUpdate(EnrichmentStatus):
     """
     Updates status for an enrichment. A body with at least a `status` key.
     """
 
-    status: Status
+    status: Status = Field(..., example='Disabled')
+    """
+    The enrichment pipeline status
+    """
 
 
 class EnrichmentName(str, Enum):
@@ -332,17 +387,6 @@ class BiasMitigationDataPoints(BaseModel):
     """
 
 
-class EnrichmentsConfiguration(BaseModel):
-    """
-    A JSON-formatted enrichments configuration. See the EnrichmentsConfiguration object for schema
-    """
-
-    anomaly_detection: Optional[AnomalyDetectionEnrichmentConfiguration] = None
-    bias_mitigation: Optional[BiasMitigationEnrichmentConfiguration] = None
-    hotspots: Optional[HotspotsEnrichmentConfiguration] = None
-    explainability: Optional[ExplainabilityEnrichmentConfiguration] = None
-
-
 class ExplanationsOnDemand(BaseModel):
     algorithm: str = Field(..., example='shap')
     predicted_attribute_name: str = Field(..., example='class_a')
@@ -355,25 +399,6 @@ class BiasConstraintEnum(str, Enum):
     EqualizedOdds = 'equalized_odds'
 
 
-class ExplainabilityEnrichmentResponse(ExplainabilityEnrichmentConfiguration):
-    """
-    The response object containing configuration and status of the explainability enrichment.
-    """
-
-    pass
-
-
-class ExplainabilityEnrichmentRequest(BaseModel):
-    """
-    Configures explainability. A multipart/form-data body with at least a `configuration` JSON body. If explainability is being enabled for the first time, artifacts must be supplied.
-    """
-
-    config: Optional[ExplainabilityEnrichmentConfiguration] = None
-    """
-    Explainability enrichment configuration
-    """
-
-
 class BiasMitigationCurveResponse(BaseModel):
     id: str = Field(..., example='418c6939-8765-40fa-b04e-11ba57b7f21c')
     """
@@ -425,32 +450,11 @@ class BiasMitigationCurveResponse(BaseModel):
     """
 
 
-class EnrichmentsStatus(BaseModel):
-    """
-    The response object containing configuration and status of all enrichments.
-    """
-
-    anomaly_detection: Optional[EnrichmentStatus] = None
-    bias_mitigation: Optional[EnrichmentStatus] = None
-    explainability: Optional[EnrichmentStatus] = None
-
-
 class ExplainabilityResultOnDemand(BaseModel):
     explanation: List[ExplanationsOnDemand]
     expected_value: Optional[List[ExpectedValues]] = None
 
 
-class EnrichmentsResponse(BaseModel):
-    """
-    The response object containing configuration and status of all enrichments.
-    """
-
-    anomaly_detection: Optional[AnomalyDetectionEnrichmentResponse] = None
-    bias_mitigation: Optional[BiasMitigationEnrichmentResponse] = None
-    hotspots: Optional[HotspotsEnrichmentResponse] = None
-    explainability: Optional[ExplainabilityEnrichmentResponse] = None
-
-
 class ExplainabilityResultWhatIf(BaseModel):
     predicted_values: List[WhatIfAttributeRequest]
     explanation: List[ExplanationsWhatIf]
@@ -465,14 +469,6 @@ class ExplainabilityEnrichmentMultipartRequestBody(ExplainabilityEnrichmentArtif
     config: Optional[ExplainabilityEnrichmentConfiguration] = None
 
 
-class EnrichmentsRequest(EnrichmentsConfiguration):
-    """
-    Configures multiple enrichments. A multipart/form-data body with at least a `configuration` JSON body. If explainability is being enabled for the first time, artifacts must be supplied.
-    """
-
-    pass
-
-
 class PaginatedBiasMitigationCurves(BaseModel):
     data: List[BiasMitigationCurveResponse]
     """
```
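On the models side, the regenerated schema replaces the old lowercase pipeline states ('disabled', 'updating', 'ready') with a shared `Status` enum ('Disabled', 'Pending', 'Training', 'Ready', 'Failed') and mixes an `EnrichmentStatus` base into every `*EnrichmentResponse`, so each response carries both configuration and status. A small sketch of parsing a response body under the new schema; the payload below is illustrative only, not taken from the diff or from a real API response:

```python
from arthur.client.rest.enrichments.models import EnrichmentsResponse, Status

# Illustrative payload shaped like the 0.6.0 schema; all values are made up.
payload = {
    "anomaly_detection": {"enabled": True, "status": "Ready"},
    "explainability": {
        "enabled": True,
        "status": "Training",
        "config": {"explanation_algo": "shap", "explanation_nsamples": 1000},
    },
}

parsed = EnrichmentsResponse(**payload)
assert parsed.anomaly_detection.status == Status.Ready
assert parsed.explainability.status == Status.Training
```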
arthur/client/rest/metrics/models.py

```diff
@@ -1,6 +1,6 @@
 # generated by datamodel-codegen:
 #   filename: metrics.yaml
-#   timestamp: 2023-
+#   timestamp: 2023-10-04T15:04:35+00:00
 
 from __future__ import annotations
 
@@ -32,11 +32,7 @@ class CategoricalValueLimits(BaseModel):
 
 
 class MetricGroupBy(BaseModel):
-    """
-    Model attribute to group by. If provided, the results will be broken down by each value or bin in the attribute
-    """
-
-    __root__: List[str] = Field(..., example=['Education'])
+    __root__: str = Field(..., example='Education')
     """
     Model attribute to group by. If provided, the results will be broken down by each value or bin in the attribute
    """
```
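The only substantive change in `metrics/models.py` (beyond the regenerated timestamp) is that `MetricGroupBy.__root__` is now a single attribute name rather than a list of names, a small breaking change for callers that built group-by values as lists. A sketch of the difference:

```python
from arthur.client.rest.metrics.models import MetricGroupBy

# 0.4.1: MetricGroupBy(__root__=["Education"])   (list of attribute names)
# 0.6.0: one attribute name per MetricGroupBy value
group_by = MetricGroupBy(__root__="Education")
print(group_by.__root__)  # Education
```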
arthur/client/version.py

```diff
@@ -1,2 +1,2 @@
-__version__ = "0.4.1"
+__version__ = "0.6.0"
 api_spec_version = "3.0.0"
```
arthur/client/rest/model_groups/client.py

```diff
@@ -6,13 +6,13 @@ from requests import Response
 # import http client
 from arthur.client.http.requests import HTTPClient
 
-from arthur.client.rest.models.models import ModelExpand, ModelObject
 from arthur.client.rest.model_groups.models import (
     ModelGroupResponse,
     ModelGroupUpdateRequest,
     PaginatedModelGroupResponse,
     PaginatedModelGroupVersionsResponse,
 )
+from arthur.client.rest.models.models import ModelExpand, ModelObject
 
 
 PATH_PREFIX = "/api"
```