enkryptai-sdk 1.0.9__tar.gz → 1.0.11__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {enkryptai_sdk-1.0.9/src/enkryptai_sdk.egg-info → enkryptai_sdk-1.0.11}/PKG-INFO +8 -2
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/README.md +7 -1
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/setup.py +1 -1
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/base.py +10 -7
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/config.py +8 -3
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/dto/base.py +12 -1
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/dto/guardrails.py +11 -3
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/dto/models.py +17 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/dto/red_team.py +8 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/models.py +12 -6
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/red_team.py +4 -1
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11/src/enkryptai_sdk.egg-info}/PKG-INFO +8 -2
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/tests/test_all_v2.py +90 -37
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/tests/test_guardrails.py +1 -1
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/tests/test_model.py +29 -4
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/LICENSE +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/setup.cfg +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/__init__.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/ai_proxy.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/coc.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/datasets.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/deployments.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/dto/__init__.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/dto/ai_proxy.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/dto/coc.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/dto/datasets.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/dto/deployments.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/evals.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/guardrails.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/guardrails_old.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk/response.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk.egg-info/SOURCES.txt +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk.egg-info/dependency_links.txt +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/src/enkryptai_sdk.egg-info/top_level.txt +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/tests/test_ai_proxy.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/tests/test_all.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/tests/test_basic.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/tests/test_coc.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/tests/test_datasets.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/tests/test_deployments.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/tests/test_detect_policy.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/tests/test_injection_attack.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/tests/test_openai.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/tests/test_policy_violation.py +0 -0
- {enkryptai_sdk-1.0.9 → enkryptai_sdk-1.0.11}/tests/test_redteam.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: enkryptai-sdk
-Version: 1.0.9
+Version: 1.0.11
 Summary: A Python SDK with guardrails and red teaming functionality for API interactions
 Home-page: https://github.com/enkryptai/enkryptai-sdk
 Author: Enkrypt AI Team
@@ -264,7 +264,10 @@ sample_detectors = {
     "policy_violation": {
         "enabled": True,
         "need_explanation": True,
-        "policy_text": ""
+        "policy_text": "The model should not provide medical advice when asked about health symptoms."
+        # Or we can also give coc_policy_name of a saved Code of Conduct Policy
+        # Instead of policy_text
+        # "coc_policy_name": "Test CoC Policy"
     },
     "bias": {
         "enabled": False
@@ -718,6 +721,9 @@ guardrails_config = GuardrailsConfig.injection_attack()
 
 ```python Python
 guardrails_config = GuardrailsConfig.policy_violation(policy_text="You must not use hate speech", need_explanation=True)
+
+# Or we can also give coc_policy_name of a saved Code of Conduct Policy instead of policy_text
+guardrails_config = GuardrailsConfig.policy_violation(coc_policy_name="Test CoC Policy", need_explanation=True)
 ```
 
 ### [Toxicity](https://docs.enkryptai.com/guardrails-api-reference/Toxicity_Detector)
README.md
@@ -241,7 +241,10 @@ sample_detectors = {
     "policy_violation": {
         "enabled": True,
         "need_explanation": True,
-        "policy_text": ""
+        "policy_text": "The model should not provide medical advice when asked about health symptoms."
+        # Or we can also give coc_policy_name of a saved Code of Conduct Policy
+        # Instead of policy_text
+        # "coc_policy_name": "Test CoC Policy"
     },
     "bias": {
         "enabled": False
@@ -695,6 +698,9 @@ guardrails_config = GuardrailsConfig.injection_attack()
 
 ```python Python
 guardrails_config = GuardrailsConfig.policy_violation(policy_text="You must not use hate speech", need_explanation=True)
+
+# Or we can also give coc_policy_name of a saved Code of Conduct Policy instead of policy_text
+guardrails_config = GuardrailsConfig.policy_violation(coc_policy_name="Test CoC Policy", need_explanation=True)
 ```
 
 ### [Toxicity](https://docs.enkryptai.com/guardrails-api-reference/Toxicity_Detector)
setup.py
@@ -9,7 +9,7 @@ with open(os.path.join(here, "README.md"), encoding="utf-8") as fh:
 setup(
     name="enkryptai-sdk", # This is the name of your package on PyPI
     # NOTE: Also change this in .github/workflows/test.yaml
-    version="1.0.9", # Update this for new versions
+    version="1.0.11", # Update this for new versions
     description="A Python SDK with guardrails and red teaming functionality for API interactions",
     long_description=long_description,
     long_description_content_type="text/markdown",
src/enkryptai_sdk/base.py
@@ -72,14 +72,17 @@ class BaseClient:
             )
 
             if response.status >= 400:
-
-                response.json()
-
-
-
-                error_message = error_data.get("message", str(error_data))
+                try:
+                    error_data = response.json()
+                    error_message = error_data.get("message", str(error_data))
+                except:
+                    error_message = response.data.decode('utf-8') if response.data else f"HTTP {response.status}"
                 raise urllib3.exceptions.HTTPError(error_message)
-
+
+            try:
+                return response.json()
+            except:
+                return {"error": response.data.decode('utf-8') if response.data else "Invalid JSON response"}
         except urllib3.exceptions.HTTPError as e:
             return {"error": str(e)}

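The change hardens `_request` against non-JSON bodies: JSON parsing is attempted first, and the raw body (or an HTTP status string) becomes the error message when parsing fails. Below is a minimal self-contained sketch of the same fallback pattern, using `json.loads` and a stand-in response object rather than a real urllib3 response; the names are illustrative, not the SDK's, and the sketch returns the error dict directly instead of raising and re-catching `urllib3.exceptions.HTTPError` as the SDK does.

```python
import json
from types import SimpleNamespace


def parse_response(response) -> dict:
    """Try JSON first, then fall back to the raw body, mirroring the diff's pattern."""
    if response.status >= 400:
        try:
            error_data = json.loads(response.data)
            error_message = error_data.get("message", str(error_data))
        except (ValueError, TypeError):
            error_message = response.data.decode("utf-8") if response.data else f"HTTP {response.status}"
        return {"error": error_message}

    try:
        return json.loads(response.data)
    except (ValueError, TypeError):
        return {"error": response.data.decode("utf-8") if response.data else "Invalid JSON response"}


# A plain-text 502 from a proxy no longer breaks the error path
print(parse_response(SimpleNamespace(status=502, data=b"Bad Gateway")))   # {'error': 'Bad Gateway'}
print(parse_response(SimpleNamespace(status=200, data=b'{"status": "healthy"}')))  # {'status': 'healthy'}
```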
src/enkryptai_sdk/config.py
@@ -10,7 +10,7 @@ DEFAULT_GUARDRAILS_CONFIG = {
     "keyword_detector": {"enabled": False, "banned_keywords": []},
     "policy_violation": {
         "enabled": False,
-        "policy_text": "",
+        "policy_text": "Do not allow any illegal or immoral activities.",
         "need_explanation": False,
     },
     "bias": {"enabled": False},
@@ -42,16 +42,21 @@ class GuardrailsConfig:
         return cls(config)
 
     @classmethod
-    def policy_violation(cls, policy_text: str, need_explanation: bool = False):
+    def policy_violation(cls, policy_text: str = "", need_explanation: bool = False, coc_policy_name: str = ""):
         """
         Returns a configuration instance pre-configured for policy violation detection.
         """
         config = copy.deepcopy(DEFAULT_GUARDRAILS_CONFIG)
         config["policy_violation"] = {
             "enabled": True,
-            "policy_text": policy_text,
             "need_explanation": need_explanation,
         }
+
+        if policy_text:
+            config["policy_violation"]["policy_text"] = policy_text
+        if coc_policy_name:
+            config["policy_violation"]["coc_policy_name"] = coc_policy_name
+
         return cls(config)
 
     @classmethod
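With the new signature, `policy_violation` only includes whichever policy source was actually supplied, so `policy_text` and `coc_policy_name` are mutually optional. A minimal standalone sketch of that selection logic, using a simplified stand-in for `DEFAULT_GUARDRAILS_CONFIG`:

```python
import copy

# Simplified stand-in for the SDK's DEFAULT_GUARDRAILS_CONFIG
DEFAULT_CONFIG = {"policy_violation": {"enabled": False, "need_explanation": False}}


def policy_violation_config(policy_text: str = "", need_explanation: bool = False, coc_policy_name: str = "") -> dict:
    """Enable the detector and keep only the policy source that was supplied."""
    config = copy.deepcopy(DEFAULT_CONFIG)
    config["policy_violation"] = {"enabled": True, "need_explanation": need_explanation}
    if policy_text:
        config["policy_violation"]["policy_text"] = policy_text
    if coc_policy_name:
        config["policy_violation"]["coc_policy_name"] = coc_policy_name
    return config


print(policy_violation_config(policy_text="You must not use hate speech", need_explanation=True))
print(policy_violation_config(coc_policy_name="Test CoC Policy"))
```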
src/enkryptai_sdk/dto/base.py
@@ -52,7 +52,18 @@ class BaseDTO:
 
     def to_dict(self) -> Dict[str, Any]:
         """Convert the instance to a dictionary."""
-        d = {
+        d = {}
+        for k, v in self.__dict__.items():
+            if k == "_extra_fields":
+                continue
+            if hasattr(v, "to_dict"):
+                d[k] = v.to_dict()
+            elif isinstance(v, list):
+                d[k] = [item.to_dict() if hasattr(item, "to_dict") else item for item in v]
+            elif isinstance(v, dict):
+                d[k] = {key: val.to_dict() if hasattr(val, "to_dict") else val for key, val in v.items()}
+            else:
+                d[k] = v
         d.update(self._extra_fields)
         return d

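The rewritten `to_dict` walks `self.__dict__` and recursively serializes anything that exposes its own `to_dict`, including elements inside lists and dict values, before merging `_extra_fields`. A self-contained sketch of the same pattern with simplified stand-in classes (not the SDK's DTOs):

```python
from dataclasses import dataclass, field
from typing import Any, Dict, List


@dataclass
class Child:
    name: str

    def to_dict(self) -> Dict[str, Any]:
        return {"name": self.name}


@dataclass
class Parent:
    title: str
    children: List[Child] = field(default_factory=list)
    _extra_fields: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        # Same shape as the diff: recurse into nested objects, lists, and dicts,
        # then merge any extra fields on top.
        d: Dict[str, Any] = {}
        for k, v in self.__dict__.items():
            if k == "_extra_fields":
                continue
            if hasattr(v, "to_dict"):
                d[k] = v.to_dict()
            elif isinstance(v, list):
                d[k] = [item.to_dict() if hasattr(item, "to_dict") else item for item in v]
            elif isinstance(v, dict):
                d[k] = {key: val.to_dict() if hasattr(val, "to_dict") else val for key, val in v.items()}
            else:
                d[k] = v
        d.update(self._extra_fields)
        return d


print(Parent(title="demo", children=[Child("a"), Child("b")]).to_dict())
# {'title': 'demo', 'children': [{'name': 'a'}, {'name': 'b'}]}
```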
src/enkryptai_sdk/dto/guardrails.py
@@ -163,6 +163,7 @@ class PolicyViolationDetector(BaseDTO):
     enabled: bool = False
     policy_text: str = ""
     need_explanation: bool = False
+    coc_policy_name: str = ""
     _extra_fields: Dict[str, Any] = field(default_factory=dict)
 
     @classmethod
@@ -170,16 +171,23 @@
         return cls(
             enabled=data.get("enabled", False),
             policy_text=data.get("policy_text", ""),
-            need_explanation=data.get("need_explanation", False)
+            need_explanation=data.get("need_explanation", False),
+            coc_policy_name=data.get("coc_policy_name", "")
         )
 
     def to_dict(self) -> Dict[str, Any]:
-
+        res_dict = {
             "enabled": self.enabled,
-            "policy_text": self.policy_text,
             "need_explanation": self.need_explanation
         }
 
+        if self.policy_text:
+            res_dict["policy_text"] = self.policy_text
+        if self.coc_policy_name:
+            res_dict["coc_policy_name"] = self.coc_policy_name
+
+        return res_dict
+
 
 @dataclass
 class BiasDetector(BaseDTO):
src/enkryptai_sdk/dto/models.py
@@ -37,6 +37,8 @@ class ModelProviders(str, Enum):
     OPENAI_COMPATIBLE = "openai_compatible"
     COHERE_COMPATIBLE = "cohere_compatible"
     ANTHROPIC_COMPATIBLE = "anthropic_compatible"
+    CUSTOM = "custom"
+    HR = "hr"
 
 
 @dataclass
@@ -247,6 +249,14 @@ class ModelConfigDetails(BaseDTO):
         d = super().to_dict()
         # Handle AuthData specifically
         d["auth_data"] = self.auth_data.to_dict()
+        # Handle CustomHeader list
+        d["custom_headers"] = [header.to_dict() for header in self.custom_headers]
+        # Handle ModelProviders enum
+        if isinstance(d["model_provider"], ModelProviders):
+            d["model_provider"] = d["model_provider"].value
+        # Handle input/output modalities
+        d["input_modalities"] = [m.value for m in self.input_modalities]
+        d["output_modalities"] = [m.value for m in self.output_modalities]
         return d
 
     def to_json(self):
@@ -289,6 +299,13 @@ class ModelConfig(BaseDTO):
 
         return cls(**data, model_config=model_config)
 
+    def to_dict(self) -> dict:
+        """Convert the ModelConfig instance to a dictionary."""
+        d = super().to_dict()
+        # Handle nested ModelConfigDetails
+        d["model_config"] = self.model_config.to_dict()
+        return d
+
     @classmethod
     def __str__(self):
         """String representation of the ModelConfig."""
src/enkryptai_sdk/dto/red_team.py
@@ -367,7 +367,15 @@ class TargetModelConfiguration(BaseDTO):
 
     @classmethod
     def from_dict(cls, data: dict):
+        data = data.copy()
+        if "custom_headers" in data:
+            data["custom_headers"] = [CustomHeader.from_dict(header) for header in data["custom_headers"]]
         return cls(**data)
+
+    def to_dict(self) -> dict:
+        d = asdict(self)
+        d["custom_headers"] = [header.to_dict() for header in self.custom_headers]
+        return d
 
 
 @dataclass
src/enkryptai_sdk/models.py
@@ -49,6 +49,9 @@ class ModelClient(BaseClient):
             "chat": f"/{remaining_path}" if remaining_path else "",
         }
 
+        # Convert custom_headers to list of dictionaries
+        custom_headers = [header.to_dict() for header in config.model_config.custom_headers]
+
         payload = {
             "testing_for": config.testing_for,
             "model_name": config.model_name,
@@ -72,9 +75,9 @@
             },
             "apikeys": [config.model_config.apikey] if config.model_config.apikey else [],
             "tools": config.model_config.tools,
-            "input_modalities": config.model_config.input_modalities,
-            "output_modalities": config.model_config.output_modalities,
-            "custom_headers":
+            "input_modalities": [m.value if hasattr(m, 'value') else m for m in config.model_config.input_modalities],
+            "output_modalities": [m.value if hasattr(m, 'value') else m for m in config.model_config.output_modalities],
+            "custom_headers": custom_headers,
             "custom_payload": config.model_config.custom_payload,
             "custom_response_content_type": config.model_config.custom_response_content_type,
             "custom_response_format": config.model_config.custom_response_format,
@@ -189,6 +192,9 @@
             "chat": f"/{remaining_path}" if remaining_path else "",
         }
 
+        # Convert custom_headers to list of dictionaries
+        custom_headers = [header.to_dict() for header in config.model_config.custom_headers]
+
         payload = {
             "model_saved_name": config.model_saved_name,
             "model_version": config.model_version,
@@ -217,9 +223,9 @@
                 [config.model_config.apikey] if config.model_config.apikey else []
             ),
             "tools": config.model_config.tools,
-            "input_modalities": config.model_config.input_modalities,
-            "output_modalities": config.model_config.output_modalities,
-            "custom_headers":
+            "input_modalities": [m.value if hasattr(m, 'value') else m for m in config.model_config.input_modalities],
+            "output_modalities": [m.value if hasattr(m, 'value') else m for m in config.model_config.output_modalities],
+            "custom_headers": custom_headers,
             "custom_payload": config.model_config.custom_payload,
             "custom_response_content_type": config.model_config.custom_response_content_type,
             "custom_response_format": config.model_config.custom_response_format,
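Both payload builders now coerce modalities to plain strings (enum members via `.value`, strings passed through) and serialize header objects to dicts before building the JSON request. A short standalone sketch of that normalization, with an illustrative enum and header class rather than the SDK's own types:

```python
from dataclasses import dataclass
from enum import Enum
from typing import List, Union


class Modality(str, Enum):
    TEXT = "text"
    IMAGE = "image"


@dataclass
class Header:
    key: str
    value: str

    def to_dict(self) -> dict:
        return {"key": self.key, "value": self.value}


def build_payload(modalities: List[Union[Modality, str]], headers: List[Header]) -> dict:
    # Mirror the diff's pattern: enum members become their .value,
    # plain strings pass through, and header objects become dicts.
    return {
        "input_modalities": [m.value if hasattr(m, "value") else m for m in modalities],
        "custom_headers": [h.to_dict() for h in headers],
    }


print(build_payload([Modality.TEXT, "image"], [Header("Content-Type", "application/json")]))
# {'input_modalities': ['text', 'image'], 'custom_headers': [{'key': 'Content-Type', 'value': 'application/json'}]}
```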
src/enkryptai_sdk/red_team.py
@@ -1,4 +1,5 @@
-import
+# import json
+# import urllib3
 from .base import BaseClient
 from .models import ModelClient
 from .datasets import DatasetClient
@@ -62,6 +63,8 @@ class RedTeamClient(BaseClient):
         """
         try:
             config = RedTeamModelHealthConfig.from_dict(config)
+            # Print the config as json string
+            # print(f"Config: {json.dumps(config.to_dict(), indent=4)}")
             response = self._request("POST", "/redteam/model-health", json=config.to_dict())
             # if response.get("error"):
             if response.get("error") not in [None, ""]:
src/enkryptai_sdk.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: enkryptai-sdk
-Version: 1.0.9
+Version: 1.0.11
 Summary: A Python SDK with guardrails and red teaming functionality for API interactions
 Home-page: https://github.com/enkryptai/enkryptai-sdk
 Author: Enkrypt AI Team
@@ -264,7 +264,10 @@ sample_detectors = {
     "policy_violation": {
         "enabled": True,
         "need_explanation": True,
-        "policy_text": ""
+        "policy_text": "The model should not provide medical advice when asked about health symptoms."
+        # Or we can also give coc_policy_name of a saved Code of Conduct Policy
+        # Instead of policy_text
+        # "coc_policy_name": "Test CoC Policy"
     },
     "bias": {
         "enabled": False
@@ -718,6 +721,9 @@ guardrails_config = GuardrailsConfig.injection_attack()
 
 ```python Python
 guardrails_config = GuardrailsConfig.policy_violation(policy_text="You must not use hate speech", need_explanation=True)
+
+# Or we can also give coc_policy_name of a saved Code of Conduct Policy instead of policy_text
+guardrails_config = GuardrailsConfig.policy_violation(coc_policy_name="Test CoC Policy", need_explanation=True)
 ```
 
 ### [Toxicity](https://docs.enkryptai.com/guardrails-api-reference/Toxicity_Detector)
tests/test_all_v2.py
@@ -141,7 +141,7 @@ def sample_detectors():
         "policy_violation": {
             "enabled": True,
             "need_explanation": True,
-            "policy_text": ""
+            "policy_text": "Do not allow any illegal or immoral activities."
         },
         "bias": {
             "enabled": False
@@ -163,11 +163,33 @@ def sample_model_config():
         "testing_for": "foundationModels",
         "model_name": model_name,
         "model_config": {
-            "model_provider": model_provider,
+            # "model_provider": model_provider,
+            "model_provider": "custom",
             "endpoint_url": model_endpoint_url,
-            "apikey": OPENAI_API_KEY,
+            # "apikey": OPENAI_API_KEY,
             "input_modalities": ["text"],
             "output_modalities": ["text"],
+            "custom_headers": [
+                {
+                    "key": "Content-Type",
+                    "value": "application/json"
+                },
+                {
+                    "key": "Authorization",
+                    "value": "Bearer " + OPENAI_API_KEY
+                }
+            ],
+            "custom_payload": {
+                "model": model_name,
+                "messages": [
+                    {
+                        "role": "user",
+                        "content": "{prompt}"
+                    }
+                ]
+            },
+            "custom_response_content_type": "json",
+            "custom_response_format": ".choices[0].message.content",
         },
     }

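The custom-provider fixture sends a `custom_payload` template in which `"{prompt}"` marks where the generated prompt is injected; the substitution itself happens on the Enkrypt AI side, but the template shape can be illustrated locally with a hypothetical helper:

```python
import copy
import json

# Template in the same shape as the test fixture's custom_payload
payload_template = {
    "model": "gpt-4o-mini",
    "messages": [
        {"role": "user", "content": "{prompt}"}
    ],
}


def render_payload(template: dict, prompt: str) -> dict:
    """Replace the '{prompt}' placeholder in message contents (local illustration only)."""
    rendered = copy.deepcopy(template)
    for message in rendered["messages"]:
        message["content"] = message["content"].replace("{prompt}", prompt)
    return rendered


print(json.dumps(render_payload(payload_template, "Say hello"), indent=2))
```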
@@ -235,9 +257,31 @@ def sample_redteam_model_health_config():
         "testing_for": "foundationModels",
         "model_version": "v1",
         "model_source": "https://openai.com",
-        "model_provider": model_provider,
+        # "model_provider": model_provider,
+        "model_provider": "custom",
         "model_endpoint_url": model_endpoint_url,
-        "model_api_key": OPENAI_API_KEY,
+        # "model_api_key": OPENAI_API_KEY,
+        "custom_headers": [
+            {
+                "key": "Content-Type",
+                "value": "application/json"
+            },
+            {
+                "key": "Authorization",
+                "value": "Bearer " + OPENAI_API_KEY
+            }
+        ],
+        "custom_payload": {
+            "model": model_name,
+            "messages": [
+                {
+                    "role": "user",
+                    "content": "{prompt}"
+                }
+            ]
+        },
+        "custom_response_content_type": "json",
+        "custom_response_format": ".choices[0].message.content",
         "system_prompt": "",
         "rate_per_min": 20,
         "input_modalities": ["text"],
@@ -659,18 +703,19 @@ def test_adherence(guardrails_client):
     assert summary.adherence_score == 0
 
 
-
-
-
-
-
-
-
-
-
-
-
-
+# Not being used in Prod at this time
+# def test_relevancy(guardrails_client):
+#     print("\n\nTesting relevancy")
+#     # Test the relevancy method
+#     response = guardrails_client.relevancy(llm_answer="Hello! How can I help you today? If you have any questions or need assistance with something, feel free to ask. I'm here to provide information and support. Is there something specific you'd like to know or discuss?", question="Hi")
+#     print("\nResponse from relevancy: ", response)
+#     print("\nResponse data type: ", type(response))
+#     assert response is not None
+#     assert hasattr(response, "summary")
+#     summary = response.summary
+#     assert summary.relevancy_score == 0
+#     print("\nSleeping for 60 seconds after guardrails tests...")
+#     time.sleep(60)
 
 
 def test_list_policies(guardrails_client):
@@ -1174,13 +1219,16 @@ def test_get_health(redteam_client):
     assert response.status == "healthy"
 
 
-
-
-
-
-
-
-
+# ---------------------------------------------------------
+# Commenting as we already test for saved model health
+# ---------------------------------------------------------
+# def test_model_health(redteam_client, sample_redteam_model_health_config):
+#     print("\n\nTesting check_model_health")
+#     response = redteam_client.check_model_health(config=sample_redteam_model_health_config)
+#     print("\nResponse from check_model_health: ", response)
+#     assert response is not None
+#     assert hasattr(response, "status")
+#     assert response.status == "healthy"
 
 
 def test_saved_model_health(redteam_client):
@@ -1192,7 +1240,8 @@ def test_saved_model_health(redteam_client):
     assert response.status == "healthy"
 
 
-#
+# ---------------------------------------------------------
+# Commenting as we are testing with add custom task with saved model
 # ---------------------------------------------------------
 # def test_add_task_with_target_model(redteam_client, sample_redteam_target_config):
 #     print("\n\nTesting adding a new redteam task with target model")
@@ -1210,20 +1259,24 @@
 #     time.sleep(60)
 
 
-
-
-
-
-
-
-
-
-
-
-
-
+# ---------------------------------------------------------
+# Commenting as we are testing with add custom task with saved model
+# ---------------------------------------------------------
+# def test_add_task_with_saved_model(redteam_client, sample_redteam_model_config):
+#     print("\n\nTesting adding a new redteam task with saved model")
+#     response = redteam_client.add_task_with_saved_model(config=sample_redteam_model_config,model_saved_name=test_model_saved_name, model_version=test_model_version)
+#     print("\nResponse from adding a new redteam task with saved model: ", response)
+#     assert response is not None
+#     assert hasattr(response, "task_id")
+#     assert hasattr(response, "message")
+#     response.message == "Redteam task has been added successfully"
+#     # Sleep for a while to let the task complete
+#     # This is also useful to avoid rate limiting issues
+#     print("\nSleeping for 60 seconds to let the task complete if possible ...")
+#     time.sleep(60)
 
 
+# ---------------------------------------------------------
 # # Testing only via saved model as it should be sufficient
 # ---------------------------------------------------------
 # def test_add_custom_task_with_target_model(redteam_client, sample_custom_redteam_target_config):
tests/test_model.py
@@ -14,6 +14,9 @@ model_saved_name = None
 test_model_saved_name = "Test Model"
 model_version = None
 test_model_version = "v1"
+model_provider = "openai"
+model_name = "gpt-4o-mini"
+model_endpoint_url = "https://api.openai.com/v1/chat/completions"
 
 @pytest.fixture
 def model_client():
@@ -27,13 +30,35 @@ def sample_model_config():
         "model_saved_name": test_model_saved_name,
         "model_version": test_model_version,
         "testing_for": "foundationModels",
-        "model_name":
+        "model_name": model_name,
         "model_config": {
-            "model_provider":
-            "
-            "
+            # "model_provider": model_provider,
+            "model_provider": "custom",
+            "endpoint_url": model_endpoint_url,
+            # "apikey": OPENAI_API_KEY,
             "input_modalities": ["text"],
             "output_modalities": ["text"],
+            "custom_headers": [
+                {
+                    "key": "Content-Type",
+                    "value": "application/json"
+                },
+                {
+                    "key": "Authorization",
+                    "value": "Bearer " + OPENAI_API_KEY
+                }
+            ],
+            "custom_payload": {
+                "model": model_name,
+                "messages": [
+                    {
+                        "role": "user",
+                        "content": "{prompt}"
+                    }
+                ]
+            },
+            "custom_response_content_type": "json",
+            "custom_response_format": ".choices[0].message.content",
         },
     }
