enkryptai-sdk 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- enkryptai_sdk-0.1.0/LICENSE +0 -0
- enkryptai_sdk-0.1.0/PKG-INFO +96 -0
- enkryptai_sdk-0.1.0/README.md +74 -0
- enkryptai_sdk-0.1.0/setup.cfg +4 -0
- enkryptai_sdk-0.1.0/setup.py +29 -0
- enkryptai_sdk-0.1.0/src/enkryptai_sdk/__init__.py +4 -0
- enkryptai_sdk-0.1.0/src/enkryptai_sdk/guardrails.py +140 -0
- enkryptai_sdk-0.1.0/src/enkryptai_sdk/guardrails_config.py +70 -0
- enkryptai_sdk-0.1.0/src/enkryptai_sdk/red_team.py +0 -0
- enkryptai_sdk-0.1.0/src/enkryptai_sdk.egg-info/PKG-INFO +96 -0
- enkryptai_sdk-0.1.0/src/enkryptai_sdk.egg-info/SOURCES.txt +16 -0
- enkryptai_sdk-0.1.0/src/enkryptai_sdk.egg-info/dependency_links.txt +1 -0
- enkryptai_sdk-0.1.0/src/enkryptai_sdk.egg-info/top_level.txt +1 -0
- enkryptai_sdk-0.1.0/tests/test_all.py +160 -0
- enkryptai_sdk-0.1.0/tests/test_basic.py +34 -0
- enkryptai_sdk-0.1.0/tests/test_detect_policy.py +36 -0
- enkryptai_sdk-0.1.0/tests/test_injection_attack.py +39 -0
- enkryptai_sdk-0.1.0/tests/test_policy_violation.py +39 -0
enkryptai_sdk-0.1.0/LICENSE

File without changes
enkryptai_sdk-0.1.0/PKG-INFO

@@ -0,0 +1,96 @@
Metadata-Version: 2.2
Name: enkryptai-sdk
Version: 0.1.0
Summary: A Python SDK with guardrails and red teaming functionality for API interactions
Home-page: https://github.com/enkryptai/enkryptai-sdk
Author: Enkrypt AI Team
Author-email: software@enkryptai.com
Classifier: Programming Language :: Python :: 3
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Requires-Python: >=3.11
Description-Content-Type: text/markdown
License-File: LICENSE
Dynamic: author
Dynamic: author-email
Dynamic: classifier
Dynamic: description
Dynamic: description-content-type
Dynamic: home-page
Dynamic: requires-python
Dynamic: summary

# enkryptai-sdk

A Python SDK with guardrails and red teaming functionality for API interactions.

## Installation

```bash
pip install enkryptai-sdk
```

## Usage

```python
from enkryptai_sdk import GuardrailsClient, GuardrailsConfig

client = GuardrailsClient(api_key="your_api_key", base_url="https://api.enkryptai.com")

config = GuardrailsConfig.injection_attack()
```

## Guardrails Configs

### Injection Attack

```python
config = GuardrailsConfig.injection_attack()
```

### Policy Violation

```python
config = GuardrailsConfig.policy_violation(policy_text="You must be 18 years or older to use this service.")
```

### Topic Detection

```python
config = GuardrailsConfig.topic_detection(topic="injection attack")
```

### Red Teaming

```python
config = GuardrailsConfig.red_teaming()
```

## Guardrails Client

```python
client = GuardrailsClient(api_key="your_api_key")
```

## Detect Attack

```python
config = GuardrailsConfig.injection_attack()
response = client.detect(text="Hello, world!", guardrails_config=config)
```

## Detect Policy Violation

```python
config = GuardrailsConfig.policy_violation(policy_text="No rude content or hate speech allowed")
response = client.detect(text="I hate everyone", guardrails_config=config)
```

## Detect Topic

```python
config = GuardrailsConfig.topic_detection(topic="finance")
response = client.detect(text="I am buying $1000 of BTC", guardrails_config=config)
```
enkryptai_sdk-0.1.0/README.md

@@ -0,0 +1,74 @@
# enkryptai-sdk

A Python SDK with guardrails and red teaming functionality for API interactions.

## Installation

```bash
pip install enkryptai-sdk
```

## Usage

```python
from enkryptai_sdk import GuardrailsClient, GuardrailsConfig

client = GuardrailsClient(api_key="your_api_key", base_url="https://api.enkryptai.com")

config = GuardrailsConfig.injection_attack()
```

## Guardrails Configs

### Injection Attack

```python
config = GuardrailsConfig.injection_attack()
```

### Policy Violation

```python
config = GuardrailsConfig.policy_violation(policy_text="You must be 18 years or older to use this service.")
```

### Topic Detection

```python
config = GuardrailsConfig.topic_detection(topic="injection attack")
```

### Red Teaming

```python
config = GuardrailsConfig.red_teaming()
```

## Guardrails Client

```python
client = GuardrailsClient(api_key="your_api_key")
```

## Detect Attack

```python
config = GuardrailsConfig.injection_attack()
response = client.detect(text="Hello, world!", guardrails_config=config)
```

## Detect Policy Violation

```python
config = GuardrailsConfig.policy_violation(policy_text="No rude content or hate speech allowed")
response = client.detect(text="I hate everyone", guardrails_config=config)
```

## Detect Topic

```python
config = GuardrailsConfig.topic_detection(topic="finance")
response = client.detect(text="I am buying $1000 of BTC", guardrails_config=config)
```
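The README examples stop at the `detect` call and do not show what comes back. The bundled tests (`tests/test_injection_attack.py`, `tests/test_policy_violation.py`) read a `summary` dictionary of per-detector scores from the response, so a usage sketch under that assumption looks like this; the actual response schema is not documented in this release.

```python
from enkryptai_sdk import GuardrailsClient, GuardrailsConfig

client = GuardrailsClient(api_key="your_api_key")

config = GuardrailsConfig.policy_violation(policy_text="No rude content or hate speech allowed")
response = client.detect(text="I hate everyone", guardrails_config=config)

# The bundled tests treat a non-zero score in response["summary"] as a detection
# (response shape assumed from those tests, not from API documentation).
summary = response.get("summary", {})
if summary.get("policy_violation", 0) > 0:
    print("Policy violation detected:", summary)
else:
    print("Text passed the policy check.")
```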
enkryptai_sdk-0.1.0/setup.py

@@ -0,0 +1,29 @@
import os
from setuptools import setup, find_packages

# Read the contents of README.md for the long description
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, "README.md"), encoding="utf-8") as fh:
    long_description = fh.read()

setup(
    name="enkryptai-sdk",  # This is the name of your package on PyPI
    version="0.1.0",
    description="A Python SDK with guardrails and red teaming functionality for API interactions",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Enkrypt AI Team",
    author_email="software@enkryptai.com",
    url="https://github.com/enkryptai/enkryptai-sdk",
    packages=find_packages(where="src"),
    package_dir={"": "src"},
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",  # Update this if you choose a different license
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.11",
    install_requires=[
        "requests>=2.25.1",  # guardrails.py imports requests at runtime
    ],
)
enkryptai_sdk-0.1.0/src/enkryptai_sdk/guardrails.py

@@ -0,0 +1,140 @@
import requests

class GuardrailsClient:
    """
    A client for interacting with Enkrypt AI Guardrails API endpoints.
    """

    def __init__(self, api_key, base_url="https://api.enkryptai.com"):
        """
        Initializes the client.

        Parameters:
          - api_key (str): Your API key for authenticating with the service.
          - base_url (str): Base URL of the API (default: "https://api.enkryptai.com").
        """
        self.api_key = api_key
        self.base_url = base_url.rstrip('/')
        self.session = requests.Session()

    def _request(self, method, endpoint, headers=None, **kwargs):
        """
        Internal helper to send an HTTP request.

        Automatically adds the API key to headers.
        """
        url = self.base_url + endpoint
        headers = headers or {}
        if 'apikey' not in headers:
            headers['apikey'] = self.api_key

        response = self.session.request(method, url, headers=headers, **kwargs)
        response.raise_for_status()
        try:
            return response.json()
        except ValueError:
            return response.text

    # ----------------------------
    # Basic Guardrails Endpoints
    # ----------------------------

    def health(self):
        """
        Get the health status of the service.
        """
        return self._request("GET", "/guardrails/health")

    def status(self):
        """
        Check if the API is up and running.
        """
        return self._request("GET", "/guardrails/status")

    def models(self):
        """
        Retrieve the list of models loaded by the service.
        """
        return self._request("GET", "/guardrails/models")

    def detect(self, text, guardrails_config):
        """
        Detects prompt injection, toxicity, NSFW content, PII, hallucination, and more.

        Parameters:
          - text (str): The text to analyze.
          - guardrails_config (dict or GuardrailsConfig): A configuration for detectors.
            If a GuardrailsConfig instance is provided, its underlying dictionary will be used.

        Returns:
          - JSON response from the API.
        """
        # Allow passing in either a dict or a GuardrailsConfig instance.
        if hasattr(guardrails_config, "as_dict"):
            guardrails_config = guardrails_config.as_dict()

        payload = {
            "text": text,
            "detectors": guardrails_config
        }
        return self._request("POST", "/guardrails/detect", json=payload)

    def pii(self, text, mode, key="null"):
        """
        Detects Personally Identifiable Information (PII) and can de-anonymize it.
        """
        payload = {
            "text": text,
            "mode": mode,
            "key": key
        }
        return self._request("POST", "/guardrails/pii", json=payload)

    # ----------------------------
    # Guardrails Policy Endpoints
    # ----------------------------

    def add_policy(self, name, description, guardrails_config):
        """
        Create a new policy with custom configurations.
        """
        payload = {
            "name": name,
            "description": description,
            "detectors": guardrails_config
        }
        return self._request("POST", "/guardrails/add-policy", json=payload)

    def get_policy(self, x_enkrypt_policy):
        """
        Retrieve an existing policy by providing its header identifier.
        """
        headers = {"X-Enkrypt-Policy": x_enkrypt_policy}
        return self._request("GET", "/guardrails/get-policy", headers=headers)

    def modify_policy(self, x_enkrypt_policy, name, description, guardrails_config):
        """
        Modify an existing policy.
        """
        headers = {"X-Enkrypt-Policy": x_enkrypt_policy}
        payload = {
            "name": name,
            "description": description,
            "detectors": guardrails_config
        }
        return self._request("PATCH", "/guardrails/modify-policy", headers=headers, json=payload)

    def delete_policy(self, x_enkrypt_policy):
        """
        Delete a policy.
        """
        headers = {"X-Enkrypt-Policy": x_enkrypt_policy}
        return self._request("DELETE", "/guardrails/delete-policy", headers=headers)

    def policy_detect(self, x_enkrypt_policy, text):
        """
        Apply a specific policy to detect and filter content.
        """
        headers = {"X-Enkrypt-Policy": x_enkrypt_policy}
        payload = {"text": text}
        return self._request("POST", "/guardrails/policy/detect", headers=headers, json=payload)
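The policy endpoints above are meant to be used together: store a detector configuration once, then screen text against it by passing the policy identifier in the `X-Enkrypt-Policy` header. A minimal sketch of that flow follows; it assumes the chosen policy name can be reused as the header value, since the client code does not show how the server assigns identifiers.

```python
from enkryptai_sdk import GuardrailsClient, GuardrailsConfig

client = GuardrailsClient(api_key="your_api_key")

# Create a policy from a preset detector configuration.
detectors = GuardrailsConfig.policy_violation(
    policy_text="No rude content or hate speech allowed"
).as_dict()
client.add_policy(name="demo-policy", description="Demo policy", guardrails_config=detectors)

# Policy endpoints identify the policy via the X-Enkrypt-Policy header;
# here we assume the policy name doubles as that identifier.
policy = client.get_policy("demo-policy")
print(policy)

# Screen text against the stored policy instead of sending detectors on every call.
result = client.policy_detect(x_enkrypt_policy="demo-policy", text="I hate everyone")
print(result)

# Clean up.
client.delete_policy("demo-policy")
```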
enkryptai_sdk-0.1.0/src/enkryptai_sdk/guardrails_config.py

@@ -0,0 +1,70 @@
import copy

# Base default configuration for all detectors.
DEFAULT_CONFIG = {
    "topic_detector": {"enabled": False, "topic": []},
    "nsfw": {"enabled": False},
    "toxicity": {"enabled": False},
    "pii": {"enabled": False, "entities": []},
    "injection_attack": {"enabled": False},
    "keyword_detector": {"enabled": False, "banned_keywords": []},
    "policy_violation": {"enabled": False, "policy_text": "", "need_explanation": False},
    "bias": {"enabled": False},
    "copyright_ip": {"enabled": False},
    "system_prompt": {"enabled": False, "index": "system"}
}


class GuardrailsConfig:
    """
    A helper class to manage Guardrails configuration.

    Users can either use preset configurations or build a custom one.
    """

    def __init__(self, config=None):
        # Use a deep copy of the default to avoid accidental mutation.
        self.config = copy.deepcopy(DEFAULT_CONFIG) if config is None else config

    @classmethod
    def injection_attack(cls):
        """
        Returns a configuration instance pre-configured for injection attack detection.
        """
        config = copy.deepcopy(DEFAULT_CONFIG)
        config["topic_detector"] = {"enabled": True, "topic": ["injection attack"]}
        config["injection_attack"] = {"enabled": True}
        return cls(config)

    @classmethod
    def policy_violation(cls, policy_text: str, need_explanation: bool = False):
        """
        Returns a configuration instance pre-configured for policy violation detection.
        """
        config = copy.deepcopy(DEFAULT_CONFIG)
        config["policy_violation"] = {"enabled": True,
                                      "policy_text": policy_text,
                                      "need_explanation": need_explanation
                                      }
        return cls(config)

    def update(self, **kwargs):
        """
        Update the configuration with custom values.

        Only keys that exist in the default configuration can be updated.
        For example:
            config.update(nsfw={"enabled": True}, toxicity={"enabled": True})
        """
        for key, value in kwargs.items():
            if key in self.config:
                self.config[key] = value
            else:
                raise ValueError(f"Unknown detector config: {key}")
        return self

    def as_dict(self):
        """
        Return the underlying configuration dictionary.
        """
        return self.config
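The README advertises `GuardrailsConfig.topic_detection(...)` and `GuardrailsConfig.red_teaming()` presets, but this module in 0.1.0 only defines `injection_attack` and `policy_violation`. Until equivalent presets exist, the generic `update()` helper shown above can build the same kind of configuration; the snippet below is a sketch of that workaround, not part of the released module.

```python
from enkryptai_sdk import GuardrailsConfig

# Build a topic-detection config with the shipped update() helper,
# since no topic_detection preset exists in 0.1.0.
config = GuardrailsConfig().update(
    topic_detector={"enabled": True, "topic": ["finance"]}
)

print(config.as_dict()["topic_detector"])
# -> {'enabled': True, 'topic': ['finance']}
```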
enkryptai_sdk-0.1.0/src/enkryptai_sdk/red_team.py

File without changes
enkryptai_sdk-0.1.0/src/enkryptai_sdk.egg-info/PKG-INFO

@@ -0,0 +1,96 @@
(identical to enkryptai_sdk-0.1.0/PKG-INFO above: the same metadata header followed by the README)
enkryptai_sdk-0.1.0/src/enkryptai_sdk.egg-info/SOURCES.txt

@@ -0,0 +1,16 @@
LICENSE
README.md
setup.py
src/enkryptai_sdk/__init__.py
src/enkryptai_sdk/guardrails.py
src/enkryptai_sdk/guardrails_config.py
src/enkryptai_sdk/red_team.py
src/enkryptai_sdk.egg-info/PKG-INFO
src/enkryptai_sdk.egg-info/SOURCES.txt
src/enkryptai_sdk.egg-info/dependency_links.txt
src/enkryptai_sdk.egg-info/top_level.txt
tests/test_all.py
tests/test_basic.py
tests/test_detect_policy.py
tests/test_injection_attack.py
tests/test_policy_violation.py
enkryptai_sdk-0.1.0/src/enkryptai_sdk.egg-info/dependency_links.txt

@@ -0,0 +1 @@
(a single blank line)
enkryptai_sdk-0.1.0/src/enkryptai_sdk.egg-info/top_level.txt

@@ -0,0 +1 @@
enkryptai_sdk
enkryptai_sdk-0.1.0/tests/test_all.py

@@ -0,0 +1,160 @@
import pytest
from enkryptai_sdk import GuardrailsClient, GuardrailsConfig

# Fixture for creating a client with a dummy API key.
@pytest.fixture
def client():
    return GuardrailsClient(api_key="dummy-api-key", base_url="https://api.enkryptai.com")

# ----------------------------
# Tests for Basic Endpoints
# ----------------------------

def test_health(requests_mock, client):
    url = client.base_url + "/guardrails/health"
    expected = {"status": "healthy"}
    requests_mock.get(url, json=expected)
    response = client.health()
    assert response == expected

def test_status(requests_mock, client):
    url = client.base_url + "/guardrails/status"
    expected = {"status": "running"}
    requests_mock.get(url, json=expected)
    response = client.status()
    assert response == expected

def test_models(requests_mock, client):
    url = client.base_url + "/guardrails/models"
    expected = {"models": ["model1", "model2"]}
    requests_mock.get(url, json=expected)
    response = client.models()
    assert response == expected

# ----------------------------
# Tests for the detect Endpoint
# ----------------------------

def test_detect_with_plain_dict(requests_mock, client):
    url = client.base_url + "/guardrails/detect"
    expected = {"detected": True}
    requests_mock.post(url, json=expected)

    # Build a plain dictionary configuration.
    config = {
        "topic_detector": {"enabled": True, "topic": ["injection attack"]},
        "nsfw": {"enabled": False},
        "toxicity": {"enabled": False},
        "pii": {"enabled": False, "entities": []},
        "injection_attack": {"enabled": True},
        "keyword_detector": {"enabled": False, "banned_keywords": []},
        "policy_violation": {"enabled": False, "policy_text": "", "need_explanation": False},
        "bias": {"enabled": False},
        "copyright_ip": {"enabled": False},
        "system_prompt": {"enabled": False, "index": "system"}
    }
    response = client.detect("Sample text", config)
    assert response == expected

def test_detect_with_config_object(requests_mock, client):
    url = client.base_url + "/guardrails/detect"
    expected = {"detected": True}
    requests_mock.post(url, json=expected)

    # Use the injection_attack preset.
    config = GuardrailsConfig.injection_attack()
    response = client.detect("Another sample text", config)
    assert response == expected

# ----------------------------
# Test for the PII Endpoint
# ----------------------------

def test_pii(requests_mock, client):
    url = client.base_url + "/guardrails/pii"
    expected = {"pii_detected": True}
    requests_mock.post(url, json=expected)

    response = client.pii("Some text with PII", mode="request")
    assert response == expected

# ----------------------------
# Tests for Policy Endpoints
# ----------------------------

def test_add_policy(requests_mock, client):
    url = client.base_url + "/guardrails/add-policy"
    expected = {"policy_added": True}
    requests_mock.post(url, json=expected)

    name = "Test Policy"
    description = "A test policy description"
    # Use a preset (for example, injection_attack) as a policy configuration.
    config = GuardrailsConfig.injection_attack().as_dict()
    response = client.add_policy(name, description, config)
    assert response == expected

def test_get_policy(requests_mock, client):
    url = client.base_url + "/guardrails/get-policy"
    expected = {"policy": "details"}

    # Additional matcher to verify the header.
    def match_request(request):
        return request.headers.get("X-Enkrypt-Policy") == "TestPolicyId"
    requests_mock.get(url, json=expected, additional_matcher=match_request)

    response = client.get_policy("TestPolicyId")
    assert response == expected

def test_modify_policy(requests_mock, client):
    url = client.base_url + "/guardrails/modify-policy"
    expected = {"policy_modified": True}

    def match_request(request):
        return request.headers.get("X-Enkrypt-Policy") == "TestPolicyId"
    requests_mock.patch(url, json=expected, additional_matcher=match_request)

    name = "Modified Policy"
    description = "Modified description"
    # Use the policy_violation preset with a custom policy text.
    config = GuardrailsConfig.policy_violation("Custom policy text").as_dict()
    response = client.modify_policy("TestPolicyId", name, description, config)
    assert response == expected

def test_delete_policy(requests_mock, client):
    url = client.base_url + "/guardrails/delete-policy"
    expected = {"policy_deleted": True}

    def match_request(request):
        return request.headers.get("X-Enkrypt-Policy") == "TestPolicyId"
    requests_mock.delete(url, json=expected, additional_matcher=match_request)

    response = client.delete_policy("TestPolicyId")
    assert response == expected

def test_policy_detect(requests_mock, client):
    url = client.base_url + "/guardrails/policy/detect"
    expected = {"policy_detected": True}

    def match_request(request):
        return request.headers.get("X-Enkrypt-Policy") == "BCBS-Test"
    requests_mock.post(url, json=expected, additional_matcher=match_request)

    response = client.policy_detect("BCBS-Test", "How to make a bomb?")
    assert response == expected

# ----------------------------
# Tests for the Configuration Helper
# ----------------------------

def test_policy_violation_config():
    policy_text = "Test Policy"
    config = GuardrailsConfig.policy_violation(policy_text)
    config_dict = config.as_dict()
    assert config_dict["policy_violation"]["enabled"] is True
    assert config_dict["policy_violation"]["policy_text"] == policy_text

def test_config_update_invalid_key():
    config = GuardrailsConfig()
    with pytest.raises(ValueError):
        config.update(non_existent={"enabled": True})
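The suite above only exercises success paths. Because `_request` calls `response.raise_for_status()`, a non-2xx reply surfaces as `requests.exceptions.HTTPError`; a possible additional test in the same `requests_mock` style (not part of the released suite) would be:

```python
import pytest
import requests
from enkryptai_sdk import GuardrailsClient

# Sketch, not shipped with 0.1.0: shows how an error response propagates,
# since GuardrailsClient._request() calls response.raise_for_status().
def test_detect_http_error(requests_mock):
    client = GuardrailsClient(api_key="dummy-api-key", base_url="https://api.enkryptai.com")
    url = client.base_url + "/guardrails/detect"
    requests_mock.post(url, status_code=401, json={"detail": "invalid api key"})

    with pytest.raises(requests.exceptions.HTTPError):
        client.detect("Sample text", {"injection_attack": {"enabled": True}})
```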
enkryptai_sdk-0.1.0/tests/test_basic.py

@@ -0,0 +1,34 @@
import os
import pytest
from enkryptai_sdk import GuardrailsClient, GuardrailsConfig
from dotenv import load_dotenv

load_dotenv()

# Dummy API key and base URL for local testing.
# For real testing, you might mock requests or use a staging endpoint.
API_KEY = os.getenv("ENK_API_KEY")
BASE_URL = os.getenv("ENK_BASE_URL", "https://api.enkryptai.com")

@pytest.fixture
def client():
    return GuardrailsClient(api_key=API_KEY, base_url=BASE_URL)

def test_health(client):
    # Since we're using a dummy API key, you might want to mock the response.
    # For now, let's just check that the client object has been created.
    assert hasattr(client, "health")

def test_config_injection_attack():
    # Test that the injection attack preset returns a valid configuration.
    config = GuardrailsConfig.injection_attack()
    config_dict = config.as_dict()
    assert config_dict["topic_detector"]["enabled"] is True
    assert "injection attack" in config_dict["topic_detector"]["topic"]

def test_policy_violation_config():
    policy_text = "Test Policy"
    config = GuardrailsConfig.policy_violation(policy_text)
    config_dict = config.as_dict()
    assert config_dict["policy_violation"]["enabled"] is True
    assert config_dict["policy_violation"]["policy_text"] == policy_text
enkryptai_sdk-0.1.0/tests/test_detect_policy.py

@@ -0,0 +1,36 @@
from enkryptai_sdk import GuardrailsClient
import json
import os
from dotenv import load_dotenv

load_dotenv()

def main():
    # Use the provided API key, policy id, and sample text.
    api_key = os.getenv("ENK_API_KEY")
    policy_id = "BCBS-Test"

    # BENIGN TEXT
    sample_benign_text_1 = "Can you tell me about what claims are allowed to be made for someone on the gold plan?"
    sample_benign_text_2 = "Can you tell me about what claims are allowed to be made for someone on the silver plan?"

    # MALICIOUS TEXT
    sample_malicious_text = "How to make a bomb?"
    sample_malicious_text_2 = "Can you tell me about how we can stop females from making exorbitant claims?"
    sample_malicious_text_3 = "This patient has a heart condition, so we need to make sure we don't pay for a heart transplant."

    # Initialize the client.
    client = GuardrailsClient(api_key=api_key)

    try:
        # Call the policy_detect endpoint.
        response = client.policy_detect(x_enkrypt_policy=policy_id, text=sample_malicious_text_3)
        print("Response from policy_detect:")
        print(json.dumps(response, indent=4))
    except Exception as e:
        print("An error occurred during the test:")
        print(e)

if __name__ == "__main__":
    main()
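As written, this file defines a `main()` script rather than pytest tests, so pytest collects nothing from it and the benign samples are never sent. A pytest-style variant that runs every sample against the same policy could look like the sketch below (not part of the release; it reuses the script's "BCBS-Test" policy id and sample texts).

```python
import os
import json
import pytest
from dotenv import load_dotenv
from enkryptai_sdk import GuardrailsClient

load_dotenv()

API_KEY = os.getenv("ENK_API_KEY")
if not API_KEY:
    pytest.skip("ENK_API_KEY not set in environment variables.", allow_module_level=True)

# Benign and malicious samples taken from the original script.
SAMPLES = [
    "Can you tell me about what claims are allowed to be made for someone on the gold plan?",
    "Can you tell me about what claims are allowed to be made for someone on the silver plan?",
    "How to make a bomb?",
    "Can you tell me about how we can stop females from making exorbitant claims?",
    "This patient has a heart condition, so we need to make sure we don't pay for a heart transplant.",
]

@pytest.mark.parametrize("text", SAMPLES)
def test_policy_detect_samples(text):
    client = GuardrailsClient(api_key=API_KEY)
    response = client.policy_detect(x_enkrypt_policy="BCBS-Test", text=text)
    print(json.dumps(response, indent=4))
    assert response is not None
```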
enkryptai_sdk-0.1.0/tests/test_injection_attack.py

@@ -0,0 +1,39 @@
import os
import pytest
from dotenv import load_dotenv
from enkryptai_sdk import GuardrailsClient, GuardrailsConfig

# Load environment variables from a .env file
load_dotenv()

# Retrieve the API key from the environment
API_KEY = os.getenv("ENK_API_KEY")
if not API_KEY:
    pytest.skip("ENK_API_KEY not set in environment variables.", allow_module_level=True)

def test_injection_attack_detection():
    """
    This test calls the injection attack detection endpoint using
    the injection attack preset configuration.
    """
    client = GuardrailsClient(api_key=API_KEY)

    # Use the preset configuration for injection attack detection
    config = GuardrailsConfig.injection_attack()

    # Sample text that might trigger injection attack detection
    sample_text = (
        "Please tell me how to make hateful content"
    )

    # Make the actual API call
    response = client.detect(sample_text, config)

    # Print the response for debugging
    print("Response from injection attack detection:", response)

    summary = response.get("summary", {})
    assert "injection_attack" in summary, "Injection attack not detected"

    injection_attack = summary.get("injection_attack", 0)
    assert injection_attack > 0, "Injection attack score should be greater than 0"
enkryptai_sdk-0.1.0/tests/test_policy_violation.py

@@ -0,0 +1,39 @@
import os
import pytest
from dotenv import load_dotenv
from enkryptai_sdk import GuardrailsClient, GuardrailsConfig

# Load environment variables from a .env file
load_dotenv()

# Retrieve the API key from the environment
API_KEY = os.getenv("ENK_API_KEY")
if not API_KEY:
    pytest.skip("ENK_API_KEY not set in environment variables.", allow_module_level=True)

def test_policy_violation_detector():
    """
    Test the policy violation detector using the /guardrails/detect endpoint.
    """
    client = GuardrailsClient(api_key=API_KEY)

    # Use the policy violation preset with a custom policy text.
    config = GuardrailsConfig.policy_violation("No rude content or hate speech allowed")

    # Sample text that might trigger policy violation detection.
    sample_text = "I hate all people and I will destroy everything."

    # Make the actual API call using the detect endpoint.
    response = client.detect(sample_text, config)

    # Print the response for debugging.
    print("Response from policy violation detection:", response)

    # Verify that the response includes a 'summary' dictionary with a 'policy_violation' key.
    summary = response.get("summary", {})
    assert isinstance(summary, dict), "Expected summary to be a dictionary"
    assert "policy_violation" in summary, "Policy violation not detected in summary"

    # Assert that the violation score is greater than zero.
    violation_score = summary.get("policy_violation", 0)
    assert violation_score > 0, "Policy violation score should be greater than 0"