pangea-sdk 5.5.0b2__tar.gz → 5.5.0b4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/PKG-INFO +7 -7
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/README.md +2 -2
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/__init__.py +1 -1
- pangea_sdk-5.5.0b4/pangea/asyncio/services/ai_guard.py +221 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/asyncio/services/prompt_guard.py +16 -8
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/asyncio/services/share.py +2 -2
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/ai_guard.py +122 -40
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/prompt_guard.py +19 -11
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/share/share.py +1 -1
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pyproject.toml +8 -8
- pangea_sdk-5.5.0b2/pangea/asyncio/services/ai_guard.py +0 -156
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/asyncio/__init__.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/asyncio/file_uploader.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/asyncio/request.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/asyncio/services/__init__.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/asyncio/services/audit.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/asyncio/services/authn.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/asyncio/services/authz.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/asyncio/services/base.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/asyncio/services/embargo.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/asyncio/services/file_scan.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/asyncio/services/intel.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/asyncio/services/redact.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/asyncio/services/sanitize.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/asyncio/services/vault.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/audit_logger.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/config.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/crypto/rsa.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/deep_verify.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/deprecated.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/dump_audit.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/exceptions.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/file_uploader.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/py.typed +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/request.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/response.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/__init__.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/audit/audit.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/audit/exceptions.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/audit/models.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/audit/signing.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/audit/util.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/authn/authn.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/authn/models.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/authz.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/base.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/embargo.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/file_scan.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/intel.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/redact.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/sanitize.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/share/file_format.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/vault/models/asymmetric.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/vault/models/common.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/vault/models/keys.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/vault/models/secret.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/vault/models/symmetric.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/vault/vault.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/tools.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/utils.py +0 -0
- {pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/verify_audit.py +0 -0

{pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: pangea-sdk
-Version: 5.5.0b2
+Version: 5.5.0b4
 Summary: Pangea API SDK
 License: MIT
 Keywords: Pangea,SDK,Audit
@@ -11,11 +11,11 @@ Classifier: Topic :: Software Development
 Classifier: Topic :: Software Development :: Libraries
 Requires-Dist: aiohttp (>=3.11.11,<4.0.0)
 Requires-Dist: cryptography (>=43.0.3,<44.0.0)
-Requires-Dist: deprecated (>=1.2.
-Requires-Dist: google-crc32c (>=1.
-Requires-Dist: pydantic (>=2.10.
+Requires-Dist: deprecated (>=1.2.18,<2.0.0)
+Requires-Dist: google-crc32c (>=1.6.0,<2.0.0)
+Requires-Dist: pydantic (>=2.10.6,<3.0.0)
 Requires-Dist: python-dateutil (>=2.9.0.post0,<3.0.0)
-Requires-Dist: requests (>=2.
+Requires-Dist: requests (>=2.32.3,<3.0.0)
 Requires-Dist: requests-toolbelt (>=1.0.0,<2.0.0)
 Requires-Dist: typing-extensions (>=4.12.2,<5.0.0)
 Description-Content-Type: text/markdown
@@ -63,13 +63,13 @@ the same compatibility guarantees as stable releases.
 Via pip:
 
 ```bash
-$ pip3 install pangea-sdk==5.5.0b2
+$ pip3 install pangea-sdk==5.5.0b4
 ```
 
 Via poetry:
 
 ```bash
-$ poetry add pangea-sdk==5.5.0b2
+$ poetry add pangea-sdk==5.5.0b4
 ```
 
 ## Usage

{pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/README.md

@@ -41,13 +41,13 @@ the same compatibility guarantees as stable releases.
 Via pip:
 
 ```bash
-$ pip3 install pangea-sdk==5.5.0b2
+$ pip3 install pangea-sdk==5.5.0b4
 ```
 
 Via poetry:
 
 ```bash
-$ poetry add pangea-sdk==5.5.0b2
+$ poetry add pangea-sdk==5.5.0b4
 ```
 
 ## Usage

pangea_sdk-5.5.0b4/pangea/asyncio/services/ai_guard.py (added)

@@ -0,0 +1,221 @@
+from __future__ import annotations
+
+from typing import overload
+
+from typing_extensions import TypeVar
+
+from pangea.asyncio.services.base import ServiceBaseAsync
+from pangea.config import PangeaConfig
+from pangea.response import PangeaResponse
+from pangea.services.ai_guard import LogFields, TextGuardResult
+
+_T = TypeVar("_T")
+
+
+class AIGuardAsync(ServiceBaseAsync):
+    """AI Guard service client.
+
+    Provides methods to interact with Pangea's AI Guard service.
+
+    Examples:
+        from pangea import PangeaConfig
+        from pangea.asyncio.services import AIGuardAsync
+
+        config = PangeaConfig(domain="aws.us.pangea.cloud")
+        ai_guard = AIGuardAsync(token="pangea_token", config=config)
+    """
+
+    service_name = "ai-guard"
+
+    def __init__(
+        self, token: str, config: PangeaConfig | None = None, logger_name: str = "pangea", config_id: str | None = None
+    ) -> None:
+        """
+        AI Guard service client.
+
+        Initializes a new AI Guard client.
+
+        Args:
+            token: Pangea API token.
+            config: Pangea service configuration.
+            logger_name: Logger name.
+            config_id: Configuration ID.
+
+        Examples:
+            from pangea import PangeaConfig
+            from pangea.asyncio.services import AIGuardAsync
+
+            config = PangeaConfig(domain="aws.us.pangea.cloud")
+            ai_guard = AIGuardAsync(token="pangea_token", config=config)
+        """
+
+        super().__init__(token, config, logger_name, config_id)
+
+    @overload
+    async def guard_text(
+        self,
+        text: str,
+        *,
+        recipe: str | None = None,
+        debug: bool | None = None,
+        llm_info: str | None = None,
+        log_fields: LogFields | None = None,
+    ) -> PangeaResponse[TextGuardResult[None]]:
+        """
+        Text Guard for scanning LLM inputs and outputs
+
+        Analyze and redact text to avoid manipulation of the model, addition of
+        malicious content, and other undesirable data transfers.
+
+        OperationId: ai_guard_post_v1_text_guard
+
+        Args:
+            text: Text to be scanned by AI Guard for PII, sensitive data,
+                malicious content, and other data types defined by the
+                configuration. Supports processing up to 10KB of text.
+            recipe: Recipe key of a configuration of data types and settings
+                defined in the Pangea User Console. It specifies the rules that
+                are to be applied to the text, such as defang malicious URLs.
+            debug: Setting this value to true will provide a detailed analysis
+                of the text data
+            llm_info: Short string hint for the LLM Provider information
+            log_field: Additional fields to include in activity log
+
+        Examples:
+            response = await ai_guard.guard_text("text")
+        """
+
+    @overload
+    async def guard_text(
+        self,
+        *,
+        messages: _T,
+        recipe: str | None = None,
+        debug: bool | None = None,
+        llm_info: str | None = None,
+        log_fields: LogFields | None = None,
+    ) -> PangeaResponse[TextGuardResult[_T]]:
+        """
+        Text Guard for scanning LLM inputs and outputs
+
+        Analyze and redact text to avoid manipulation of the model, addition of
+        malicious content, and other undesirable data transfers.
+
+        OperationId: ai_guard_post_v1_text_guard
+
+        Args:
+            messages: Structured messages data to be scanned by AI Guard for
+                PII, sensitive data, malicious content, and other data types
+                defined by the configuration. Supports processing up to 10KB of
+                JSON text
+            recipe: Recipe key of a configuration of data types and settings
+                defined in the Pangea User Console. It specifies the rules that
+                are to be applied to the text, such as defang malicious URLs.
+            debug: Setting this value to true will provide a detailed analysis
+                of the text data
+            llm_info: Short string hint for the LLM Provider information
+            log_field: Additional fields to include in activity log
+
+        Examples:
+            response = await ai_guard.guard_text(messages=[{"role": "user", "content": "hello world"}])
+        """
+
+    @overload
+    async def guard_text(
+        self,
+        *,
+        llm_input: _T,
+        recipe: str | None = None,
+        debug: bool | None = None,
+        llm_info: str | None = None,
+        log_fields: LogFields | None = None,
+    ) -> PangeaResponse[TextGuardResult[_T]]:
+        """
+        Text Guard for scanning LLM inputs and outputs
+
+        Analyze and redact text to avoid manipulation of the model, addition of
+        malicious content, and other undesirable data transfers.
+
+        OperationId: ai_guard_post_v1_text_guard
+
+        Args:
+            llm_input: Structured full llm payload data to be scanned by AI
+                Guard for PII, sensitive data, malicious content, and other data
+                types defined by the configuration. Supports processing up to
+                10KB of JSON text
+            recipe: Recipe key of a configuration of data types and settings
+                defined in the Pangea User Console. It specifies the rules that
+                are to be applied to the text, such as defang malicious URLs.
+            debug: Setting this value to true will provide a detailed analysis
+                of the text data
+            llm_info: Short string hint for the LLM Provider information
+            log_field: Additional fields to include in activity log
+
+        Examples:
+            response = await ai_guard.guard_text(
+                llm_input={"model": "gpt-4o", "messages": [{"role": "user", "content": "hello world"}]}
+            )
+        """
+
+    async def guard_text(  # type: ignore[misc]
+        self,
+        text: str | None = None,
+        *,
+        messages: _T | None = None,
+        llm_input: _T | None = None,
+        recipe: str | None = None,
+        debug: bool | None = None,
+        llm_info: str | None = None,
+        log_fields: LogFields | None = None,
+    ) -> PangeaResponse[TextGuardResult[None]]:
+        """
+        Text Guard for scanning LLM inputs and outputs
+
+        Analyze and redact text to avoid manipulation of the model, addition of
+        malicious content, and other undesirable data transfers.
+
+        OperationId: ai_guard_post_v1_text_guard
+
+        Args:
+            text: Text to be scanned by AI Guard for PII, sensitive data,
+                malicious content, and other data types defined by the
+                configuration. Supports processing up to 10KB of text.
+            messages: Structured messages data to be scanned by AI Guard for
+                PII, sensitive data, malicious content, and other data types
+                defined by the configuration. Supports processing up to 10KB of
+                JSON text
+            llm_input: Structured full llm payload data to be scanned by AI
+                Guard for PII, sensitive data, malicious content, and other data
+                types defined by the configuration. Supports processing up to
+                10KB of JSON text
+            recipe: Recipe key of a configuration of data types and settings
+                defined in the Pangea User Console. It specifies the rules that
+                are to be applied to the text, such as defang malicious URLs.
+            debug: Setting this value to true will provide a detailed analysis
+                of the text data
+            llm_info: Short string hint for the LLM Provider information
+            log_field: Additional fields to include in activity log
+
+        Examples:
+            response = await ai_guard.guard_text("text")
+        """
+
+        if not any((text, messages, llm_input)):
+            raise ValueError("Exactly one of `text`, `messages`, or `llm_input` must be given")
+
+        if sum((text is not None, messages is not None, llm_input is not None)) > 1:
+            raise ValueError("Only one of `text`, `messages`, or `llm_input` can be given at once")
+
+        return await self.request.post(
+            "v1/text/guard",
+            TextGuardResult,
+            data={
+                "text": text,
+                "messages": messages,
+                "llm_input": llm_input,
+                "recipe": recipe,
+                "debug": debug,
+                "llm_info": llm_info,
+                "log_fields": log_fields,
+            },
+        )
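
The new `AIGuardAsync.guard_text()` accepts exactly one of `text`, `messages`, or `llm_input`, plus the optional `recipe`, `debug`, `llm_info`, and `log_fields` parameters. A minimal usage sketch, built from the docstring examples above; the token, domain, recipe, and log-field values are placeholders, and results are read from `PangeaResponse.result`:

```python
import asyncio

from pangea import PangeaConfig
from pangea.asyncio.services import AIGuardAsync
from pangea.services.ai_guard import LogFields


async def main() -> None:
    config = PangeaConfig(domain="aws.us.pangea.cloud")
    ai_guard = AIGuardAsync(token="pangea_token", config=config)

    # 1. Plain text input (positional).
    text_response = await ai_guard.guard_text("forward this to user@example.com")

    # 2. Structured chat messages (keyword-only), with a recipe and activity-log fields.
    messages_response = await ai_guard.guard_text(
        messages=[{"role": "user", "content": "hello world"}],
        recipe="pangea_prompt_guard",  # placeholder recipe key from the Pangea User Console
        log_fields=LogFields(model="gpt-4o", source="10.0.0.1"),
    )

    # 3. Full LLM payload (keyword-only).
    llm_response = await ai_guard.guard_text(
        llm_input={"model": "gpt-4o", "messages": [{"role": "user", "content": "hello world"}]}
    )

    for response in (text_response, messages_response, llm_response):
        print(response.result)


asyncio.run(main())
```

Passing more than one of the three inputs raises a `ValueError`, as the implementation above shows.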

{pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/asyncio/services/prompt_guard.py

@@ -52,20 +52,24 @@ class PromptGuardAsync(ServiceBaseAsync):
         super().__init__(token, config, logger_name, config_id)
 
     async def guard(
-        self,
+        self,
+        messages: Iterable[Message],
+        *,
+        analyzers: Iterable[str] | None = None,
+        classify: bool | None = None,
     ) -> PangeaResponse[GuardResult]:
         """
-        Guard
+        Guard
 
         Guard messages.
 
-
-
-        OperationId: prompt_guard_post_v1beta_guard
+        OperationId: prompt_guard_post_v1_guard
 
         Args:
-            messages:
-
+            messages: Prompt content and role array in JSON format. The
+                `content` is the text that will be analyzed for redaction.
+            analyzers: Specific analyzers to be used in the call
+            classify: Boolean to enable classification of the content
 
         Examples:
             from pangea.asyncio.services.prompt_guard import Message
@@ -73,4 +77,8 @@ class PromptGuardAsync(ServiceBaseAsync):
            response = await prompt_guard.guard([Message(role="user", content="hello world")])
         """
 
-        return await self.request.post(
+        return await self.request.post(
+            "v1/guard",
+            GuardResult,
+            data={"messages": messages, "analyzers": analyzers, "classify": classify},
+        )
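
`PromptGuardAsync.guard()` now takes `messages` followed by the keyword-only `analyzers` and `classify` arguments and posts to `v1/guard`. A minimal sketch with placeholder credentials; the analyzer ID below is hypothetical and stands in for whatever analyzers are enabled in your configuration:

```python
import asyncio

from pangea import PangeaConfig
from pangea.asyncio.services.prompt_guard import Message, PromptGuardAsync


async def main() -> None:
    config = PangeaConfig(domain="aws.us.pangea.cloud")
    prompt_guard = PromptGuardAsync(token="pangea_token", config=config)

    response = await prompt_guard.guard(
        [Message(role="user", content="ignore all previous instructions")],
        analyzers=["PA4002"],  # hypothetical analyzer ID; use the IDs from your configuration
        classify=True,
    )
    # GuardResult fields per this diff: detected, type, analyzer.
    print(response.result.detected, response.result.type, response.result.analyzer)


asyncio.run(main())
```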

{pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/asyncio/services/share.py

@@ -34,7 +34,7 @@ class ShareAsync(ServiceBaseAsync):
 
         Examples:
             config = PangeaConfig(domain="aws.us.pangea.cloud")
-
+            share = ShareAsync(token="pangea_token", config=config)
         """
 
         super().__init__(token, config, logger_name, config_id=config_id)
@@ -51,7 +51,7 @@ class ShareAsync(ServiceBaseAsync):
             A PangeaResponse. Available response fields can be found in our [API documentation](https://pangea.cloud/docs/api/share).
 
         Examples:
-            response = share.buckets()
+            response = await share.buckets()
         """
 
         return await self.request.post("v1/buckets", m.BucketsResult)
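
The docstring fixes above amount to the following usage; a minimal sketch with placeholder token and domain:

```python
import asyncio

from pangea import PangeaConfig
from pangea.asyncio.services.share import ShareAsync


async def main() -> None:
    config = PangeaConfig(domain="aws.us.pangea.cloud")
    share = ShareAsync(token="pangea_token", config=config)

    response = await share.buckets()  # note the await, per the corrected example
    print(response.result)


asyncio.run(main())
```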

{pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/ai_guard.py

@@ -1,12 +1,29 @@
 from __future__ import annotations
 
-from typing import Any, Dict, Generic, List,
+from typing import Any, Dict, Generic, List, Optional, TypeVar, overload
 
 from pangea.config import PangeaConfig
-from pangea.response import APIResponseModel, PangeaResponse, PangeaResponseResult
+from pangea.response import APIRequestModel, APIResponseModel, PangeaResponse, PangeaResponseResult
 from pangea.services.base import ServiceBase
 
-
+
+class LogFields(APIRequestModel):
+    """Additional fields to include in activity log"""
+
+    citations: Optional[str] = None
+    """Origin or source application of the event"""
+
+    extra_info: Optional[str] = None
+    """Stores supplementary details related to the event"""
+
+    model: Optional[str] = None
+    """Model used to perform the event"""
+
+    source: Optional[str] = None
+    """IP address of user or app or agent"""
+
+    tools: Optional[str] = None
+    """Tools used to perform the event"""
 
 
 class AnalyzerResponse(APIResponseModel):
@@ -15,7 +32,7 @@ class AnalyzerResponse(APIResponseModel):
 
 
 class PromptInjectionResult(APIResponseModel):
-    action:
+    action: str
     analyzer_responses: List[AnalyzerResponse]
     """Triggered prompt injection analyzers."""
 
@@ -23,7 +40,7 @@ class PromptInjectionResult(APIResponseModel):
 class PiiEntity(APIResponseModel):
     type: str
     value: str
-    action:
+    action: str
     start_pos: Optional[int] = None
 
 
@@ -34,7 +51,7 @@ class PiiEntityResult(APIResponseModel):
 class MaliciousEntity(APIResponseModel):
     type: str
     value: str
-    action:
+    action: str
     start_pos: Optional[int] = None
     raw: Optional[Dict[str, Any]] = None
 
@@ -46,7 +63,7 @@ class MaliciousEntityResult(APIResponseModel):
 class SecretsEntity(APIResponseModel):
     type: str
     value: str
-    action:
+    action: str
    start_pos: Optional[int] = None
     redacted_value: Optional[str] = None
 
@@ -57,12 +74,12 @@ class SecretsEntityResult(APIResponseModel):
 
 class LanguageDetectionResult(APIResponseModel):
     language: str
-    action:
+    action: str
 
 
 class CodeDetectionResult(APIResponseModel):
     language: str
-    action:
+    action: str
 
 
 _T = TypeVar("_T")
@@ -139,20 +156,20 @@ class AIGuard(ServiceBase):
     @overload
     def guard_text(
         self,
-
+        text: str,
         *,
-        recipe: str =
-        debug: bool =
+        recipe: str | None = None,
+        debug: bool | None = None,
+        llm_info: str | None = None,
+        log_fields: LogFields | None = None,
     ) -> PangeaResponse[TextGuardResult[None]]:
         """
-        Text Guard for scanning LLM inputs and outputs
+        Text Guard for scanning LLM inputs and outputs
 
         Analyze and redact text to avoid manipulation of the model, addition of
         malicious content, and other undesirable data transfers.
 
-
-
-        OperationId: ai_guard_post_v1beta_text_guard
+        OperationId: ai_guard_post_v1_text_guard
 
         Args:
             text: Text to be scanned by AI Guard for PII, sensitive data,
@@ -163,6 +180,8 @@ class AIGuard(ServiceBase):
                 are to be applied to the text, such as defang malicious URLs.
             debug: Setting this value to true will provide a detailed analysis
                 of the text data
+            llm_info: Short string hint for the LLM Provider information
+            log_field: Additional fields to include in activity log
 
         Examples:
             response = ai_guard.guard_text("text")
@@ -171,71 +190,134 @@ class AIGuard(ServiceBase):
     @overload
     def guard_text(
         self,
-        text_or_messages: _T,
         *,
-
-
+        messages: _T,
+        recipe: str | None = None,
+        debug: bool | None = None,
+        llm_info: str | None = None,
+        log_fields: LogFields | None = None,
     ) -> PangeaResponse[TextGuardResult[_T]]:
         """
-        Text Guard for scanning LLM inputs and outputs
+        Text Guard for scanning LLM inputs and outputs
 
         Analyze and redact text to avoid manipulation of the model, addition of
         malicious content, and other undesirable data transfers.
 
-
-
-        OperationId: ai_guard_post_v1beta_text_guard
+        OperationId: ai_guard_post_v1_text_guard
 
         Args:
-
-                sensitive data, malicious content, and other data types
-                by the configuration. Supports processing up to 10KB of
+            messages: Structured messages data to be scanned by AI Guard for
+                PII, sensitive data, malicious content, and other data types
+                defined by the configuration. Supports processing up to 10KB of
+                JSON text
             recipe: Recipe key of a configuration of data types and settings
                 defined in the Pangea User Console. It specifies the rules that
                 are to be applied to the text, such as defang malicious URLs.
             debug: Setting this value to true will provide a detailed analysis
                 of the text data
+            llm_info: Short string hint for the LLM Provider information
+            log_field: Additional fields to include in activity log
 
         Examples:
-            response = ai_guard.guard_text([
-                {"role": "user", "content": "hello world"}
-            ])
+            response = ai_guard.guard_text(messages=[{"role": "user", "content": "hello world"}])
         """
 
+    @overload
     def guard_text(
         self,
-        text_or_messages: str | _T,
         *,
-
-
+        llm_input: _T,
+        recipe: str | None = None,
+        debug: bool | None = None,
+        llm_info: str | None = None,
+        log_fields: LogFields | None = None,
     ) -> PangeaResponse[TextGuardResult[_T]]:
         """
-        Text Guard for scanning LLM inputs and outputs
+        Text Guard for scanning LLM inputs and outputs
 
         Analyze and redact text to avoid manipulation of the model, addition of
         malicious content, and other undesirable data transfers.
 
-
+        OperationId: ai_guard_post_v1_text_guard
 
-
+        Args:
+            llm_input: Structured full llm payload data to be scanned by AI
+                Guard for PII, sensitive data, malicious content, and other data
+                types defined by the configuration. Supports processing up to
+                10KB of JSON text
+            recipe: Recipe key of a configuration of data types and settings
+                defined in the Pangea User Console. It specifies the rules that
+                are to be applied to the text, such as defang malicious URLs.
+            debug: Setting this value to true will provide a detailed analysis
+                of the text data
+            llm_info: Short string hint for the LLM Provider information
+            log_field: Additional fields to include in activity log
+
+        Examples:
+            response = ai_guard.guard_text(
+                llm_input={"model": "gpt-4o", "messages": [{"role": "user", "content": "hello world"}]}
+            )
+        """
+
+    def guard_text(  # type: ignore[misc]
+        self,
+        text: str | None = None,
+        *,
+        messages: _T | None = None,
+        llm_input: _T | None = None,
+        recipe: str | None = None,
+        debug: bool | None = None,
+        llm_info: str | None = None,
+        log_fields: LogFields | None = None,
+    ) -> PangeaResponse[TextGuardResult[None]]:
+        """
+        Text Guard for scanning LLM inputs and outputs
+
+        Analyze and redact text to avoid manipulation of the model, addition of
+        malicious content, and other undesirable data transfers.
+
+        OperationId: ai_guard_post_v1_text_guard
 
         Args:
-
-
-
+            text: Text to be scanned by AI Guard for PII, sensitive data,
+                malicious content, and other data types defined by the
+                configuration. Supports processing up to 10KB of text.
+            messages: Structured messages data to be scanned by AI Guard for
+                PII, sensitive data, malicious content, and other data types
+                defined by the configuration. Supports processing up to 10KB of
+                JSON text
+            llm_input: Structured full llm payload data to be scanned by AI
+                Guard for PII, sensitive data, malicious content, and other data
+                types defined by the configuration. Supports processing up to
+                10KB of JSON text
             recipe: Recipe key of a configuration of data types and settings
                 defined in the Pangea User Console. It specifies the rules that
                 are to be applied to the text, such as defang malicious URLs.
             debug: Setting this value to true will provide a detailed analysis
                 of the text data
+            llm_info: Short string hint for the LLM Provider information
+            log_field: Additional fields to include in activity log
+
+        Examples:
+            response = ai_guard.guard_text("text")
         """
 
+        if not any((text, messages, llm_input)):
+            raise ValueError("At least one of `text`, `messages`, or `llm_input` must be given")
+
+        if sum((text is not None, messages is not None, llm_input is not None)) > 1:
+            raise ValueError("Only one of `text`, `messages`, or `llm_input` can be given at once")
+
         return self.request.post(
-            "
+            "v1/text/guard",
             TextGuardResult,
             data={
-                "text"
+                "text": text,
+                "messages": messages,
+                "llm_input": llm_input,
                 "recipe": recipe,
                 "debug": debug,
+                "llm_info": llm_info,
+                "log_fields": log_fields,
             },
         )
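
For the synchronous client, the same `guard_text()` changes apply, and the new `LogFields` model carries the activity-log metadata. A minimal sketch with placeholder token, domain, and field values:

```python
from pangea import PangeaConfig
from pangea.services.ai_guard import AIGuard, LogFields

config = PangeaConfig(domain="aws.us.pangea.cloud")
ai_guard = AIGuard(token="pangea_token", config=config)

response = ai_guard.guard_text(
    "forward this to user@example.com",
    debug=True,
    llm_info="gpt-4o",
    log_fields=LogFields(
        source="10.0.0.1",             # IP address of user, app, or agent
        model="gpt-4o",                # model used to perform the event
        extra_info="chat session 42",  # supplementary details for the activity log
    ),
)
print(response.result)
```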

{pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/prompt_guard.py

@@ -19,8 +19,8 @@ class Classification(APIResponseModel):
     category: str
     """Classification category"""
 
-
-    """Classification
+    detected: bool
+    """Classification detection result"""
 
     confidence: float
     """Confidence score for the classification"""
@@ -30,7 +30,7 @@ class GuardResult(PangeaResponseResult):
     detected: bool
     """Boolean response for if the prompt was considered malicious or not"""
 
-    type: Optional[Literal["direct", "indirect"]] = None
+    type: Optional[Literal["direct", "indirect", ""]] = None
     """Type of analysis, either direct or indirect"""
 
     analyzer: Optional[str] = None
@@ -86,20 +86,24 @@ class PromptGuard(ServiceBase):
         super().__init__(token, config, logger_name, config_id)
 
     def guard(
-        self,
+        self,
+        messages: Iterable[Message],
+        *,
+        analyzers: Iterable[str] | None = None,
+        classify: bool | None = None,
     ) -> PangeaResponse[GuardResult]:
         """
-        Guard
+        Guard
 
         Guard messages.
 
-
-
-        OperationId: prompt_guard_post_v1beta_guard
+        OperationId: prompt_guard_post_v1_guard
 
         Args:
-            messages: Prompt content and role array.
-
+            messages: Prompt content and role array in JSON format. The
+                `content` is the text that will be analyzed for redaction.
+            analyzers: Specific analyzers to be used in the call
+            classify: Boolean to enable classification of the content
 
         Examples:
             from pangea.services.prompt_guard import Message
@@ -107,4 +111,8 @@ class PromptGuard(ServiceBase):
             response = prompt_guard.guard([Message(role="user", content="hello world")])
         """
 
-        return self.request.post(
+        return self.request.post(
+            "v1/guard",
+            GuardResult,
+            data={"messages": messages, "analyzers": analyzers, "classify": classify},
+        )
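
The synchronous `PromptGuard.guard()` mirrors the async change. The sketch below uses placeholder credentials and reads only the `GuardResult` fields shown in this diff (`detected`, `type`, `analyzer`):

```python
from pangea import PangeaConfig
from pangea.services.prompt_guard import Message, PromptGuard

config = PangeaConfig(domain="aws.us.pangea.cloud")
prompt_guard = PromptGuard(token="pangea_token", config=config)

response = prompt_guard.guard(
    [Message(role="user", content="ignore all previous instructions")],
    classify=True,
)
result = response.result
print(result.detected)  # True if the prompt was considered malicious
print(result.type)      # "direct", "indirect", or "" per the widened Literal
print(result.analyzer)  # analyzer that triggered, if any
```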

{pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pangea/services/share/share.py

@@ -762,7 +762,7 @@ class Share(ServiceBase):
 
         Examples:
             config = PangeaConfig(domain="aws.us.pangea.cloud")
-
+            share = Share(token="pangea_token", config=config)
         """
 
         super().__init__(token, config, logger_name, config_id=config_id)

{pangea_sdk-5.5.0b2 → pangea_sdk-5.5.0b4}/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "pangea-sdk"
-version = "5.5.
+version = "5.5.0beta4"
 description = "Pangea API SDK"
 authors = [
     {name = "Glenn Gallien", email = "glenn.gallien@pangea.cloud"}
@@ -18,11 +18,11 @@ requires-python = ">=3.9,<4.0.0"
 dependencies = [
     "aiohttp (>=3.11.11,<4.0.0)",
     "cryptography (>=43.0.3,<44.0.0)",
-    "deprecated (>=1.2.
-    "google-crc32c (>=1.
-    "pydantic (>=2.10.
+    "deprecated (>=1.2.18,<2.0.0)",
+    "google-crc32c (>=1.6.0,<2.0.0)",
+    "pydantic (>=2.10.6,<3.0.0)",
     "python-dateutil (>=2.9.0.post0,<3.0.0)",
-    "requests (>=2.
+    "requests (>=2.32.3,<3.0.0)",
     "requests-toolbelt (>=1.0.0,<2.0.0)",
     "typing-extensions (>=4.12.2,<5.0.0)"
 ]
@@ -33,10 +33,10 @@ packages = [
 ]
 
 [tool.poetry.group.dev.dependencies]
-black = "^
+black = "^25.1.0"
 docstring-parser = "^0.15"
-isort = "^
-mypy = "1.
+isort = "^6.0.0"
+mypy = "1.15.0"
 types-Deprecated = "^1.2.9.3"
 types-python-dateutil = "^2.8.19.14"
 types-requests = "^2.31.0.10"

pangea_sdk-5.5.0b2/pangea/asyncio/services/ai_guard.py (deleted)

@@ -1,156 +0,0 @@
-from __future__ import annotations
-
-from typing import overload
-
-from typing_extensions import TypeVar
-
-from pangea.asyncio.services.base import ServiceBaseAsync
-from pangea.config import PangeaConfig
-from pangea.response import PangeaResponse
-from pangea.services.ai_guard import TextGuardResult
-
-_T = TypeVar("_T")
-
-
-class AIGuardAsync(ServiceBaseAsync):
-    """AI Guard service client.
-
-    Provides methods to interact with Pangea's AI Guard service.
-
-    Examples:
-        from pangea import PangeaConfig
-        from pangea.asyncio.services import AIGuardAsync
-
-        config = PangeaConfig(domain="aws.us.pangea.cloud")
-        ai_guard = AIGuardAsync(token="pangea_token", config=config)
-    """
-
-    service_name = "ai-guard"
-
-    def __init__(
-        self, token: str, config: PangeaConfig | None = None, logger_name: str = "pangea", config_id: str | None = None
-    ) -> None:
-        """
-        AI Guard service client.
-
-        Initializes a new AI Guard client.
-
-        Args:
-            token: Pangea API token.
-            config: Pangea service configuration.
-            logger_name: Logger name.
-            config_id: Configuration ID.
-
-        Examples:
-            from pangea import PangeaConfig
-            from pangea.asyncio.services import AIGuardAsync
-
-            config = PangeaConfig(domain="aws.us.pangea.cloud")
-            ai_guard = AIGuardAsync(token="pangea_token", config=config)
-        """
-
-        super().__init__(token, config, logger_name, config_id)
-
-    @overload
-    async def guard_text(
-        self,
-        text_or_messages: str,
-        *,
-        recipe: str = "pangea_prompt_guard",
-        debug: bool = False,
-    ) -> PangeaResponse[TextGuardResult[None]]:
-        """
-        Text Guard for scanning LLM inputs and outputs (Beta)
-
-        Analyze and redact text to avoid manipulation of the model, addition of
-        malicious content, and other undesirable data transfers.
-
-        How to install a [Beta release](https://pangea.cloud/docs/sdk/python/#beta-releases).
-
-        OperationId: ai_guard_post_v1beta_text_guard
-
-        Args:
-            text: Text to be scanned by AI Guard for PII, sensitive data,
-                malicious content, and other data types defined by the
-                configuration. Supports processing up to 10KB of text.
-            recipe: Recipe key of a configuration of data types and settings
-                defined in the Pangea User Console. It specifies the rules that
-                are to be applied to the text, such as defang malicious URLs.
-            debug: Setting this value to true will provide a detailed analysis
-                of the text data
-
-        Examples:
-            response = await ai_guard.guard_text("text")
-        """
-
-    @overload
-    async def guard_text(
-        self,
-        text_or_messages: _T,
-        *,
-        recipe: str = "pangea_prompt_guard",
-        debug: bool = False,
-    ) -> PangeaResponse[TextGuardResult[_T]]:
-        """
-        Text Guard for scanning LLM inputs and outputs (Beta)
-
-        Analyze and redact text to avoid manipulation of the model, addition of
-        malicious content, and other undesirable data transfers.
-
-        How to install a [Beta release](https://pangea.cloud/docs/sdk/python/#beta-releases).
-
-        OperationId: ai_guard_post_v1beta_text_guard
-
-        Args:
-            text_or_messages: Structured data to be scanned by AI Guard for PII,
-                sensitive data, malicious content, and other data types defined
-                by the configuration. Supports processing up to 10KB of text.
-            recipe: Recipe key of a configuration of data types and settings
-                defined in the Pangea User Console. It specifies the rules that
-                are to be applied to the text, such as defang malicious URLs.
-            debug: Setting this value to true will provide a detailed analysis
-                of the text data
-
-        Examples:
-            response = await ai_guard.guard_text([
-                {"role": "user", "content": "hello world"}
-            ])
-        """
-
-    async def guard_text(
-        self,
-        text_or_messages: str | _T,
-        *,
-        recipe: str = "pangea_prompt_guard",
-        debug: bool = False,
-    ) -> PangeaResponse[TextGuardResult[_T]]:
-        """
-        Text Guard for scanning LLM inputs and outputs (Beta)
-
-        Analyze and redact text to avoid manipulation of the model, addition of
-        malicious content, and other undesirable data transfers.
-
-        How to install a [Beta release](https://pangea.cloud/docs/sdk/python/#beta-releases).
-
-        OperationId: ai_guard_post_v1beta_text_guard
-
-        Args:
-            text_or_messages: Text or structured data to be scanned by AI Guard
-                for PII, sensitive data, malicious content, and other data types
-                defined by the configuration. Supports processing up to 10KB of text.
-            recipe: Recipe key of a configuration of data types and settings
-                defined in the Pangea User Console. It specifies the rules that
-                are to be applied to the text, such as defang malicious URLs.
-            debug: Setting this value to true will provide a detailed analysis
-                of the text data
-        """
-
-        return await self.request.post(
-            "v1beta/text/guard",
-            TextGuardResult,
-            data={
-                "text" if isinstance(text_or_messages, str) else "messages": text_or_messages,
-                "recipe": recipe,
-                "debug": debug,
-            },
-        )

All remaining files (listed above with +0 -0) are unchanged between 5.5.0b2 and 5.5.0b4.