pangea-sdk 5.4.0b2__py3-none-any.whl → 5.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pangea/__init__.py +1 -1
- pangea/asyncio/services/ai_guard.py +158 -12
- pangea/asyncio/services/prompt_guard.py +16 -8
- pangea/asyncio/services/share.py +2 -2
- pangea/config.py +6 -4
- pangea/services/ai_guard.py +215 -20
- pangea/services/prompt_guard.py +19 -11
- pangea/services/share/share.py +1 -1
- {pangea_sdk-5.4.0b2.dist-info → pangea_sdk-5.5.0.dist-info}/METADATA +13 -13
- {pangea_sdk-5.4.0b2.dist-info → pangea_sdk-5.5.0.dist-info}/RECORD +11 -11
- {pangea_sdk-5.4.0b2.dist-info → pangea_sdk-5.5.0.dist-info}/WHEEL +1 -1
pangea/asyncio/services/ai_guard.py
CHANGED
@@ -1,9 +1,15 @@
 from __future__ import annotations

+from typing import overload
+
+from typing_extensions import TypeVar
+
 from pangea.asyncio.services.base import ServiceBaseAsync
 from pangea.config import PangeaConfig
 from pangea.response import PangeaResponse
-from pangea.services.ai_guard import TextGuardResult
+from pangea.services.ai_guard import LogFields, TextGuardResult
+
+_T = TypeVar("_T")


 class AIGuardAsync(ServiceBaseAsync):
@@ -45,31 +51,171 @@ class AIGuardAsync(ServiceBaseAsync):

         super().__init__(token, config, logger_name, config_id)

+    @overload
     async def guard_text(
         self,
         text: str,
         *,
-        recipe: str =
-        debug: bool =
-
+        recipe: str | None = None,
+        debug: bool | None = None,
+        llm_info: str | None = None,
+        log_fields: LogFields | None = None,
+    ) -> PangeaResponse[TextGuardResult[None]]:
+        """
+        Text Guard for scanning LLM inputs and outputs
+
+        Analyze and redact text to avoid manipulation of the model, addition of
+        malicious content, and other undesirable data transfers.
+
+        OperationId: ai_guard_post_v1_text_guard
+
+        Args:
+            text: Text to be scanned by AI Guard for PII, sensitive data,
+                malicious content, and other data types defined by the
+                configuration. Supports processing up to 10KB of text.
+            recipe: Recipe key of a configuration of data types and settings
+                defined in the Pangea User Console. It specifies the rules that
+                are to be applied to the text, such as defang malicious URLs.
+            debug: Setting this value to true will provide a detailed analysis
+                of the text data
+            llm_info: Short string hint for the LLM Provider information
+            log_field: Additional fields to include in activity log
+
+        Examples:
+            response = await ai_guard.guard_text("text")
+        """
+
+    @overload
+    async def guard_text(
+        self,
+        *,
+        messages: _T,
+        recipe: str | None = None,
+        debug: bool | None = None,
+        llm_info: str | None = None,
+        log_fields: LogFields | None = None,
+    ) -> PangeaResponse[TextGuardResult[_T]]:
+        """
+        Text Guard for scanning LLM inputs and outputs
+
+        Analyze and redact text to avoid manipulation of the model, addition of
+        malicious content, and other undesirable data transfers.
+
+        OperationId: ai_guard_post_v1_text_guard
+
+        Args:
+            messages: Structured messages data to be scanned by AI Guard for
+                PII, sensitive data, malicious content, and other data types
+                defined by the configuration. Supports processing up to 10KB of
+                JSON text
+            recipe: Recipe key of a configuration of data types and settings
+                defined in the Pangea User Console. It specifies the rules that
+                are to be applied to the text, such as defang malicious URLs.
+            debug: Setting this value to true will provide a detailed analysis
+                of the text data
+            llm_info: Short string hint for the LLM Provider information
+            log_field: Additional fields to include in activity log
+
+        Examples:
+            response = await ai_guard.guard_text(messages=[{"role": "user", "content": "hello world"}])
+        """
+
+    @overload
+    async def guard_text(
+        self,
+        *,
+        llm_input: _T,
+        recipe: str | None = None,
+        debug: bool | None = None,
+        llm_info: str | None = None,
+        log_fields: LogFields | None = None,
+    ) -> PangeaResponse[TextGuardResult[_T]]:
+        """
+        Text Guard for scanning LLM inputs and outputs
+
+        Analyze and redact text to avoid manipulation of the model, addition of
+        malicious content, and other undesirable data transfers.
+
+        OperationId: ai_guard_post_v1_text_guard
+
+        Args:
+            llm_input: Structured full llm payload data to be scanned by AI
+                Guard for PII, sensitive data, malicious content, and other data
+                types defined by the configuration. Supports processing up to
+                10KB of JSON text
+            recipe: Recipe key of a configuration of data types and settings
+                defined in the Pangea User Console. It specifies the rules that
+                are to be applied to the text, such as defang malicious URLs.
+            debug: Setting this value to true will provide a detailed analysis
+                of the text data
+            llm_info: Short string hint for the LLM Provider information
+            log_field: Additional fields to include in activity log
+
+        Examples:
+            response = await ai_guard.guard_text(
+                llm_input={"model": "gpt-4o", "messages": [{"role": "user", "content": "hello world"}]}
+            )
         """
-        Text guard (Beta)

-
+    async def guard_text(  # type: ignore[misc]
+        self,
+        text: str | None = None,
+        *,
+        messages: _T | None = None,
+        llm_input: _T | None = None,
+        recipe: str | None = None,
+        debug: bool | None = None,
+        llm_info: str | None = None,
+        log_fields: LogFields | None = None,
+    ) -> PangeaResponse[TextGuardResult[None]]:
+        """
+        Text Guard for scanning LLM inputs and outputs

-
+        Analyze and redact text to avoid manipulation of the model, addition of
+        malicious content, and other undesirable data transfers.

-        OperationId:
+        OperationId: ai_guard_post_v1_text_guard

         Args:
-            text: Text
-
-
+            text: Text to be scanned by AI Guard for PII, sensitive data,
+                malicious content, and other data types defined by the
+                configuration. Supports processing up to 10KB of text.
+            messages: Structured messages data to be scanned by AI Guard for
+                PII, sensitive data, malicious content, and other data types
+                defined by the configuration. Supports processing up to 10KB of
+                JSON text
+            llm_input: Structured full llm payload data to be scanned by AI
+                Guard for PII, sensitive data, malicious content, and other data
+                types defined by the configuration. Supports processing up to
+                10KB of JSON text
+            recipe: Recipe key of a configuration of data types and settings
+                defined in the Pangea User Console. It specifies the rules that
+                are to be applied to the text, such as defang malicious URLs.
+            debug: Setting this value to true will provide a detailed analysis
+                of the text data
+            llm_info: Short string hint for the LLM Provider information
+            log_field: Additional fields to include in activity log

         Examples:
             response = await ai_guard.guard_text("text")
         """

+        if not any((text, messages, llm_input)):
+            raise ValueError("Exactly one of `text`, `messages`, or `llm_input` must be given")
+
+        if sum((text is not None, messages is not None, llm_input is not None)) > 1:
+            raise ValueError("Only one of `text`, `messages`, or `llm_input` can be given at once")
+
         return await self.request.post(
-            "
+            "v1/text/guard",
+            TextGuardResult,
+            data={
+                "text": text,
+                "messages": messages,
+                "llm_input": llm_input,
+                "recipe": recipe,
+                "debug": debug,
+                "llm_info": llm_info,
+                "log_fields": log_fields,
+            },
         )
pangea/asyncio/services/prompt_guard.py
CHANGED
@@ -52,20 +52,24 @@ class PromptGuardAsync(ServiceBaseAsync):
         super().__init__(token, config, logger_name, config_id)

     async def guard(
-        self,
+        self,
+        messages: Iterable[Message],
+        *,
+        analyzers: Iterable[str] | None = None,
+        classify: bool | None = None,
     ) -> PangeaResponse[GuardResult]:
         """
-        Guard
+        Guard

         Guard messages.

-
-
-        OperationId: prompt_guard_post_v1beta_guard
+        OperationId: prompt_guard_post_v1_guard

         Args:
-            messages:
-
+            messages: Prompt content and role array in JSON format. The
+                `content` is the text that will be analyzed for redaction.
+            analyzers: Specific analyzers to be used in the call
+            classify: Boolean to enable classification of the content

         Examples:
             from pangea.asyncio.services.prompt_guard import Message
@@ -73,4 +77,8 @@ class PromptGuardAsync(ServiceBaseAsync):
             response = await prompt_guard.guard([Message(role="user", content="hello world")])
         """

-        return await self.request.post(
+        return await self.request.post(
+            "v1/guard",
+            GuardResult,
+            data={"messages": messages, "analyzers": analyzers, "classify": classify},
+        )
pangea/asyncio/services/share.py
CHANGED
@@ -34,7 +34,7 @@ class ShareAsync(ServiceBaseAsync):

         Examples:
             config = PangeaConfig(domain="aws.us.pangea.cloud")
-
+            share = ShareAsync(token="pangea_token", config=config)
         """

         super().__init__(token, config, logger_name, config_id=config_id)
@@ -51,7 +51,7 @@ class ShareAsync(ServiceBaseAsync):
            A PangeaResponse. Available response fields can be found in our [API documentation](https://pangea.cloud/docs/api/share).

         Examples:
-            response = share.buckets()
+            response = await share.buckets()
         """

         return await self.request.post("v1/buckets", m.BucketsResult)
pangea/config.py
CHANGED
@@ -2,7 +2,7 @@
 # Author: Pangea Cyber Corporation

 from dataclasses import dataclass
-from typing import Optional
+from typing import Literal, Optional


 @dataclass
@@ -16,10 +16,12 @@ class PangeaConfig:
     scheme (http:// or https://), subdomain, domain and port.
     """

-    environment:
+    environment: Literal["production", "local"] = "production"
     """
-
-
+    Pangea environment, used to construct service URLs.
+
+    If set to "local", then `domain` must be the full host (i.e., hostname and
+    port) for the Pangea service that this `PangeaConfig` will be used for.
     """

     config_id: Optional[str] = None
pangea/services/ai_guard.py
CHANGED
@@ -1,18 +1,38 @@
 from __future__ import annotations

-from typing import Any, Dict, Generic, List, Optional, TypeVar
+from typing import Any, Dict, Generic, List, Optional, TypeVar, overload

 from pangea.config import PangeaConfig
-from pangea.response import APIResponseModel, PangeaResponse, PangeaResponseResult
+from pangea.response import APIRequestModel, APIResponseModel, PangeaResponse, PangeaResponseResult
 from pangea.services.base import ServiceBase


+class LogFields(APIRequestModel):
+    """Additional fields to include in activity log"""
+
+    citations: Optional[str] = None
+    """Origin or source application of the event"""
+
+    extra_info: Optional[str] = None
+    """Stores supplementary details related to the event"""
+
+    model: Optional[str] = None
+    """Model used to perform the event"""
+
+    source: Optional[str] = None
+    """IP address of user or app or agent"""
+
+    tools: Optional[str] = None
+    """Tools used to perform the event"""
+
+
 class AnalyzerResponse(APIResponseModel):
     analyzer: str
     confidence: float


 class PromptInjectionResult(APIResponseModel):
+    action: str
     analyzer_responses: List[AnalyzerResponse]
     """Triggered prompt injection analyzers."""

@@ -20,7 +40,7 @@ class PromptInjectionResult(APIResponseModel):
 class PiiEntity(APIResponseModel):
     type: str
     value: str
-
+    action: str
     start_pos: Optional[int] = None


@@ -31,7 +51,7 @@ class PiiEntityResult(APIResponseModel):
 class MaliciousEntity(APIResponseModel):
     type: str
     value: str
-
+    action: str
     start_pos: Optional[int] = None
     raw: Optional[Dict[str, Any]] = None

@@ -40,23 +60,58 @@ class MaliciousEntityResult(APIResponseModel):
     entities: List[MaliciousEntity]


-
+class SecretsEntity(APIResponseModel):
+    type: str
+    value: str
+    action: str
+    start_pos: Optional[int] = None
+    redacted_value: Optional[str] = None
+
+
+class SecretsEntityResult(APIResponseModel):
+    entities: List[SecretsEntity]
+
+
+class LanguageDetectionResult(APIResponseModel):
+    language: str
+    action: str
+

+class CodeDetectionResult(APIResponseModel):
+    language: str
+    action: str

-
+
+_T = TypeVar("_T")
+
+
+class TextGuardDetector(APIResponseModel, Generic[_T]):
     detected: bool
-    data: Optional[
+    data: Optional[_T] = None


 class TextGuardDetectors(APIResponseModel):
     prompt_injection: Optional[TextGuardDetector[PromptInjectionResult]] = None
     pii_entity: Optional[TextGuardDetector[PiiEntityResult]] = None
     malicious_entity: Optional[TextGuardDetector[MaliciousEntityResult]] = None
+    secrets_detection: Optional[TextGuardDetector[SecretsEntityResult]] = None
+    profanity_and_toxicity: Optional[TextGuardDetector[Any]] = None
+    custom_entity: Optional[TextGuardDetector[Any]] = None
+    language_detection: Optional[TextGuardDetector[LanguageDetectionResult]] = None
+    code_detection: Optional[TextGuardDetector[CodeDetectionResult]] = None


-class TextGuardResult(PangeaResponseResult):
+class TextGuardResult(PangeaResponseResult, Generic[_T]):
     detectors: TextGuardDetectors
-    prompt
+    """Result of the recipe analyzing and input prompt."""
+
+    prompt_text: Optional[str] = None
+    """Updated prompt text, if applicable."""
+
+    prompt_messages: Optional[_T] = None
+    """Updated structured prompt, if applicable."""
+
+    blocked: bool


 class AIGuard(ServiceBase):
@@ -98,31 +153,171 @@ class AIGuard(ServiceBase):

         super().__init__(token, config, logger_name, config_id)

+    @overload
     def guard_text(
         self,
         text: str,
         *,
-        recipe: str =
-        debug: bool =
-
+        recipe: str | None = None,
+        debug: bool | None = None,
+        llm_info: str | None = None,
+        log_fields: LogFields | None = None,
+    ) -> PangeaResponse[TextGuardResult[None]]:
+        """
+        Text Guard for scanning LLM inputs and outputs
+
+        Analyze and redact text to avoid manipulation of the model, addition of
+        malicious content, and other undesirable data transfers.
+
+        OperationId: ai_guard_post_v1_text_guard
+
+        Args:
+            text: Text to be scanned by AI Guard for PII, sensitive data,
+                malicious content, and other data types defined by the
+                configuration. Supports processing up to 10KB of text.
+            recipe: Recipe key of a configuration of data types and settings
+                defined in the Pangea User Console. It specifies the rules that
+                are to be applied to the text, such as defang malicious URLs.
+            debug: Setting this value to true will provide a detailed analysis
+                of the text data
+            llm_info: Short string hint for the LLM Provider information
+            log_field: Additional fields to include in activity log
+
+        Examples:
+            response = ai_guard.guard_text("text")
+        """
+
+    @overload
+    def guard_text(
+        self,
+        *,
+        messages: _T,
+        recipe: str | None = None,
+        debug: bool | None = None,
+        llm_info: str | None = None,
+        log_fields: LogFields | None = None,
+    ) -> PangeaResponse[TextGuardResult[_T]]:
         """
-        Text
+        Text Guard for scanning LLM inputs and outputs
+
+        Analyze and redact text to avoid manipulation of the model, addition of
+        malicious content, and other undesirable data transfers.

-
+        OperationId: ai_guard_post_v1_text_guard
+
+        Args:
+            messages: Structured messages data to be scanned by AI Guard for
+                PII, sensitive data, malicious content, and other data types
+                defined by the configuration. Supports processing up to 10KB of
+                JSON text
+            recipe: Recipe key of a configuration of data types and settings
+                defined in the Pangea User Console. It specifies the rules that
+                are to be applied to the text, such as defang malicious URLs.
+            debug: Setting this value to true will provide a detailed analysis
+                of the text data
+            llm_info: Short string hint for the LLM Provider information
+            log_field: Additional fields to include in activity log

-
+        Examples:
+            response = ai_guard.guard_text(messages=[{"role": "user", "content": "hello world"}])
+        """
+
+    @overload
+    def guard_text(
+        self,
+        *,
+        llm_input: _T,
+        recipe: str | None = None,
+        debug: bool | None = None,
+        llm_info: str | None = None,
+        log_fields: LogFields | None = None,
+    ) -> PangeaResponse[TextGuardResult[_T]]:
+        """
+        Text Guard for scanning LLM inputs and outputs

-
+        Analyze and redact text to avoid manipulation of the model, addition of
+        malicious content, and other undesirable data transfers.
+
+        OperationId: ai_guard_post_v1_text_guard
+
+        Args:
+            llm_input: Structured full llm payload data to be scanned by AI
+                Guard for PII, sensitive data, malicious content, and other data
+                types defined by the configuration. Supports processing up to
+                10KB of JSON text
+            recipe: Recipe key of a configuration of data types and settings
+                defined in the Pangea User Console. It specifies the rules that
+                are to be applied to the text, such as defang malicious URLs.
+            debug: Setting this value to true will provide a detailed analysis
+                of the text data
+            llm_info: Short string hint for the LLM Provider information
+            log_field: Additional fields to include in activity log
+
+        Examples:
+            response = ai_guard.guard_text(
+                llm_input={"model": "gpt-4o", "messages": [{"role": "user", "content": "hello world"}]}
+            )
+        """
+
+    def guard_text(  # type: ignore[misc]
+        self,
+        text: str | None = None,
+        *,
+        messages: _T | None = None,
+        llm_input: _T | None = None,
+        recipe: str | None = None,
+        debug: bool | None = None,
+        llm_info: str | None = None,
+        log_fields: LogFields | None = None,
+    ) -> PangeaResponse[TextGuardResult[None]]:
+        """
+        Text Guard for scanning LLM inputs and outputs
+
+        Analyze and redact text to avoid manipulation of the model, addition of
+        malicious content, and other undesirable data transfers.
+
+        OperationId: ai_guard_post_v1_text_guard

         Args:
-            text: Text
-
-
+            text: Text to be scanned by AI Guard for PII, sensitive data,
+                malicious content, and other data types defined by the
+                configuration. Supports processing up to 10KB of text.
+            messages: Structured messages data to be scanned by AI Guard for
+                PII, sensitive data, malicious content, and other data types
+                defined by the configuration. Supports processing up to 10KB of
+                JSON text
+            llm_input: Structured full llm payload data to be scanned by AI
+                Guard for PII, sensitive data, malicious content, and other data
+                types defined by the configuration. Supports processing up to
+                10KB of JSON text
+            recipe: Recipe key of a configuration of data types and settings
+                defined in the Pangea User Console. It specifies the rules that
+                are to be applied to the text, such as defang malicious URLs.
+            debug: Setting this value to true will provide a detailed analysis
+                of the text data
+            llm_info: Short string hint for the LLM Provider information
+            log_field: Additional fields to include in activity log

         Examples:
             response = ai_guard.guard_text("text")
         """

+        if not any((text, messages, llm_input)):
+            raise ValueError("At least one of `text`, `messages`, or `llm_input` must be given")
+
+        if sum((text is not None, messages is not None, llm_input is not None)) > 1:
+            raise ValueError("Only one of `text`, `messages`, or `llm_input` can be given at once")
+
         return self.request.post(
-            "
+            "v1/text/guard",
+            TextGuardResult,
+            data={
+                "text": text,
+                "messages": messages,
+                "llm_input": llm_input,
+                "recipe": recipe,
+                "debug": debug,
+                "llm_info": llm_info,
+                "log_fields": log_fields,
+            },
         )
pangea/services/prompt_guard.py
CHANGED
@@ -19,8 +19,8 @@ class Classification(APIResponseModel):
     category: str
     """Classification category"""

-
-    """Classification
+    detected: bool
+    """Classification detection result"""

     confidence: float
     """Confidence score for the classification"""
@@ -30,7 +30,7 @@ class GuardResult(PangeaResponseResult):
     detected: bool
     """Boolean response for if the prompt was considered malicious or not"""

-    type: Optional[Literal["direct", "indirect"]] = None
+    type: Optional[Literal["direct", "indirect", ""]] = None
     """Type of analysis, either direct or indirect"""

     analyzer: Optional[str] = None
@@ -86,20 +86,24 @@ class PromptGuard(ServiceBase):
         super().__init__(token, config, logger_name, config_id)

     def guard(
-        self,
+        self,
+        messages: Iterable[Message],
+        *,
+        analyzers: Iterable[str] | None = None,
+        classify: bool | None = None,
     ) -> PangeaResponse[GuardResult]:
         """
-        Guard
+        Guard

         Guard messages.

-
-
-        OperationId: prompt_guard_post_v1beta_guard
+        OperationId: prompt_guard_post_v1_guard

         Args:
-            messages: Prompt content and role array.
-
+            messages: Prompt content and role array in JSON format. The
+                `content` is the text that will be analyzed for redaction.
+            analyzers: Specific analyzers to be used in the call
+            classify: Boolean to enable classification of the content

         Examples:
             from pangea.services.prompt_guard import Message
@@ -107,4 +111,8 @@ class PromptGuard(ServiceBase):
             response = prompt_guard.guard([Message(role="user", content="hello world")])
         """

-        return self.request.post(
+        return self.request.post(
+            "v1/guard",
+            GuardResult,
+            data={"messages": messages, "analyzers": analyzers, "classify": classify},
+        )
pangea/services/share/share.py
CHANGED
@@ -762,7 +762,7 @@ class Share(ServiceBase):

         Examples:
             config = PangeaConfig(domain="aws.us.pangea.cloud")
-
+            share = Share(token="pangea_token", config=config)
         """

         super().__init__(token, config, logger_name, config_id=config_id)
{pangea_sdk-5.4.0b2.dist-info → pangea_sdk-5.5.0.dist-info}/METADATA
CHANGED
@@ -1,23 +1,23 @@
 Metadata-Version: 2.3
 Name: pangea-sdk
-Version: 5.
+Version: 5.5.0
 Summary: Pangea API SDK
 License: MIT
 Keywords: Pangea,SDK,Audit
 Author: Glenn Gallien
 Author-email: glenn.gallien@pangea.cloud
-Requires-Python: >=3.9
+Requires-Python: >=3.9,<4.0.0
 Classifier: Topic :: Software Development
 Classifier: Topic :: Software Development :: Libraries
-Requires-Dist: aiohttp
-Requires-Dist: cryptography
-Requires-Dist: deprecated
-Requires-Dist: google-crc32c
-Requires-Dist: pydantic
-Requires-Dist: python-dateutil
-Requires-Dist: requests
-Requires-Dist: requests-toolbelt
-Requires-Dist: typing-extensions
+Requires-Dist: aiohttp (>=3.11.12,<4.0.0)
+Requires-Dist: cryptography (>=43.0.3,<44.0.0)
+Requires-Dist: deprecated (>=1.2.18,<2.0.0)
+Requires-Dist: google-crc32c (>=1.6.0,<2.0.0)
+Requires-Dist: pydantic (>=2.10.6,<3.0.0)
+Requires-Dist: python-dateutil (>=2.9.0.post0,<3.0.0)
+Requires-Dist: requests (>=2.32.3,<3.0.0)
+Requires-Dist: requests-toolbelt (>=1.0.0,<2.0.0)
+Requires-Dist: typing-extensions (>=4.12.2,<5.0.0)
 Description-Content-Type: text/markdown

 <a href="https://pangea.cloud?utm_source=github&utm_medium=python-sdk" target="_blank" rel="noopener noreferrer">
@@ -63,13 +63,13 @@ the same compatibility guarantees as stable releases.
 Via pip:

 ```bash
-$ pip3 install pangea-sdk==5.
+$ pip3 install pangea-sdk==5.5.0b2
 ```

 Via poetry:

 ```bash
-$ poetry add pangea-sdk==5.
+$ poetry add pangea-sdk==5.5.0b2
 ```

 ## Usage
{pangea_sdk-5.4.0b2.dist-info → pangea_sdk-5.5.0.dist-info}/RECORD
CHANGED
@@ -1,9 +1,9 @@
-pangea/__init__.py,sha256=
+pangea/__init__.py,sha256=a0kUsnqJaBtUKVnRDKXQCE_AXVp9Jiqr746sBlKYvjc,246
 pangea/asyncio/__init__.py,sha256=kjEMkqMQ521LlMSu5jn3_WgweyArwVZ2C-s3x7mR6Pk,45
 pangea/asyncio/file_uploader.py,sha256=wI7epib7Rc5jtZw4eJ1L1SlmutDG6CPv59C8N2UPhtY,1436
 pangea/asyncio/request.py,sha256=lpLY-o405r3-VUfrAE5uxYxI8UjM4hjPqUzAUtOGE5o,18040
 pangea/asyncio/services/__init__.py,sha256=L6Tdhjfx_ZECHskhLMPaCcOefi-r-imw6q_zlU4j-FY,464
-pangea/asyncio/services/ai_guard.py,sha256=
+pangea/asyncio/services/ai_guard.py,sha256=Q_Q_1xKvxXsW6jHsEDjPVHAOYYvz7bmulC5480vRc-s,8541
 pangea/asyncio/services/audit.py,sha256=rPaCx4cMzj-g9WFMRIysFCJAz6Btp6YrhcKe_exky8k,26283
 pangea/asyncio/services/authn.py,sha256=rPeLJweL8mYH_t4ebcQn4n_Wglr3kClKNnCXNCimZU4,46622
 pangea/asyncio/services/authz.py,sha256=B_0_nhDMJcjNpjpCx3Vi2LDRhlmfV9325GKbUZ8reos,10025
@@ -11,13 +11,13 @@ pangea/asyncio/services/base.py,sha256=vRFVcO_uEAGJte3OUUBLD43RoiiFB1vC7SPyN6yEM
 pangea/asyncio/services/embargo.py,sha256=ctzj3kip6xos-Eu3JuOskrCGYC8T3JlsgAopZHiPSXM,3068
 pangea/asyncio/services/file_scan.py,sha256=PLG1O-PL4Yk9uY9D6NbMrZ5LHg70Z311s7bFe46UMZA,7108
 pangea/asyncio/services/intel.py,sha256=BcxGKSoZ1nJiEHyZM9yOwKSSPJUrB6ibJ19KR27VlgQ,40261
-pangea/asyncio/services/prompt_guard.py,sha256=
+pangea/asyncio/services/prompt_guard.py,sha256=NbYt-0tRtO5VH7kLmC1lJ5JSV-ztlb9dNFaKKs_fZUM,2553
 pangea/asyncio/services/redact.py,sha256=JPJcmeKFloMZRpkjAHAZbpZJpO993WsTfEwA-S5ov18,7951
 pangea/asyncio/services/sanitize.py,sha256=EbSdq_v9yZWce9xEYWvZharE9bJcxw8cg5Pv8LVxdxc,8627
-pangea/asyncio/services/share.py,sha256=
+pangea/asyncio/services/share.py,sha256=Qd2Oh4UsLwu7Zo4Xy1KABHuP4TJ9AtcN-XzldvilFVo,30773
 pangea/asyncio/services/vault.py,sha256=VqrJGSEdq6MlZRI6cJpkthhIsqLClSQdgVxwYCbIwEk,77079
 pangea/audit_logger.py,sha256=gRkCfUUT5LDNaycwxkhZUySgY47jDfn1ZeKOul4XCQI,3842
-pangea/config.py,sha256=
+pangea/config.py,sha256=qe1ZhvDxNQxNXUpAtzF6nPLjyRpPVG9sjhLZV6Pkyn8,1766
 pangea/crypto/rsa.py,sha256=mwSiNy571KAGr3F6oEM0CXWkl9D023ch8ldbZZeLj_4,4747
 pangea/deep_verify.py,sha256=ZGraaL7TCxwRBIDqjBFR0clKlhAC-Yce6kD-1LClhG8,8616
 pangea/deprecated.py,sha256=IjFYEVvY1E0ld0SMkEYC1o62MAleX3nnT1If2dFVbHo,608
@@ -28,7 +28,7 @@ pangea/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pangea/request.py,sha256=vGB8owXUiNQoeiiACFvfXvg44JJo_L6WfcHlF6ug8co,25082
 pangea/response.py,sha256=lPAcYsF9Xg166CiyhCofVmQA-W4jevh0MQXxUa8Re68,7737
 pangea/services/__init__.py,sha256=h36HzyIGaI5kO6l3UCwKHx_Kd-m_9mYVwn5MLRVzblI,408
-pangea/services/ai_guard.py,sha256
+pangea/services/ai_guard.py,sha256=tBr3GEbrobjECs51cRR63Q8AICMl-K3tQD1FdyZkR4s,11129
 pangea/services/audit/audit.py,sha256=7-c9l7jyGtpG7SqRUMpqsAzcUDhMZ5izgPalxHXsUvM,39320
 pangea/services/audit/exceptions.py,sha256=bhVuYe4ammacOVxwg98CChxvwZf5FKgR2DcgqILOcwc,471
 pangea/services/audit/models.py,sha256=1h1B9eSYQMYG3f8WNi1UcDX2-impRrET_ErjJYUnj7M,14678
@@ -41,11 +41,11 @@ pangea/services/base.py,sha256=43pWQcR9CeT4sGzgctF3Sy4M_h7DaUzkuZD2Z7CcDUU,3845
 pangea/services/embargo.py,sha256=9Wfku4td5ORaIENKmnGmS5jxJJIRfWp6Q51L36Jsy0I,3897
 pangea/services/file_scan.py,sha256=QiO80uKqB_BnAOiYQKznXfxpa5j40qqETE3-zBRT_QE,7813
 pangea/services/intel.py,sha256=y1EX2ctYIxQc52lmHp6-Q_UIDM--t3fOpXDssWiRPfo,56474
-pangea/services/prompt_guard.py,sha256=
+pangea/services/prompt_guard.py,sha256=uMpofGKltmlNklF8znhRLuY6siyjDf-Zw-4Hwy2oJtc,3446
 pangea/services/redact.py,sha256=ovIcT0jkXe57O7keGzSClWNCic8y-4NZoemXoSKjjww,12913
 pangea/services/sanitize.py,sha256=eAN1HhObiKqygy6HHcfl0NmxYfPMvqSKepwEAVVIIEE,12936
 pangea/services/share/file_format.py,sha256=1svO1ee_aenA9zoO_AaU-Rk5Ulp7kcPOc_KwNoluyQE,2797
-pangea/services/share/share.py,sha256=
+pangea/services/share/share.py,sha256=hlhkIr6ScJ5oMFUs9no4HtHNoUEbYU4KoLkiGLxex30,52343
 pangea/services/vault/models/asymmetric.py,sha256=vspijmEvHm5WXri_fjOWfQc4maYyZfhDkLuaTM8-PZo,4991
 pangea/services/vault/models/common.py,sha256=PSZRFqHTUtEMJJGwywEFM2AU3aV8S-sbcoo3LLQ6uTc,17981
 pangea/services/vault/models/keys.py,sha256=duAuTiOby_D7MloRvN4gNj0P-b-jx9sdtplAWFxsShw,2786
@@ -55,6 +55,6 @@ pangea/services/vault/vault.py,sha256=ow-Zm7PYzfWIfUcA4UNnpeL2DHfZM4C7inRDmNR3zQ
 pangea/tools.py,sha256=2-Y4SAHWFv6Ocj42J_bWrVy27M5G3wi7a8LJn0dabHc,6427
 pangea/utils.py,sha256=dZ6MwFVEWXUgXvvDg-k6JnvVfsgslvtaBd7ez7afrqk,4983
 pangea/verify_audit.py,sha256=nSP17OzoSPdvezRExwfcf45H8ZPZnxZu-CbEp3qFJO0,17354
-pangea_sdk-5.
-pangea_sdk-5.
-pangea_sdk-5.
+pangea_sdk-5.5.0.dist-info/METADATA,sha256=wJ5S_guOmNKo7qhOLdw9m8jpXwhcbqQDOnAl3DMDKAQ,7015
+pangea_sdk-5.5.0.dist-info/WHEEL,sha256=7dDg4QLnNKTvwIDR9Ac8jJaAmBC_owJrckbC0jjThyA,88
+pangea_sdk-5.5.0.dist-info/RECORD,,