pangea-sdk 5.5.0b3__tar.gz → 5.5.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/PKG-INFO +4 -4
  2. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/README.md +2 -2
  3. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/__init__.py +1 -1
  4. pangea_sdk-5.5.1/pangea/asyncio/services/ai_guard.py +221 -0
  5. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/asyncio/services/prompt_guard.py +4 -9
  6. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/ai_guard.py +113 -32
  7. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/prompt_guard.py +7 -12
  8. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pyproject.toml +3 -3
  9. pangea_sdk-5.5.0b3/pangea/asyncio/services/ai_guard.py +0 -159
  10. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/asyncio/__init__.py +0 -0
  11. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/asyncio/file_uploader.py +0 -0
  12. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/asyncio/request.py +0 -0
  13. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/asyncio/services/__init__.py +0 -0
  14. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/asyncio/services/audit.py +0 -0
  15. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/asyncio/services/authn.py +0 -0
  16. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/asyncio/services/authz.py +0 -0
  17. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/asyncio/services/base.py +0 -0
  18. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/asyncio/services/embargo.py +0 -0
  19. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/asyncio/services/file_scan.py +0 -0
  20. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/asyncio/services/intel.py +0 -0
  21. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/asyncio/services/redact.py +0 -0
  22. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/asyncio/services/sanitize.py +0 -0
  23. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/asyncio/services/share.py +0 -0
  24. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/asyncio/services/vault.py +0 -0
  25. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/audit_logger.py +0 -0
  26. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/config.py +0 -0
  27. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/crypto/rsa.py +0 -0
  28. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/deep_verify.py +0 -0
  29. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/deprecated.py +0 -0
  30. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/dump_audit.py +0 -0
  31. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/exceptions.py +0 -0
  32. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/file_uploader.py +0 -0
  33. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/py.typed +0 -0
  34. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/request.py +0 -0
  35. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/response.py +0 -0
  36. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/__init__.py +0 -0
  37. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/audit/audit.py +0 -0
  38. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/audit/exceptions.py +0 -0
  39. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/audit/models.py +0 -0
  40. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/audit/signing.py +0 -0
  41. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/audit/util.py +0 -0
  42. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/authn/authn.py +0 -0
  43. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/authn/models.py +0 -0
  44. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/authz.py +0 -0
  45. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/base.py +0 -0
  46. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/embargo.py +0 -0
  47. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/file_scan.py +0 -0
  48. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/intel.py +0 -0
  49. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/redact.py +0 -0
  50. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/sanitize.py +0 -0
  51. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/share/file_format.py +0 -0
  52. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/share/share.py +0 -0
  53. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/vault/models/asymmetric.py +0 -0
  54. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/vault/models/common.py +0 -0
  55. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/vault/models/keys.py +0 -0
  56. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/vault/models/secret.py +0 -0
  57. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/vault/models/symmetric.py +0 -0
  58. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/services/vault/vault.py +0 -0
  59. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/tools.py +0 -0
  60. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/utils.py +0 -0
  61. {pangea_sdk-5.5.0b3 → pangea_sdk-5.5.1}/pangea/verify_audit.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: pangea-sdk
3
- Version: 5.5.0b3
3
+ Version: 5.5.1
4
4
  Summary: Pangea API SDK
5
5
  License: MIT
6
6
  Keywords: Pangea,SDK,Audit
@@ -9,7 +9,7 @@ Author-email: glenn.gallien@pangea.cloud
9
9
  Requires-Python: >=3.9,<4.0.0
10
10
  Classifier: Topic :: Software Development
11
11
  Classifier: Topic :: Software Development :: Libraries
12
- Requires-Dist: aiohttp (>=3.11.11,<4.0.0)
12
+ Requires-Dist: aiohttp (>=3.11.12,<4.0.0)
13
13
  Requires-Dist: cryptography (>=43.0.3,<44.0.0)
14
14
  Requires-Dist: deprecated (>=1.2.18,<2.0.0)
15
15
  Requires-Dist: google-crc32c (>=1.6.0,<2.0.0)
@@ -63,13 +63,13 @@ the same compatibility guarantees as stable releases.
63
63
  Via pip:
64
64
 
65
65
  ```bash
66
- $ pip3 install pangea-sdk==5.5.0b3
66
+ $ pip3 install pangea-sdk==5.5.0b2
67
67
  ```
68
68
 
69
69
  Via poetry:
70
70
 
71
71
  ```bash
72
- $ poetry add pangea-sdk==5.5.0b3
72
+ $ poetry add pangea-sdk==5.5.0b2
73
73
  ```
74
74
 
75
75
  ## Usage
@@ -41,13 +41,13 @@ the same compatibility guarantees as stable releases.
41
41
  Via pip:
42
42
 
43
43
  ```bash
44
- $ pip3 install pangea-sdk==5.5.0b3
44
+ $ pip3 install pangea-sdk==5.5.0b2
45
45
  ```
46
46
 
47
47
  Via poetry:
48
48
 
49
49
  ```bash
50
- $ poetry add pangea-sdk==5.5.0b3
50
+ $ poetry add pangea-sdk==5.5.0b2
51
51
  ```
52
52
 
53
53
  ## Usage
@@ -1,4 +1,4 @@
1
- __version__ = "5.5.0beta3"
1
+ __version__ = "5.5.1"
2
2
 
3
3
  from pangea.asyncio.request import PangeaRequestAsync
4
4
  from pangea.config import PangeaConfig
@@ -0,0 +1,221 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import overload
4
+
5
+ from typing_extensions import TypeVar
6
+
7
+ from pangea.asyncio.services.base import ServiceBaseAsync
8
+ from pangea.config import PangeaConfig
9
+ from pangea.response import PangeaResponse
10
+ from pangea.services.ai_guard import LogFields, TextGuardResult
11
+
12
+ _T = TypeVar("_T")
13
+
14
+
15
+ class AIGuardAsync(ServiceBaseAsync):
16
+ """AI Guard service client.
17
+
18
+ Provides methods to interact with Pangea's AI Guard service.
19
+
20
+ Examples:
21
+ from pangea import PangeaConfig
22
+ from pangea.asyncio.services import AIGuardAsync
23
+
24
+ config = PangeaConfig(domain="aws.us.pangea.cloud")
25
+ ai_guard = AIGuardAsync(token="pangea_token", config=config)
26
+ """
27
+
28
+ service_name = "ai-guard"
29
+
30
+ def __init__(
31
+ self, token: str, config: PangeaConfig | None = None, logger_name: str = "pangea", config_id: str | None = None
32
+ ) -> None:
33
+ """
34
+ AI Guard service client.
35
+
36
+ Initializes a new AI Guard client.
37
+
38
+ Args:
39
+ token: Pangea API token.
40
+ config: Pangea service configuration.
41
+ logger_name: Logger name.
42
+ config_id: Configuration ID.
43
+
44
+ Examples:
45
+ from pangea import PangeaConfig
46
+ from pangea.asyncio.services import AIGuardAsync
47
+
48
+ config = PangeaConfig(domain="aws.us.pangea.cloud")
49
+ ai_guard = AIGuardAsync(token="pangea_token", config=config)
50
+ """
51
+
52
+ super().__init__(token, config, logger_name, config_id)
53
+
54
+ @overload
55
+ async def guard_text(
56
+ self,
57
+ text: str,
58
+ *,
59
+ recipe: str | None = None,
60
+ debug: bool | None = None,
61
+ llm_info: str | None = None,
62
+ log_fields: LogFields | None = None,
63
+ ) -> PangeaResponse[TextGuardResult[None]]:
64
+ """
65
+ Text Guard for scanning LLM inputs and outputs
66
+
67
+ Analyze and redact text to avoid manipulation of the model, addition of
68
+ malicious content, and other undesirable data transfers.
69
+
70
+ OperationId: ai_guard_post_v1_text_guard
71
+
72
+ Args:
73
+ text: Text to be scanned by AI Guard for PII, sensitive data,
74
+ malicious content, and other data types defined by the
75
+ configuration. Supports processing up to 10KB of text.
76
+ recipe: Recipe key of a configuration of data types and settings
77
+ defined in the Pangea User Console. It specifies the rules that
78
+ are to be applied to the text, such as defang malicious URLs.
79
+ debug: Setting this value to true will provide a detailed analysis
80
+ of the text data
81
+ llm_info: Short string hint for the LLM Provider information
82
+ log_field: Additional fields to include in activity log
83
+
84
+ Examples:
85
+ response = await ai_guard.guard_text("text")
86
+ """
87
+
88
+ @overload
89
+ async def guard_text(
90
+ self,
91
+ *,
92
+ messages: _T,
93
+ recipe: str | None = None,
94
+ debug: bool | None = None,
95
+ llm_info: str | None = None,
96
+ log_fields: LogFields | None = None,
97
+ ) -> PangeaResponse[TextGuardResult[_T]]:
98
+ """
99
+ Text Guard for scanning LLM inputs and outputs
100
+
101
+ Analyze and redact text to avoid manipulation of the model, addition of
102
+ malicious content, and other undesirable data transfers.
103
+
104
+ OperationId: ai_guard_post_v1_text_guard
105
+
106
+ Args:
107
+ messages: Structured messages data to be scanned by AI Guard for
108
+ PII, sensitive data, malicious content, and other data types
109
+ defined by the configuration. Supports processing up to 10KB of
110
+ JSON text
111
+ recipe: Recipe key of a configuration of data types and settings
112
+ defined in the Pangea User Console. It specifies the rules that
113
+ are to be applied to the text, such as defang malicious URLs.
114
+ debug: Setting this value to true will provide a detailed analysis
115
+ of the text data
116
+ llm_info: Short string hint for the LLM Provider information
117
+ log_field: Additional fields to include in activity log
118
+
119
+ Examples:
120
+ response = await ai_guard.guard_text(messages=[{"role": "user", "content": "hello world"}])
121
+ """
122
+
123
+ @overload
124
+ async def guard_text(
125
+ self,
126
+ *,
127
+ llm_input: _T,
128
+ recipe: str | None = None,
129
+ debug: bool | None = None,
130
+ llm_info: str | None = None,
131
+ log_fields: LogFields | None = None,
132
+ ) -> PangeaResponse[TextGuardResult[_T]]:
133
+ """
134
+ Text Guard for scanning LLM inputs and outputs
135
+
136
+ Analyze and redact text to avoid manipulation of the model, addition of
137
+ malicious content, and other undesirable data transfers.
138
+
139
+ OperationId: ai_guard_post_v1_text_guard
140
+
141
+ Args:
142
+ llm_input: Structured full llm payload data to be scanned by AI
143
+ Guard for PII, sensitive data, malicious content, and other data
144
+ types defined by the configuration. Supports processing up to
145
+ 10KB of JSON text
146
+ recipe: Recipe key of a configuration of data types and settings
147
+ defined in the Pangea User Console. It specifies the rules that
148
+ are to be applied to the text, such as defang malicious URLs.
149
+ debug: Setting this value to true will provide a detailed analysis
150
+ of the text data
151
+ llm_info: Short string hint for the LLM Provider information
152
+ log_field: Additional fields to include in activity log
153
+
154
+ Examples:
155
+ response = await ai_guard.guard_text(
156
+ llm_input={"model": "gpt-4o", "messages": [{"role": "user", "content": "hello world"}]}
157
+ )
158
+ """
159
+
160
+ async def guard_text( # type: ignore[misc]
161
+ self,
162
+ text: str | None = None,
163
+ *,
164
+ messages: _T | None = None,
165
+ llm_input: _T | None = None,
166
+ recipe: str | None = None,
167
+ debug: bool | None = None,
168
+ llm_info: str | None = None,
169
+ log_fields: LogFields | None = None,
170
+ ) -> PangeaResponse[TextGuardResult[None]]:
171
+ """
172
+ Text Guard for scanning LLM inputs and outputs
173
+
174
+ Analyze and redact text to avoid manipulation of the model, addition of
175
+ malicious content, and other undesirable data transfers.
176
+
177
+ OperationId: ai_guard_post_v1_text_guard
178
+
179
+ Args:
180
+ text: Text to be scanned by AI Guard for PII, sensitive data,
181
+ malicious content, and other data types defined by the
182
+ configuration. Supports processing up to 10KB of text.
183
+ messages: Structured messages data to be scanned by AI Guard for
184
+ PII, sensitive data, malicious content, and other data types
185
+ defined by the configuration. Supports processing up to 10KB of
186
+ JSON text
187
+ llm_input: Structured full llm payload data to be scanned by AI
188
+ Guard for PII, sensitive data, malicious content, and other data
189
+ types defined by the configuration. Supports processing up to
190
+ 10KB of JSON text
191
+ recipe: Recipe key of a configuration of data types and settings
192
+ defined in the Pangea User Console. It specifies the rules that
193
+ are to be applied to the text, such as defang malicious URLs.
194
+ debug: Setting this value to true will provide a detailed analysis
195
+ of the text data
196
+ llm_info: Short string hint for the LLM Provider information
197
+ log_field: Additional fields to include in activity log
198
+
199
+ Examples:
200
+ response = await ai_guard.guard_text("text")
201
+ """
202
+
203
+ if not any((text, messages, llm_input)):
204
+ raise ValueError("Exactly one of `text`, `messages`, or `llm_input` must be given")
205
+
206
+ if sum((text is not None, messages is not None, llm_input is not None)) > 1:
207
+ raise ValueError("Only one of `text`, `messages`, or `llm_input` can be given at once")
208
+
209
+ return await self.request.post(
210
+ "v1/text/guard",
211
+ TextGuardResult,
212
+ data={
213
+ "text": text,
214
+ "messages": messages,
215
+ "llm_input": llm_input,
216
+ "recipe": recipe,
217
+ "debug": debug,
218
+ "llm_info": llm_info,
219
+ "log_fields": log_fields,
220
+ },
221
+ )
@@ -57,24 +57,19 @@ class PromptGuardAsync(ServiceBaseAsync):
57
57
  *,
58
58
  analyzers: Iterable[str] | None = None,
59
59
  classify: bool | None = None,
60
- threshold: float | None = None,
61
60
  ) -> PangeaResponse[GuardResult]:
62
61
  """
63
- Guard (Beta)
62
+ Guard
64
63
 
65
64
  Guard messages.
66
65
 
67
- How to install a [Beta release](https://pangea.cloud/docs/sdk/python/#beta-releases).
68
-
69
- OperationId: prompt_guard_post_v1beta_guard
66
+ OperationId: prompt_guard_post_v1_guard
70
67
 
71
68
  Args:
72
69
  messages: Prompt content and role array in JSON format. The
73
70
  `content` is the text that will be analyzed for redaction.
74
71
  analyzers: Specific analyzers to be used in the call
75
72
  classify: Boolean to enable classification of the content
76
- threshold: Threshold for the confidence score to consider the prompt
77
- as malicious
78
73
 
79
74
  Examples:
80
75
  from pangea.asyncio.services.prompt_guard import Message
@@ -83,7 +78,7 @@ class PromptGuardAsync(ServiceBaseAsync):
83
78
  """
84
79
 
85
80
  return await self.request.post(
86
- "v1beta/guard",
81
+ "v1/guard",
87
82
  GuardResult,
88
- data={"messages": messages, "analyzers": analyzers, "classify": classify, "threshold": threshold},
83
+ data={"messages": messages, "analyzers": analyzers, "classify": classify},
89
84
  )
@@ -3,10 +3,29 @@ from __future__ import annotations
3
3
  from typing import Any, Dict, Generic, List, Optional, TypeVar, overload
4
4
 
5
5
  from pangea.config import PangeaConfig
6
- from pangea.response import APIResponseModel, PangeaResponse, PangeaResponseResult
6
+ from pangea.response import APIRequestModel, APIResponseModel, PangeaResponse, PangeaResponseResult
7
7
  from pangea.services.base import ServiceBase
8
8
 
9
9
 
10
class LogFields(APIRequestModel):
    """Additional fields to include in activity log"""

    citations: Optional[str] = None
    """Origin or source application of the event"""

    extra_info: Optional[str] = None
    """Stores supplementary details related to the event"""

    model: Optional[str] = None
    """Model used to perform the event"""

    source: Optional[str] = None
    """IP address of user or app or agent"""

    tools: Optional[str] = None
    """Tools used to perform the event"""
+
28
+
10
29
  class AnalyzerResponse(APIResponseModel):
11
30
  analyzer: str
12
31
  confidence: float
@@ -137,20 +156,20 @@ class AIGuard(ServiceBase):
137
156
  @overload
138
157
  def guard_text(
139
158
  self,
140
- text_or_messages: str,
159
+ text: str,
141
160
  *,
142
- recipe: str = "pangea_prompt_guard",
143
- debug: bool = False,
161
+ recipe: str | None = None,
162
+ debug: bool | None = None,
163
+ llm_info: str | None = None,
164
+ log_fields: LogFields | None = None,
144
165
  ) -> PangeaResponse[TextGuardResult[None]]:
145
166
  """
146
- Text Guard for scanning LLM inputs and outputs (Beta)
167
+ Text Guard for scanning LLM inputs and outputs
147
168
 
148
169
  Analyze and redact text to avoid manipulation of the model, addition of
149
170
  malicious content, and other undesirable data transfers.
150
171
 
151
- How to install a [Beta release](https://pangea.cloud/docs/sdk/python/#beta-releases).
152
-
153
- OperationId: ai_guard_post_v1beta_text_guard
172
+ OperationId: ai_guard_post_v1_text_guard
154
173
 
155
174
  Args:
156
175
  text: Text to be scanned by AI Guard for PII, sensitive data,
@@ -161,6 +180,8 @@ class AIGuard(ServiceBase):
161
180
  are to be applied to the text, such as defang malicious URLs.
162
181
  debug: Setting this value to true will provide a detailed analysis
163
182
  of the text data
183
+ llm_info: Short string hint for the LLM Provider information
184
+ log_field: Additional fields to include in activity log
164
185
 
165
186
  Examples:
166
187
  response = ai_guard.guard_text("text")
@@ -169,74 +190,134 @@ class AIGuard(ServiceBase):
169
190
  @overload
170
191
  def guard_text(
171
192
  self,
172
- text_or_messages: _T,
173
193
  *,
174
- recipe: str = "pangea_prompt_guard",
175
- debug: bool = False,
194
+ messages: _T,
195
+ recipe: str | None = None,
196
+ debug: bool | None = None,
197
+ llm_info: str | None = None,
198
+ log_fields: LogFields | None = None,
176
199
  ) -> PangeaResponse[TextGuardResult[_T]]:
177
200
  """
178
- Text Guard for scanning LLM inputs and outputs (Beta)
201
+ Text Guard for scanning LLM inputs and outputs
179
202
 
180
203
  Analyze and redact text to avoid manipulation of the model, addition of
181
204
  malicious content, and other undesirable data transfers.
182
205
 
183
- How to install a [Beta release](https://pangea.cloud/docs/sdk/python/#beta-releases).
184
-
185
- OperationId: ai_guard_post_v1beta_text_guard
206
+ OperationId: ai_guard_post_v1_text_guard
186
207
 
187
208
  Args:
188
- text_or_messages: Structured data to be scanned by AI Guard for PII,
189
- sensitive data, malicious content, and other data types defined
190
- by the configuration. Supports processing up to 10KB of text.
209
+ messages: Structured messages data to be scanned by AI Guard for
210
+ PII, sensitive data, malicious content, and other data types
211
+ defined by the configuration. Supports processing up to 10KB of
212
+ JSON text
191
213
  recipe: Recipe key of a configuration of data types and settings
192
214
  defined in the Pangea User Console. It specifies the rules that
193
215
  are to be applied to the text, such as defang malicious URLs.
194
216
  debug: Setting this value to true will provide a detailed analysis
195
217
  of the text data
218
+ llm_info: Short string hint for the LLM Provider information
219
+ log_field: Additional fields to include in activity log
196
220
 
197
221
  Examples:
198
- response = ai_guard.guard_text([
199
- {"role": "user", "content": "hello world"}
200
- ])
222
+ response = ai_guard.guard_text(messages=[{"role": "user", "content": "hello world"}])
201
223
  """
202
224
 
225
+ @overload
203
226
  def guard_text(
204
227
  self,
205
- text_or_messages: str | _T,
206
228
  *,
207
- recipe: str = "pangea_prompt_guard",
208
- debug: bool = False,
229
+ llm_input: _T,
230
+ recipe: str | None = None,
231
+ debug: bool | None = None,
232
+ llm_info: str | None = None,
233
+ log_fields: LogFields | None = None,
209
234
  ) -> PangeaResponse[TextGuardResult[_T]]:
210
235
  """
211
- Text Guard for scanning LLM inputs and outputs (Beta)
236
+ Text Guard for scanning LLM inputs and outputs
212
237
 
213
238
  Analyze and redact text to avoid manipulation of the model, addition of
214
239
  malicious content, and other undesirable data transfers.
215
240
 
216
- How to install a [Beta release](https://pangea.cloud/docs/sdk/python/#beta-releases).
241
+ OperationId: ai_guard_post_v1_text_guard
217
242
 
218
- OperationId: ai_guard_post_v1beta_text_guard
243
+ Args:
244
+ llm_input: Structured full llm payload data to be scanned by AI
245
+ Guard for PII, sensitive data, malicious content, and other data
246
+ types defined by the configuration. Supports processing up to
247
+ 10KB of JSON text
248
+ recipe: Recipe key of a configuration of data types and settings
249
+ defined in the Pangea User Console. It specifies the rules that
250
+ are to be applied to the text, such as defang malicious URLs.
251
+ debug: Setting this value to true will provide a detailed analysis
252
+ of the text data
253
+ llm_info: Short string hint for the LLM Provider information
254
+ log_field: Additional fields to include in activity log
255
+
256
+ Examples:
257
+ response = ai_guard.guard_text(
258
+ llm_input={"model": "gpt-4o", "messages": [{"role": "user", "content": "hello world"}]}
259
+ )
260
+ """
261
+
262
+ def guard_text( # type: ignore[misc]
263
+ self,
264
+ text: str | None = None,
265
+ *,
266
+ messages: _T | None = None,
267
+ llm_input: _T | None = None,
268
+ recipe: str | None = None,
269
+ debug: bool | None = None,
270
+ llm_info: str | None = None,
271
+ log_fields: LogFields | None = None,
272
+ ) -> PangeaResponse[TextGuardResult[None]]:
273
+ """
274
+ Text Guard for scanning LLM inputs and outputs
275
+
276
+ Analyze and redact text to avoid manipulation of the model, addition of
277
+ malicious content, and other undesirable data transfers.
278
+
279
+ OperationId: ai_guard_post_v1_text_guard
219
280
 
220
281
  Args:
221
- text_or_messages: Text or structured data to be scanned by AI Guard
222
- for PII, sensitive data, malicious content, and other data types
223
- defined by the configuration. Supports processing up to 10KB of text.
282
+ text: Text to be scanned by AI Guard for PII, sensitive data,
283
+ malicious content, and other data types defined by the
284
+ configuration. Supports processing up to 10KB of text.
285
+ messages: Structured messages data to be scanned by AI Guard for
286
+ PII, sensitive data, malicious content, and other data types
287
+ defined by the configuration. Supports processing up to 10KB of
288
+ JSON text
289
+ llm_input: Structured full llm payload data to be scanned by AI
290
+ Guard for PII, sensitive data, malicious content, and other data
291
+ types defined by the configuration. Supports processing up to
292
+ 10KB of JSON text
224
293
  recipe: Recipe key of a configuration of data types and settings
225
294
  defined in the Pangea User Console. It specifies the rules that
226
295
  are to be applied to the text, such as defang malicious URLs.
227
296
  debug: Setting this value to true will provide a detailed analysis
228
297
  of the text data
298
+ llm_info: Short string hint for the LLM Provider information
299
+ log_field: Additional fields to include in activity log
229
300
 
230
301
  Examples:
231
302
  response = ai_guard.guard_text("text")
232
303
  """
233
304
 
305
+ if not any((text, messages, llm_input)):
306
+ raise ValueError("At least one of `text`, `messages`, or `llm_input` must be given")
307
+
308
+ if sum((text is not None, messages is not None, llm_input is not None)) > 1:
309
+ raise ValueError("Only one of `text`, `messages`, or `llm_input` can be given at once")
310
+
234
311
  return self.request.post(
235
- "v1beta/text/guard",
312
+ "v1/text/guard",
236
313
  TextGuardResult,
237
314
  data={
238
- "text" if isinstance(text_or_messages, str) else "messages": text_or_messages,
315
+ "text": text,
316
+ "messages": messages,
317
+ "llm_input": llm_input,
239
318
  "recipe": recipe,
240
319
  "debug": debug,
320
+ "llm_info": llm_info,
321
+ "log_fields": log_fields,
241
322
  },
242
323
  )
@@ -30,14 +30,14 @@ class GuardResult(PangeaResponseResult):
30
30
  detected: bool
31
31
  """Boolean response for if the prompt was considered malicious or not"""
32
32
 
33
- type: Optional[Literal["direct", "indirect"]] = None
33
+ type: Optional[Literal["direct", "indirect", ""]] = None
34
34
  """Type of analysis, either direct or indirect"""
35
35
 
36
36
  analyzer: Optional[str] = None
37
37
  """Prompt Analyzers for identifying and rejecting properties of prompts"""
38
38
 
39
- confidence: int
40
- """Percent of confidence in the detection result, ranging from 0 to 100"""
39
+ confidence: float
40
+ """Percent of confidence in the detection result, ranging from 0 to 1"""
41
41
 
42
42
  info: Optional[str] = None
43
43
  """Extra information about the detection result"""
@@ -91,24 +91,19 @@ class PromptGuard(ServiceBase):
91
91
  *,
92
92
  analyzers: Iterable[str] | None = None,
93
93
  classify: bool | None = None,
94
- threshold: float | None = None,
95
94
  ) -> PangeaResponse[GuardResult]:
96
95
  """
97
- Guard (Beta)
96
+ Guard
98
97
 
99
98
  Guard messages.
100
99
 
101
- How to install a [Beta release](https://pangea.cloud/docs/sdk/python/#beta-releases).
102
-
103
- OperationId: prompt_guard_post_v1beta_guard
100
+ OperationId: prompt_guard_post_v1_guard
104
101
 
105
102
  Args:
106
103
  messages: Prompt content and role array in JSON format. The
107
104
  `content` is the text that will be analyzed for redaction.
108
105
  analyzers: Specific analyzers to be used in the call
109
106
  classify: Boolean to enable classification of the content
110
- threshold: Threshold for the confidence score to consider the prompt
111
- as malicious
112
107
 
113
108
  Examples:
114
109
  from pangea.services.prompt_guard import Message
@@ -117,7 +112,7 @@ class PromptGuard(ServiceBase):
117
112
  """
118
113
 
119
114
  return self.request.post(
120
- "v1beta/guard",
115
+ "v1/guard",
121
116
  GuardResult,
122
- data={"messages": messages, "analyzers": analyzers, "classify": classify, "threshold": threshold},
117
+ data={"messages": messages, "analyzers": analyzers, "classify": classify},
123
118
  )
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "pangea-sdk"
3
- version = "5.5.0beta3"
3
+ version = "5.5.1"
4
4
  description = "Pangea API SDK"
5
5
  authors = [
6
6
  {name = "Glenn Gallien", email = "glenn.gallien@pangea.cloud"}
@@ -16,7 +16,7 @@ classifiers = [
16
16
  ]
17
17
  requires-python = ">=3.9,<4.0.0"
18
18
  dependencies = [
19
- "aiohttp (>=3.11.11,<4.0.0)",
19
+ "aiohttp (>=3.11.12,<4.0.0)",
20
20
  "cryptography (>=43.0.3,<44.0.0)",
21
21
  "deprecated (>=1.2.18,<2.0.0)",
22
22
  "google-crc32c (>=1.6.0,<2.0.0)",
@@ -36,7 +36,7 @@ packages = [
36
36
  black = "^25.1.0"
37
37
  docstring-parser = "^0.15"
38
38
  isort = "^6.0.0"
39
- mypy = "1.14.1"
39
+ mypy = "1.15.0"
40
40
  types-Deprecated = "^1.2.9.3"
41
41
  types-python-dateutil = "^2.8.19.14"
42
42
  types-requests = "^2.31.0.10"
@@ -1,159 +0,0 @@
1
- from __future__ import annotations
2
-
3
- from typing import overload
4
-
5
- from typing_extensions import TypeVar
6
-
7
- from pangea.asyncio.services.base import ServiceBaseAsync
8
- from pangea.config import PangeaConfig
9
- from pangea.response import PangeaResponse
10
- from pangea.services.ai_guard import TextGuardResult
11
-
12
- _T = TypeVar("_T")
13
-
14
-
15
- class AIGuardAsync(ServiceBaseAsync):
16
- """AI Guard service client.
17
-
18
- Provides methods to interact with Pangea's AI Guard service.
19
-
20
- Examples:
21
- from pangea import PangeaConfig
22
- from pangea.asyncio.services import AIGuardAsync
23
-
24
- config = PangeaConfig(domain="aws.us.pangea.cloud")
25
- ai_guard = AIGuardAsync(token="pangea_token", config=config)
26
- """
27
-
28
- service_name = "ai-guard"
29
-
30
- def __init__(
31
- self, token: str, config: PangeaConfig | None = None, logger_name: str = "pangea", config_id: str | None = None
32
- ) -> None:
33
- """
34
- AI Guard service client.
35
-
36
- Initializes a new AI Guard client.
37
-
38
- Args:
39
- token: Pangea API token.
40
- config: Pangea service configuration.
41
- logger_name: Logger name.
42
- config_id: Configuration ID.
43
-
44
- Examples:
45
- from pangea import PangeaConfig
46
- from pangea.asyncio.services import AIGuardAsync
47
-
48
- config = PangeaConfig(domain="aws.us.pangea.cloud")
49
- ai_guard = AIGuardAsync(token="pangea_token", config=config)
50
- """
51
-
52
- super().__init__(token, config, logger_name, config_id)
53
-
54
- @overload
55
- async def guard_text(
56
- self,
57
- text_or_messages: str,
58
- *,
59
- recipe: str = "pangea_prompt_guard",
60
- debug: bool = False,
61
- ) -> PangeaResponse[TextGuardResult[None]]:
62
- """
63
- Text Guard for scanning LLM inputs and outputs (Beta)
64
-
65
- Analyze and redact text to avoid manipulation of the model, addition of
66
- malicious content, and other undesirable data transfers.
67
-
68
- How to install a [Beta release](https://pangea.cloud/docs/sdk/python/#beta-releases).
69
-
70
- OperationId: ai_guard_post_v1beta_text_guard
71
-
72
- Args:
73
- text: Text to be scanned by AI Guard for PII, sensitive data,
74
- malicious content, and other data types defined by the
75
- configuration. Supports processing up to 10KB of text.
76
- recipe: Recipe key of a configuration of data types and settings
77
- defined in the Pangea User Console. It specifies the rules that
78
- are to be applied to the text, such as defang malicious URLs.
79
- debug: Setting this value to true will provide a detailed analysis
80
- of the text data
81
-
82
- Examples:
83
- response = await ai_guard.guard_text("text")
84
- """
85
-
86
- @overload
87
- async def guard_text(
88
- self,
89
- text_or_messages: _T,
90
- *,
91
- recipe: str = "pangea_prompt_guard",
92
- debug: bool = False,
93
- ) -> PangeaResponse[TextGuardResult[_T]]:
94
- """
95
- Text Guard for scanning LLM inputs and outputs (Beta)
96
-
97
- Analyze and redact text to avoid manipulation of the model, addition of
98
- malicious content, and other undesirable data transfers.
99
-
100
- How to install a [Beta release](https://pangea.cloud/docs/sdk/python/#beta-releases).
101
-
102
- OperationId: ai_guard_post_v1beta_text_guard
103
-
104
- Args:
105
- text_or_messages: Structured data to be scanned by AI Guard for PII,
106
- sensitive data, malicious content, and other data types defined
107
- by the configuration. Supports processing up to 10KB of text.
108
- recipe: Recipe key of a configuration of data types and settings
109
- defined in the Pangea User Console. It specifies the rules that
110
- are to be applied to the text, such as defang malicious URLs.
111
- debug: Setting this value to true will provide a detailed analysis
112
- of the text data
113
-
114
- Examples:
115
- response = await ai_guard.guard_text([
116
- {"role": "user", "content": "hello world"}
117
- ])
118
- """
119
-
120
- async def guard_text(
121
- self,
122
- text_or_messages: str | _T,
123
- *,
124
- recipe: str = "pangea_prompt_guard",
125
- debug: bool = False,
126
- ) -> PangeaResponse[TextGuardResult[_T]]:
127
- """
128
- Text Guard for scanning LLM inputs and outputs (Beta)
129
-
130
- Analyze and redact text to avoid manipulation of the model, addition of
131
- malicious content, and other undesirable data transfers.
132
-
133
- How to install a [Beta release](https://pangea.cloud/docs/sdk/python/#beta-releases).
134
-
135
- OperationId: ai_guard_post_v1beta_text_guard
136
-
137
- Args:
138
- text_or_messages: Text or structured data to be scanned by AI Guard
139
- for PII, sensitive data, malicious content, and other data types
140
- defined by the configuration. Supports processing up to 10KB of text.
141
- recipe: Recipe key of a configuration of data types and settings
142
- defined in the Pangea User Console. It specifies the rules that
143
- are to be applied to the text, such as defang malicious URLs.
144
- debug: Setting this value to true will provide a detailed analysis
145
- of the text data
146
-
147
- Examples:
148
- response = await ai_guard.guard_text("text")
149
- """
150
-
151
- return await self.request.post(
152
- "v1beta/text/guard",
153
- TextGuardResult,
154
- data={
155
- "text" if isinstance(text_or_messages, str) else "messages": text_or_messages,
156
- "recipe": recipe,
157
- "debug": debug,
158
- },
159
- )
File without changes
File without changes
File without changes