pangea-sdk 5.5.0b3__py3-none-any.whl → 5.5.0b4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pangea/__init__.py CHANGED
@@ -1,4 +1,4 @@
1
- __version__ = "5.5.0beta3"
1
+ __version__ = "5.5.0beta4"
2
2
 
3
3
  from pangea.asyncio.request import PangeaRequestAsync
4
4
  from pangea.config import PangeaConfig
@@ -7,7 +7,7 @@ from typing_extensions import TypeVar
7
7
  from pangea.asyncio.services.base import ServiceBaseAsync
8
8
  from pangea.config import PangeaConfig
9
9
  from pangea.response import PangeaResponse
10
- from pangea.services.ai_guard import TextGuardResult
10
+ from pangea.services.ai_guard import LogFields, TextGuardResult
11
11
 
12
12
  _T = TypeVar("_T")
13
13
 
@@ -54,20 +54,20 @@ class AIGuardAsync(ServiceBaseAsync):
54
54
  @overload
55
55
  async def guard_text(
56
56
  self,
57
- text_or_messages: str,
57
+ text: str,
58
58
  *,
59
- recipe: str = "pangea_prompt_guard",
60
- debug: bool = False,
59
+ recipe: str | None = None,
60
+ debug: bool | None = None,
61
+ llm_info: str | None = None,
62
+ log_fields: LogFields | None = None,
61
63
  ) -> PangeaResponse[TextGuardResult[None]]:
62
64
  """
63
- Text Guard for scanning LLM inputs and outputs (Beta)
65
+ Text Guard for scanning LLM inputs and outputs
64
66
 
65
67
  Analyze and redact text to avoid manipulation of the model, addition of
66
68
  malicious content, and other undesirable data transfers.
67
69
 
68
- How to install a [Beta release](https://pangea.cloud/docs/sdk/python/#beta-releases).
69
-
70
- OperationId: ai_guard_post_v1beta_text_guard
70
+ OperationId: ai_guard_post_v1_text_guard
71
71
 
72
72
  Args:
73
73
  text: Text to be scanned by AI Guard for PII, sensitive data,
@@ -78,6 +78,8 @@ class AIGuardAsync(ServiceBaseAsync):
78
78
  are to be applied to the text, such as defang malicious URLs.
79
79
  debug: Setting this value to true will provide a detailed analysis
80
80
  of the text data
81
+ llm_info: Short string hint for the LLM Provider information
82
+ log_fields: Additional fields to include in activity log
81
83
 
82
84
  Examples:
83
85
  response = await ai_guard.guard_text("text")
@@ -86,74 +88,134 @@ class AIGuardAsync(ServiceBaseAsync):
86
88
  @overload
87
89
  async def guard_text(
88
90
  self,
89
- text_or_messages: _T,
90
91
  *,
91
- recipe: str = "pangea_prompt_guard",
92
- debug: bool = False,
92
+ messages: _T,
93
+ recipe: str | None = None,
94
+ debug: bool | None = None,
95
+ llm_info: str | None = None,
96
+ log_fields: LogFields | None = None,
93
97
  ) -> PangeaResponse[TextGuardResult[_T]]:
94
98
  """
95
- Text Guard for scanning LLM inputs and outputs (Beta)
99
+ Text Guard for scanning LLM inputs and outputs
96
100
 
97
101
  Analyze and redact text to avoid manipulation of the model, addition of
98
102
  malicious content, and other undesirable data transfers.
99
103
 
100
- How to install a [Beta release](https://pangea.cloud/docs/sdk/python/#beta-releases).
101
-
102
- OperationId: ai_guard_post_v1beta_text_guard
104
+ OperationId: ai_guard_post_v1_text_guard
103
105
 
104
106
  Args:
105
- text_or_messages: Structured data to be scanned by AI Guard for PII,
106
- sensitive data, malicious content, and other data types defined
107
- by the configuration. Supports processing up to 10KB of text.
107
+ messages: Structured messages data to be scanned by AI Guard for
108
+ PII, sensitive data, malicious content, and other data types
109
+ defined by the configuration. Supports processing up to 10KB of
110
+ JSON text
108
111
  recipe: Recipe key of a configuration of data types and settings
109
112
  defined in the Pangea User Console. It specifies the rules that
110
113
  are to be applied to the text, such as defang malicious URLs.
111
114
  debug: Setting this value to true will provide a detailed analysis
112
115
  of the text data
116
+ llm_info: Short string hint for the LLM Provider information
117
+ log_fields: Additional fields to include in activity log
113
118
 
114
119
  Examples:
115
- response = await ai_guard.guard_text([
116
- {"role": "user", "content": "hello world"}
117
- ])
120
+ response = await ai_guard.guard_text(messages=[{"role": "user", "content": "hello world"}])
118
121
  """
119
122
 
123
+ @overload
120
124
  async def guard_text(
121
125
  self,
122
- text_or_messages: str | _T,
123
126
  *,
124
- recipe: str = "pangea_prompt_guard",
125
- debug: bool = False,
127
+ llm_input: _T,
128
+ recipe: str | None = None,
129
+ debug: bool | None = None,
130
+ llm_info: str | None = None,
131
+ log_fields: LogFields | None = None,
126
132
  ) -> PangeaResponse[TextGuardResult[_T]]:
127
133
  """
128
- Text Guard for scanning LLM inputs and outputs (Beta)
134
+ Text Guard for scanning LLM inputs and outputs
129
135
 
130
136
  Analyze and redact text to avoid manipulation of the model, addition of
131
137
  malicious content, and other undesirable data transfers.
132
138
 
133
- How to install a [Beta release](https://pangea.cloud/docs/sdk/python/#beta-releases).
139
+ OperationId: ai_guard_post_v1_text_guard
140
+
141
+ Args:
142
+ llm_input: Structured full llm payload data to be scanned by AI
143
+ Guard for PII, sensitive data, malicious content, and other data
144
+ types defined by the configuration. Supports processing up to
145
+ 10KB of JSON text
146
+ recipe: Recipe key of a configuration of data types and settings
147
+ defined in the Pangea User Console. It specifies the rules that
148
+ are to be applied to the text, such as defang malicious URLs.
149
+ debug: Setting this value to true will provide a detailed analysis
150
+ of the text data
151
+ llm_info: Short string hint for the LLM Provider information
152
+ log_fields: Additional fields to include in activity log
153
+
154
+ Examples:
155
+ response = await ai_guard.guard_text(
156
+ llm_input={"model": "gpt-4o", "messages": [{"role": "user", "content": "hello world"}]}
157
+ )
158
+ """
159
+
160
+ async def guard_text( # type: ignore[misc]
161
+ self,
162
+ text: str | None = None,
163
+ *,
164
+ messages: _T | None = None,
165
+ llm_input: _T | None = None,
166
+ recipe: str | None = None,
167
+ debug: bool | None = None,
168
+ llm_info: str | None = None,
169
+ log_fields: LogFields | None = None,
170
+ ) -> PangeaResponse[TextGuardResult[None]]:
171
+ """
172
+ Text Guard for scanning LLM inputs and outputs
134
173
 
135
- OperationId: ai_guard_post_v1beta_text_guard
174
+ Analyze and redact text to avoid manipulation of the model, addition of
175
+ malicious content, and other undesirable data transfers.
176
+
177
+ OperationId: ai_guard_post_v1_text_guard
136
178
 
137
179
  Args:
138
- text_or_messages: Text or structured data to be scanned by AI Guard
139
- for PII, sensitive data, malicious content, and other data types
140
- defined by the configuration. Supports processing up to 10KB of text.
180
+ text: Text to be scanned by AI Guard for PII, sensitive data,
181
+ malicious content, and other data types defined by the
182
+ configuration. Supports processing up to 10KB of text.
183
+ messages: Structured messages data to be scanned by AI Guard for
184
+ PII, sensitive data, malicious content, and other data types
185
+ defined by the configuration. Supports processing up to 10KB of
186
+ JSON text
187
+ llm_input: Structured full llm payload data to be scanned by AI
188
+ Guard for PII, sensitive data, malicious content, and other data
189
+ types defined by the configuration. Supports processing up to
190
+ 10KB of JSON text
141
191
  recipe: Recipe key of a configuration of data types and settings
142
192
  defined in the Pangea User Console. It specifies the rules that
143
193
  are to be applied to the text, such as defang malicious URLs.
144
194
  debug: Setting this value to true will provide a detailed analysis
145
195
  of the text data
196
+ llm_info: Short string hint for the LLM Provider information
197
+ log_fields: Additional fields to include in activity log
146
198
 
147
199
  Examples:
148
200
  response = await ai_guard.guard_text("text")
149
201
  """
150
202
 
203
+ if not any((text, messages, llm_input)):
204
+ raise ValueError("Exactly one of `text`, `messages`, or `llm_input` must be given")
205
+
206
+ if sum((text is not None, messages is not None, llm_input is not None)) > 1:
207
+ raise ValueError("Only one of `text`, `messages`, or `llm_input` can be given at once")
208
+
151
209
  return await self.request.post(
152
- "v1beta/text/guard",
210
+ "v1/text/guard",
153
211
  TextGuardResult,
154
212
  data={
155
- "text" if isinstance(text_or_messages, str) else "messages": text_or_messages,
213
+ "text": text,
214
+ "messages": messages,
215
+ "llm_input": llm_input,
156
216
  "recipe": recipe,
157
217
  "debug": debug,
218
+ "llm_info": llm_info,
219
+ "log_fields": log_fields,
158
220
  },
159
221
  )
@@ -57,24 +57,19 @@ class PromptGuardAsync(ServiceBaseAsync):
57
57
  *,
58
58
  analyzers: Iterable[str] | None = None,
59
59
  classify: bool | None = None,
60
- threshold: float | None = None,
61
60
  ) -> PangeaResponse[GuardResult]:
62
61
  """
63
- Guard (Beta)
62
+ Guard
64
63
 
65
64
  Guard messages.
66
65
 
67
- How to install a [Beta release](https://pangea.cloud/docs/sdk/python/#beta-releases).
68
-
69
- OperationId: prompt_guard_post_v1beta_guard
66
+ OperationId: prompt_guard_post_v1_guard
70
67
 
71
68
  Args:
72
69
  messages: Prompt content and role array in JSON format. The
73
70
  `content` is the text that will be analyzed for redaction.
74
71
  analyzers: Specific analyzers to be used in the call
75
72
  classify: Boolean to enable classification of the content
76
- threshold: Threshold for the confidence score to consider the prompt
77
- as malicious
78
73
 
79
74
  Examples:
80
75
  from pangea.asyncio.services.prompt_guard import Message
@@ -83,7 +78,7 @@ class PromptGuardAsync(ServiceBaseAsync):
83
78
  """
84
79
 
85
80
  return await self.request.post(
86
- "v1beta/guard",
81
+ "v1/guard",
87
82
  GuardResult,
88
- data={"messages": messages, "analyzers": analyzers, "classify": classify, "threshold": threshold},
83
+ data={"messages": messages, "analyzers": analyzers, "classify": classify},
89
84
  )
@@ -3,10 +3,29 @@ from __future__ import annotations
3
3
  from typing import Any, Dict, Generic, List, Optional, TypeVar, overload
4
4
 
5
5
  from pangea.config import PangeaConfig
6
- from pangea.response import APIResponseModel, PangeaResponse, PangeaResponseResult
6
+ from pangea.response import APIRequestModel, APIResponseModel, PangeaResponse, PangeaResponseResult
7
7
  from pangea.services.base import ServiceBase
8
8
 
9
9
 
10
+ class LogFields(APIRequestModel):
11
+ """Additional fields to include in activity log"""
12
+
13
+ citations: Optional[str] = None
14
+ """Origin or source application of the event"""
15
+
16
+ extra_info: Optional[str] = None
17
+ """Stores supplementary details related to the event"""
18
+
19
+ model: Optional[str] = None
20
+ """Model used to perform the event"""
21
+
22
+ source: Optional[str] = None
23
+ """IP address of user or app or agent"""
24
+
25
+ tools: Optional[str] = None
26
+ """Tools used to perform the event"""
27
+
28
+
10
29
  class AnalyzerResponse(APIResponseModel):
11
30
  analyzer: str
12
31
  confidence: float
@@ -137,20 +156,20 @@ class AIGuard(ServiceBase):
137
156
  @overload
138
157
  def guard_text(
139
158
  self,
140
- text_or_messages: str,
159
+ text: str,
141
160
  *,
142
- recipe: str = "pangea_prompt_guard",
143
- debug: bool = False,
161
+ recipe: str | None = None,
162
+ debug: bool | None = None,
163
+ llm_info: str | None = None,
164
+ log_fields: LogFields | None = None,
144
165
  ) -> PangeaResponse[TextGuardResult[None]]:
145
166
  """
146
- Text Guard for scanning LLM inputs and outputs (Beta)
167
+ Text Guard for scanning LLM inputs and outputs
147
168
 
148
169
  Analyze and redact text to avoid manipulation of the model, addition of
149
170
  malicious content, and other undesirable data transfers.
150
171
 
151
- How to install a [Beta release](https://pangea.cloud/docs/sdk/python/#beta-releases).
152
-
153
- OperationId: ai_guard_post_v1beta_text_guard
172
+ OperationId: ai_guard_post_v1_text_guard
154
173
 
155
174
  Args:
156
175
  text: Text to be scanned by AI Guard for PII, sensitive data,
@@ -161,6 +180,8 @@ class AIGuard(ServiceBase):
161
180
  are to be applied to the text, such as defang malicious URLs.
162
181
  debug: Setting this value to true will provide a detailed analysis
163
182
  of the text data
183
+ llm_info: Short string hint for the LLM Provider information
184
+ log_fields: Additional fields to include in activity log
164
185
 
165
186
  Examples:
166
187
  response = ai_guard.guard_text("text")
@@ -169,74 +190,134 @@ class AIGuard(ServiceBase):
169
190
  @overload
170
191
  def guard_text(
171
192
  self,
172
- text_or_messages: _T,
173
193
  *,
174
- recipe: str = "pangea_prompt_guard",
175
- debug: bool = False,
194
+ messages: _T,
195
+ recipe: str | None = None,
196
+ debug: bool | None = None,
197
+ llm_info: str | None = None,
198
+ log_fields: LogFields | None = None,
176
199
  ) -> PangeaResponse[TextGuardResult[_T]]:
177
200
  """
178
- Text Guard for scanning LLM inputs and outputs (Beta)
201
+ Text Guard for scanning LLM inputs and outputs
179
202
 
180
203
  Analyze and redact text to avoid manipulation of the model, addition of
181
204
  malicious content, and other undesirable data transfers.
182
205
 
183
- How to install a [Beta release](https://pangea.cloud/docs/sdk/python/#beta-releases).
184
-
185
- OperationId: ai_guard_post_v1beta_text_guard
206
+ OperationId: ai_guard_post_v1_text_guard
186
207
 
187
208
  Args:
188
- text_or_messages: Structured data to be scanned by AI Guard for PII,
189
- sensitive data, malicious content, and other data types defined
190
- by the configuration. Supports processing up to 10KB of text.
209
+ messages: Structured messages data to be scanned by AI Guard for
210
+ PII, sensitive data, malicious content, and other data types
211
+ defined by the configuration. Supports processing up to 10KB of
212
+ JSON text
191
213
  recipe: Recipe key of a configuration of data types and settings
192
214
  defined in the Pangea User Console. It specifies the rules that
193
215
  are to be applied to the text, such as defang malicious URLs.
194
216
  debug: Setting this value to true will provide a detailed analysis
195
217
  of the text data
218
+ llm_info: Short string hint for the LLM Provider information
219
+ log_fields: Additional fields to include in activity log
196
220
 
197
221
  Examples:
198
- response = ai_guard.guard_text([
199
- {"role": "user", "content": "hello world"}
200
- ])
222
+ response = ai_guard.guard_text(messages=[{"role": "user", "content": "hello world"}])
201
223
  """
202
224
 
225
+ @overload
203
226
  def guard_text(
204
227
  self,
205
- text_or_messages: str | _T,
206
228
  *,
207
- recipe: str = "pangea_prompt_guard",
208
- debug: bool = False,
229
+ llm_input: _T,
230
+ recipe: str | None = None,
231
+ debug: bool | None = None,
232
+ llm_info: str | None = None,
233
+ log_fields: LogFields | None = None,
209
234
  ) -> PangeaResponse[TextGuardResult[_T]]:
210
235
  """
211
- Text Guard for scanning LLM inputs and outputs (Beta)
236
+ Text Guard for scanning LLM inputs and outputs
212
237
 
213
238
  Analyze and redact text to avoid manipulation of the model, addition of
214
239
  malicious content, and other undesirable data transfers.
215
240
 
216
- How to install a [Beta release](https://pangea.cloud/docs/sdk/python/#beta-releases).
241
+ OperationId: ai_guard_post_v1_text_guard
217
242
 
218
- OperationId: ai_guard_post_v1beta_text_guard
243
+ Args:
244
+ llm_input: Structured full llm payload data to be scanned by AI
245
+ Guard for PII, sensitive data, malicious content, and other data
246
+ types defined by the configuration. Supports processing up to
247
+ 10KB of JSON text
248
+ recipe: Recipe key of a configuration of data types and settings
249
+ defined in the Pangea User Console. It specifies the rules that
250
+ are to be applied to the text, such as defang malicious URLs.
251
+ debug: Setting this value to true will provide a detailed analysis
252
+ of the text data
253
+ llm_info: Short string hint for the LLM Provider information
254
+ log_fields: Additional fields to include in activity log
255
+
256
+ Examples:
257
+ response = ai_guard.guard_text(
258
+ llm_input={"model": "gpt-4o", "messages": [{"role": "user", "content": "hello world"}]}
259
+ )
260
+ """
261
+
262
+ def guard_text( # type: ignore[misc]
263
+ self,
264
+ text: str | None = None,
265
+ *,
266
+ messages: _T | None = None,
267
+ llm_input: _T | None = None,
268
+ recipe: str | None = None,
269
+ debug: bool | None = None,
270
+ llm_info: str | None = None,
271
+ log_fields: LogFields | None = None,
272
+ ) -> PangeaResponse[TextGuardResult[None]]:
273
+ """
274
+ Text Guard for scanning LLM inputs and outputs
275
+
276
+ Analyze and redact text to avoid manipulation of the model, addition of
277
+ malicious content, and other undesirable data transfers.
278
+
279
+ OperationId: ai_guard_post_v1_text_guard
219
280
 
220
281
  Args:
221
- text_or_messages: Text or structured data to be scanned by AI Guard
222
- for PII, sensitive data, malicious content, and other data types
223
- defined by the configuration. Supports processing up to 10KB of text.
282
+ text: Text to be scanned by AI Guard for PII, sensitive data,
283
+ malicious content, and other data types defined by the
284
+ configuration. Supports processing up to 10KB of text.
285
+ messages: Structured messages data to be scanned by AI Guard for
286
+ PII, sensitive data, malicious content, and other data types
287
+ defined by the configuration. Supports processing up to 10KB of
288
+ JSON text
289
+ llm_input: Structured full llm payload data to be scanned by AI
290
+ Guard for PII, sensitive data, malicious content, and other data
291
+ types defined by the configuration. Supports processing up to
292
+ 10KB of JSON text
224
293
  recipe: Recipe key of a configuration of data types and settings
225
294
  defined in the Pangea User Console. It specifies the rules that
226
295
  are to be applied to the text, such as defang malicious URLs.
227
296
  debug: Setting this value to true will provide a detailed analysis
228
297
  of the text data
298
+ llm_info: Short string hint for the LLM Provider information
299
+ log_fields: Additional fields to include in activity log
229
300
 
230
301
  Examples:
231
302
  response = ai_guard.guard_text("text")
232
303
  """
233
304
 
305
+ if not any((text, messages, llm_input)):
306
+ raise ValueError("Exactly one of `text`, `messages`, or `llm_input` must be given")
307
+
308
+ if sum((text is not None, messages is not None, llm_input is not None)) > 1:
309
+ raise ValueError("Only one of `text`, `messages`, or `llm_input` can be given at once")
310
+
234
311
  return self.request.post(
235
- "v1beta/text/guard",
312
+ "v1/text/guard",
236
313
  TextGuardResult,
237
314
  data={
238
- "text" if isinstance(text_or_messages, str) else "messages": text_or_messages,
315
+ "text": text,
316
+ "messages": messages,
317
+ "llm_input": llm_input,
239
318
  "recipe": recipe,
240
319
  "debug": debug,
320
+ "llm_info": llm_info,
321
+ "log_fields": log_fields,
241
322
  },
242
323
  )
@@ -30,7 +30,7 @@ class GuardResult(PangeaResponseResult):
30
30
  detected: bool
31
31
  """Boolean response for if the prompt was considered malicious or not"""
32
32
 
33
- type: Optional[Literal["direct", "indirect"]] = None
33
+ type: Optional[Literal["direct", "indirect", ""]] = None
34
34
  """Type of analysis, either direct or indirect"""
35
35
 
36
36
  analyzer: Optional[str] = None
@@ -91,24 +91,19 @@ class PromptGuard(ServiceBase):
91
91
  *,
92
92
  analyzers: Iterable[str] | None = None,
93
93
  classify: bool | None = None,
94
- threshold: float | None = None,
95
94
  ) -> PangeaResponse[GuardResult]:
96
95
  """
97
- Guard (Beta)
96
+ Guard
98
97
 
99
98
  Guard messages.
100
99
 
101
- How to install a [Beta release](https://pangea.cloud/docs/sdk/python/#beta-releases).
102
-
103
- OperationId: prompt_guard_post_v1beta_guard
100
+ OperationId: prompt_guard_post_v1_guard
104
101
 
105
102
  Args:
106
103
  messages: Prompt content and role array in JSON format. The
107
104
  `content` is the text that will be analyzed for redaction.
108
105
  analyzers: Specific analyzers to be used in the call
109
106
  classify: Boolean to enable classification of the content
110
- threshold: Threshold for the confidence score to consider the prompt
111
- as malicious
112
107
 
113
108
  Examples:
114
109
  from pangea.services.prompt_guard import Message
@@ -117,7 +112,7 @@ class PromptGuard(ServiceBase):
117
112
  """
118
113
 
119
114
  return self.request.post(
120
- "v1beta/guard",
115
+ "v1/guard",
121
116
  GuardResult,
122
- data={"messages": messages, "analyzers": analyzers, "classify": classify, "threshold": threshold},
117
+ data={"messages": messages, "analyzers": analyzers, "classify": classify},
123
118
  )
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: pangea-sdk
3
- Version: 5.5.0b3
3
+ Version: 5.5.0b4
4
4
  Summary: Pangea API SDK
5
5
  License: MIT
6
6
  Keywords: Pangea,SDK,Audit
@@ -63,13 +63,13 @@ the same compatibility guarantees as stable releases.
63
63
  Via pip:
64
64
 
65
65
  ```bash
66
- $ pip3 install pangea-sdk==5.5.0b3
66
+ $ pip3 install pangea-sdk==5.5.0b4
67
67
  ```
68
68
 
69
69
  Via poetry:
70
70
 
71
71
  ```bash
72
- $ poetry add pangea-sdk==5.5.0b3
72
+ $ poetry add pangea-sdk==5.5.0b4
73
73
  ```
74
74
 
75
75
  ## Usage
@@ -1,9 +1,9 @@
1
- pangea/__init__.py,sha256=oR8inTyWgORDk9j8o76EPF3uU09SynZlOMqk3ZP36dk,251
1
+ pangea/__init__.py,sha256=fCxvmybvVb3077qDVFJbBz4QSz5gOCC6lb36iuU2BxY,251
2
2
  pangea/asyncio/__init__.py,sha256=kjEMkqMQ521LlMSu5jn3_WgweyArwVZ2C-s3x7mR6Pk,45
3
3
  pangea/asyncio/file_uploader.py,sha256=wI7epib7Rc5jtZw4eJ1L1SlmutDG6CPv59C8N2UPhtY,1436
4
4
  pangea/asyncio/request.py,sha256=lpLY-o405r3-VUfrAE5uxYxI8UjM4hjPqUzAUtOGE5o,18040
5
5
  pangea/asyncio/services/__init__.py,sha256=L6Tdhjfx_ZECHskhLMPaCcOefi-r-imw6q_zlU4j-FY,464
6
- pangea/asyncio/services/ai_guard.py,sha256=7Zr4jjCmOcPOFgHesM4MYgDosqfNjF_Foj5e_EyMZ70,5677
6
+ pangea/asyncio/services/ai_guard.py,sha256=Q_Q_1xKvxXsW6jHsEDjPVHAOYYvz7bmulC5480vRc-s,8541
7
7
  pangea/asyncio/services/audit.py,sha256=rPaCx4cMzj-g9WFMRIysFCJAz6Btp6YrhcKe_exky8k,26283
8
8
  pangea/asyncio/services/authn.py,sha256=rPeLJweL8mYH_t4ebcQn4n_Wglr3kClKNnCXNCimZU4,46622
9
9
  pangea/asyncio/services/authz.py,sha256=B_0_nhDMJcjNpjpCx3Vi2LDRhlmfV9325GKbUZ8reos,10025
@@ -11,7 +11,7 @@ pangea/asyncio/services/base.py,sha256=vRFVcO_uEAGJte3OUUBLD43RoiiFB1vC7SPyN6yEM
11
11
  pangea/asyncio/services/embargo.py,sha256=ctzj3kip6xos-Eu3JuOskrCGYC8T3JlsgAopZHiPSXM,3068
12
12
  pangea/asyncio/services/file_scan.py,sha256=PLG1O-PL4Yk9uY9D6NbMrZ5LHg70Z311s7bFe46UMZA,7108
13
13
  pangea/asyncio/services/intel.py,sha256=BcxGKSoZ1nJiEHyZM9yOwKSSPJUrB6ibJ19KR27VlgQ,40261
14
- pangea/asyncio/services/prompt_guard.py,sha256=rTFylG9zyMauhpzb6BsccmmMK3qRwtrsoMjemLDJ2Bs,2835
14
+ pangea/asyncio/services/prompt_guard.py,sha256=NbYt-0tRtO5VH7kLmC1lJ5JSV-ztlb9dNFaKKs_fZUM,2553
15
15
  pangea/asyncio/services/redact.py,sha256=JPJcmeKFloMZRpkjAHAZbpZJpO993WsTfEwA-S5ov18,7951
16
16
  pangea/asyncio/services/sanitize.py,sha256=EbSdq_v9yZWce9xEYWvZharE9bJcxw8cg5Pv8LVxdxc,8627
17
17
  pangea/asyncio/services/share.py,sha256=Qd2Oh4UsLwu7Zo4Xy1KABHuP4TJ9AtcN-XzldvilFVo,30773
@@ -28,7 +28,7 @@ pangea/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
28
28
  pangea/request.py,sha256=vGB8owXUiNQoeiiACFvfXvg44JJo_L6WfcHlF6ug8co,25082
29
29
  pangea/response.py,sha256=lPAcYsF9Xg166CiyhCofVmQA-W4jevh0MQXxUa8Re68,7737
30
30
  pangea/services/__init__.py,sha256=h36HzyIGaI5kO6l3UCwKHx_Kd-m_9mYVwn5MLRVzblI,408
31
- pangea/services/ai_guard.py,sha256=jbXzcUaR7j-6ytW9QRCGn9WtlrCCcB6Ia_2J-3AY-X4,7763
31
+ pangea/services/ai_guard.py,sha256=tBr3GEbrobjECs51cRR63Q8AICMl-K3tQD1FdyZkR4s,11129
32
32
  pangea/services/audit/audit.py,sha256=7-c9l7jyGtpG7SqRUMpqsAzcUDhMZ5izgPalxHXsUvM,39320
33
33
  pangea/services/audit/exceptions.py,sha256=bhVuYe4ammacOVxwg98CChxvwZf5FKgR2DcgqILOcwc,471
34
34
  pangea/services/audit/models.py,sha256=1h1B9eSYQMYG3f8WNi1UcDX2-impRrET_ErjJYUnj7M,14678
@@ -41,7 +41,7 @@ pangea/services/base.py,sha256=43pWQcR9CeT4sGzgctF3Sy4M_h7DaUzkuZD2Z7CcDUU,3845
41
41
  pangea/services/embargo.py,sha256=9Wfku4td5ORaIENKmnGmS5jxJJIRfWp6Q51L36Jsy0I,3897
42
42
  pangea/services/file_scan.py,sha256=QiO80uKqB_BnAOiYQKznXfxpa5j40qqETE3-zBRT_QE,7813
43
43
  pangea/services/intel.py,sha256=y1EX2ctYIxQc52lmHp6-Q_UIDM--t3fOpXDssWiRPfo,56474
44
- pangea/services/prompt_guard.py,sha256=5KqML4IleB_4a7_PDqWLk9WGQVJ0j4vOdqgVGGkQ6z8,3724
44
+ pangea/services/prompt_guard.py,sha256=uMpofGKltmlNklF8znhRLuY6siyjDf-Zw-4Hwy2oJtc,3446
45
45
  pangea/services/redact.py,sha256=ovIcT0jkXe57O7keGzSClWNCic8y-4NZoemXoSKjjww,12913
46
46
  pangea/services/sanitize.py,sha256=eAN1HhObiKqygy6HHcfl0NmxYfPMvqSKepwEAVVIIEE,12936
47
47
  pangea/services/share/file_format.py,sha256=1svO1ee_aenA9zoO_AaU-Rk5Ulp7kcPOc_KwNoluyQE,2797
@@ -55,6 +55,6 @@ pangea/services/vault/vault.py,sha256=ow-Zm7PYzfWIfUcA4UNnpeL2DHfZM4C7inRDmNR3zQ
55
55
  pangea/tools.py,sha256=2-Y4SAHWFv6Ocj42J_bWrVy27M5G3wi7a8LJn0dabHc,6427
56
56
  pangea/utils.py,sha256=dZ6MwFVEWXUgXvvDg-k6JnvVfsgslvtaBd7ez7afrqk,4983
57
57
  pangea/verify_audit.py,sha256=nSP17OzoSPdvezRExwfcf45H8ZPZnxZu-CbEp3qFJO0,17354
58
- pangea_sdk-5.5.0b3.dist-info/METADATA,sha256=_fwk4xCZECycgGNKq8eWhvber6bDr2EWYt1lycQMyi4,7017
59
- pangea_sdk-5.5.0b3.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
60
- pangea_sdk-5.5.0b3.dist-info/RECORD,,
58
+ pangea_sdk-5.5.0b4.dist-info/METADATA,sha256=KtUZWuQgJVjZipQZ77PbZw33faRG6M_425ZGLxvxCA4,7017
59
+ pangea_sdk-5.5.0b4.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
60
+ pangea_sdk-5.5.0b4.dist-info/RECORD,,