pangea-sdk 6.5.0b1__py3-none-any.whl → 6.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pangea/__init__.py +3 -11
- pangea/_constants.py +4 -0
- pangea/_typing.py +30 -0
- pangea/asyncio/__init__.py +2 -1
- pangea/asyncio/file_uploader.py +3 -2
- pangea/asyncio/request.py +66 -162
- pangea/asyncio/services/__init__.py +19 -3
- pangea/asyncio/services/ai_guard.py +31 -97
- pangea/asyncio/services/audit.py +1 -301
- pangea/asyncio/services/authn.py +25 -8
- pangea/asyncio/services/base.py +21 -6
- pangea/asyncio/services/file_scan.py +1 -1
- pangea/asyncio/services/intel.py +160 -95
- pangea/asyncio/services/prompt_guard.py +7 -113
- pangea/asyncio/services/redact.py +4 -265
- pangea/config.py +4 -2
- pangea/file_uploader.py +4 -1
- pangea/request.py +91 -166
- pangea/response.py +5 -1
- pangea/services/__init__.py +19 -3
- pangea/services/ai_guard.py +180 -597
- pangea/services/audit/audit.py +3 -301
- pangea/services/audit/models.py +1 -273
- pangea/services/audit/util.py +2 -0
- pangea/services/authn/authn.py +4 -5
- pangea/services/base.py +3 -0
- pangea/services/file_scan.py +3 -2
- pangea/services/intel.py +187 -252
- pangea/services/prompt_guard.py +11 -197
- pangea/services/redact.py +7 -473
- pangea/services/vault/vault.py +3 -0
- {pangea_sdk-6.5.0b1.dist-info → pangea_sdk-6.7.0.dist-info}/METADATA +17 -18
- pangea_sdk-6.7.0.dist-info/RECORD +62 -0
- pangea_sdk-6.7.0.dist-info/WHEEL +4 -0
- pangea/asyncio/services/management.py +0 -576
- pangea/services/management.py +0 -720
- pangea_sdk-6.5.0b1.dist-info/RECORD +0 -62
- pangea_sdk-6.5.0b1.dist-info/WHEEL +0 -4
pangea/services/ai_guard.py
CHANGED
@@ -1,13 +1,13 @@
|
|
1
1
|
from __future__ import annotations
|
2
2
|
|
3
3
|
from collections.abc import Mapping, Sequence
|
4
|
-
from typing import Annotated, Any, Generic, Literal, Optional,
|
4
|
+
from typing import Annotated, Any, Generic, Literal, Optional, overload
|
5
5
|
|
6
6
|
from pydantic import BaseModel, ConfigDict, Field, RootModel
|
7
|
-
from typing_extensions import TypeAlias, TypedDict, TypeVar
|
8
7
|
|
8
|
+
from pangea._typing import T
|
9
9
|
from pangea.config import PangeaConfig
|
10
|
-
from pangea.response import APIRequestModel, APIResponseModel,
|
10
|
+
from pangea.response import APIRequestModel, APIResponseModel, PangeaResponse, PangeaResponseResult
|
11
11
|
from pangea.services.base import ServiceBase
|
12
12
|
|
13
13
|
# This is named "prompt injection" in the API spec even though it is also used
|
@@ -22,10 +22,15 @@ PiiEntityAction = Literal["disabled", "report", "block", "mask", "partial_maskin
|
|
22
22
|
|
23
23
|
|
24
24
|
class Message(APIRequestModel):
|
25
|
-
role: str
|
25
|
+
role: Optional[str] = None
|
26
26
|
content: str
|
27
27
|
|
28
28
|
|
29
|
+
class McpToolsMessage(APIRequestModel):
|
30
|
+
role: Literal["tools"]
|
31
|
+
content: list[dict[str, Any]]
|
32
|
+
|
33
|
+
|
29
34
|
class CodeDetectionOverride(APIRequestModel):
|
30
35
|
disabled: Optional[bool] = None
|
31
36
|
action: Optional[Literal["report", "block"]] = None
|
@@ -277,29 +282,31 @@ class CodeDetectionResult(APIResponseModel):
|
|
277
282
|
"""The action taken by this Detector"""
|
278
283
|
|
279
284
|
|
280
|
-
|
281
|
-
|
282
|
-
|
283
|
-
class TextGuardDetector(APIResponseModel, Generic[_T]):
|
285
|
+
class GuardDetector(APIResponseModel, Generic[T]):
|
284
286
|
detected: Optional[bool] = None
|
285
|
-
data: Optional[
|
287
|
+
data: Optional[T] = None
|
286
288
|
|
287
289
|
|
288
290
|
class TextGuardDetectors(APIResponseModel):
|
289
|
-
code_detection: Optional[
|
290
|
-
competitors: Optional[
|
291
|
-
custom_entity: Optional[
|
292
|
-
gibberish: Optional[
|
293
|
-
hardening: Optional[
|
294
|
-
language_detection: Optional[
|
295
|
-
malicious_entity: Optional[
|
296
|
-
pii_entity: Optional[
|
297
|
-
profanity_and_toxicity: Optional[
|
298
|
-
prompt_injection: Optional[
|
299
|
-
secrets_detection: Optional[
|
300
|
-
selfharm: Optional[
|
301
|
-
sentiment: Optional[
|
302
|
-
topic: Optional[
|
291
|
+
code_detection: Optional[GuardDetector[CodeDetectionResult]] = None
|
292
|
+
competitors: Optional[GuardDetector[object]] = None
|
293
|
+
custom_entity: Optional[GuardDetector[object]] = None
|
294
|
+
gibberish: Optional[GuardDetector[object]] = None
|
295
|
+
hardening: Optional[GuardDetector[object]] = None
|
296
|
+
language_detection: Optional[GuardDetector[LanguageDetectionResult]] = None
|
297
|
+
malicious_entity: Optional[GuardDetector[MaliciousEntityResult]] = None
|
298
|
+
pii_entity: Optional[GuardDetector[PiiEntityResult]] = None
|
299
|
+
profanity_and_toxicity: Optional[GuardDetector[object]] = None
|
300
|
+
prompt_injection: Optional[GuardDetector[PromptInjectionResult]] = None
|
301
|
+
secrets_detection: Optional[GuardDetector[SecretsEntityResult]] = None
|
302
|
+
selfharm: Optional[GuardDetector[object]] = None
|
303
|
+
sentiment: Optional[GuardDetector[object]] = None
|
304
|
+
topic: Optional[GuardDetector[TopicDetectionResult]] = None
|
305
|
+
|
306
|
+
|
307
|
+
class PromptMessage(APIResponseModel):
|
308
|
+
role: str
|
309
|
+
content: str
|
303
310
|
|
304
311
|
|
305
312
|
class TextGuardResult(PangeaResponseResult):
|
@@ -318,7 +325,7 @@ class TextGuardResult(PangeaResponseResult):
|
|
318
325
|
unredact.
|
319
326
|
"""
|
320
327
|
|
321
|
-
prompt_messages: Optional[
|
328
|
+
prompt_messages: Optional[list[PromptMessage]] = None
|
322
329
|
"""Updated structured prompt, if applicable."""
|
323
330
|
|
324
331
|
prompt_text: Optional[str] = None
|
@@ -331,537 +338,119 @@ class TextGuardResult(PangeaResponseResult):
|
|
331
338
|
"""Whether or not the original input was transformed."""
|
332
339
|
|
333
340
|
|
334
|
-
class
|
335
|
-
|
336
|
-
|
337
|
-
confidential_and_pii_entity: Optional[object] = None
|
338
|
-
custom_entity: Optional[object] = None
|
339
|
-
language: Optional[object] = None
|
340
|
-
malicious_entity: Optional[object] = None
|
341
|
-
malicious_prompt: Optional[object] = None
|
342
|
-
prompt_hardening: Optional[object] = None
|
343
|
-
secret_and_key_entity: Optional[object] = None
|
344
|
-
topic: Optional[object] = None
|
345
|
-
|
346
|
-
|
347
|
-
class GuardResult(PangeaResponseResult):
|
348
|
-
detectors: GuardDetectors
|
349
|
-
"""Result of the recipe analyzing and input prompt."""
|
350
|
-
|
351
|
-
access_rules: Optional[object] = None
|
352
|
-
"""Result of the recipe evaluating configured rules"""
|
353
|
-
|
354
|
-
blocked: Optional[bool] = None
|
355
|
-
"""Whether or not the prompt triggered a block detection."""
|
356
|
-
|
357
|
-
fpe_context: Optional[str] = None
|
358
|
-
"""
|
359
|
-
If an FPE redaction method returned results, this will be the context passed
|
360
|
-
to unredact.
|
361
|
-
"""
|
362
|
-
|
363
|
-
input_token_count: Optional[float] = None
|
364
|
-
"""Number of tokens counted in the input"""
|
365
|
-
|
366
|
-
output: Optional[object] = None
|
367
|
-
"""Updated structured prompt."""
|
368
|
-
|
369
|
-
output_token_count: Optional[float] = None
|
370
|
-
"""Number of tokens counted in the output"""
|
371
|
-
|
372
|
-
recipe: Optional[str] = None
|
373
|
-
"""The Recipe that was used."""
|
374
|
-
|
375
|
-
transformed: Optional[bool] = None
|
376
|
-
"""Whether or not the original input was transformed."""
|
377
|
-
|
378
|
-
|
379
|
-
class Areas(BaseModel):
|
380
|
-
model_config = ConfigDict(extra="forbid")
|
381
|
-
|
382
|
-
text_guard: bool
|
383
|
-
|
384
|
-
|
385
|
-
class AuditDataActivityConfig(BaseModel):
|
386
|
-
model_config = ConfigDict(extra="forbid")
|
387
|
-
|
388
|
-
enabled: bool
|
389
|
-
audit_service_config_id: str
|
390
|
-
areas: Areas
|
391
|
-
|
392
|
-
|
393
|
-
class PromptGuard(BaseModel):
|
394
|
-
model_config = ConfigDict(extra="forbid")
|
395
|
-
|
396
|
-
enabled: Optional[bool] = None
|
397
|
-
config_id: Optional[str] = None
|
398
|
-
confidence_threshold: Optional[float] = None
|
399
|
-
|
341
|
+
class Tool(RootModel[str]):
|
342
|
+
root: Annotated[str, Field(min_length=1)]
|
343
|
+
"""Tool name"""
|
400
344
|
|
401
|
-
class IpIntel(BaseModel):
|
402
|
-
model_config = ConfigDict(extra="forbid")
|
403
345
|
|
404
|
-
|
405
|
-
|
406
|
-
|
407
|
-
risk_threshold: Optional[float] = None
|
346
|
+
class McpTool(APIRequestModel):
|
347
|
+
server_name: Annotated[str, Field(min_length=1)]
|
348
|
+
"""MCP server name"""
|
408
349
|
|
350
|
+
tools: Annotated[list[Tool], Field(min_length=1)]
|
409
351
|
|
410
|
-
class UserIntel(BaseModel):
|
411
|
-
model_config = ConfigDict(extra="forbid")
|
412
352
|
|
413
|
-
|
414
|
-
|
415
|
-
breach_provider: Optional[str] = None
|
416
|
-
|
417
|
-
|
418
|
-
class UrlIntel(BaseModel):
|
419
|
-
model_config = ConfigDict(extra="forbid")
|
420
|
-
|
421
|
-
enabled: Optional[bool] = None
|
422
|
-
config_id: Optional[str] = None
|
423
|
-
reputation_provider: Optional[str] = None
|
424
|
-
risk_threshold: Optional[float] = None
|
425
|
-
|
426
|
-
|
427
|
-
class DomainIntel(BaseModel):
|
428
|
-
model_config = ConfigDict(extra="forbid")
|
429
|
-
|
430
|
-
enabled: Optional[bool] = None
|
431
|
-
config_id: Optional[str] = None
|
432
|
-
reputation_provider: Optional[str] = None
|
433
|
-
risk_threshold: Optional[float] = None
|
434
|
-
|
435
|
-
|
436
|
-
class FileScan(BaseModel):
|
437
|
-
model_config = ConfigDict(extra="forbid")
|
438
|
-
|
439
|
-
enabled: Optional[bool] = None
|
440
|
-
config_id: Optional[str] = None
|
441
|
-
scan_provider: Optional[str] = None
|
442
|
-
risk_threshold: Optional[float] = None
|
443
|
-
|
444
|
-
|
445
|
-
class Redact(BaseModel):
|
446
|
-
model_config = ConfigDict(extra="forbid")
|
447
|
-
|
448
|
-
enabled: Optional[bool] = None
|
449
|
-
config_id: Optional[str] = None
|
450
|
-
|
451
|
-
|
452
|
-
class Vault(BaseModel):
|
453
|
-
model_config = ConfigDict(extra="forbid")
|
454
|
-
|
455
|
-
config_id: Optional[str] = None
|
456
|
-
|
457
|
-
|
458
|
-
class Lingua(BaseModel):
|
459
|
-
model_config = ConfigDict(extra="forbid")
|
353
|
+
class ExtraInfo(BaseModel):
|
354
|
+
"""(AIDR) Logging schema."""
|
460
355
|
|
461
|
-
|
356
|
+
# Additional properties are allowed here.
|
357
|
+
model_config = ConfigDict(extra="allow")
|
462
358
|
|
359
|
+
app_name: Optional[str] = None
|
360
|
+
"""Name of source application/agent."""
|
463
361
|
|
464
|
-
|
465
|
-
|
362
|
+
app_group: Optional[str] = None
|
363
|
+
"""The group of source application/agent."""
|
466
364
|
|
467
|
-
|
365
|
+
app_version: Optional[str] = None
|
366
|
+
"""Version of the source application/agent."""
|
468
367
|
|
368
|
+
actor_name: Optional[str] = None
|
369
|
+
"""Name of subject actor/service account."""
|
469
370
|
|
470
|
-
|
471
|
-
|
371
|
+
actor_group: Optional[str] = None
|
372
|
+
"""The group of subject actor."""
|
472
373
|
|
473
|
-
|
474
|
-
|
475
|
-
user_intel: Optional[UserIntel] = None
|
476
|
-
url_intel: Optional[UrlIntel] = None
|
477
|
-
domain_intel: Optional[DomainIntel] = None
|
478
|
-
file_scan: Optional[FileScan] = None
|
479
|
-
redact: Optional[Redact] = None
|
480
|
-
vault: Optional[Vault] = None
|
481
|
-
lingua: Optional[Lingua] = None
|
482
|
-
code: Optional[Code] = None
|
374
|
+
source_region: Optional[str] = None
|
375
|
+
"""Geographic region or data center."""
|
483
376
|
|
377
|
+
sub_tenant: Optional[str] = None
|
378
|
+
"""Sub tenant of the user or organization"""
|
379
|
+
mcp_tools: Optional[Sequence[McpTool]] = None
|
484
380
|
|
485
|
-
|
486
|
-
masking_type: Optional[Literal["unmask", "mask"]] = "unmask"
|
487
|
-
unmasked_from_left: Annotated[Optional[int], Field(ge=0)] = None
|
488
|
-
unmasked_from_right: Annotated[Optional[int], Field(ge=0)] = None
|
489
|
-
masked_from_left: Annotated[Optional[int], Field(ge=0)] = None
|
490
|
-
masked_from_right: Annotated[Optional[int], Field(ge=0)] = None
|
491
|
-
chars_to_ignore: Optional[list[CharsToIgnoreItem]] = None
|
492
|
-
masking_char: Annotated[Optional[str], Field(max_length=1, min_length=1)] = "*"
|
381
|
+
"""Each item groups tools for a given MCP server."""
|
493
382
|
|
494
383
|
|
495
|
-
class
|
496
|
-
redaction_type: Literal[
|
497
|
-
"mask",
|
498
|
-
"partial_masking",
|
499
|
-
"replacement",
|
500
|
-
"hash",
|
501
|
-
"detect_only",
|
502
|
-
"fpe",
|
503
|
-
"mask",
|
504
|
-
"detect_only",
|
505
|
-
]
|
506
|
-
"""Redaction method to apply for this rule"""
|
507
|
-
redaction_value: Optional[str] = None
|
508
|
-
partial_masking: Optional[PartialMasking] = None
|
509
|
-
hash: Optional[Hash] = None
|
510
|
-
fpe_alphabet: Optional[
|
511
|
-
Literal[
|
512
|
-
"numeric",
|
513
|
-
"alphalower",
|
514
|
-
"alphaupper",
|
515
|
-
"alpha",
|
516
|
-
"alphanumericlower",
|
517
|
-
"alphanumericupper",
|
518
|
-
"alphanumeric",
|
519
|
-
]
|
520
|
-
] = None
|
521
|
-
|
522
|
-
|
523
|
-
class PartialMasking1(BaseModel):
|
524
|
-
masking_type: Optional[Literal["unmask", "mask"]] = "unmask"
|
525
|
-
unmasked_from_left: Annotated[Optional[int], Field(ge=0)] = None
|
526
|
-
unmasked_from_right: Annotated[Optional[int], Field(ge=0)] = None
|
527
|
-
masked_from_left: Annotated[Optional[int], Field(ge=0)] = None
|
528
|
-
masked_from_right: Annotated[Optional[int], Field(ge=0)] = None
|
529
|
-
chars_to_ignore: Optional[list[CharsToIgnoreItem]] = None
|
530
|
-
masking_char: Annotated[Optional[str], Field(max_length=1, min_length=1)] = "*"
|
531
|
-
|
532
|
-
|
533
|
-
class RuleRedactionConfig2(BaseModel):
|
534
|
-
model_config = ConfigDict(extra="forbid")
|
535
|
-
|
536
|
-
redaction_type: Literal["replacement"]
|
537
|
-
redaction_value: str
|
538
|
-
partial_masking: Optional[PartialMasking1] = None
|
539
|
-
hash: Optional[Hash] = None
|
540
|
-
fpe_alphabet: Optional[
|
541
|
-
Literal[
|
542
|
-
"numeric",
|
543
|
-
"alphalower",
|
544
|
-
"alphaupper",
|
545
|
-
"alpha",
|
546
|
-
"alphanumericlower",
|
547
|
-
"alphanumericupper",
|
548
|
-
"alphanumeric",
|
549
|
-
]
|
550
|
-
] = None
|
551
|
-
|
552
|
-
|
553
|
-
class PartialMasking2(BaseModel):
|
554
|
-
masking_type: Optional[Literal["unmask", "mask"]] = "unmask"
|
555
|
-
unmasked_from_left: Annotated[Optional[int], Field(ge=0)] = None
|
556
|
-
unmasked_from_right: Annotated[Optional[int], Field(ge=0)] = None
|
557
|
-
masked_from_left: Annotated[Optional[int], Field(ge=0)] = None
|
558
|
-
masked_from_right: Annotated[Optional[int], Field(ge=0)] = None
|
559
|
-
chars_to_ignore: Optional[list[CharsToIgnoreItem]] = None
|
560
|
-
masking_char: Annotated[Optional[str], Field(max_length=1, min_length=1)] = "*"
|
561
|
-
|
562
|
-
|
563
|
-
class RuleRedactionConfig3(BaseModel):
|
564
|
-
model_config = ConfigDict(extra="forbid")
|
565
|
-
|
566
|
-
redaction_type: Literal["partial_masking"]
|
567
|
-
redaction_value: str
|
568
|
-
partial_masking: PartialMasking2
|
569
|
-
hash: Optional[Hash] = None
|
570
|
-
fpe_alphabet: Optional[
|
571
|
-
Literal[
|
572
|
-
"numeric",
|
573
|
-
"alphalower",
|
574
|
-
"alphaupper",
|
575
|
-
"alpha",
|
576
|
-
"alphanumericlower",
|
577
|
-
"alphanumericupper",
|
578
|
-
"alphanumeric",
|
579
|
-
]
|
580
|
-
] = None
|
581
|
-
|
582
|
-
|
583
|
-
class PartialMasking3(BaseModel):
|
584
|
-
masking_type: Optional[Literal["unmask", "mask"]] = "unmask"
|
585
|
-
unmasked_from_left: Annotated[Optional[int], Field(ge=0)] = None
|
586
|
-
unmasked_from_right: Annotated[Optional[int], Field(ge=0)] = None
|
587
|
-
masked_from_left: Annotated[Optional[int], Field(ge=0)] = None
|
588
|
-
masked_from_right: Annotated[Optional[int], Field(ge=0)] = None
|
589
|
-
chars_to_ignore: Optional[list[CharsToIgnoreItem]] = None
|
590
|
-
masking_char: Annotated[Optional[str], Field(max_length=1, min_length=1)] = "*"
|
591
|
-
|
592
|
-
|
593
|
-
class RuleRedactionConfig4(BaseModel):
|
594
|
-
model_config = ConfigDict(extra="forbid")
|
595
|
-
|
596
|
-
redaction_type: Literal["hash"]
|
597
|
-
redaction_value: str
|
598
|
-
partial_masking: PartialMasking3
|
599
|
-
hash: Optional[Hash] = None
|
600
|
-
fpe_alphabet: Optional[
|
601
|
-
Literal[
|
602
|
-
"numeric",
|
603
|
-
"alphalower",
|
604
|
-
"alphaupper",
|
605
|
-
"alpha",
|
606
|
-
"alphanumericlower",
|
607
|
-
"alphanumericupper",
|
608
|
-
"alphanumeric",
|
609
|
-
]
|
610
|
-
] = None
|
611
|
-
|
612
|
-
|
613
|
-
class CharsToIgnoreItem(RootModel[str]):
|
614
|
-
root: Annotated[str, Field(max_length=1, min_length=1)]
|
615
|
-
|
616
|
-
|
617
|
-
class PartialMasking4(BaseModel):
|
618
|
-
masking_type: Optional[Literal["unmask", "mask"]] = "unmask"
|
619
|
-
unmasked_from_left: Annotated[Optional[int], Field(ge=0)] = None
|
620
|
-
unmasked_from_right: Annotated[Optional[int], Field(ge=0)] = None
|
621
|
-
masked_from_left: Annotated[Optional[int], Field(ge=0)] = None
|
622
|
-
masked_from_right: Annotated[Optional[int], Field(ge=0)] = None
|
623
|
-
chars_to_ignore: Optional[list[CharsToIgnoreItem]] = None
|
624
|
-
masking_char: Annotated[Optional[str], Field(max_length=1, min_length=1)] = "*"
|
625
|
-
|
626
|
-
|
627
|
-
class Hash(BaseModel):
|
628
|
-
hash_type: Literal["md5", "sha256"]
|
629
|
-
"""The type of hashing algorithm"""
|
630
|
-
|
631
|
-
|
632
|
-
class RuleRedactionConfig5(BaseModel):
|
633
|
-
model_config = ConfigDict(extra="forbid")
|
634
|
-
|
635
|
-
redaction_type: Literal["fpe"]
|
636
|
-
redaction_value: str
|
637
|
-
partial_masking: PartialMasking4
|
638
|
-
hash: Optional[Hash] = None
|
639
|
-
fpe_alphabet: Optional[
|
640
|
-
Literal[
|
641
|
-
"numeric",
|
642
|
-
"alphalower",
|
643
|
-
"alphaupper",
|
644
|
-
"alpha",
|
645
|
-
"alphanumericlower",
|
646
|
-
"alphanumericupper",
|
647
|
-
"alphanumeric",
|
648
|
-
]
|
649
|
-
] = None
|
650
|
-
|
651
|
-
|
652
|
-
class Rule(APIResponseModel):
|
653
|
-
redact_rule_id: str
|
384
|
+
class AccessRuleResult(APIResponseModel):
|
654
385
|
"""
|
655
|
-
|
656
|
-
|
386
|
+
Details about the evaluation of a single rule, including whether it matched,
|
387
|
+
the action to take, the rule name, and optional debugging information.
|
657
388
|
"""
|
658
|
-
redaction: Union[
|
659
|
-
RuleRedactionConfig1,
|
660
|
-
RuleRedactionConfig2,
|
661
|
-
RuleRedactionConfig3,
|
662
|
-
RuleRedactionConfig4,
|
663
|
-
RuleRedactionConfig5,
|
664
|
-
]
|
665
|
-
"""
|
666
|
-
Configuration for the redaction method applied to detected values.
|
667
389
|
|
668
|
-
|
669
|
-
|
670
|
-
Additional parameters may be required depending on the selected redaction
|
671
|
-
type.
|
390
|
+
matched: bool
|
391
|
+
"""Whether this rule's logic evaluated to true for the input."""
|
672
392
|
|
673
|
-
|
674
|
-
documentation.
|
675
|
-
"""
|
676
|
-
block: Optional[bool] = None
|
677
|
-
"""
|
678
|
-
If `true`, indicates that further processing should be stopped when this
|
679
|
-
rule is triggered
|
680
|
-
"""
|
681
|
-
disabled: Optional[bool] = None
|
682
|
-
"""
|
683
|
-
If `true`, disables this specific rule even if the detector is enabled
|
684
|
-
"""
|
685
|
-
reputation_check: Optional[bool] = None
|
686
|
-
"""
|
687
|
-
If `true`, performs a reputation check using the configured intel provider.
|
688
|
-
Applies to the Malicious Entity detector when using IP, URL, or Domain Intel
|
689
|
-
services.
|
690
|
-
"""
|
691
|
-
transform_if_malicious: Optional[bool] = None
|
393
|
+
action: str
|
692
394
|
"""
|
693
|
-
|
694
|
-
|
395
|
+
The action resulting from the rule evaluation. One of 'allowed', 'blocked',
|
396
|
+
or 'reported'.
|
695
397
|
"""
|
696
398
|
|
399
|
+
name: str
|
400
|
+
"""A human-readable name for the rule."""
|
697
401
|
|
698
|
-
|
699
|
-
|
700
|
-
|
701
|
-
|
702
|
-
class DetectorSetting(BaseModel):
|
703
|
-
model_config = ConfigDict(extra="forbid")
|
704
|
-
|
705
|
-
detector_name: str
|
706
|
-
state: Literal["disabled", "enabled"]
|
707
|
-
settings: Settings
|
402
|
+
logic: Optional[dict[str, Any]] = None
|
403
|
+
"""The JSON logic expression evaluated for this rule."""
|
708
404
|
|
405
|
+
attributes: Optional[dict[str, Any]] = None
|
406
|
+
"""The input attribute values that were available during rule evaluation."""
|
709
407
|
|
710
|
-
class RedactConnectorSettings(BaseModel):
|
711
|
-
fpe_tweak_vault_secret_id: Optional[str] = None
|
712
408
|
|
409
|
+
class GuardDetectors(APIResponseModel):
|
410
|
+
"""Result of the recipe analyzing and input prompt."""
|
713
411
|
|
714
|
-
|
715
|
-
|
412
|
+
code: Optional[GuardDetector[CodeDetectionResult]] = None
|
413
|
+
competitors: Optional[GuardDetector[object]] = None
|
414
|
+
confidential_and_pii_entity: Optional[GuardDetector[PiiEntityResult]] = None
|
415
|
+
custom_entity: Optional[GuardDetector[object]] = None
|
416
|
+
language: Optional[GuardDetector[LanguageDetectionResult]] = None
|
417
|
+
malicious_entity: Optional[GuardDetector[MaliciousEntityResult]] = None
|
418
|
+
malicious_prompt: Optional[GuardDetector[PromptInjectionResult]] = None
|
419
|
+
prompt_hardening: Optional[GuardDetector[object]] = None
|
420
|
+
secret_and_key_entity: Optional[GuardDetector[SecretsEntityResult]] = None
|
421
|
+
topic: Optional[GuardDetector[TopicDetectionResult]] = None
|
716
422
|
|
717
|
-
redact: Optional[RedactConnectorSettings] = None
|
718
423
|
|
424
|
+
class GuardResult(PangeaResponseResult):
|
425
|
+
output: Optional[dict[str, Any]] = None
|
426
|
+
"""Updated structured prompt."""
|
719
427
|
|
720
|
-
|
721
|
-
"""
|
722
|
-
Configuration for an individual access rule used in an AI Guard recipe. Each
|
723
|
-
rule defines its matching logic and the action to apply when the logic
|
724
|
-
evaluates to true.
|
725
|
-
"""
|
428
|
+
blocked: Optional[bool] = None
|
429
|
+
"""Whether or not the prompt triggered a block detection."""
|
726
430
|
|
727
|
-
|
728
|
-
"""
|
729
|
-
Unique identifier for this rule. Should be user-readable and consistent
|
730
|
-
across recipe updates.
|
731
|
-
"""
|
732
|
-
name: str
|
733
|
-
"""Display label for the rule shown in user interfaces."""
|
734
|
-
state: Literal["block", "report"]
|
735
|
-
"""
|
736
|
-
Action to apply if the rule matches. Use 'block' to stop further processing
|
737
|
-
or 'report' to simply log the match.
|
738
|
-
"""
|
431
|
+
transformed: Optional[bool] = None
|
432
|
+
"""Whether or not the original input was transformed."""
|
739
433
|
|
434
|
+
recipe: Optional[str] = None
|
435
|
+
"""The Recipe that was used."""
|
740
436
|
|
741
|
-
|
742
|
-
|
743
|
-
"""Human-readable name of the recipe"""
|
744
|
-
description: str
|
745
|
-
"""Detailed description of the recipe's purpose or use case"""
|
746
|
-
version: Optional[str] = "v1"
|
747
|
-
"""Optional version identifier for the recipe. Can be used to track changes."""
|
748
|
-
detectors: Optional[list[DetectorSetting]] = None
|
749
|
-
"""Setting for Detectors"""
|
750
|
-
access_rules: Optional[list[AccessRuleSettings]] = None
|
751
|
-
"""Configuration for access rules used in an AI Guard recipe."""
|
752
|
-
connector_settings: Optional[ConnectorSettings] = None
|
753
|
-
|
754
|
-
|
755
|
-
class ServiceConfig(PangeaResponseResult):
|
756
|
-
id: Optional[str] = None
|
757
|
-
"""ID of an AI Guard service configuration"""
|
758
|
-
name: Optional[str] = None
|
759
|
-
"""Human-readable name of the AI Guard service configuration"""
|
760
|
-
audit_data_activity: Optional[AuditDataActivityConfig] = None
|
761
|
-
connections: Optional[ConnectionsConfig] = None
|
762
|
-
recipes: Optional[dict[str, RecipeConfig]] = None
|
763
|
-
|
764
|
-
|
765
|
-
class ServiceConfigFilter(BaseModel):
|
766
|
-
model_config = ConfigDict(extra="forbid")
|
767
|
-
|
768
|
-
id: Optional[str] = None
|
769
|
-
"""
|
770
|
-
Only records where id equals this value.
|
771
|
-
"""
|
772
|
-
id__contains: Optional[list[str]] = None
|
773
|
-
"""
|
774
|
-
Only records where id includes each substring.
|
775
|
-
"""
|
776
|
-
id__in: Optional[list[str]] = None
|
777
|
-
"""
|
778
|
-
Only records where id equals one of the provided substrings.
|
779
|
-
"""
|
780
|
-
created_at: Optional[PangeaDateTime] = None
|
781
|
-
"""
|
782
|
-
Only records where created_at equals this value.
|
783
|
-
"""
|
784
|
-
created_at__gt: Optional[PangeaDateTime] = None
|
785
|
-
"""
|
786
|
-
Only records where created_at is greater than this value.
|
787
|
-
"""
|
788
|
-
created_at__gte: Optional[PangeaDateTime] = None
|
789
|
-
"""
|
790
|
-
Only records where created_at is greater than or equal to this value.
|
791
|
-
"""
|
792
|
-
created_at__lt: Optional[PangeaDateTime] = None
|
793
|
-
"""
|
794
|
-
Only records where created_at is less than this value.
|
795
|
-
"""
|
796
|
-
created_at__lte: Optional[PangeaDateTime] = None
|
797
|
-
"""
|
798
|
-
Only records where created_at is less than or equal to this value.
|
799
|
-
"""
|
800
|
-
updated_at: Optional[PangeaDateTime] = None
|
801
|
-
"""
|
802
|
-
Only records where updated_at equals this value.
|
803
|
-
"""
|
804
|
-
updated_at__gt: Optional[PangeaDateTime] = None
|
805
|
-
"""
|
806
|
-
Only records where updated_at is greater than this value.
|
807
|
-
"""
|
808
|
-
updated_at__gte: Optional[PangeaDateTime] = None
|
809
|
-
"""
|
810
|
-
Only records where updated_at is greater than or equal to this value.
|
811
|
-
"""
|
812
|
-
updated_at__lt: Optional[PangeaDateTime] = None
|
813
|
-
"""
|
814
|
-
Only records where updated_at is less than this value.
|
815
|
-
"""
|
816
|
-
updated_at__lte: Optional[PangeaDateTime] = None
|
817
|
-
"""
|
818
|
-
Only records where updated_at is less than or equal to this value.
|
819
|
-
"""
|
437
|
+
detectors: GuardDetectors
|
438
|
+
"""Result of the recipe analyzing and input prompt."""
|
820
439
|
|
440
|
+
access_rules: Optional[dict[str, AccessRuleResult]] = None
|
441
|
+
"""Result of the recipe evaluating configured rules"""
|
821
442
|
|
822
|
-
|
823
|
-
count: Optional[int] = None
|
824
|
-
"""The total number of service configs matched by the list request."""
|
825
|
-
last: Optional[str] = None
|
443
|
+
fpe_context: Optional[str] = None
|
826
444
|
"""
|
827
|
-
|
828
|
-
|
445
|
+
If an FPE redaction method returned results, this will be the context passed
|
446
|
+
to unredact.
|
829
447
|
"""
|
830
|
-
items: Optional[list[ServiceConfig]] = None
|
831
|
-
|
832
|
-
|
833
|
-
class ExtraInfoTyped(TypedDict, total=False):
|
834
|
-
"""(AIDR) Logging schema."""
|
835
|
-
|
836
|
-
app_name: str
|
837
|
-
"""Name of source application."""
|
838
|
-
|
839
|
-
app_group: str
|
840
|
-
"""The group of source application."""
|
841
|
-
|
842
|
-
app_version: str
|
843
|
-
"""Version of the source application."""
|
844
|
-
|
845
|
-
actor_name: str
|
846
|
-
"""Name of subject actor."""
|
847
|
-
|
848
|
-
actor_group: str
|
849
|
-
"""The group of subject actor."""
|
850
|
-
|
851
|
-
source_region: str
|
852
|
-
"""Geographic region or data center."""
|
853
|
-
|
854
|
-
data_sensitivity: str
|
855
|
-
"""Sensitivity level of data involved"""
|
856
|
-
|
857
|
-
customer_tier: str
|
858
|
-
"""Tier of the user or organization"""
|
859
|
-
|
860
|
-
use_case: str
|
861
|
-
"""Business-specific use case"""
|
862
448
|
|
449
|
+
input_token_count: Optional[float] = None
|
450
|
+
"""Number of tokens counted in the input"""
|
863
451
|
|
864
|
-
|
452
|
+
output_token_count: Optional[float] = None
|
453
|
+
"""Number of tokens counted in the output"""
|
865
454
|
|
866
455
|
|
867
456
|
class AIGuard(ServiceBase):
|
@@ -938,11 +527,12 @@ class AIGuard(ServiceBase):
|
|
938
527
|
def guard_text(
|
939
528
|
self,
|
940
529
|
*,
|
941
|
-
messages: Sequence[Message],
|
530
|
+
messages: Sequence[Message | McpToolsMessage],
|
942
531
|
recipe: str | None = None,
|
943
532
|
debug: bool | None = None,
|
944
533
|
overrides: Overrides | None = None,
|
945
534
|
log_fields: LogFields | None = None,
|
535
|
+
only_relevant_content: bool = False,
|
946
536
|
) -> PangeaResponse[TextGuardResult]:
|
947
537
|
"""
|
948
538
|
Guard LLM input and output text
|
@@ -965,6 +555,8 @@ class AIGuard(ServiceBase):
|
|
965
555
|
recipe: Recipe key of a configuration of data types and settings
|
966
556
|
defined in the Pangea User Console. It specifies the rules that
|
967
557
|
are to be applied to the text, such as defang malicious URLs.
|
558
|
+
only_relevant_content: Whether or not to only send relevant content
|
559
|
+
to AI Guard.
|
968
560
|
|
969
561
|
Examples:
|
970
562
|
response = ai_guard.guard_text(messages=[Message(role="user", content="hello world")])
|
@@ -974,11 +566,12 @@ class AIGuard(ServiceBase):
|
|
974
566
|
self,
|
975
567
|
text: str | None = None,
|
976
568
|
*,
|
977
|
-
messages: Sequence[Message] | None = None,
|
569
|
+
messages: Sequence[Message | McpToolsMessage] | None = None,
|
978
570
|
debug: bool | None = None,
|
979
571
|
log_fields: LogFields | None = None,
|
980
572
|
overrides: Overrides | None = None,
|
981
573
|
recipe: str | None = None,
|
574
|
+
only_relevant_content: bool = False,
|
982
575
|
) -> PangeaResponse[TextGuardResult]:
|
983
576
|
"""
|
984
577
|
Guard LLM input and output text
|
@@ -1004,6 +597,8 @@ class AIGuard(ServiceBase):
|
|
1004
597
|
recipe: Recipe key of a configuration of data types and settings
|
1005
598
|
defined in the Pangea User Console. It specifies the rules that
|
1006
599
|
are to be applied to the text, such as defang malicious URLs.
|
600
|
+
only_relevant_content: Whether or not to only send relevant content
|
601
|
+
to AI Guard.
|
1007
602
|
|
1008
603
|
Examples:
|
1009
604
|
response = ai_guard.guard_text("text")
|
@@ -1012,7 +607,11 @@ class AIGuard(ServiceBase):
|
|
1012
607
|
if text is not None and messages is not None:
|
1013
608
|
raise ValueError("Exactly one of `text` or `messages` must be given")
|
1014
609
|
|
1015
|
-
|
610
|
+
if only_relevant_content and messages is not None:
|
611
|
+
original_messages = messages
|
612
|
+
messages, original_indices = get_relevant_content(messages)
|
613
|
+
|
614
|
+
response = self.request.post(
|
1016
615
|
"v1/text/guard",
|
1017
616
|
TextGuardResult,
|
1018
617
|
data={
|
@@ -1025,6 +624,13 @@ class AIGuard(ServiceBase):
|
|
1025
624
|
},
|
1026
625
|
)
|
1027
626
|
|
627
|
+
if only_relevant_content and response.result and response.result.prompt_messages:
|
628
|
+
response.result.prompt_messages = patch_messages(
|
629
|
+
original_messages, original_indices, response.result.prompt_messages
|
630
|
+
) # type: ignore[assignment]
|
631
|
+
|
632
|
+
return response
|
633
|
+
|
1028
634
|
def guard(
|
1029
635
|
self,
|
1030
636
|
input: Mapping[str, Any],
|
@@ -1042,8 +648,8 @@ class AIGuard(ServiceBase):
|
|
1042
648
|
source_ip: str | None = None,
|
1043
649
|
source_location: str | None = None,
|
1044
650
|
tenant_id: str | None = None,
|
1045
|
-
event_type: Literal["input", "output"] | None = None,
|
1046
|
-
|
651
|
+
event_type: Literal["input", "output", "tool_input", "tool_output", "tool_listing"] | None = None,
|
652
|
+
collector_instance_id: str | None = None,
|
1047
653
|
extra_info: ExtraInfo | None = None,
|
1048
654
|
count_tokens: bool | None = None,
|
1049
655
|
) -> PangeaResponse[GuardResult]:
|
@@ -1053,7 +659,7 @@ class AIGuard(ServiceBase):
|
|
1053
659
|
Analyze and redact content to avoid manipulation of the model, addition
|
1054
660
|
of malicious content, and other undesirable data transfers.
|
1055
661
|
|
1056
|
-
OperationId:
|
662
|
+
OperationId: ai_guard_post_v1_guard
|
1057
663
|
|
1058
664
|
Args:
|
1059
665
|
input: 'messages' (required) contains Prompt content and role array
|
@@ -1072,12 +678,12 @@ class AIGuard(ServiceBase):
|
|
1072
678
|
source_location: Location of user or app or agent.
|
1073
679
|
tenant_id: For gateway-like integrations with multi-tenant support.
|
1074
680
|
event_type: (AIDR) Event Type.
|
1075
|
-
|
681
|
+
collector_instance_id: (AIDR) collector instance id.
|
1076
682
|
extra_info: (AIDR) Logging schema.
|
1077
683
|
count_tokens: Provide input and output token count.
|
1078
684
|
"""
|
1079
685
|
return self.request.post(
|
1080
|
-
"
|
686
|
+
"v1/guard",
|
1081
687
|
GuardResult,
|
1082
688
|
data={
|
1083
689
|
"input": input,
|
@@ -1095,86 +701,63 @@ class AIGuard(ServiceBase):
|
|
1095
701
|
"source_location": source_location,
|
1096
702
|
"tenant_id": tenant_id,
|
1097
703
|
"event_type": event_type,
|
1098
|
-
"
|
704
|
+
"collector_instance_id": collector_instance_id,
|
1099
705
|
"extra_info": extra_info,
|
1100
706
|
"count_tokens": count_tokens,
|
1101
707
|
},
|
1102
708
|
)
|
1103
709
|
|
1104
|
-
def get_service_config(self, id: str) -> PangeaResponse[ServiceConfig]:
|
1105
|
-
"""
|
1106
|
-
OperationId: ai_guard_post_v1beta_config
|
1107
|
-
"""
|
1108
|
-
return self.request.post("v1beta/config", data={"id": id}, result_class=ServiceConfig)
|
1109
710
|
|
1110
|
-
|
1111
|
-
|
1112
|
-
|
1113
|
-
|
1114
|
-
|
1115
|
-
audit_data_activity: AuditDataActivityConfig | None = None,
|
1116
|
-
connections: ConnectionsConfig | None = None,
|
1117
|
-
recipes: Mapping[str, RecipeConfig] | None = None,
|
1118
|
-
) -> PangeaResponse[ServiceConfig]:
|
1119
|
-
"""
|
1120
|
-
OperationId: ai_guard_post_v1beta_config_create
|
1121
|
-
"""
|
1122
|
-
return self.request.post(
|
1123
|
-
"v1beta/config/create",
|
1124
|
-
data={
|
1125
|
-
"name": name,
|
1126
|
-
"id": id,
|
1127
|
-
"audit_data_activity": audit_data_activity,
|
1128
|
-
"connections": connections,
|
1129
|
-
"recipes": recipes,
|
1130
|
-
},
|
1131
|
-
result_class=ServiceConfig,
|
1132
|
-
)
|
711
|
+
def get_relevant_content(
|
712
|
+
messages: Sequence[Message | McpToolsMessage],
|
713
|
+
) -> tuple[list[Message | McpToolsMessage], list[int]]:
|
714
|
+
"""
|
715
|
+
Returns relevant messages and their indices in the original list.
|
1133
716
|
|
1134
|
-
|
1135
|
-
|
1136
|
-
|
1137
|
-
|
1138
|
-
|
1139
|
-
|
1140
|
-
connections: ConnectionsConfig | None = None,
|
1141
|
-
recipes: Mapping[str, RecipeConfig] | None = None,
|
1142
|
-
) -> PangeaResponse[ServiceConfig]:
|
1143
|
-
"""
|
1144
|
-
OperationId: ai_guard_post_v1beta_config_update
|
1145
|
-
"""
|
1146
|
-
return self.request.post(
|
1147
|
-
"v1beta/config/update",
|
1148
|
-
data={
|
1149
|
-
"id": id,
|
1150
|
-
"name": name,
|
1151
|
-
"audit_data_activity": audit_data_activity,
|
1152
|
-
"connections": connections,
|
1153
|
-
"recipes": recipes,
|
1154
|
-
},
|
1155
|
-
result_class=ServiceConfig,
|
1156
|
-
)
|
717
|
+
1, If last message is "assistant", then the relevant messages are all system
|
718
|
+
messages that come before it, plus that last assistant message.
|
719
|
+
2. Else, find the last "assistant" message. Then the relevant messages are
|
720
|
+
all system messages that come before it, and all messages that come after
|
721
|
+
it.
|
722
|
+
"""
|
1157
723
|
|
1158
|
-
|
1159
|
-
|
1160
|
-
OperationId: ai_guard_post_v1beta_config_delete
|
1161
|
-
"""
|
1162
|
-
return self.request.post("v1beta/config/delete", data={"id": id}, result_class=ServiceConfig)
|
724
|
+
if len(messages) == 0:
|
725
|
+
return [], []
|
1163
726
|
|
1164
|
-
|
1165
|
-
|
1166
|
-
|
1167
|
-
|
1168
|
-
|
1169
|
-
|
1170
|
-
|
1171
|
-
|
1172
|
-
|
1173
|
-
|
1174
|
-
|
1175
|
-
|
1176
|
-
|
1177
|
-
|
1178
|
-
|
1179
|
-
|
1180
|
-
|
727
|
+
system_messages = [msg for msg in messages if msg.role == "system"]
|
728
|
+
system_indices = [i for i, msg in enumerate(messages) if msg.role == "system"]
|
729
|
+
|
730
|
+
# If the last message is assistant, then return all system messages and that
|
731
|
+
# assistant message.
|
732
|
+
if messages[-1].role == "assistant":
|
733
|
+
return system_messages + [messages[-1]], system_indices + [len(messages) - 1]
|
734
|
+
|
735
|
+
# Otherwise, work backwards until we find the last assistant message, then
|
736
|
+
# return all messages after that.
|
737
|
+
last_assistant_index = -1
|
738
|
+
for i in range(len(messages) - 1, -1, -1):
|
739
|
+
if messages[i].role == "assistant":
|
740
|
+
last_assistant_index = i
|
741
|
+
break
|
742
|
+
|
743
|
+
relevant_messages = []
|
744
|
+
indices = []
|
745
|
+
for i, msg in enumerate(messages):
|
746
|
+
if msg.role == "system" or i > last_assistant_index:
|
747
|
+
relevant_messages.append(msg)
|
748
|
+
indices.append(i)
|
749
|
+
|
750
|
+
return relevant_messages, indices
|
751
|
+
|
752
|
+
|
753
|
+
def patch_messages(
|
754
|
+
original: Sequence[Message | McpToolsMessage],
|
755
|
+
original_indices: list[int],
|
756
|
+
transformed: Sequence[PromptMessage],
|
757
|
+
) -> list[Message | McpToolsMessage | PromptMessage]:
|
758
|
+
if len(original) == len(transformed):
|
759
|
+
return list(transformed)
|
760
|
+
|
761
|
+
return [
|
762
|
+
transformed[original_indices.index(i)] if i in original_indices else orig for i, orig in enumerate(original)
|
763
|
+
]
|