pangea-sdk 6.5.0b1__py3-none-any.whl → 6.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,13 +1,13 @@
1
1
  from __future__ import annotations
2
2
 
3
3
  from collections.abc import Mapping, Sequence
4
- from typing import Annotated, Any, Generic, Literal, Optional, Union, overload
4
+ from typing import Annotated, Any, Generic, Literal, Optional, overload
5
5
 
6
6
  from pydantic import BaseModel, ConfigDict, Field, RootModel
7
- from typing_extensions import TypeAlias, TypedDict, TypeVar
8
7
 
8
+ from pangea._typing import T
9
9
  from pangea.config import PangeaConfig
10
- from pangea.response import APIRequestModel, APIResponseModel, PangeaDateTime, PangeaResponse, PangeaResponseResult
10
+ from pangea.response import APIRequestModel, APIResponseModel, PangeaResponse, PangeaResponseResult
11
11
  from pangea.services.base import ServiceBase
12
12
 
13
13
  # This is named "prompt injection" in the API spec even though it is also used
@@ -22,10 +22,15 @@ PiiEntityAction = Literal["disabled", "report", "block", "mask", "partial_maskin
22
22
 
23
23
 
24
24
  class Message(APIRequestModel):
25
- role: str
25
+ role: Optional[str] = None
26
26
  content: str
27
27
 
28
28
 
29
+ class McpToolsMessage(APIRequestModel):
30
+ role: Literal["tools"]
31
+ content: list[dict[str, Any]]
32
+
33
+
29
34
  class CodeDetectionOverride(APIRequestModel):
30
35
  disabled: Optional[bool] = None
31
36
  action: Optional[Literal["report", "block"]] = None
@@ -277,29 +282,31 @@ class CodeDetectionResult(APIResponseModel):
277
282
  """The action taken by this Detector"""
278
283
 
279
284
 
280
- _T = TypeVar("_T")
281
-
282
-
283
- class TextGuardDetector(APIResponseModel, Generic[_T]):
285
+ class GuardDetector(APIResponseModel, Generic[T]):
284
286
  detected: Optional[bool] = None
285
- data: Optional[_T] = None
287
+ data: Optional[T] = None
286
288
 
287
289
 
288
290
  class TextGuardDetectors(APIResponseModel):
289
- code_detection: Optional[TextGuardDetector[CodeDetectionResult]] = None
290
- competitors: Optional[TextGuardDetector[object]] = None
291
- custom_entity: Optional[TextGuardDetector[object]] = None
292
- gibberish: Optional[TextGuardDetector[object]] = None
293
- hardening: Optional[TextGuardDetector[object]] = None
294
- language_detection: Optional[TextGuardDetector[LanguageDetectionResult]] = None
295
- malicious_entity: Optional[TextGuardDetector[MaliciousEntityResult]] = None
296
- pii_entity: Optional[TextGuardDetector[PiiEntityResult]] = None
297
- profanity_and_toxicity: Optional[TextGuardDetector[object]] = None
298
- prompt_injection: Optional[TextGuardDetector[PromptInjectionResult]] = None
299
- secrets_detection: Optional[TextGuardDetector[SecretsEntityResult]] = None
300
- selfharm: Optional[TextGuardDetector[object]] = None
301
- sentiment: Optional[TextGuardDetector[object]] = None
302
- topic: Optional[TextGuardDetector[TopicDetectionResult]] = None
291
+ code_detection: Optional[GuardDetector[CodeDetectionResult]] = None
292
+ competitors: Optional[GuardDetector[object]] = None
293
+ custom_entity: Optional[GuardDetector[object]] = None
294
+ gibberish: Optional[GuardDetector[object]] = None
295
+ hardening: Optional[GuardDetector[object]] = None
296
+ language_detection: Optional[GuardDetector[LanguageDetectionResult]] = None
297
+ malicious_entity: Optional[GuardDetector[MaliciousEntityResult]] = None
298
+ pii_entity: Optional[GuardDetector[PiiEntityResult]] = None
299
+ profanity_and_toxicity: Optional[GuardDetector[object]] = None
300
+ prompt_injection: Optional[GuardDetector[PromptInjectionResult]] = None
301
+ secrets_detection: Optional[GuardDetector[SecretsEntityResult]] = None
302
+ selfharm: Optional[GuardDetector[object]] = None
303
+ sentiment: Optional[GuardDetector[object]] = None
304
+ topic: Optional[GuardDetector[TopicDetectionResult]] = None
305
+
306
+
307
+ class PromptMessage(APIResponseModel):
308
+ role: str
309
+ content: str
303
310
 
304
311
 
305
312
  class TextGuardResult(PangeaResponseResult):
@@ -318,7 +325,7 @@ class TextGuardResult(PangeaResponseResult):
318
325
  unredact.
319
326
  """
320
327
 
321
- prompt_messages: Optional[object] = None
328
+ prompt_messages: Optional[list[PromptMessage]] = None
322
329
  """Updated structured prompt, if applicable."""
323
330
 
324
331
  prompt_text: Optional[str] = None
@@ -331,537 +338,119 @@ class TextGuardResult(PangeaResponseResult):
331
338
  """Whether or not the original input was transformed."""
332
339
 
333
340
 
334
- class GuardDetectors(APIResponseModel):
335
- code: Optional[object] = None
336
- competitors: Optional[object] = None
337
- confidential_and_pii_entity: Optional[object] = None
338
- custom_entity: Optional[object] = None
339
- language: Optional[object] = None
340
- malicious_entity: Optional[object] = None
341
- malicious_prompt: Optional[object] = None
342
- prompt_hardening: Optional[object] = None
343
- secret_and_key_entity: Optional[object] = None
344
- topic: Optional[object] = None
345
-
346
-
347
- class GuardResult(PangeaResponseResult):
348
- detectors: GuardDetectors
349
- """Result of the recipe analyzing and input prompt."""
350
-
351
- access_rules: Optional[object] = None
352
- """Result of the recipe evaluating configured rules"""
353
-
354
- blocked: Optional[bool] = None
355
- """Whether or not the prompt triggered a block detection."""
356
-
357
- fpe_context: Optional[str] = None
358
- """
359
- If an FPE redaction method returned results, this will be the context passed
360
- to unredact.
361
- """
362
-
363
- input_token_count: Optional[float] = None
364
- """Number of tokens counted in the input"""
365
-
366
- output: Optional[object] = None
367
- """Updated structured prompt."""
368
-
369
- output_token_count: Optional[float] = None
370
- """Number of tokens counted in the output"""
371
-
372
- recipe: Optional[str] = None
373
- """The Recipe that was used."""
374
-
375
- transformed: Optional[bool] = None
376
- """Whether or not the original input was transformed."""
377
-
378
-
379
- class Areas(BaseModel):
380
- model_config = ConfigDict(extra="forbid")
381
-
382
- text_guard: bool
383
-
384
-
385
- class AuditDataActivityConfig(BaseModel):
386
- model_config = ConfigDict(extra="forbid")
387
-
388
- enabled: bool
389
- audit_service_config_id: str
390
- areas: Areas
391
-
392
-
393
- class PromptGuard(BaseModel):
394
- model_config = ConfigDict(extra="forbid")
395
-
396
- enabled: Optional[bool] = None
397
- config_id: Optional[str] = None
398
- confidence_threshold: Optional[float] = None
399
-
341
+ class Tool(RootModel[str]):
342
+ root: Annotated[str, Field(min_length=1)]
343
+ """Tool name"""
400
344
 
401
- class IpIntel(BaseModel):
402
- model_config = ConfigDict(extra="forbid")
403
345
 
404
- enabled: Optional[bool] = None
405
- config_id: Optional[str] = None
406
- reputation_provider: Optional[str] = None
407
- risk_threshold: Optional[float] = None
346
+ class McpTool(APIRequestModel):
347
+ server_name: Annotated[str, Field(min_length=1)]
348
+ """MCP server name"""
408
349
 
350
+ tools: Annotated[list[Tool], Field(min_length=1)]
409
351
 
410
- class UserIntel(BaseModel):
411
- model_config = ConfigDict(extra="forbid")
412
352
 
413
- enabled: Optional[bool] = None
414
- config_id: Optional[str] = None
415
- breach_provider: Optional[str] = None
416
-
417
-
418
- class UrlIntel(BaseModel):
419
- model_config = ConfigDict(extra="forbid")
420
-
421
- enabled: Optional[bool] = None
422
- config_id: Optional[str] = None
423
- reputation_provider: Optional[str] = None
424
- risk_threshold: Optional[float] = None
425
-
426
-
427
- class DomainIntel(BaseModel):
428
- model_config = ConfigDict(extra="forbid")
429
-
430
- enabled: Optional[bool] = None
431
- config_id: Optional[str] = None
432
- reputation_provider: Optional[str] = None
433
- risk_threshold: Optional[float] = None
434
-
435
-
436
- class FileScan(BaseModel):
437
- model_config = ConfigDict(extra="forbid")
438
-
439
- enabled: Optional[bool] = None
440
- config_id: Optional[str] = None
441
- scan_provider: Optional[str] = None
442
- risk_threshold: Optional[float] = None
443
-
444
-
445
- class Redact(BaseModel):
446
- model_config = ConfigDict(extra="forbid")
447
-
448
- enabled: Optional[bool] = None
449
- config_id: Optional[str] = None
450
-
451
-
452
- class Vault(BaseModel):
453
- model_config = ConfigDict(extra="forbid")
454
-
455
- config_id: Optional[str] = None
456
-
457
-
458
- class Lingua(BaseModel):
459
- model_config = ConfigDict(extra="forbid")
353
+ class ExtraInfo(BaseModel):
354
+ """(AIDR) Logging schema."""
460
355
 
461
- enabled: Optional[bool] = None
356
+ # Additional properties are allowed here.
357
+ model_config = ConfigDict(extra="allow")
462
358
 
359
+ app_name: Optional[str] = None
360
+ """Name of source application/agent."""
463
361
 
464
- class Code(BaseModel):
465
- model_config = ConfigDict(extra="forbid")
362
+ app_group: Optional[str] = None
363
+ """The group of source application/agent."""
466
364
 
467
- enabled: Optional[bool] = None
365
+ app_version: Optional[str] = None
366
+ """Version of the source application/agent."""
468
367
 
368
+ actor_name: Optional[str] = None
369
+ """Name of subject actor/service account."""
469
370
 
470
- class ConnectionsConfig(BaseModel):
471
- model_config = ConfigDict(extra="forbid")
371
+ actor_group: Optional[str] = None
372
+ """The group of subject actor."""
472
373
 
473
- prompt_guard: Optional[PromptGuard] = None
474
- ip_intel: Optional[IpIntel] = None
475
- user_intel: Optional[UserIntel] = None
476
- url_intel: Optional[UrlIntel] = None
477
- domain_intel: Optional[DomainIntel] = None
478
- file_scan: Optional[FileScan] = None
479
- redact: Optional[Redact] = None
480
- vault: Optional[Vault] = None
481
- lingua: Optional[Lingua] = None
482
- code: Optional[Code] = None
374
+ source_region: Optional[str] = None
375
+ """Geographic region or data center."""
483
376
 
377
+ sub_tenant: Optional[str] = None
378
+ """Sub tenant of the user or organization"""
379
+ mcp_tools: Optional[Sequence[McpTool]] = None
484
380
 
485
- class PartialMasking(BaseModel):
486
- masking_type: Optional[Literal["unmask", "mask"]] = "unmask"
487
- unmasked_from_left: Annotated[Optional[int], Field(ge=0)] = None
488
- unmasked_from_right: Annotated[Optional[int], Field(ge=0)] = None
489
- masked_from_left: Annotated[Optional[int], Field(ge=0)] = None
490
- masked_from_right: Annotated[Optional[int], Field(ge=0)] = None
491
- chars_to_ignore: Optional[list[CharsToIgnoreItem]] = None
492
- masking_char: Annotated[Optional[str], Field(max_length=1, min_length=1)] = "*"
381
+ """Each item groups tools for a given MCP server."""
493
382
 
494
383
 
495
- class RuleRedactionConfig1(APIResponseModel):
496
- redaction_type: Literal[
497
- "mask",
498
- "partial_masking",
499
- "replacement",
500
- "hash",
501
- "detect_only",
502
- "fpe",
503
- "mask",
504
- "detect_only",
505
- ]
506
- """Redaction method to apply for this rule"""
507
- redaction_value: Optional[str] = None
508
- partial_masking: Optional[PartialMasking] = None
509
- hash: Optional[Hash] = None
510
- fpe_alphabet: Optional[
511
- Literal[
512
- "numeric",
513
- "alphalower",
514
- "alphaupper",
515
- "alpha",
516
- "alphanumericlower",
517
- "alphanumericupper",
518
- "alphanumeric",
519
- ]
520
- ] = None
521
-
522
-
523
- class PartialMasking1(BaseModel):
524
- masking_type: Optional[Literal["unmask", "mask"]] = "unmask"
525
- unmasked_from_left: Annotated[Optional[int], Field(ge=0)] = None
526
- unmasked_from_right: Annotated[Optional[int], Field(ge=0)] = None
527
- masked_from_left: Annotated[Optional[int], Field(ge=0)] = None
528
- masked_from_right: Annotated[Optional[int], Field(ge=0)] = None
529
- chars_to_ignore: Optional[list[CharsToIgnoreItem]] = None
530
- masking_char: Annotated[Optional[str], Field(max_length=1, min_length=1)] = "*"
531
-
532
-
533
- class RuleRedactionConfig2(BaseModel):
534
- model_config = ConfigDict(extra="forbid")
535
-
536
- redaction_type: Literal["replacement"]
537
- redaction_value: str
538
- partial_masking: Optional[PartialMasking1] = None
539
- hash: Optional[Hash] = None
540
- fpe_alphabet: Optional[
541
- Literal[
542
- "numeric",
543
- "alphalower",
544
- "alphaupper",
545
- "alpha",
546
- "alphanumericlower",
547
- "alphanumericupper",
548
- "alphanumeric",
549
- ]
550
- ] = None
551
-
552
-
553
- class PartialMasking2(BaseModel):
554
- masking_type: Optional[Literal["unmask", "mask"]] = "unmask"
555
- unmasked_from_left: Annotated[Optional[int], Field(ge=0)] = None
556
- unmasked_from_right: Annotated[Optional[int], Field(ge=0)] = None
557
- masked_from_left: Annotated[Optional[int], Field(ge=0)] = None
558
- masked_from_right: Annotated[Optional[int], Field(ge=0)] = None
559
- chars_to_ignore: Optional[list[CharsToIgnoreItem]] = None
560
- masking_char: Annotated[Optional[str], Field(max_length=1, min_length=1)] = "*"
561
-
562
-
563
- class RuleRedactionConfig3(BaseModel):
564
- model_config = ConfigDict(extra="forbid")
565
-
566
- redaction_type: Literal["partial_masking"]
567
- redaction_value: str
568
- partial_masking: PartialMasking2
569
- hash: Optional[Hash] = None
570
- fpe_alphabet: Optional[
571
- Literal[
572
- "numeric",
573
- "alphalower",
574
- "alphaupper",
575
- "alpha",
576
- "alphanumericlower",
577
- "alphanumericupper",
578
- "alphanumeric",
579
- ]
580
- ] = None
581
-
582
-
583
- class PartialMasking3(BaseModel):
584
- masking_type: Optional[Literal["unmask", "mask"]] = "unmask"
585
- unmasked_from_left: Annotated[Optional[int], Field(ge=0)] = None
586
- unmasked_from_right: Annotated[Optional[int], Field(ge=0)] = None
587
- masked_from_left: Annotated[Optional[int], Field(ge=0)] = None
588
- masked_from_right: Annotated[Optional[int], Field(ge=0)] = None
589
- chars_to_ignore: Optional[list[CharsToIgnoreItem]] = None
590
- masking_char: Annotated[Optional[str], Field(max_length=1, min_length=1)] = "*"
591
-
592
-
593
- class RuleRedactionConfig4(BaseModel):
594
- model_config = ConfigDict(extra="forbid")
595
-
596
- redaction_type: Literal["hash"]
597
- redaction_value: str
598
- partial_masking: PartialMasking3
599
- hash: Optional[Hash] = None
600
- fpe_alphabet: Optional[
601
- Literal[
602
- "numeric",
603
- "alphalower",
604
- "alphaupper",
605
- "alpha",
606
- "alphanumericlower",
607
- "alphanumericupper",
608
- "alphanumeric",
609
- ]
610
- ] = None
611
-
612
-
613
- class CharsToIgnoreItem(RootModel[str]):
614
- root: Annotated[str, Field(max_length=1, min_length=1)]
615
-
616
-
617
- class PartialMasking4(BaseModel):
618
- masking_type: Optional[Literal["unmask", "mask"]] = "unmask"
619
- unmasked_from_left: Annotated[Optional[int], Field(ge=0)] = None
620
- unmasked_from_right: Annotated[Optional[int], Field(ge=0)] = None
621
- masked_from_left: Annotated[Optional[int], Field(ge=0)] = None
622
- masked_from_right: Annotated[Optional[int], Field(ge=0)] = None
623
- chars_to_ignore: Optional[list[CharsToIgnoreItem]] = None
624
- masking_char: Annotated[Optional[str], Field(max_length=1, min_length=1)] = "*"
625
-
626
-
627
- class Hash(BaseModel):
628
- hash_type: Literal["md5", "sha256"]
629
- """The type of hashing algorithm"""
630
-
631
-
632
- class RuleRedactionConfig5(BaseModel):
633
- model_config = ConfigDict(extra="forbid")
634
-
635
- redaction_type: Literal["fpe"]
636
- redaction_value: str
637
- partial_masking: PartialMasking4
638
- hash: Optional[Hash] = None
639
- fpe_alphabet: Optional[
640
- Literal[
641
- "numeric",
642
- "alphalower",
643
- "alphaupper",
644
- "alpha",
645
- "alphanumericlower",
646
- "alphanumericupper",
647
- "alphanumeric",
648
- ]
649
- ] = None
650
-
651
-
652
- class Rule(APIResponseModel):
653
- redact_rule_id: str
384
+ class AccessRuleResult(APIResponseModel):
654
385
  """
655
- Identifier of the redaction rule to apply. This should match a rule defined
656
- in the [Redact service](https://pangea.cloud/docs/redact/using-redact/using-redact).
386
+ Details about the evaluation of a single rule, including whether it matched,
387
+ the action to take, the rule name, and optional debugging information.
657
388
  """
658
- redaction: Union[
659
- RuleRedactionConfig1,
660
- RuleRedactionConfig2,
661
- RuleRedactionConfig3,
662
- RuleRedactionConfig4,
663
- RuleRedactionConfig5,
664
- ]
665
- """
666
- Configuration for the redaction method applied to detected values.
667
389
 
668
- Each rule supports one redaction type, such as masking, replacement,
669
- hashing, Format-Preserving Encryption (FPE), or detection-only mode.
670
- Additional parameters may be required depending on the selected redaction
671
- type.
390
+ matched: bool
391
+ """Whether this rule's logic evaluated to true for the input."""
672
392
 
673
- For more details, see the [AI Guard Recipe Actions](https://pangea.cloud/docs/ai-guard/recipes#actions)
674
- documentation.
675
- """
676
- block: Optional[bool] = None
677
- """
678
- If `true`, indicates that further processing should be stopped when this
679
- rule is triggered
680
- """
681
- disabled: Optional[bool] = None
682
- """
683
- If `true`, disables this specific rule even if the detector is enabled
684
- """
685
- reputation_check: Optional[bool] = None
686
- """
687
- If `true`, performs a reputation check using the configured intel provider.
688
- Applies to the Malicious Entity detector when using IP, URL, or Domain Intel
689
- services.
690
- """
691
- transform_if_malicious: Optional[bool] = None
393
+ action: str
692
394
  """
693
- If `true`, applies redaction or transformation when the detected value is
694
- determined to be malicious by intel analysis
395
+ The action resulting from the rule evaluation. One of 'allowed', 'blocked',
396
+ or 'reported'.
695
397
  """
696
398
 
399
+ name: str
400
+ """A human-readable name for the rule."""
697
401
 
698
- class Settings(BaseModel):
699
- rules: Optional[list[Rule]] = None
700
-
701
-
702
- class DetectorSetting(BaseModel):
703
- model_config = ConfigDict(extra="forbid")
704
-
705
- detector_name: str
706
- state: Literal["disabled", "enabled"]
707
- settings: Settings
402
+ logic: Optional[dict[str, Any]] = None
403
+ """The JSON logic expression evaluated for this rule."""
708
404
 
405
+ attributes: Optional[dict[str, Any]] = None
406
+ """The input attribute values that were available during rule evaluation."""
709
407
 
710
- class RedactConnectorSettings(BaseModel):
711
- fpe_tweak_vault_secret_id: Optional[str] = None
712
408
 
409
+ class GuardDetectors(APIResponseModel):
410
+ """Result of the recipe analyzing and input prompt."""
713
411
 
714
- class ConnectorSettings(BaseModel):
715
- model_config = ConfigDict(extra="forbid")
412
+ code: Optional[GuardDetector[CodeDetectionResult]] = None
413
+ competitors: Optional[GuardDetector[object]] = None
414
+ confidential_and_pii_entity: Optional[GuardDetector[PiiEntityResult]] = None
415
+ custom_entity: Optional[GuardDetector[object]] = None
416
+ language: Optional[GuardDetector[LanguageDetectionResult]] = None
417
+ malicious_entity: Optional[GuardDetector[MaliciousEntityResult]] = None
418
+ malicious_prompt: Optional[GuardDetector[PromptInjectionResult]] = None
419
+ prompt_hardening: Optional[GuardDetector[object]] = None
420
+ secret_and_key_entity: Optional[GuardDetector[SecretsEntityResult]] = None
421
+ topic: Optional[GuardDetector[TopicDetectionResult]] = None
716
422
 
717
- redact: Optional[RedactConnectorSettings] = None
718
423
 
424
+ class GuardResult(PangeaResponseResult):
425
+ output: Optional[dict[str, Any]] = None
426
+ """Updated structured prompt."""
719
427
 
720
- class AccessRuleSettings(APIResponseModel):
721
- """
722
- Configuration for an individual access rule used in an AI Guard recipe. Each
723
- rule defines its matching logic and the action to apply when the logic
724
- evaluates to true.
725
- """
428
+ blocked: Optional[bool] = None
429
+ """Whether or not the prompt triggered a block detection."""
726
430
 
727
- rule_key: Annotated[str, Field(pattern="^([a-zA-Z0-9_][a-zA-Z0-9/|_]*)$")]
728
- """
729
- Unique identifier for this rule. Should be user-readable and consistent
730
- across recipe updates.
731
- """
732
- name: str
733
- """Display label for the rule shown in user interfaces."""
734
- state: Literal["block", "report"]
735
- """
736
- Action to apply if the rule matches. Use 'block' to stop further processing
737
- or 'report' to simply log the match.
738
- """
431
+ transformed: Optional[bool] = None
432
+ """Whether or not the original input was transformed."""
739
433
 
434
+ recipe: Optional[str] = None
435
+ """The Recipe that was used."""
740
436
 
741
- class RecipeConfig(APIResponseModel):
742
- name: str
743
- """Human-readable name of the recipe"""
744
- description: str
745
- """Detailed description of the recipe's purpose or use case"""
746
- version: Optional[str] = "v1"
747
- """Optional version identifier for the recipe. Can be used to track changes."""
748
- detectors: Optional[list[DetectorSetting]] = None
749
- """Setting for Detectors"""
750
- access_rules: Optional[list[AccessRuleSettings]] = None
751
- """Configuration for access rules used in an AI Guard recipe."""
752
- connector_settings: Optional[ConnectorSettings] = None
753
-
754
-
755
- class ServiceConfig(PangeaResponseResult):
756
- id: Optional[str] = None
757
- """ID of an AI Guard service configuration"""
758
- name: Optional[str] = None
759
- """Human-readable name of the AI Guard service configuration"""
760
- audit_data_activity: Optional[AuditDataActivityConfig] = None
761
- connections: Optional[ConnectionsConfig] = None
762
- recipes: Optional[dict[str, RecipeConfig]] = None
763
-
764
-
765
- class ServiceConfigFilter(BaseModel):
766
- model_config = ConfigDict(extra="forbid")
767
-
768
- id: Optional[str] = None
769
- """
770
- Only records where id equals this value.
771
- """
772
- id__contains: Optional[list[str]] = None
773
- """
774
- Only records where id includes each substring.
775
- """
776
- id__in: Optional[list[str]] = None
777
- """
778
- Only records where id equals one of the provided substrings.
779
- """
780
- created_at: Optional[PangeaDateTime] = None
781
- """
782
- Only records where created_at equals this value.
783
- """
784
- created_at__gt: Optional[PangeaDateTime] = None
785
- """
786
- Only records where created_at is greater than this value.
787
- """
788
- created_at__gte: Optional[PangeaDateTime] = None
789
- """
790
- Only records where created_at is greater than or equal to this value.
791
- """
792
- created_at__lt: Optional[PangeaDateTime] = None
793
- """
794
- Only records where created_at is less than this value.
795
- """
796
- created_at__lte: Optional[PangeaDateTime] = None
797
- """
798
- Only records where created_at is less than or equal to this value.
799
- """
800
- updated_at: Optional[PangeaDateTime] = None
801
- """
802
- Only records where updated_at equals this value.
803
- """
804
- updated_at__gt: Optional[PangeaDateTime] = None
805
- """
806
- Only records where updated_at is greater than this value.
807
- """
808
- updated_at__gte: Optional[PangeaDateTime] = None
809
- """
810
- Only records where updated_at is greater than or equal to this value.
811
- """
812
- updated_at__lt: Optional[PangeaDateTime] = None
813
- """
814
- Only records where updated_at is less than this value.
815
- """
816
- updated_at__lte: Optional[PangeaDateTime] = None
817
- """
818
- Only records where updated_at is less than or equal to this value.
819
- """
437
+ detectors: GuardDetectors
438
+ """Result of the recipe analyzing and input prompt."""
820
439
 
440
+ access_rules: Optional[dict[str, AccessRuleResult]] = None
441
+ """Result of the recipe evaluating configured rules"""
821
442
 
822
- class ServiceConfigsPage(PangeaResponseResult):
823
- count: Optional[int] = None
824
- """The total number of service configs matched by the list request."""
825
- last: Optional[str] = None
443
+ fpe_context: Optional[str] = None
826
444
  """
827
- Used to fetch the next page of the current listing when provided in a
828
- repeated request's last parameter.
445
+ If an FPE redaction method returned results, this will be the context passed
446
+ to unredact.
829
447
  """
830
- items: Optional[list[ServiceConfig]] = None
831
-
832
-
833
- class ExtraInfoTyped(TypedDict, total=False):
834
- """(AIDR) Logging schema."""
835
-
836
- app_name: str
837
- """Name of source application."""
838
-
839
- app_group: str
840
- """The group of source application."""
841
-
842
- app_version: str
843
- """Version of the source application."""
844
-
845
- actor_name: str
846
- """Name of subject actor."""
847
-
848
- actor_group: str
849
- """The group of subject actor."""
850
-
851
- source_region: str
852
- """Geographic region or data center."""
853
-
854
- data_sensitivity: str
855
- """Sensitivity level of data involved"""
856
-
857
- customer_tier: str
858
- """Tier of the user or organization"""
859
-
860
- use_case: str
861
- """Business-specific use case"""
862
448
 
449
+ input_token_count: Optional[float] = None
450
+ """Number of tokens counted in the input"""
863
451
 
864
- ExtraInfo: TypeAlias = Union[ExtraInfoTyped, dict[str, object]]
452
+ output_token_count: Optional[float] = None
453
+ """Number of tokens counted in the output"""
865
454
 
866
455
 
867
456
  class AIGuard(ServiceBase):
@@ -938,11 +527,12 @@ class AIGuard(ServiceBase):
938
527
  def guard_text(
939
528
  self,
940
529
  *,
941
- messages: Sequence[Message],
530
+ messages: Sequence[Message | McpToolsMessage],
942
531
  recipe: str | None = None,
943
532
  debug: bool | None = None,
944
533
  overrides: Overrides | None = None,
945
534
  log_fields: LogFields | None = None,
535
+ only_relevant_content: bool = False,
946
536
  ) -> PangeaResponse[TextGuardResult]:
947
537
  """
948
538
  Guard LLM input and output text
@@ -965,6 +555,8 @@ class AIGuard(ServiceBase):
965
555
  recipe: Recipe key of a configuration of data types and settings
966
556
  defined in the Pangea User Console. It specifies the rules that
967
557
  are to be applied to the text, such as defang malicious URLs.
558
+ only_relevant_content: Whether or not to only send relevant content
559
+ to AI Guard.
968
560
 
969
561
  Examples:
970
562
  response = ai_guard.guard_text(messages=[Message(role="user", content="hello world")])
@@ -974,11 +566,12 @@ class AIGuard(ServiceBase):
974
566
  self,
975
567
  text: str | None = None,
976
568
  *,
977
- messages: Sequence[Message] | None = None,
569
+ messages: Sequence[Message | McpToolsMessage] | None = None,
978
570
  debug: bool | None = None,
979
571
  log_fields: LogFields | None = None,
980
572
  overrides: Overrides | None = None,
981
573
  recipe: str | None = None,
574
+ only_relevant_content: bool = False,
982
575
  ) -> PangeaResponse[TextGuardResult]:
983
576
  """
984
577
  Guard LLM input and output text
@@ -1004,6 +597,8 @@ class AIGuard(ServiceBase):
1004
597
  recipe: Recipe key of a configuration of data types and settings
1005
598
  defined in the Pangea User Console. It specifies the rules that
1006
599
  are to be applied to the text, such as defang malicious URLs.
600
+ only_relevant_content: Whether or not to only send relevant content
601
+ to AI Guard.
1007
602
 
1008
603
  Examples:
1009
604
  response = ai_guard.guard_text("text")
@@ -1012,7 +607,11 @@ class AIGuard(ServiceBase):
1012
607
  if text is not None and messages is not None:
1013
608
  raise ValueError("Exactly one of `text` or `messages` must be given")
1014
609
 
1015
- return self.request.post(
610
+ if only_relevant_content and messages is not None:
611
+ original_messages = messages
612
+ messages, original_indices = get_relevant_content(messages)
613
+
614
+ response = self.request.post(
1016
615
  "v1/text/guard",
1017
616
  TextGuardResult,
1018
617
  data={
@@ -1025,6 +624,13 @@ class AIGuard(ServiceBase):
1025
624
  },
1026
625
  )
1027
626
 
627
+ if only_relevant_content and response.result and response.result.prompt_messages:
628
+ response.result.prompt_messages = patch_messages(
629
+ original_messages, original_indices, response.result.prompt_messages
630
+ ) # type: ignore[assignment]
631
+
632
+ return response
633
+
1028
634
  def guard(
1029
635
  self,
1030
636
  input: Mapping[str, Any],
@@ -1042,8 +648,8 @@ class AIGuard(ServiceBase):
1042
648
  source_ip: str | None = None,
1043
649
  source_location: str | None = None,
1044
650
  tenant_id: str | None = None,
1045
- event_type: Literal["input", "output"] | None = None,
1046
- sensor_instance_id: str | None = None,
651
+ event_type: Literal["input", "output", "tool_input", "tool_output", "tool_listing"] | None = None,
652
+ collector_instance_id: str | None = None,
1047
653
  extra_info: ExtraInfo | None = None,
1048
654
  count_tokens: bool | None = None,
1049
655
  ) -> PangeaResponse[GuardResult]:
@@ -1053,7 +659,7 @@ class AIGuard(ServiceBase):
1053
659
  Analyze and redact content to avoid manipulation of the model, addition
1054
660
  of malicious content, and other undesirable data transfers.
1055
661
 
1056
- OperationId: ai_guard_post_v1beta_guard
662
+ OperationId: ai_guard_post_v1_guard
1057
663
 
1058
664
  Args:
1059
665
  input: 'messages' (required) contains Prompt content and role array
@@ -1072,12 +678,12 @@ class AIGuard(ServiceBase):
1072
678
  source_location: Location of user or app or agent.
1073
679
  tenant_id: For gateway-like integrations with multi-tenant support.
1074
680
  event_type: (AIDR) Event Type.
1075
- sensor_instance_id: (AIDR) sensor instance id.
681
+ collector_instance_id: (AIDR) collector instance id.
1076
682
  extra_info: (AIDR) Logging schema.
1077
683
  count_tokens: Provide input and output token count.
1078
684
  """
1079
685
  return self.request.post(
1080
- "v1beta/guard",
686
+ "v1/guard",
1081
687
  GuardResult,
1082
688
  data={
1083
689
  "input": input,
@@ -1095,86 +701,63 @@ class AIGuard(ServiceBase):
1095
701
  "source_location": source_location,
1096
702
  "tenant_id": tenant_id,
1097
703
  "event_type": event_type,
1098
- "sensor_instance_id": sensor_instance_id,
704
+ "collector_instance_id": collector_instance_id,
1099
705
  "extra_info": extra_info,
1100
706
  "count_tokens": count_tokens,
1101
707
  },
1102
708
  )
1103
709
 
1104
- def get_service_config(self, id: str) -> PangeaResponse[ServiceConfig]:
1105
- """
1106
- OperationId: ai_guard_post_v1beta_config
1107
- """
1108
- return self.request.post("v1beta/config", data={"id": id}, result_class=ServiceConfig)
1109
710
 
1110
- def create_service_config(
1111
- self,
1112
- name: str,
1113
- *,
1114
- id: str | None = None,
1115
- audit_data_activity: AuditDataActivityConfig | None = None,
1116
- connections: ConnectionsConfig | None = None,
1117
- recipes: Mapping[str, RecipeConfig] | None = None,
1118
- ) -> PangeaResponse[ServiceConfig]:
1119
- """
1120
- OperationId: ai_guard_post_v1beta_config_create
1121
- """
1122
- return self.request.post(
1123
- "v1beta/config/create",
1124
- data={
1125
- "name": name,
1126
- "id": id,
1127
- "audit_data_activity": audit_data_activity,
1128
- "connections": connections,
1129
- "recipes": recipes,
1130
- },
1131
- result_class=ServiceConfig,
1132
- )
711
+ def get_relevant_content(
712
+ messages: Sequence[Message | McpToolsMessage],
713
+ ) -> tuple[list[Message | McpToolsMessage], list[int]]:
714
+ """
715
+ Returns relevant messages and their indices in the original list.
1133
716
 
1134
- def update_service_config(
1135
- self,
1136
- id: str,
1137
- name: str,
1138
- *,
1139
- audit_data_activity: AuditDataActivityConfig | None = None,
1140
- connections: ConnectionsConfig | None = None,
1141
- recipes: Mapping[str, RecipeConfig] | None = None,
1142
- ) -> PangeaResponse[ServiceConfig]:
1143
- """
1144
- OperationId: ai_guard_post_v1beta_config_update
1145
- """
1146
- return self.request.post(
1147
- "v1beta/config/update",
1148
- data={
1149
- "id": id,
1150
- "name": name,
1151
- "audit_data_activity": audit_data_activity,
1152
- "connections": connections,
1153
- "recipes": recipes,
1154
- },
1155
- result_class=ServiceConfig,
1156
- )
717
+ 1, If last message is "assistant", then the relevant messages are all system
718
+ messages that come before it, plus that last assistant message.
719
+ 2. Else, find the last "assistant" message. Then the relevant messages are
720
+ all system messages that come before it, and all messages that come after
721
+ it.
722
+ """
1157
723
 
1158
- def delete_service_config(self, id: str) -> PangeaResponse[ServiceConfig]:
1159
- """
1160
- OperationId: ai_guard_post_v1beta_config_delete
1161
- """
1162
- return self.request.post("v1beta/config/delete", data={"id": id}, result_class=ServiceConfig)
724
+ if len(messages) == 0:
725
+ return [], []
1163
726
 
1164
- def list_service_configs(
1165
- self,
1166
- *,
1167
- filter: ServiceConfigFilter | None = None,
1168
- last: str | None = None,
1169
- order: Literal["asc", "desc"] | None = None,
1170
- order_by: Literal["id", "created_at", "updated_at"] | None = None,
1171
- size: int | None = None,
1172
- ) -> PangeaResponse[ServiceConfigsPage]:
1173
- """
1174
- OperationId: ai_guard_post_v1beta_config_list
1175
- """
1176
- return self.request.post(
1177
- "v1beta/config/list",
1178
- data={"filter": filter, "last": last, "order": order, "order_by": order_by, "size": size},
1179
- result_class=ServiceConfigsPage,
1180
- )
727
+ system_messages = [msg for msg in messages if msg.role == "system"]
728
+ system_indices = [i for i, msg in enumerate(messages) if msg.role == "system"]
729
+
730
+ # If the last message is assistant, then return all system messages and that
731
+ # assistant message.
732
+ if messages[-1].role == "assistant":
733
+ return system_messages + [messages[-1]], system_indices + [len(messages) - 1]
734
+
735
+ # Otherwise, work backwards until we find the last assistant message, then
736
+ # return all messages after that.
737
+ last_assistant_index = -1
738
+ for i in range(len(messages) - 1, -1, -1):
739
+ if messages[i].role == "assistant":
740
+ last_assistant_index = i
741
+ break
742
+
743
+ relevant_messages = []
744
+ indices = []
745
+ for i, msg in enumerate(messages):
746
+ if msg.role == "system" or i > last_assistant_index:
747
+ relevant_messages.append(msg)
748
+ indices.append(i)
749
+
750
+ return relevant_messages, indices
751
+
752
+
753
+ def patch_messages(
754
+ original: Sequence[Message | McpToolsMessage],
755
+ original_indices: list[int],
756
+ transformed: Sequence[PromptMessage],
757
+ ) -> list[Message | McpToolsMessage | PromptMessage]:
758
+ if len(original) == len(transformed):
759
+ return list(transformed)
760
+
761
+ return [
762
+ transformed[original_indices.index(i)] if i in original_indices else orig for i, orig in enumerate(original)
763
+ ]